This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated via 65f2959da164c4fed23b713c99842213c7ab04e1 (commit) via 1d5c2ffab0051390641628a458f657c49ea54d41 (commit) via 406054d9c9ce31990d59640aa5d3dfd49e2d1e9d (commit) via b7da3cd005ab576b55e066ffa854697f41bb6f82 (commit) via a7f23e35b596dada847205235821336e9e8d7166 (commit) via 0db006b5ea8fb6c760a4e48a5dd953b65d1895fc (commit) via d966e0e452517e1e0a29ca59b892f6685d560c78 (commit) via 2dd51f19e1d9c84fd09962ad2d134c376b46c45f (commit) via d46c341e321cb5085d5577dd5fb8ca0b3f7ba554 (commit) via 21f383e9fa9f0f0c127dbb251e08ae91da56d817 (commit) from f6b06ba4ef87d2c62fe47fe8ffe4e240b37c8fc1 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log ----------------------------------------------------------------- commit 65f2959da164c4fed23b713c99842213c7ab04e1 Author: Maxim Uvarov maxim.uvarov@linaro.org Date: Thu Sep 13 22:15:53 2018 +0300
linux-gen: do not use huge pages for internal allocations
Some linux-generic internal shared memory allocations have to be in normal pages because the required data is small. Relying on odp_sys_huge_page_size() is not really correct, because the call returns the default huge page size. But the default huge page size is defined by kernel boot parameters such as: default_hugepagesz=1G hugepagesz=1G hugepages=2 So in that case, for a small allocation, linux-gen would create a 1GB huge page. This patch introduces an internal flag to the shm function to control huge page allocation. This patch leaves room for further changes on top of it: for now, pools are still in huge pages, and for apps with a small pool this is a big overhead of unused memory. We should take the odp_sys_huge_page_size_all() call into account and find the best-sized huge pages. https://bugs.linaro.org/show_bug.cgi?id=3954
Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org
diff --git a/platform/linux-generic/include/odp_shm_internal.h b/platform/linux-generic/include/odp_shm_internal.h index a835b8f3..9827af75 100644 --- a/platform/linux-generic/include/odp_shm_internal.h +++ b/platform/linux-generic/include/odp_shm_internal.h @@ -16,8 +16,9 @@ extern "C" { #define SHM_DEVNAME_MAXLEN (ODP_SHM_NAME_LEN + 16) #define SHM_DEVNAME_FORMAT "/odp-%d-%s" /* /dev/shm/odp-<pid>-<name> */
-#define _ODP_SHM_PROC_NOCREAT 0x40 /**< Do not create shm if not exist */ -#define _ODP_SHM_O_EXCL 0x80 /**< Do not create shm if exist */ +#define _ODP_SHM_PROC_NOCREAT (1 << 6) /**< Do not create shm if not exist */ +#define _ODP_SHM_O_EXCL (1 << 7) /**< Do not create shm if exist */ +#define _ODP_SHM_NO_HP (1 << 8) /**< Do not use huge pages */
#ifdef __cplusplus } diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c index d02fb507..901b5044 100644 --- a/platform/linux-generic/odp_classification.c +++ b/platform/linux-generic/odp_classification.c @@ -11,6 +11,7 @@ #include <odp/api/queue.h> #include <odp/api/debug.h> #include <odp_init_internal.h> +#include <odp_shm_internal.h> #include <odp_debug_internal.h> #include <odp_packet_internal.h> #include <odp/api/packet_io.h> @@ -84,9 +85,10 @@ int odp_classification_init_global(void) odp_shm_t queue_grp_shm; int i;
- cos_shm = odp_shm_reserve("shm_odp_cos_tbl", + cos_shm = odp_shm_reserve("_odp_shm_odp_cos_tbl", sizeof(cos_tbl_t), - sizeof(cos_t), 0); + sizeof(cos_t), + _ODP_SHM_NO_HP);
if (cos_shm == ODP_SHM_INVALID) { ODP_ERR("shm allocation failed for shm_odp_cos_tbl"); @@ -104,9 +106,10 @@ int odp_classification_init_global(void) LOCK_INIT(&cos->s.lock); }
- pmr_shm = odp_shm_reserve("shm_odp_pmr_tbl", + pmr_shm = odp_shm_reserve("_odp_shm_odp_pmr_tbl", sizeof(pmr_tbl_t), - sizeof(pmr_t), 0); + sizeof(pmr_t), + _ODP_SHM_NO_HP);
if (pmr_shm == ODP_SHM_INVALID) { ODP_ERR("shm allocation failed for shm_odp_pmr_tbl"); @@ -124,9 +127,10 @@ int odp_classification_init_global(void) LOCK_INIT(&pmr->s.lock); }
- queue_grp_shm = odp_shm_reserve("shm_odp_cls_queue_grp_tbl", + queue_grp_shm = odp_shm_reserve("_odp_shm_cls_queue_grp_tbl", sizeof(_cls_queue_grp_tbl_t), - sizeof(queue_entry_t *), 0); + sizeof(queue_entry_t *), + _ODP_SHM_NO_HP);
if (queue_grp_shm == ODP_SHM_INVALID) { ODP_ERR("shm allocation failed for queue_grp_tbl"); @@ -153,19 +157,19 @@ int odp_classification_term_global(void) int ret = 0; int rc = 0;
- ret = odp_shm_free(odp_shm_lookup("shm_odp_cos_tbl")); + ret = odp_shm_free(odp_shm_lookup("_odp_shm_odp_cos_tbl")); if (ret < 0) { ODP_ERR("shm free failed for shm_odp_cos_tbl"); rc = -1; }
- ret = odp_shm_free(odp_shm_lookup("shm_odp_pmr_tbl")); + ret = odp_shm_free(odp_shm_lookup("_odp_shm_odp_pmr_tbl")); if (ret < 0) { ODP_ERR("shm free failed for shm_odp_pmr_tbl"); rc = -1; }
- ret = odp_shm_free(odp_shm_lookup("shm_odp_cls_queue_grp_tbl")); + ret = odp_shm_free(odp_shm_lookup("_odp_shm_cls_queue_grp_tbl")); if (ret < 0) { ODP_ERR("shm free failed for shm_odp_cls_queue_grp_tbl"); rc = -1; diff --git a/platform/linux-generic/odp_crypto_null.c b/platform/linux-generic/odp_crypto_null.c index 13aae9bc..939b2fbb 100644 --- a/platform/linux-generic/odp_crypto_null.c +++ b/platform/linux-generic/odp_crypto_null.c @@ -21,6 +21,7 @@ #include <odp/api/plat/thread_inlines.h> #include <odp_packet_internal.h> #include <odp/api/plat/queue_inlines.h> +#include <odp_shm_internal.h>
/* Inlined API functions */ #include <odp/api/plat/event_inlines.h> @@ -316,8 +317,9 @@ odp_crypto_init_global(void) mem_size = sizeof(odp_crypto_global_t);
/* Allocate our globally shared memory */ - shm = odp_shm_reserve("crypto_pool", mem_size, - ODP_CACHE_LINE_SIZE, 0); + shm = odp_shm_reserve("_odp_crypto_pool_null", mem_size, + ODP_CACHE_LINE_SIZE, + _ODP_SHM_NO_HP); if (ODP_SHM_INVALID == shm) { ODP_ERR("unable to allocate crypto pool\n"); return -1; @@ -352,9 +354,9 @@ int odp_crypto_term_global(void) rc = -1; }
- ret = odp_shm_free(odp_shm_lookup("crypto_pool")); + ret = odp_shm_free(odp_shm_lookup("_odp_crypto_pool_null")); if (ret < 0) { - ODP_ERR("shm free failed for crypto_pool\n"); + ODP_ERR("shm free failed for _odp_crypto_pool_null\n"); rc = -1; }
diff --git a/platform/linux-generic/odp_crypto_openssl.c b/platform/linux-generic/odp_crypto_openssl.c index d98c87b0..ec3e85bf 100644 --- a/platform/linux-generic/odp_crypto_openssl.c +++ b/platform/linux-generic/odp_crypto_openssl.c @@ -21,6 +21,7 @@ #include <odp/api/plat/thread_inlines.h> #include <odp_packet_internal.h> #include <odp/api/plat/queue_inlines.h> +#include <odp_shm_internal.h>
/* Inlined API functions */ #include <odp/api/plat/event_inlines.h> @@ -1856,8 +1857,9 @@ odp_crypto_init_global(void) mem_size += nlocks * sizeof(odp_ticketlock_t);
/* Allocate our globally shared memory */ - shm = odp_shm_reserve("crypto_pool", mem_size, - ODP_CACHE_LINE_SIZE, 0); + shm = odp_shm_reserve("_odp_crypto_pool_ssl", mem_size, + ODP_CACHE_LINE_SIZE, + _ODP_SHM_NO_HP); if (ODP_SHM_INVALID == shm) { ODP_ERR("unable to allocate crypto pool\n"); return -1; @@ -1903,7 +1905,7 @@ int odp_crypto_term_global(void) CRYPTO_set_locking_callback(NULL); CRYPTO_set_id_callback(NULL);
- ret = odp_shm_free(odp_shm_lookup("crypto_pool")); + ret = odp_shm_free(odp_shm_lookup("_odp_crypto_pool_ssl")); if (ret < 0) { ODP_ERR("shm free failed for crypto_pool\n"); rc = -1; diff --git a/platform/linux-generic/odp_ipsec_events.c b/platform/linux-generic/odp_ipsec_events.c index ea9ce961..c1d153c0 100644 --- a/platform/linux-generic/odp_ipsec_events.c +++ b/platform/linux-generic/odp_ipsec_events.c @@ -41,7 +41,7 @@ int _odp_ipsec_events_init_global(void) param.buf.num = IPSEC_EVENTS_POOL_BUF_COUNT; param.type = ODP_POOL_BUFFER;
- ipsec_status_pool = odp_pool_create("ipsec_status_pool", ¶m); + ipsec_status_pool = odp_pool_create("_odp_ipsec_status_pool", ¶m); if (ODP_POOL_INVALID == ipsec_status_pool) { ODP_ERR("Error: status pool create failed.\n"); goto err_status; @@ -55,16 +55,15 @@ err_status:
int _odp_ipsec_events_term_global(void) { - int ret = 0; - int rc = 0; + int ret;
ret = odp_pool_destroy(ipsec_status_pool); if (ret < 0) { ODP_ERR("status pool destroy failed"); - rc = -1; + return -1; }
- return rc; + return 0; }
ipsec_status_t _odp_ipsec_status_from_event(odp_event_t ev) diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c index 5557e314..6dd7ec71 100644 --- a/platform/linux-generic/odp_ipsec_sad.c +++ b/platform/linux-generic/odp_ipsec_sad.c @@ -14,6 +14,7 @@ #include <odp_init_internal.h> #include <odp_debug_internal.h> #include <odp_ipsec_internal.h> +#include <odp_shm_internal.h>
#include <odp/api/plat/atomic_inlines.h> #include <odp/api/plat/cpu_inlines.h> @@ -51,14 +52,14 @@ int _odp_ipsec_sad_init_global(void) odp_shm_t shm; unsigned i;
- shm = odp_shm_reserve("ipsec_sa_table", + shm = odp_shm_reserve("_odp_ipsec_sa_table", sizeof(ipsec_sa_table_t), - ODP_CACHE_LINE_SIZE, 0); - - ipsec_sa_tbl = odp_shm_addr(shm); - if (ipsec_sa_tbl == NULL) + ODP_CACHE_LINE_SIZE, + _ODP_SHM_NO_HP); + if (shm == ODP_SHM_INVALID) return -1;
+ ipsec_sa_tbl = odp_shm_addr(shm); memset(ipsec_sa_tbl, 0, sizeof(ipsec_sa_table_t)); ipsec_sa_tbl->shm = shm;
diff --git a/platform/linux-generic/odp_ishm.c b/platform/linux-generic/odp_ishm.c index 1a4ca947..cf37bd9b 100644 --- a/platform/linux-generic/odp_ishm.c +++ b/platform/linux-generic/odp_ishm.c @@ -1036,7 +1036,8 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
/* Get system page sizes: page_hp_size is 0 if no huge page available*/ page_sz = odp_sys_page_size(); - page_hp_size = odp_sys_huge_page_size(); + page_hp_size = user_flags & _ODP_SHM_NO_HP ? + 0 : odp_sys_huge_page_size();
/* grab a new entry: */ for (new_index = 0; new_index < ISHM_MAX_NB_BLOCKS; new_index++) { diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c index 91efba1a..26be844c 100644 --- a/platform/linux-generic/odp_packet_io.c +++ b/platform/linux-generic/odp_packet_io.c @@ -29,6 +29,7 @@ #include <odp/api/plat/time_inlines.h> #include <odp_pcapng.h> #include <odp/api/plat/queue_inlines.h> +#include <odp_shm_internal.h>
#include <string.h> #include <inttypes.h> @@ -65,9 +66,10 @@ int odp_pktio_init_global(void) odp_shm_t shm; int pktio_if;
- shm = odp_shm_reserve("odp_pktio_entries", + shm = odp_shm_reserve("_odp_pktio_entries", sizeof(pktio_table_t), - sizeof(pktio_entry_t), 0); + sizeof(pktio_entry_t), + _ODP_SHM_NO_HP); if (shm == ODP_SHM_INVALID) return -1;
@@ -1326,9 +1328,9 @@ int odp_pktio_term_global(void) pktio_if); }
- ret = odp_shm_free(odp_shm_lookup("odp_pktio_entries")); + ret = odp_shm_free(odp_shm_lookup("_odp_pktio_entries")); if (ret != 0) - ODP_ERR("shm free failed for odp_pktio_entries"); + ODP_ERR("shm free failed for _odp_pktio_entries");
return ret; } diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index bb6aa98b..8ae0e4e3 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -19,6 +19,7 @@ #include <odp_config_internal.h> #include <odp_debug_internal.h> #include <odp_ring_internal.h> +#include <odp_shm_internal.h> #include <odp_global_data.h>
#include <string.h> @@ -88,7 +89,8 @@ int odp_pool_init_global(void)
shm = odp_shm_reserve("_odp_pool_table", sizeof(pool_table_t), - ODP_CACHE_LINE_SIZE, 0); + ODP_CACHE_LINE_SIZE, + _ODP_SHM_NO_HP);
pool_tbl = odp_shm_addr(shm);
@@ -200,7 +202,7 @@ static pool_t *reserve_pool(uint32_t shmflags) if (pool->reserved == 0) { pool->reserved = 1; UNLOCK(&pool->lock); - sprintf(ring_name, "pool_ring_%d", i); + sprintf(ring_name, "_odp_pool_ring_%d", i); pool->ring_shm = odp_shm_reserve(ring_name, sizeof(pool_ring_t), diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c index dffe6ee9..96409b75 100644 --- a/platform/linux-generic/odp_queue_basic.c +++ b/platform/linux-generic/odp_queue_basic.c @@ -15,6 +15,7 @@ #include <odp_buffer_internal.h> #include <odp_pool_internal.h> #include <odp_init_internal.h> +#include <odp_shm_internal.h> #include <odp/api/shared_memory.h> #include <odp/api/schedule.h> #include <odp_schedule_if.h> @@ -134,13 +135,13 @@ static int queue_init_global(void)
shm = odp_shm_reserve("_odp_queue_gbl", sizeof(queue_global_t), - sizeof(queue_entry_t), 0); + sizeof(queue_entry_t), + _ODP_SHM_NO_HP); + if (shm == ODP_SHM_INVALID) + return -1;
queue_glb = odp_shm_addr(shm);
- if (queue_glb == NULL) - return -1; - memset(queue_glb, 0, sizeof(queue_global_t));
for (i = 0; i < ODP_CONFIG_QUEUES; i++) { @@ -161,7 +162,8 @@ static int queue_init_global(void) (uint64_t)queue_glb->config.max_queue_size;
shm = odp_shm_reserve("_odp_queue_rings", mem_size, - ODP_CACHE_LINE_SIZE, 0); + ODP_CACHE_LINE_SIZE, + _ODP_SHM_NO_HP);
if (shm == ODP_SHM_INVALID) { odp_shm_free(queue_glb->queue_gbl_shm); diff --git a/platform/linux-generic/odp_queue_lf.c b/platform/linux-generic/odp_queue_lf.c index a28da2c7..ac8b4226 100644 --- a/platform/linux-generic/odp_queue_lf.c +++ b/platform/linux-generic/odp_queue_lf.c @@ -9,6 +9,7 @@ #include <odp/api/plat/atomic_inlines.h> #include <odp/api/shared_memory.h> #include <odp_queue_basic_internal.h> +#include <odp_shm_internal.h> #include <string.h> #include <stdio.h>
@@ -318,8 +319,11 @@ uint32_t queue_lf_init_global(uint32_t *queue_lf_size, if (!lockfree) return 0;
- shm = odp_shm_reserve("odp_queues_lf", sizeof(queue_lf_global_t), - ODP_CACHE_LINE_SIZE, 0); + shm = odp_shm_reserve("_odp_queues_lf", sizeof(queue_lf_global_t), + ODP_CACHE_LINE_SIZE, + _ODP_SHM_NO_HP); + if (shm == ODP_SHM_INVALID) + return 0;
queue_lf_glb = odp_shm_addr(shm); memset(queue_lf_glb, 0, sizeof(queue_lf_global_t)); diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c index e9cd5366..f9f45670 100644 --- a/platform/linux-generic/odp_schedule_basic.c +++ b/platform/linux-generic/odp_schedule_basic.c @@ -29,6 +29,7 @@ #include <odp_queue_basic_internal.h> #include <odp_libconfig_internal.h> #include <odp/api/plat/queue_inlines.h> +#include <odp_shm_internal.h>
/* No synchronization context */ #define NO_SYNC_CONTEXT ODP_SCHED_SYNC_PARALLEL @@ -353,17 +354,16 @@ static int schedule_init_global(void)
ODP_DBG("Schedule init ... ");
- shm = odp_shm_reserve("odp_scheduler", + shm = odp_shm_reserve("_odp_scheduler", sizeof(sched_global_t), - ODP_CACHE_LINE_SIZE, 0); - - sched = odp_shm_addr(shm); - - if (sched == NULL) { + ODP_CACHE_LINE_SIZE, + _ODP_SHM_NO_HP); + if (shm == ODP_SHM_INVALID) { ODP_ERR("Schedule init: Shm reserve failed.\n"); return -1; }
+ sched = odp_shm_addr(shm); memset(sched, 0, sizeof(sched_global_t));
if (read_config_file(sched)) { diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c index 5345cee8..9c9139bb 100644 --- a/platform/linux-generic/odp_shared_memory.c +++ b/platform/linux-generic/odp_shared_memory.c @@ -59,7 +59,7 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align, uint32_t flags) { int block_index; - int flgs = 0; /* internal ishm flags */ + uint32_t flgs = 0; /* internal ishm flags */
flgs = get_ishm_flags(flags);
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c index a5f62ec7..4fc6acc9 100644 --- a/platform/linux-generic/odp_thread.c +++ b/platform/linux-generic/odp_thread.c @@ -54,7 +54,7 @@ int odp_thread_init_global(void) { odp_shm_t shm;
- shm = odp_shm_reserve("odp_thread_globals", + shm = odp_shm_reserve("_odp_thread_globals", sizeof(thread_globals_t), ODP_CACHE_LINE_SIZE, 0);
@@ -73,9 +73,9 @@ int odp_thread_term_global(void) { int ret;
- ret = odp_shm_free(odp_shm_lookup("odp_thread_globals")); + ret = odp_shm_free(odp_shm_lookup("_odp_thread_globals")); if (ret < 0) - ODP_ERR("shm free failed for odp_thread_globals"); + ODP_ERR("shm free failed for _odp_thread_globals");
return ret; }
commit 1d5c2ffab0051390641628a458f657c49ea54d41 Author: Maxim Uvarov maxim.uvarov@linaro.org Date: Thu Sep 13 22:36:56 2018 +0300
linux-gen: shm: do not print map error
The map error just spams the output screen and makes it impossible to see anything in the log. There is no need for this extra debug print. It is OK if memory cannot be allocated in huge pages and the reservation falls back to normal pages.
Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org
diff --git a/platform/linux-generic/odp_ishmphy.c b/platform/linux-generic/odp_ishmphy.c index e770b7bc..94a20f8f 100644 --- a/platform/linux-generic/odp_ishmphy.c +++ b/platform/linux-generic/odp_ishmphy.c @@ -147,10 +147,8 @@ void *_odp_ishmphy_map(int fd, void *start, uint64_t size, } }
- if (mapped_addr == MAP_FAILED) { - ODP_ERR("mmap failed:%s\n", strerror(errno)); + if (mapped_addr == MAP_FAILED) return NULL; - }
/* if locking is requested, lock it...*/ if (flags & _ODP_ISHM_LOCK) {
commit 406054d9c9ce31990d59640aa5d3dfd49e2d1e9d Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Sep 27 15:00:49 2018 +0300
test: build: configure option to disable test build
Test applications are not always needed. Added a configuration option to disable test application build and install. This makes build faster and install footprint smaller.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/Makefile.am b/Makefile.am index 9038203a..e4dab7e3 100644 --- a/Makefile.am +++ b/Makefile.am @@ -12,15 +12,18 @@ SUBDIRS = \ include \ $(PLATFORM_DIR) \ helper \ - test \ - $(PLATFORM_TEST_DIR) \ - helper/test \ doc
if WITH_EXAMPLES SUBDIRS += example endif
+if WITH_TESTS +SUBDIRS += test +SUBDIRS += helper/test +SUBDIRS += $(PLATFORM_TEST_DIR) +endif + @DX_RULES@
EXTRA_DIST = bootstrap CHANGELOG config/README config/odp-$(with_platform).conf diff --git a/configure.ac b/configure.ac index 99f5ef43..3b71ef51 100644 --- a/configure.ac +++ b/configure.ac @@ -399,6 +399,7 @@ AC_MSG_RESULT([ cunit: ${cunit_support} static tests linkage: ${enable_static_applications} with_examples: ${with_examples} + with_tests: ${with_tests} test_vald: ${test_vald} test_perf: ${test_perf} test_perf_proc: ${test_perf_proc} diff --git a/test/m4/configure.m4 b/test/m4/configure.m4 index dd07839f..309c18ae 100644 --- a/test/m4/configure.m4 +++ b/test/m4/configure.m4 @@ -1,3 +1,13 @@ +########################################################################## +# Build and install test applications +########################################################################## +AC_ARG_WITH([tests], + [AS_HELP_STRING([--without-tests], + [don't build and install test applications])], + [], + [with_tests=yes]) +AM_CONDITIONAL([WITH_TESTS], [test x$with_tests != xno]) + m4_include([test/m4/miscellaneous.m4]) m4_include([test/m4/performance.m4]) m4_include([test/m4/validation.m4])
commit b7da3cd005ab576b55e066ffa854697f41bb6f82 Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Sep 27 14:40:32 2018 +0300
example: build: configure option to disable example build
Example applications are not always needed. Added a configuration option to disable example application build and install. This makes build faster and install footprint smaller.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/Makefile.am b/Makefile.am index 4f3e0020..9038203a 100644 --- a/Makefile.am +++ b/Makefile.am @@ -15,8 +15,11 @@ SUBDIRS = \ test \ $(PLATFORM_TEST_DIR) \ helper/test \ - doc \ - example + doc + +if WITH_EXAMPLES +SUBDIRS += example +endif
@DX_RULES@
diff --git a/configure.ac b/configure.ac index a187fba7..99f5ef43 100644 --- a/configure.ac +++ b/configure.ac @@ -398,6 +398,7 @@ AC_MSG_RESULT([ debug: ${enable_debug} cunit: ${cunit_support} static tests linkage: ${enable_static_applications} + with_examples: ${with_examples} test_vald: ${test_vald} test_perf: ${test_perf} test_perf_proc: ${test_perf_proc} diff --git a/example/m4/configure.m4 b/example/m4/configure.m4 index cbac0914..ee4f44ba 100644 --- a/example/m4/configure.m4 +++ b/example/m4/configure.m4 @@ -1,5 +1,15 @@ ########################################################################## -# Enable/disable test-example +# Build and install example applications +########################################################################## +AC_ARG_WITH([examples], + [AS_HELP_STRING([--without-examples], + [don't build and install example applications])], + [], + [with_examples=yes]) +AM_CONDITIONAL([WITH_EXAMPLES], [test x$with_examples != xno]) + +########################################################################## +# Test examples during 'make check' ########################################################################## AC_ARG_ENABLE([test-example], [AS_HELP_STRING([--enable-test-example], [run basic test against examples])],
commit a7f23e35b596dada847205235821336e9e8d7166 Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 4 16:13:30 2018 +0300
test: sched_pktio: add pipeline queue size option
Added option to set pipeline queue size. Default is 256 to avoid large buffering in pipeline queues.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c index d0682527..91771e87 100644 --- a/test/performance/odp_sched_pktio.c +++ b/test/performance/odp_sched_pktio.c @@ -22,7 +22,7 @@ #define MAX_PIPE_STAGES 64 #define MAX_PIPE_QUEUES 1024 #define MAX_PKT_LEN 1514 -#define MAX_PKT_NUM (16 * 1024) +#define MAX_PKT_NUM (128 * 1024) #define MIN_PKT_SEG_LEN 64 #define BURST_SIZE 32 #define CHECK_PERIOD 10000 @@ -40,6 +40,7 @@ typedef struct test_options_t { int num_pktio_queue; int pipe_stages; int pipe_queues; + uint32_t pipe_queue_size; uint8_t collect_stat; char pktio_name[MAX_PKTIOS][MAX_PKTIO_NAME + 1];
@@ -536,6 +537,7 @@ static void print_usage(const char *progname) " -t, --timeout <number> Flow inactivity timeout (in usec) per packet. Default: 0 (don't use timers)\n" " --pipe-stages <number> Number of pipeline stages per interface\n" " --pipe-queues <number> Number of queues per pipeline stage\n" + " --pipe-queue-size <num> Number of events a pipeline queue must be able to store. Default 256.\n" " -m, --sched_mode <mode> Scheduler synchronization mode for all queues. 1: parallel, 2: atomic, 3: ordered. Default: 2\n" " -s, --stat Collect statistics.\n" " -h, --help Display help and exit.\n\n", @@ -555,6 +557,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) {"sched_mode", required_argument, NULL, 'm'}, {"pipe-stages", required_argument, NULL, 0}, {"pipe-queues", required_argument, NULL, 1}, + {"pipe-queue-size", required_argument, NULL, 2}, {"stat", no_argument, NULL, 's'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0} @@ -567,6 +570,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) test_options->sched_mode = SCHED_MODE_ATOMIC; test_options->num_worker = 1; test_options->num_pktio_queue = 0; + test_options->pipe_queue_size = 256;
/* let helper collect its own arguments (e.g. --odph_proc) */ argc = odph_parse_options(argc, argv); @@ -584,6 +588,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) case 1: test_options->pipe_queues = atoi(optarg); break; + case 2: + test_options->pipe_queue_size = atoi(optarg); + break; case 'i': i = 0; str = optarg; @@ -721,8 +728,10 @@ static int config_setup(test_global_t *test_global) if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len) pkt_len = pool_capa.pkt.max_len;
- if (pool_capa.pkt.max_num && pkt_num > pool_capa.pkt.max_num) + if (pool_capa.pkt.max_num && pkt_num > pool_capa.pkt.max_num) { pkt_num = pool_capa.pkt.max_num; + printf("Warning: Pool size rounded down to %u\n", pkt_num); + }
test_global->pkt_len = pkt_len; test_global->pkt_num = pkt_num; @@ -1093,9 +1102,15 @@ static int create_pipeline_queues(test_global_t *test_global) int i, j, k, num_pktio, stages, queues, ctx_size; pipe_queue_context_t *ctx; odp_queue_param_t queue_param; + odp_queue_capability_t queue_capa; odp_schedule_sync_t sched_sync; int ret = 0;
+ if (odp_queue_capability(&queue_capa)) { + printf("Error: Queue capability failed\n"); + return -1; + } + num_pktio = test_global->opt.num_pktio; stages = test_global->opt.pipe_stages; queues = test_global->opt.pipe_queues; @@ -1107,6 +1122,14 @@ static int create_pipeline_queues(test_global_t *test_global) queue_param.sched.sync = sched_sync; queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.size = test_global->opt.pipe_queue_size; + if (queue_capa.sched.max_size && + queue_param.size > queue_capa.sched.max_size) { + printf("Error: Pipeline queue max size is %u\n", + queue_capa.sched.max_size); + return -1; + } + ctx_size = sizeof(pipe_queue_context_t);
for (i = 0; i < stages; i++) {
commit 0db006b5ea8fb6c760a4e48a5dd953b65d1895fc Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 4 11:34:09 2018 +0300
test: sched_pktio: add scheduler sync mode option
Added option to select scheduler synchronization mode (parallel, atomic, ordered) for all queues.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c index 1ae73f92..d0682527 100644 --- a/test/performance/odp_sched_pktio.c +++ b/test/performance/odp_sched_pktio.c @@ -28,9 +28,13 @@ #define CHECK_PERIOD 10000 #define TEST_PASSED_LIMIT 5000 #define TIMEOUT_OFFSET_NS 1000000 +#define SCHED_MODE_PARAL 1 +#define SCHED_MODE_ATOMIC 2 +#define SCHED_MODE_ORDER 3
typedef struct test_options_t { long int timeout_us; + int sched_mode; int num_worker; int num_pktio; int num_pktio_queue; @@ -532,6 +536,7 @@ static void print_usage(const char *progname) " -t, --timeout <number> Flow inactivity timeout (in usec) per packet. Default: 0 (don't use timers)\n" " --pipe-stages <number> Number of pipeline stages per interface\n" " --pipe-queues <number> Number of queues per pipeline stage\n" + " -m, --sched_mode <mode> Scheduler synchronization mode for all queues. 1: parallel, 2: atomic, 3: ordered. Default: 2\n" " -s, --stat Collect statistics.\n" " -h, --help Display help and exit.\n\n", NO_PATH(progname)); @@ -541,23 +546,25 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) { int i, opt, long_index; char *name, *str; - int len, str_len; + int len, str_len, sched_mode; const struct option longopts[] = { {"interface", required_argument, NULL, 'i'}, {"num_cpu", required_argument, NULL, 'c'}, {"num_queue", required_argument, NULL, 'q'}, {"timeout", required_argument, NULL, 't'}, + {"sched_mode", required_argument, NULL, 'm'}, {"pipe-stages", required_argument, NULL, 0}, {"pipe-queues", required_argument, NULL, 1}, {"stat", no_argument, NULL, 's'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0} }; - const char *shortopts = "+i:c:q:t:sh"; + const char *shortopts = "+i:c:q:t:m:sh"; int ret = 0;
memset(test_options, 0, sizeof(test_options_t));
+ test_options->sched_mode = SCHED_MODE_ATOMIC; test_options->num_worker = 1; test_options->num_pktio_queue = 0;
@@ -617,6 +624,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) case 't': test_options->timeout_us = atol(optarg); break; + case 'm': + test_options->sched_mode = atoi(optarg); + break; case 's': test_options->collect_stat = 1; break; @@ -650,12 +660,34 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) ret = -1; }
+ sched_mode = test_options->sched_mode; + if (sched_mode != SCHED_MODE_PARAL && + sched_mode != SCHED_MODE_ATOMIC && + sched_mode != SCHED_MODE_ORDER) { + printf("Error: Bad scheduler mode: %i\n", sched_mode); + ret = -1; + } + if (test_options->num_pktio_queue == 0) test_options->num_pktio_queue = test_options->num_worker;
return ret; }
+static odp_schedule_sync_t sched_sync_mode(test_global_t *test_global) +{ + switch (test_global->opt.sched_mode) { + case SCHED_MODE_PARAL: + return ODP_SCHED_SYNC_PARALLEL; + case SCHED_MODE_ATOMIC: + return ODP_SCHED_SYNC_ATOMIC; + case SCHED_MODE_ORDER: + return ODP_SCHED_SYNC_ORDERED; + default: + return -1; + } +} + static int config_setup(test_global_t *test_global) { int i, cpu; @@ -820,7 +852,7 @@ static int open_pktios(test_global_t *test_global) pktio_param.in_mode = ODP_PKTIN_MODE_SCHED; pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
- sched_sync = ODP_SCHED_SYNC_ATOMIC; + sched_sync = sched_sync_mode(test_global);
for (i = 0; i < num_pktio; i++) test_global->pktio[i].pktio = ODP_PKTIO_INVALID; @@ -1067,7 +1099,7 @@ static int create_pipeline_queues(test_global_t *test_global) num_pktio = test_global->opt.num_pktio; stages = test_global->opt.pipe_stages; queues = test_global->opt.pipe_queues; - sched_sync = ODP_SCHED_SYNC_ATOMIC; + sched_sync = sched_sync_mode(test_global);
odp_queue_param_init(&queue_param); queue_param.type = ODP_QUEUE_TYPE_SCHED;
commit d966e0e452517e1e0a29ca59b892f6685d560c78 Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 3 14:41:29 2018 +0300
test: sched_pktio: add queue pipeline options
Added options to test performance when received packets are pushed through a set of queues (pipeline stages) before packet output.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c index d81994cd..1ae73f92 100644 --- a/test/performance/odp_sched_pktio.c +++ b/test/performance/odp_sched_pktio.c @@ -19,6 +19,8 @@ #define MAX_PKTIOS 32 #define MAX_PKTIO_NAME 31 #define MAX_PKTIO_QUEUES MAX_WORKERS +#define MAX_PIPE_STAGES 64 +#define MAX_PIPE_QUEUES 1024 #define MAX_PKT_LEN 1514 #define MAX_PKT_NUM (16 * 1024) #define MIN_PKT_SEG_LEN 64 @@ -32,6 +34,8 @@ typedef struct test_options_t { int num_worker; int num_pktio; int num_pktio_queue; + int pipe_stages; + int pipe_queues; uint8_t collect_stat; char pktio_name[MAX_PKTIOS][MAX_PKTIO_NAME + 1];
@@ -45,16 +49,29 @@ typedef struct { typedef struct ODP_ALIGNED_CACHE { uint64_t rx_pkt; uint64_t tx_pkt; + uint64_t pipe_pkt; + uint64_t tx_drop; + uint64_t pipe_drop; uint64_t tmo; } worker_stat_t;
-typedef struct queue_context_t { +typedef struct pktin_queue_context_t { + /* Queue context must start with stage and idx */ + uint16_t stage; + uint16_t queue_idx; + + uint8_t dst_pktio; + uint8_t dst_queue; + uint8_t src_pktio; + uint8_t src_queue; odp_pktout_queue_t dst_pktout; - uint8_t dst_pktio; - uint8_t dst_queue; - uint8_t src_pktio; - uint8_t src_queue; -} queue_context_t; +} pktin_queue_context_t; + +typedef struct pipe_queue_context_t { + /* Queue context must start with stage and idx. */ + uint16_t stage; + uint16_t queue_idx; +} pipe_queue_context_t;
typedef struct { volatile int stop_workers; @@ -79,7 +96,7 @@ typedef struct { odph_ethaddr_t my_addr; odp_queue_t input_queue[MAX_PKTIO_QUEUES]; odp_pktout_queue_t pktout[MAX_PKTIO_QUEUES]; - queue_context_t queue_context[MAX_PKTIO_QUEUES]; + pktin_queue_context_t queue_context[MAX_PKTIO_QUEUES];
} pktio[MAX_PKTIOS];
@@ -91,6 +108,14 @@ typedef struct {
} timer;
+ struct { + odp_queue_t queue[MAX_PIPE_QUEUES]; + } pipe_queue[MAX_PKTIOS][MAX_PIPE_STAGES]; + + struct { + pipe_queue_context_t ctx; + } pipe_queue_ctx[MAX_PIPE_STAGES][MAX_PIPE_QUEUES]; + worker_arg_t worker_arg[MAX_WORKERS];
worker_stat_t worker_stat[MAX_WORKERS]; @@ -125,13 +150,38 @@ static inline void fill_eth_addr(odp_packet_t pkt[], int num, } }
-static int worker_thread(void *arg) +static inline void send_packets(test_global_t *test_global, + odp_packet_t pkt[], int num_pkt, + int output, odp_pktout_queue_t pktout, + int worker_id) +{ + int sent, drop; + + fill_eth_addr(pkt, num_pkt, test_global, output); + + sent = odp_pktout_send(pktout, pkt, num_pkt); + + if (odp_unlikely(sent < 0)) + sent = 0; + + drop = num_pkt - sent; + + if (odp_unlikely(drop)) + odp_packet_free_multi(&pkt[sent], drop); + + if (odp_unlikely(test_global->opt.collect_stat)) { + test_global->worker_stat[worker_id].tx_pkt += sent; + test_global->worker_stat[worker_id].tx_drop += drop; + } +} + +static int worker_thread_direct(void *arg) { odp_event_t ev[BURST_SIZE]; - int num_pkt, sent, drop, out; + int num_pkt, out; odp_pktout_queue_t pktout; odp_queue_t queue; - queue_context_t *queue_context; + pktin_queue_context_t *queue_context; worker_arg_t *worker_arg = arg; test_global_t *test_global = worker_arg->test_global_ptr; int worker_id = worker_arg->worker_id; @@ -174,22 +224,167 @@ static int worker_thread(void *arg) pktout = queue_context->dst_pktout; out = queue_context->dst_pktio;
- fill_eth_addr(pkt, num_pkt, test_global, out); + send_packets(test_global, pkt, num_pkt, out, pktout, worker_id); + + if (odp_unlikely(test_global->opt.collect_stat)) + test_global->worker_stat[worker_id].rx_pkt += num_pkt; + } + + printf("Worker %i stopped\n", worker_id); + + return 0; +} + +static inline void enqueue_events(odp_queue_t dst_queue, odp_event_t ev[], + int num, int worker_id) +{ + int sent, drop;
- sent = odp_pktout_send(pktout, pkt, num_pkt); + sent = odp_queue_enq_multi(dst_queue, ev, num);
- if (odp_unlikely(sent < 0)) - sent = 0; + if (odp_unlikely(sent < 0)) + sent = 0;
- drop = num_pkt - sent; + drop = num - sent;
- if (odp_unlikely(drop)) - odp_packet_free_multi(&pkt[sent], drop); + if (odp_unlikely(drop)) + odp_event_free_multi(&ev[sent], drop);
- if (odp_unlikely(test_global->opt.collect_stat)) { - test_global->worker_stat[worker_id].rx_pkt += num_pkt; - test_global->worker_stat[worker_id].tx_pkt += sent; + if (odp_unlikely(test_global->opt.collect_stat)) + test_global->worker_stat[worker_id].pipe_drop += drop; +} + +static inline odp_queue_t next_queue(test_global_t *test_global, int input, + uint16_t stage, uint16_t queue_idx) +{ + return test_global->pipe_queue[input][stage].queue[queue_idx]; +} + +static int worker_thread_pipeline(void *arg) +{ + odp_event_t ev[BURST_SIZE]; + int i, num_pkt, input, output, output_queue; + odp_queue_t queue, dst_queue; + odp_pktout_queue_t pktout; + pipe_queue_context_t *pipe_context; + uint16_t stage, queue_idx; + worker_arg_t *worker_arg = arg; + test_global_t *test_global = worker_arg->test_global_ptr; + int worker_id = worker_arg->worker_id; + int pipe_stages = test_global->opt.pipe_stages; + int pipe_queues = test_global->opt.pipe_queues; + int num_pktio = test_global->opt.num_pktio; + int num_pktio_queue = test_global->opt.num_pktio_queue; + uint32_t polls = 0; + + printf("Worker %i started\n", worker_id); + + /* Wait for other workers to start */ + odp_barrier_wait(&test_global->worker_start); + + while (1) { + odp_packet_t pkt[BURST_SIZE]; + + num_pkt = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, + ev, BURST_SIZE); + + polls++; + + if (polls == CHECK_PERIOD) { + polls = 0; + if (test_global->stop_workers) + break; } + + if (num_pkt <= 0) + continue; + + pipe_context = odp_queue_context(queue); + stage = pipe_context->stage; + queue_idx = pipe_context->queue_idx; + + /* A queue is connected to a single input interface. All + * packets from a queue are from the same interface. 
*/ + input = odp_packet_input_index(odp_packet_from_event(ev[0])); + + if (DEBUG_PRINT) + printf("worker %i: stage %u, idx %u, %i packets\n", + worker_id, stage, queue_idx, num_pkt); + + if (stage == 0) { + if (odp_unlikely(test_global->opt.collect_stat)) + test_global->worker_stat[worker_id].rx_pkt += + num_pkt; + + /* The first stage (packet input). Forward packet flows + * into first pipeline queues. */ + if (pipe_queues > num_pktio_queue) { + /* More pipeline queues than input queues. + * Use flow hash to spread flows into pipeline + * queues. */ + odp_packet_t p; + worker_stat_t *stat; + uint32_t hash; + uint16_t idx; + int drop = 0; + + stat = &test_global->worker_stat[worker_id]; + + for (i = 0; i < num_pkt; i++) { + p = odp_packet_from_event(ev[i]); + hash = odp_packet_flow_hash(p); + idx = queue_idx; + + if (odp_packet_has_flow_hash(p)) + idx = hash % pipe_queues; + + dst_queue = next_queue(test_global, + input, stage, + idx); + + if (odp_queue_enq(dst_queue, ev[i])) { + odp_event_free(ev[i]); + drop++; + } + } + + if (odp_unlikely(test_global->opt.collect_stat)) + stat->pipe_drop += drop; + } else { + queue_idx = queue_idx % pipe_queues; + dst_queue = next_queue(test_global, input, + stage, queue_idx); + + enqueue_events(dst_queue, ev, num_pkt, + worker_id); + } + continue; + } + + if (stage < pipe_stages) { + /* Middle stages */ + dst_queue = next_queue(test_global, input, stage, + queue_idx); + enqueue_events(dst_queue, ev, num_pkt, worker_id); + + if (odp_unlikely(test_global->opt.collect_stat)) + test_global->worker_stat[worker_id].pipe_pkt += + num_pkt; + + continue; + } + + /* The last stage, send packets out */ + odp_packet_from_event_multi(pkt, ev, num_pkt); + + /* If single interface loopback, otherwise forward to the next + * interface. 
*/ + output = (input + 1) % num_pktio; + output_queue = queue_idx % num_pktio_queue; + pktout = test_global->pktio[output].pktout[output_queue]; + + send_packets(test_global, pkt, num_pkt, output, pktout, + worker_id); }
printf("Worker %i stopped\n", worker_id); @@ -200,10 +395,10 @@ static int worker_thread(void *arg) static int worker_thread_timers(void *arg) { odp_event_t ev[BURST_SIZE]; - int num, num_pkt, sent, drop, out, tmos, i, src_pktio, src_queue; + int num, num_pkt, out, tmos, i, src_pktio, src_queue; odp_pktout_queue_t pktout; odp_queue_t queue; - queue_context_t *queue_context; + pktin_queue_context_t *queue_context; odp_timer_t timer; odp_timer_set_t ret; worker_arg_t *worker_arg = arg; @@ -299,22 +494,10 @@ static int worker_thread_timers(void *arg) pktout = queue_context->dst_pktout; out = queue_context->dst_pktio;
- fill_eth_addr(pkt, num_pkt, test_global, out); - - sent = odp_pktout_send(pktout, pkt, num_pkt); - - if (odp_unlikely(sent < 0)) - sent = 0; - - drop = num_pkt - sent; - - if (odp_unlikely(drop)) - odp_packet_free_multi(&pkt[sent], drop); + send_packets(test_global, pkt, num_pkt, out, pktout, worker_id);
- if (odp_unlikely(test_global->opt.collect_stat)) { + if (odp_unlikely(test_global->opt.collect_stat)) test_global->worker_stat[worker_id].rx_pkt += num_pkt; - test_global->worker_stat[worker_id].tx_pkt += sent; - } }
printf("Worker %i stopped\n", worker_id); @@ -343,12 +526,14 @@ static void print_usage(const char *progname) "Usage: %s [options]\n" "\n" "OPTIONS:\n" - " -i, --interface <name> Packet IO interfaces (comma-separated, no spaces)\n" - " -c, --num_cpu <number> Worker thread count. Default: 1\n" - " -q, --num_queue <number> Number of pktio queues. Default: Worker thread count\n" - " -t, --timeout <number> Flow inactivity timeout (in usec) per packet. Default: 0 (don't use timers)\n" - " -s, --stat Collect statistics.\n" - " -h, --help Display help and exit.\n\n", + " -i, --interface <name> Packet IO interfaces (comma-separated, no spaces)\n" + " -c, --num_cpu <number> Worker thread count. Default: 1\n" + " -q, --num_queue <number> Number of pktio queues. Default: Worker thread count\n" + " -t, --timeout <number> Flow inactivity timeout (in usec) per packet. Default: 0 (don't use timers)\n" + " --pipe-stages <number> Number of pipeline stages per interface\n" + " --pipe-queues <number> Number of queues per pipeline stage\n" + " -s, --stat Collect statistics.\n" + " -h, --help Display help and exit.\n\n", NO_PATH(progname)); }
@@ -358,12 +543,14 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) char *name, *str; int len, str_len; const struct option longopts[] = { - {"interface", required_argument, NULL, 'i'}, - {"num_cpu", required_argument, NULL, 'c'}, - {"num_queue", required_argument, NULL, 'q'}, - {"timeout", required_argument, NULL, 't'}, - {"stat", no_argument, NULL, 's'}, - {"help", no_argument, NULL, 'h'}, + {"interface", required_argument, NULL, 'i'}, + {"num_cpu", required_argument, NULL, 'c'}, + {"num_queue", required_argument, NULL, 'q'}, + {"timeout", required_argument, NULL, 't'}, + {"pipe-stages", required_argument, NULL, 0}, + {"pipe-queues", required_argument, NULL, 1}, + {"stat", no_argument, NULL, 's'}, + {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0} }; const char *shortopts = "+i:c:q:t:sh"; @@ -384,6 +571,12 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) break; /* No more options */
switch (opt) { + case 0: + test_options->pipe_stages = atoi(optarg); + break; + case 1: + test_options->pipe_queues = atoi(optarg); + break; case 'i': i = 0; str = optarg; @@ -437,6 +630,26 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) } }
+ if (test_options->timeout_us && test_options->pipe_stages) { + printf("Error: Cannot run timeout and pipeline tests simultaneously\n"); + ret = -1; + } + + if (test_options->pipe_stages > MAX_PIPE_STAGES) { + printf("Error: Too many pipeline stages\n"); + ret = -1; + } + + if (test_options->pipe_queues > MAX_PIPE_QUEUES) { + printf("Error: Too many queues per pipeline stage\n"); + ret = -1; + } + + if (test_options->num_pktio == 0) { + printf("Error: At least one pktio interface needed.\n"); + ret = -1; + } + if (test_options->num_pktio_queue == 0) test_options->num_pktio_queue = test_options->num_worker;
@@ -465,11 +678,6 @@ static int config_setup(test_global_t *test_global) cpu = odp_cpumask_next(cpumask, cpu); }
- if (test_global->opt.num_pktio == 0) { - printf("Error: At least one pktio interface needed.\n"); - return -1; - } - if (odp_pool_capability(&pool_capa)) { printf("Error: Pool capability failed.\n"); return -1; @@ -531,34 +739,40 @@ static void print_config(test_global_t *test_global) static void print_stat(test_global_t *test_global, uint64_t nsec) { int i; - uint64_t rx, tx, drop, tmo; + uint64_t rx, tx, pipe, drop, tmo; uint64_t rx_sum = 0; uint64_t tx_sum = 0; + uint64_t pipe_sum = 0; uint64_t tmo_sum = 0; double sec = 0.0;
printf("\nTest statistics\n"); - printf(" worker rx_pkt tx_pkt dropped tmo\n"); + printf(" worker rx_pkt tx_pkt pipe dropped tmo\n");
for (i = 0; i < test_global->opt.num_worker; i++) { rx = test_global->worker_stat[i].rx_pkt; tx = test_global->worker_stat[i].tx_pkt; + pipe = test_global->worker_stat[i].pipe_pkt; tmo = test_global->worker_stat[i].tmo; rx_sum += rx; tx_sum += tx; + pipe_sum += pipe; tmo_sum += tmo; + drop = test_global->worker_stat[i].tx_drop + + test_global->worker_stat[i].pipe_drop;
printf(" %6i %16" PRIu64 " %16" PRIu64 " %16" PRIu64 " %16" - PRIu64 "\n", i, rx, tx, rx - tx, tmo); + PRIu64 " %16" PRIu64 "\n", i, rx, tx, pipe, drop, tmo); }
test_global->rx_pkt_sum = rx_sum; test_global->tx_pkt_sum = tx_sum; drop = rx_sum - tx_sum;
- printf(" -------------------------------------------------------------------\n"); + printf(" ------------------------------------------------------------------------------------\n"); printf(" total %16" PRIu64 " %16" PRIu64 " %16" PRIu64 " %16" - PRIu64 "\n\n", rx_sum, tx_sum, drop, tmo_sum); + PRIu64 " %16" PRIu64 "\n\n", rx_sum, tx_sum, pipe_sum, drop, + tmo_sum);
sec = nsec / 1000000000.0; printf(" Total test time: %.2f sec\n", sec); @@ -684,7 +898,7 @@ static int open_pktios(test_global_t *test_global) for (j = 0; j < num_queue; j++) { odp_queue_t queue; void *ctx; - uint32_t len = sizeof(queue_context_t); + uint32_t len = sizeof(pktin_queue_context_t);
queue = test_global->pktio[i].input_queue[j]; ctx = &test_global->pktio[i].queue_context[j]; @@ -700,6 +914,9 @@ static int open_pktios(test_global_t *test_global) pktout_param.num_queues = num_queue; pktout_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ if (test_global->opt.pipe_stages) + pktout_param.op_mode = ODP_PKTIO_OP_MT; + if (odp_pktout_queue_config(pktio, &pktout_param)) { printf("Error (%s): Pktout config failed.\n", name); return -1; @@ -722,7 +939,7 @@ static void link_pktios(test_global_t *test_global) int i, num_pktio, input, output; int num_queue; odp_pktout_queue_t pktout; - queue_context_t *ctx; + pktin_queue_context_t *ctx;
num_pktio = test_global->opt.num_pktio; num_queue = test_global->opt.num_pktio_queue; @@ -738,6 +955,8 @@ static void link_pktios(test_global_t *test_global) for (i = 0; i < num_queue; i++) { ctx = &test_global->pktio[input].queue_context[i]; pktout = test_global->pktio[output].pktout[i]; + ctx->stage = 0; + ctx->queue_idx = i; ctx->dst_pktout = pktout; ctx->dst_pktio = output; ctx->dst_queue = i; @@ -837,6 +1056,97 @@ static int close_pktios(test_global_t *test_global) return ret; }
+static int create_pipeline_queues(test_global_t *test_global) +{ + int i, j, k, num_pktio, stages, queues, ctx_size; + pipe_queue_context_t *ctx; + odp_queue_param_t queue_param; + odp_schedule_sync_t sched_sync; + int ret = 0; + + num_pktio = test_global->opt.num_pktio; + stages = test_global->opt.pipe_stages; + queues = test_global->opt.pipe_queues; + sched_sync = ODP_SCHED_SYNC_ATOMIC; + + odp_queue_param_init(&queue_param); + queue_param.type = ODP_QUEUE_TYPE_SCHED; + queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT; + queue_param.sched.sync = sched_sync; + queue_param.sched.group = ODP_SCHED_GROUP_ALL; + + ctx_size = sizeof(pipe_queue_context_t); + + for (i = 0; i < stages; i++) { + for (j = 0; j < queues; j++) { + ctx = &test_global->pipe_queue_ctx[i][j].ctx; + + /* packet input is stage 0 */ + ctx->stage = i + 1; + ctx->queue_idx = j; + } + } + + for (k = 0; k < num_pktio; k++) { + for (i = 0; i < stages; i++) { + for (j = 0; j < queues; j++) { + odp_queue_t q; + + q = odp_queue_create(NULL, &queue_param); + test_global->pipe_queue[k][i].queue[j] = q; + + if (q == ODP_QUEUE_INVALID) { + printf("Error: Queue create failed [%i] %i/%i\n", + k, i, j); + ret = -1; + break; + } + + ctx = &test_global->pipe_queue_ctx[i][j].ctx; + + if (odp_queue_context_set(q, ctx, ctx_size)) { + printf("Error: Queue ctx set failed [%i] %i/%i\n", + k, i, j); + ret = -1; + break; + } + } + } + } + + return ret; +} + +static void destroy_pipeline_queues(test_global_t *test_global) +{ + int i, j, k, num_pktio, stages, queues; + odp_queue_t queue; + + num_pktio = test_global->opt.num_pktio; + stages = test_global->opt.pipe_stages; + queues = test_global->opt.pipe_queues; + + for (k = 0; k < num_pktio; k++) { + for (i = 0; i < stages; i++) { + for (j = 0; j < queues; j++) { + queue = test_global->pipe_queue[k][i].queue[j]; + + if (queue == ODP_QUEUE_INVALID) { + printf("Error: Bad queue handle [%i] %i/%i\n", + k, i, j); + return; + } + + if (odp_queue_destroy(queue)) { + printf("Error: 
Queue destroy failed [%i] %i/%i\n", + k, i, j); + return; + } + } + } + } +} + static int create_timers(test_global_t *test_global) { int num_timer, num_pktio, num_queue, i, j; @@ -1021,8 +1331,10 @@ static void start_workers(odph_odpthread_t thread[],
if (test_global->opt.timeout_us) param.start = worker_thread_timers; + else if (test_global->opt.pipe_stages) + param.start = worker_thread_pipeline; else - param.start = worker_thread; + param.start = worker_thread_direct;
param.thr_type = ODP_THREAD_WORKER; param.instance = test_global->instance; @@ -1115,6 +1427,9 @@ int main(int argc, char *argv[])
link_pktios(test_global);
+ if (create_pipeline_queues(test_global)) + goto quit; + if (create_timers(test_global)) goto quit;
@@ -1145,6 +1460,7 @@ quit: stop_pktios(test_global); empty_queues(); close_pktios(test_global); + destroy_pipeline_queues(test_global); destroy_timers(test_global);
if (test_global->opt.collect_stat) {
commit 2dd51f19e1d9c84fd09962ad2d134c376b46c45f Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 4 14:23:35 2018 +0300
linux-gen: queue: use queue size parameter
Don't round small queue sizes up to the default, but to the minimum queue size. An application may need to set a small size on some queues.
A large queue size means a large buffering capacity. E.g. when large queues are served with a low priority, the service level of higher priority queues may suffer, as many/most events of a shared pool may be stored in low priority queues (while the pool is empty).
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c index 9eb3c79e..dffe6ee9 100644 --- a/platform/linux-generic/odp_queue_basic.c +++ b/platform/linux-generic/odp_queue_basic.c @@ -38,7 +38,7 @@ #include <string.h> #include <inttypes.h>
-#define MIN_QUEUE_SIZE 8 +#define MIN_QUEUE_SIZE 32 #define MAX_QUEUE_SIZE (1 * 1024 * 1024)
static int queue_init(queue_entry_t *queue, const char *name, @@ -816,11 +816,12 @@ static int queue_init(queue_entry_t *queue, const char *name, queue->s.pktin = PKTIN_INVALID; queue->s.pktout = PKTOUT_INVALID;
- /* Use default size for all small queues to quarantee performance - * level. */ - queue_size = queue_glb->config.default_queue_size; - if (param->size > queue_glb->config.default_queue_size) - queue_size = param->size; + queue_size = param->size; + if (queue_size == 0) + queue_size = queue_glb->config.default_queue_size; + + if (queue_size < MIN_QUEUE_SIZE) + queue_size = MIN_QUEUE_SIZE;
/* Round up if not already a power of two */ queue_size = ROUNDUP_POWER2_U32(queue_size);
commit d46c341e321cb5085d5577dd5fb8ca0b3f7ba554 Author: Petri Savolainen petri.savolainen@linaro.org Date: Fri Oct 5 11:07:41 2018 +0300
validation: sched: add queue size test
Added test case which uses small queue size for scheduled queues.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c index 0e7e6c57..a4e8e218 100644 --- a/test/validation/api/scheduler/scheduler.c +++ b/test/validation/api/scheduler/scheduler.c @@ -21,6 +21,8 @@ #define NUM_BUFS_BEFORE_PAUSE 10 #define NUM_GROUPS 2
+#define TEST_QUEUE_SIZE_NUM_EV 50 + #define GLOBALS_SHM_NAME "test_globals" #define MSG_POOL_NAME "msg_pool" #define QUEUE_CTX_POOL_NAME "queue_ctx_pool" @@ -277,6 +279,91 @@ static void scheduler_test_queue_destroy(void) CU_ASSERT_FATAL(odp_pool_destroy(p) == 0); }
+static void scheduler_test_queue_size(void) +{ + odp_queue_capability_t queue_capa; + odp_pool_t pool; + odp_pool_param_t pool_param; + odp_queue_param_t queue_param; + odp_queue_t queue, from; + odp_event_t ev; + odp_buffer_t buf; + uint32_t i, j, queue_size, num; + int ret; + odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL, + ODP_SCHED_SYNC_ATOMIC, + ODP_SCHED_SYNC_ORDERED}; + + CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0); + queue_size = TEST_QUEUE_SIZE_NUM_EV; + if (queue_capa.sched.max_size && + queue_size > queue_capa.sched.max_size) + queue_size = queue_capa.sched.max_size; + + odp_pool_param_init(&pool_param); + pool_param.buf.size = 100; + pool_param.buf.align = 0; + pool_param.buf.num = TEST_QUEUE_SIZE_NUM_EV; + pool_param.type = ODP_POOL_BUFFER; + + pool = odp_pool_create("test_queue_size", &pool_param); + + CU_ASSERT_FATAL(pool != ODP_POOL_INVALID); + + for (i = 0; i < 3; i++) { + /* Ensure that scheduler is empty */ + for (j = 0; j < 10;) { + ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT); + CU_ASSERT(ev == ODP_EVENT_INVALID); + + if (ev != ODP_EVENT_INVALID) + odp_event_free(ev); + else + j++; + } + + odp_queue_param_init(&queue_param); + queue_param.type = ODP_QUEUE_TYPE_SCHED; + queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT; + queue_param.sched.sync = sync[i]; + queue_param.sched.group = ODP_SCHED_GROUP_ALL; + queue_param.size = queue_size; + + queue = odp_queue_create("test_queue_size", &queue_param); + + CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID); + + for (j = 0; j < queue_size; j++) { + buf = odp_buffer_alloc(pool); + CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID); + + ev = odp_buffer_to_event(buf); + ret = odp_queue_enq(queue, ev); + CU_ASSERT(ret == 0); + + if (ret) + odp_event_free(ev); + } + + num = 0; + for (j = 0; j < 100 * TEST_QUEUE_SIZE_NUM_EV; j++) { + ev = odp_schedule(&from, ODP_SCHED_NO_WAIT); + + if (ev == ODP_EVENT_INVALID) + continue; + + CU_ASSERT(from == queue); + odp_event_free(ev); + num++; + } + + CU_ASSERT(num 
== queue_size); + CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0); + } + + CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0); +} + static void scheduler_test_groups(void) { odp_pool_t p; @@ -1745,6 +1832,7 @@ odp_testinfo_t scheduler_suite[] = { ODP_TEST_INFO(scheduler_test_wait_time), ODP_TEST_INFO(scheduler_test_num_prio), ODP_TEST_INFO(scheduler_test_queue_destroy), + ODP_TEST_INFO(scheduler_test_queue_size), ODP_TEST_INFO(scheduler_test_groups), ODP_TEST_INFO(scheduler_test_pause_resume), ODP_TEST_INFO(scheduler_test_ordered_lock),
commit 21f383e9fa9f0f0c127dbb251e08ae91da56d817 Author: Petri Savolainen petri.savolainen@linaro.org Date: Mon Oct 8 11:26:10 2018 +0300
linux-gen: pool: decrease minimum segment size to 2k
Decrease the minimum segment size from about 8k to about 2k bytes. Normal Ethernet frames still fit into the first segment, but the SHM reservation size is reduced to about 1/3 of what it was. There is a 1GB limit on process-mode SHM allocations, so the 1/3 reduction makes a big difference there.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index 99c31af7..bfe203bf 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -125,7 +125,9 @@ extern "C" { * defined segment length (seg_len in odp_pool_param_t) will be rounded up into * this value. */ -#define CONFIG_PACKET_SEG_LEN_MIN CONFIG_PACKET_MAX_SEG_LEN +#define CONFIG_PACKET_SEG_LEN_MIN ((2 * 1024) - \ + CONFIG_PACKET_HEADROOM - \ + CONFIG_PACKET_TAILROOM)
/* Maximum number of shared memory blocks. *
-----------------------------------------------------------------------
Summary of changes: Makefile.am | 16 +- configure.ac | 2 + example/m4/configure.m4 | 12 +- .../linux-generic/include/odp_config_internal.h | 4 +- platform/linux-generic/include/odp_shm_internal.h | 5 +- platform/linux-generic/odp_classification.c | 22 +- platform/linux-generic/odp_crypto_null.c | 10 +- platform/linux-generic/odp_crypto_openssl.c | 8 +- platform/linux-generic/odp_ipsec_events.c | 9 +- platform/linux-generic/odp_ipsec_sad.c | 11 +- platform/linux-generic/odp_ishm.c | 3 +- platform/linux-generic/odp_ishmphy.c | 4 +- platform/linux-generic/odp_packet_io.c | 10 +- platform/linux-generic/odp_pool.c | 6 +- platform/linux-generic/odp_queue_basic.c | 25 +- platform/linux-generic/odp_queue_lf.c | 8 +- platform/linux-generic/odp_schedule_basic.c | 12 +- platform/linux-generic/odp_shared_memory.c | 2 +- platform/linux-generic/odp_thread.c | 6 +- test/m4/configure.m4 | 10 + test/performance/odp_sched_pktio.c | 503 ++++++++++++++++++--- test/validation/api/scheduler/scheduler.c | 88 ++++ 22 files changed, 642 insertions(+), 134 deletions(-)
hooks/post-receive