This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, api-next has been updated via 6459109ccb46369a8d45d531c76e54c4267b30d6 (commit) via 4dc8160d4897a041a14de578c3fc777fa27e78f5 (commit) via 13b3e7ee716c9f5a296817b5143542d736ae3595 (commit) via b68aa21885ec3bdfa914c66d811662efbf380127 (commit) via f2be51f38a5115b5f5e2d6ee59df645f624adb7a (commit) via 898d8b479149308de40331bf3a646bca1c99449e (commit) via 794e94ae421c9587df1a11ff3027e6076974ec23 (commit) via 93774c205c20951e51b0ed8d9c03bd21f95857bc (commit) via a83e3c5ce2caa4ba4ba92f6e383c2a28d9aa3956 (commit) via 97eb06b4cd2f57e9033f5d09a5e2b7a7b9d641b9 (commit) via 6c0ffbf9689e36cc94d303d5ac30575fef041cbe (commit) via 7d2f7dbf7d97cea348bb0499b684103fd8ad750f (commit) via d184c87a690969571734fcd0c135da4f0da8ab55 (commit) via ae53f9f80ee5fb06c2a3c8512a14bc1e773a58e4 (commit) via bfb4e078a70e5510c1e3eb9deab5baeadb1c7fc3 (commit) via 29cf3cb9e63df80849f1b1c59b2174086e7dfe47 (commit) via 0817968af5f7ceebe96c4ca4661da2734304a329 (commit) via ec1be8784c85b263f52e763b7890f097b2366212 (commit) via 45bca0d051dd1f946e2d1ca188c481c1d179ad07 (commit) via bb2e2a4b2a538cb437f5b3331c95a7b7dc573cb3 (commit) via 7c8a226008d9f6fc9cd04f87539ab2715fef522a (commit) via 594a4a0171ab82512b05a06eb34ff7acb73730ab (commit) via 0b5dfae55a5a16e8c6b820f2458e48210cf9d762 (commit) via e236795c0941fd7fc01b162a899b5dc42ba1d05e (commit) via 9f39ccb68e57fa56a6d5e0b968292a2af0abc812 (commit) via 415e197affb7eb3db5d806911d3e4b8e6ff05779 (commit) via 3a817119eae94a2eb0c12d3a7e82eda35133b42c (commit) via 01d1a8db345fed332416b3a6066ae7be1f28f0ad (commit) via 168d7168447b99097f3bbe397ca76b6cb87d34da (commit) via ebff1c15f1a0ccad57d26720ba7357a6b194d7fe (commit) via 07b1c56979139118d47d15ff969365c08814dfd5 (commit) via fa9fd355c5404b46c3cfaf375666796cc5333aca (commit) via 173ef79e5306807d994869c7fb62c66ee82e4beb (commit) via 055da43cde1c7a9acc674a2db3d9d2a7a3d1ff8f (commit) via 0b880228e5e19bf2446f31f50e6df41c64e9502b (commit) via fb41b3eb2075505cf2f77fad48a8b6b5ed5da302 (commit) via 
d3d950ab1af5d1823a44950ea5a78e41dbe44dde (commit) via c97618bb548f6b7c5f27f4de497f72f705e184f6 (commit) via eee3800f367d61ccdb9051d484e133f618aad9e4 (commit) via d32bca1d3bd13efdef8b01a459fa1ce13042975b (commit) via 3cd53f15344c8ccf05dcf7812b4e2ff6a0b20961 (commit) via e40699a6950fedea5ebce172299a9a6aa2dec424 (commit) via ac4ac579bba217043604c3f4ad21ed0446214572 (commit) via a0857f0d63bccdc8a16eab5068b63029ea84f005 (commit) via cb97c894c05ef5a43dce262cf178ce5f54b1c806 (commit) via 81b8a6b38d3f71102527675529edbe5293bcd30b (commit) via f11e8bfd0599a717c457f2afd17e1a89febd5f8d (commit) via aab53b9cd951dc5b4f76e4acee8aa602ddd9ad99 (commit) via e1656a13f7a4a69dd743fd80c37a461bbdbf7d3f (commit) via 42146102091d6201399a39eadbb4a897768c27ab (commit) via 84f5ac969eb50e83cfa87a529e5a59a94196bcba (commit) via 9c15202e04ce97601d9910ce013da22fc5a3a2b0 (commit) via 177fc4cce6485a2bb80b99309eb7947e634d37fc (commit) via 386f6f8932a7897fdf8adc60aed8de7ee0174ad4 (commit) from 79f23e85b2c7e8f977efc2d10bb06639f08edbaf (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log ----------------------------------------------------------------- commit 6459109ccb46369a8d45d531c76e54c4267b30d6 Author: Petri Savolainen petri.savolainen@linaro.org Date: Mon Oct 29 14:14:46 2018 +0200
linux-gen: pktio: remove sched_cb_pktin_poll_old
The function is not used anymore.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h index df875c48..a131b6eb 100644 --- a/platform/linux-generic/include/odp_schedule_if.h +++ b/platform/linux-generic/include/odp_schedule_if.h @@ -84,7 +84,6 @@ extern const schedule_fn_t *sched_fn; /* Interface for the scheduler */ int sched_cb_pktin_poll(int pktio_index, int pktin_index, odp_buffer_hdr_t *hdr_tbl[], int num); -int sched_cb_pktin_poll_old(int pktio_index, int num_queue, int index[]); int sched_cb_pktin_poll_one(int pktio_index, int rx_queue, odp_event_t evts[]); void sched_cb_pktio_stop_finalize(int pktio_index);
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c index a473a3c9..c5efdc42 100644 --- a/platform/linux-generic/odp_packet_io.c +++ b/platform/linux-generic/odp_packet_io.c @@ -872,54 +872,6 @@ int sched_cb_pktin_poll(int pktio_index, int pktin_index, return pktin_recv_buf(entry, pktin_index, hdr_tbl, num); }
-int sched_cb_pktin_poll_old(int pktio_index, int num_queue, int index[]) -{ - odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX]; - int num, idx; - pktio_entry_t *entry = pktio_entry_by_index(pktio_index); - int state = entry->s.state; - - if (odp_unlikely(state != PKTIO_STATE_STARTED)) { - if (state < PKTIO_STATE_ACTIVE || - state == PKTIO_STATE_STOP_PENDING) - return -1; - - ODP_DBG("interface not started\n"); - return 0; - } - - for (idx = 0; idx < num_queue; idx++) { - odp_queue_t queue; - int num_enq; - - num = pktin_recv_buf(entry, index[idx], hdr_tbl, - QUEUE_MULTI_MAX); - - if (num == 0) - continue; - - if (num < 0) { - ODP_ERR("Packet recv error\n"); - return -1; - } - - queue = entry->s.in_queue[index[idx]].queue; - num_enq = odp_queue_enq_multi(queue, - (odp_event_t *)hdr_tbl, num); - - if (odp_unlikely(num_enq < num)) { - if (odp_unlikely(num_enq < 0)) - num_enq = 0; - - ODP_DBG("Interface %s dropped %i packets\n", - entry->s.name, num - num_enq); - buffer_free_multi(&hdr_tbl[num_enq], num - num_enq); - } - } - - return 0; -} - void sched_cb_pktio_stop_finalize(int pktio_index) { int state;
commit 4dc8160d4897a041a14de578c3fc777fa27e78f5 Author: Petri Savolainen petri.savolainen@linaro.org Date: Mon Oct 29 13:57:21 2018 +0200
linux-gen: schedule_sp: use sched_cb_pktin_poll
Use sched_cb_pktin_poll instead of sched_cb_pktin_poll_old, so that the old pktin poll function can be removed.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c index e63bd061..8dcf9b0e 100644 --- a/platform/linux-generic/odp_schedule_sp.c +++ b/platform/linux-generic/odp_schedule_sp.c @@ -62,6 +62,7 @@ struct sched_cmd_s { int init; int num_pktin; int pktin_idx[NUM_PKTIN]; + odp_queue_t queue[NUM_PKTIN]; };
typedef struct ODP_ALIGNED_CACHE sched_cmd_t { @@ -438,7 +439,7 @@ static int ord_enq_multi(odp_queue_t queue, void *buf_hdr[], int num, static void pktio_start(int pktio_index, int num, int pktin_idx[], - odp_queue_t odpq[] ODP_UNUSED) + odp_queue_t queue[]) { int i; sched_cmd_t *cmd; @@ -452,8 +453,10 @@ static void pktio_start(int pktio_index, ODP_ABORT("Supports only %i pktin queues per interface\n", NUM_PKTIN);
- for (i = 0; i < num; i++) + for (i = 0; i < num; i++) { cmd->s.pktin_idx[i] = pktin_idx[i]; + cmd->s.queue[i] = queue[i]; + }
cmd->s.num_pktin = num;
@@ -507,6 +510,26 @@ static uint64_t schedule_wait_time(uint64_t ns) return ns; }
+static inline void enqueue_packets(odp_queue_t queue, + odp_buffer_hdr_t *hdr_tbl[], int num_pkt) +{ + int num_enq, num_drop; + + num_enq = odp_queue_enq_multi(queue, (odp_event_t *)hdr_tbl, + num_pkt); + + if (num_enq < 0) + num_enq = 0; + + if (num_enq < num_pkt) { + num_drop = num_pkt - num_enq; + + ODP_DBG("Dropped %i packets\n", num_drop); + odp_packet_free_multi((odp_packet_t *)&hdr_tbl[num_enq], + num_drop); + } +} + static int schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[], int max_events ODP_UNUSED) { @@ -534,12 +557,33 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait, cmd = sched_cmd();
if (cmd && cmd->s.type == CMD_PKTIO) { - if (sched_cb_pktin_poll_old(cmd->s.index, - cmd->s.num_pktin, - cmd->s.pktin_idx)) { - /* Pktio stopped or closed. */ - sched_cb_pktio_stop_finalize(cmd->s.index); - } else { + odp_buffer_hdr_t *hdr_tbl[CONFIG_BURST_SIZE]; + int i; + int num_pkt = 0; + int max_num = CONFIG_BURST_SIZE; + int pktio_idx = cmd->s.index; + int num_pktin = cmd->s.num_pktin; + int *pktin_idx = cmd->s.pktin_idx; + odp_queue_t *queue = cmd->s.queue; + + for (i = 0; i < num_pktin; i++) { + num_pkt = sched_cb_pktin_poll(pktio_idx, + pktin_idx[i], + hdr_tbl, max_num); + + if (num_pkt < 0) { + /* Pktio stopped or closed. */ + sched_cb_pktio_stop_finalize(pktio_idx); + break; + } + + if (num_pkt == 0) + continue; + + enqueue_packets(queue[i], hdr_tbl, num_pkt); + } + + if (num_pkt >= 0) { /* Continue polling pktio. */ add_tail(cmd); }
commit 13b3e7ee716c9f5a296817b5143542d736ae3595 Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 25 17:38:24 2018 +0300
linux-gen: thread: use automatic schedule group configuration
Read from scheduler configuration if an automatic schedule group is enabled or disabled.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c index 4fc6acc9..7728929b 100644 --- a/platform/linux-generic/odp_thread.c +++ b/platform/linux-generic/odp_thread.c @@ -136,6 +136,20 @@ int odp_thread_init_local(odp_thread_type_t type) { int id; int cpu; + int group_all, group_worker, group_control; + + group_all = 1; + group_worker = 1; + group_control = 1; + + if (sched_fn->config) { + schedule_config_t schedule_config; + + sched_fn->config(&schedule_config); + group_all = schedule_config.group_enable.all; + group_worker = schedule_config.group_enable.worker; + group_control = schedule_config.group_enable.control; + }
odp_spinlock_lock(&thread_globals->lock); id = alloc_id(type); @@ -159,11 +173,13 @@ int odp_thread_init_local(odp_thread_type_t type)
_odp_this_thread = &thread_globals->thr[id];
- sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id); + if (group_all) + sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
- if (type == ODP_THREAD_WORKER) + if (type == ODP_THREAD_WORKER && group_worker) sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id); - else if (type == ODP_THREAD_CONTROL) + + if (type == ODP_THREAD_CONTROL && group_control) sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
return 0; @@ -172,14 +188,30 @@ int odp_thread_init_local(odp_thread_type_t type) int odp_thread_term_local(void) { int num; + int group_all, group_worker, group_control; int id = _odp_this_thread->thr; odp_thread_type_t type = _odp_this_thread->type;
- sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id); + group_all = 1; + group_worker = 1; + group_control = 1;
- if (type == ODP_THREAD_WORKER) + if (sched_fn->config) { + schedule_config_t schedule_config; + + sched_fn->config(&schedule_config); + group_all = schedule_config.group_enable.all; + group_worker = schedule_config.group_enable.worker; + group_control = schedule_config.group_enable.control; + } + + if (group_all) + sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id); + + if (type == ODP_THREAD_WORKER && group_worker) sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id); - else if (type == ODP_THREAD_CONTROL) + + if (type == ODP_THREAD_CONTROL && group_control) sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
odp_spinlock_lock(&thread_globals->lock);
commit b68aa21885ec3bdfa914c66d811662efbf380127 Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 25 17:26:37 2018 +0300
linux-gen: sched: add config request function to interface
Added config request function to scheduler internal interface. Other modules may use this to examine scheduler configuration.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h index 13e209f2..df875c48 100644 --- a/platform/linux-generic/include/odp_schedule_if.h +++ b/platform/linux-generic/include/odp_schedule_if.h @@ -19,6 +19,15 @@ extern "C" { /* Number of ordered locks per queue */ #define SCHEDULE_ORDERED_LOCKS_PER_QUEUE 2
+typedef struct schedule_config_t { + struct { + int all; + int worker; + int control; + } group_enable; + +} schedule_config_t; + typedef void (*schedule_pktio_start_fn_t)(int pktio_index, int num_in_queue, int in_queue_idx[], @@ -44,7 +53,7 @@ typedef void (*schedule_order_unlock_lock_fn_t)(void); typedef void (*schedule_order_lock_start_fn_t)(void); typedef void (*schedule_order_lock_wait_fn_t)(void); typedef uint32_t (*schedule_max_ordered_locks_fn_t)(void); -typedef void (*schedule_save_context_fn_t)(uint32_t queue_index); +typedef void (*schedule_config_fn_t)(schedule_config_t *config);
typedef struct schedule_fn_t { schedule_pktio_start_fn_t pktio_start; @@ -65,6 +74,7 @@ typedef struct schedule_fn_t { schedule_order_lock_wait_fn_t wait_order_lock; schedule_order_unlock_lock_fn_t order_unlock_lock; schedule_max_ordered_locks_fn_t max_ordered_locks; + schedule_config_fn_t config;
} schedule_fn_t;
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c index 7208200a..74942341 100644 --- a/platform/linux-generic/odp_schedule_basic.c +++ b/platform/linux-generic/odp_schedule_basic.c @@ -226,6 +226,9 @@ typedef struct {
order_context_t order[ODP_CONFIG_QUEUES];
+ /* Scheduler interface config options (not used in fast path) */ + schedule_config_t config_if; + } sched_global_t;
/* Check that queue[] variables are large enough */ @@ -320,7 +323,37 @@ static int read_config_file(sched_global_t *sched) return -1; } } - ODP_PRINT("\n\n"); + + ODP_PRINT("\n"); + + str = "sched_basic.group_enable.all"; + if (!_odp_libconfig_lookup_int(str, &val)) { + ODP_ERR("Config option '%s' not found.\n", str); + return -1; + } + + sched->config_if.group_enable.all = val; + ODP_PRINT(" %s: %i\n", str, val); + + str = "sched_basic.group_enable.worker"; + if (!_odp_libconfig_lookup_int(str, &val)) { + ODP_ERR("Config option '%s' not found.\n", str); + return -1; + } + + sched->config_if.group_enable.worker = val; + ODP_PRINT(" %s: %i\n", str, val); + + str = "sched_basic.group_enable.control"; + if (!_odp_libconfig_lookup_int(str, &val)) { + ODP_ERR("Config option '%s' not found.\n", str); + return -1; + } + + sched->config_if.group_enable.control = val; + ODP_PRINT(" %s: %i\n", str, val); + + ODP_PRINT("\n");
return 0; } @@ -1474,6 +1507,11 @@ static int schedule_num_grps(void) return NUM_SCHED_GRPS; }
+static void schedule_config(schedule_config_t *config) +{ + *config = *(&sched->config_if); +} + /* Fill in scheduler interface */ const schedule_fn_t schedule_basic_fn = { .pktio_start = schedule_pktio_start, @@ -1490,7 +1528,8 @@ const schedule_fn_t schedule_basic_fn = { .term_local = schedule_term_local, .order_lock = order_lock, .order_unlock = order_unlock, - .max_ordered_locks = schedule_max_ordered_locks + .max_ordered_locks = schedule_max_ordered_locks, + .config = schedule_config };
/* Fill in scheduler API calls */
commit f2be51f38a5115b5f5e2d6ee59df645f624adb7a Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 25 16:53:39 2018 +0300
linux-gen: config: add schedule group config file options
Added options to disable unused automatic schedule groups.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf index 2417d23f..4585a896 100644 --- a/config/odp-linux-generic.conf +++ b/config/odp-linux-generic.conf @@ -16,7 +16,7 @@
# Mandatory fields odp_implementation = "linux-generic" -config_file_version = "0.1.2" +config_file_version = "0.1.3"
# Shared memory options shm: { @@ -114,4 +114,15 @@ sched_basic: { burst_size_default = [ 32, 32, 32, 32, 32, 16, 8, 4] burst_size_max = [255, 255, 255, 255, 255, 16, 16, 8]
+ # Automatically updated schedule groups + # + # API specification defines that ODP_SCHED_GROUP_ALL, + # _WORKER and _CONTROL are updated automatically. These options can be + # used to disable these group when not used. Set value to 0 to disable + # a group. Performance may improve when unused groups are disabled. + group_enable: { + all = 1 + worker = 1 + control = 1 + } } diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf index 7a06544a..f631f54b 100644 --- a/platform/linux-generic/test/process-mode.conf +++ b/platform/linux-generic/test/process-mode.conf @@ -1,6 +1,6 @@ # Mandatory fields odp_implementation = "linux-generic" -config_file_version = "0.1.2" +config_file_version = "0.1.3"
# Shared memory options shm: {
commit 898d8b479149308de40331bf3a646bca1c99449e Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 25 16:26:42 2018 +0300
test: sched_pktio: add burst size option
Added an option to control the maximum burst size requested from the scheduler.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c index 91771e87..82f74422 100644 --- a/test/performance/odp_sched_pktio.c +++ b/test/performance/odp_sched_pktio.c @@ -24,7 +24,6 @@ #define MAX_PKT_LEN 1514 #define MAX_PKT_NUM (128 * 1024) #define MIN_PKT_SEG_LEN 64 -#define BURST_SIZE 32 #define CHECK_PERIOD 10000 #define TEST_PASSED_LIMIT 5000 #define TIMEOUT_OFFSET_NS 1000000 @@ -38,6 +37,7 @@ typedef struct test_options_t { int num_worker; int num_pktio; int num_pktio_queue; + int burst_size; int pipe_stages; int pipe_queues; uint32_t pipe_queue_size; @@ -182,7 +182,6 @@ static inline void send_packets(test_global_t *test_global,
static int worker_thread_direct(void *arg) { - odp_event_t ev[BURST_SIZE]; int num_pkt, out; odp_pktout_queue_t pktout; odp_queue_t queue; @@ -191,6 +190,7 @@ static int worker_thread_direct(void *arg) test_global_t *test_global = worker_arg->test_global_ptr; int worker_id = worker_arg->worker_id; uint32_t polls = 0; + int burst_size = test_global->opt.burst_size;
printf("Worker %i started\n", worker_id);
@@ -198,10 +198,11 @@ static int worker_thread_direct(void *arg) odp_barrier_wait(&test_global->worker_start);
while (1) { - odp_packet_t pkt[BURST_SIZE]; + odp_event_t ev[burst_size]; + odp_packet_t pkt[burst_size];
num_pkt = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, - ev, BURST_SIZE); + ev, burst_size);
polls++;
@@ -267,7 +268,6 @@ static inline odp_queue_t next_queue(test_global_t *test_global, int input,
static int worker_thread_pipeline(void *arg) { - odp_event_t ev[BURST_SIZE]; int i, num_pkt, input, output, output_queue; odp_queue_t queue, dst_queue; odp_pktout_queue_t pktout; @@ -281,6 +281,7 @@ static int worker_thread_pipeline(void *arg) int num_pktio = test_global->opt.num_pktio; int num_pktio_queue = test_global->opt.num_pktio_queue; uint32_t polls = 0; + int burst_size = test_global->opt.burst_size;
printf("Worker %i started\n", worker_id);
@@ -288,10 +289,11 @@ static int worker_thread_pipeline(void *arg) odp_barrier_wait(&test_global->worker_start);
while (1) { - odp_packet_t pkt[BURST_SIZE]; + odp_event_t ev[burst_size]; + odp_packet_t pkt[burst_size];
num_pkt = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, - ev, BURST_SIZE); + ev, burst_size);
polls++;
@@ -399,7 +401,6 @@ static int worker_thread_pipeline(void *arg)
static int worker_thread_timers(void *arg) { - odp_event_t ev[BURST_SIZE]; int num, num_pkt, out, tmos, i, src_pktio, src_queue; odp_pktout_queue_t pktout; odp_queue_t queue; @@ -410,6 +411,7 @@ static int worker_thread_timers(void *arg) test_global_t *test_global = worker_arg->test_global_ptr; int worker_id = worker_arg->worker_id; uint32_t polls = 0; + int burst_size = test_global->opt.burst_size; uint64_t tick = test_global->timer.timeout_tick;
printf("Worker (timers) %i started\n", worker_id); @@ -418,10 +420,11 @@ static int worker_thread_timers(void *arg) odp_barrier_wait(&test_global->worker_start);
while (1) { - odp_packet_t pkt[BURST_SIZE]; + odp_event_t ev[burst_size]; + odp_packet_t pkt[burst_size];
num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, - ev, BURST_SIZE); + ev, burst_size);
polls++;
@@ -534,6 +537,7 @@ static void print_usage(const char *progname) " -i, --interface <name> Packet IO interfaces (comma-separated, no spaces)\n" " -c, --num_cpu <number> Worker thread count. Default: 1\n" " -q, --num_queue <number> Number of pktio queues. Default: Worker thread count\n" + " -b, --burst <number> Maximum number of events requested from scheduler. Default: 32\n" " -t, --timeout <number> Flow inactivity timeout (in usec) per packet. Default: 0 (don't use timers)\n" " --pipe-stages <number> Number of pipeline stages per interface\n" " --pipe-queues <number> Number of queues per pipeline stage\n" @@ -553,6 +557,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) {"interface", required_argument, NULL, 'i'}, {"num_cpu", required_argument, NULL, 'c'}, {"num_queue", required_argument, NULL, 'q'}, + {"burst", required_argument, NULL, 'b'}, {"timeout", required_argument, NULL, 't'}, {"sched_mode", required_argument, NULL, 'm'}, {"pipe-stages", required_argument, NULL, 0}, @@ -562,7 +567,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0} }; - const char *shortopts = "+i:c:q:t:m:sh"; + const char *shortopts = "+i:c:q:b:t:m:sh"; int ret = 0;
memset(test_options, 0, sizeof(test_options_t)); @@ -570,6 +575,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) test_options->sched_mode = SCHED_MODE_ATOMIC; test_options->num_worker = 1; test_options->num_pktio_queue = 0; + test_options->burst_size = 32; test_options->pipe_queue_size = 256;
/* let helper collect its own arguments (e.g. --odph_proc) */ @@ -628,6 +634,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) case 'q': test_options->num_pktio_queue = atoi(optarg); break; + case 'b': + test_options->burst_size = atoi(optarg); + break; case 't': test_options->timeout_us = atol(optarg); break; @@ -771,6 +780,7 @@ static void print_config(test_global_t *test_global) " queues per interface: %i\n", test_global->opt.num_pktio_queue);
+ printf(" burst size: %u\n", test_global->opt.burst_size); printf(" collect statistics: %u\n", test_global->opt.collect_stat); printf(" timeout usec: %li\n", test_global->opt.timeout_us);
commit 794e94ae421c9587df1a11ff3027e6076974ec23 Author: Janne Peltonen janne.peltonen@nokia.com Date: Mon Sep 10 13:19:31 2018 +0300
linux-gen: ipsec: make SA lifetime checking more scalable to multiple threads
Enforcing the packet- and byte-based SA lifetimes is currently slow if the same SA is being handled by multiple threads, since the threads keep checking and updating the same shared byte and packet counters.
Make the implementation more scalable by having a thread-local quota of packets and bytes for each SA and by updating the shared byte and packet counters less frequently (i.e. when the quota runs out).
This introduces some inexactness to life time warnings and errors. The warnings and errors about soft and hard limits being reached may come a bit earlier than expected based on the packets sent to the wire.
Signed-off-by: Janne Peltonen janne.peltonen@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c index fa0d7096..178b01c0 100644 --- a/platform/linux-generic/odp_ipsec_sad.c +++ b/platform/linux-generic/odp_ipsec_sad.c @@ -42,7 +42,42 @@ #warning IPV4_ID_RING_SIZE is too small for the maximum number of threads. #endif
+/* + * To avoid checking and updating the packet and byte counters in the + * SA for every packet, we increment the global counters once for several + * packets. We decrement a preallocated thread-local quota for every + * packet. When the quota runs out, we get a new quota by incementing the + * global counter. + * + * This improves performance but the looser synchronization between + * threads makes life time warnings and errors somewhat inaccurate. + * The warnings and errors may get triggered a bit too early since + * some threads may still have unused quota when the first thread + * hits the limit. + */ +#define SA_LIFE_PACKETS_PREALLOC 64 +#define SA_LIFE_BYTES_PREALLOC 4000 + +typedef struct sa_thread_local_s { + /* + * Packets that can be processed in this thread before looking at + * the SA-global packet counter and checking hard and soft limits. + */ + uint32_t packet_quota; + /* + * Bytes that can be processed in this thread before looking at + * at the SA-global byte counter and checking hard and soft limits. + */ + uint32_t byte_quota; + /* + * Life time status when this thread last checked the global + * counter(s). + */ + odp_ipsec_op_status_t lifetime_status; +} sa_thread_local_t; + typedef struct ODP_ALIGNED_CACHE ipsec_thread_local_s { + sa_thread_local_t sa[ODP_CONFIG_IPSEC_SAS]; uint16_t first_ipv4_id; /* first ID of current block of IDs */ uint16_t next_ipv4_id; /* next ID to be used */ } ipsec_thread_local_t; @@ -80,6 +115,24 @@ ipsec_sa_t *_odp_ipsec_sa_entry_from_hdl(odp_ipsec_sa_t sa) return ipsec_sa_entry_from_hdl(sa); }
+static inline sa_thread_local_t *ipsec_sa_thread_local(ipsec_sa_t *sa) +{ + return &ipsec_sa_tbl->per_thread[odp_thread_id()].sa[sa->ipsec_sa_idx]; +} + +static void init_sa_thread_local(ipsec_sa_t *sa) +{ + sa_thread_local_t *sa_tl; + int n; + + for (n = 0; n < ODP_THREAD_COUNT_MAX; n++) { + sa_tl = &ipsec_sa_tbl->per_thread[n].sa[sa->ipsec_sa_idx]; + sa_tl->packet_quota = 0; + sa_tl->byte_quota = 0; + sa_tl->lifetime_status.all = 0; + } +} + int _odp_ipsec_sad_init_global(void) { odp_shm_t shm; @@ -540,6 +593,8 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param) &ses_create_rc)) goto error;
+ init_sa_thread_local(ipsec_sa); + ipsec_sa_publish(ipsec_sa);
return ipsec_sa->ipsec_sa_hdl; @@ -681,17 +736,11 @@ int _odp_ipsec_sa_stats_precheck(ipsec_sa_t *ipsec_sa, odp_ipsec_op_status_t *status) { int rc = 0; + sa_thread_local_t *sa_tl = ipsec_sa_thread_local(ipsec_sa);
- if (ipsec_sa->hard_limit_bytes > 0 && - odp_atomic_load_u64(&ipsec_sa->hot.bytes) > - ipsec_sa->hard_limit_bytes) { - status->error.hard_exp_bytes = 1; - rc = -1; - } - if (ipsec_sa->hard_limit_packets > 0 && - odp_atomic_load_u64(&ipsec_sa->hot.packets) > - ipsec_sa->hard_limit_packets) { - status->error.hard_exp_packets = 1; + if (sa_tl->lifetime_status.error.hard_exp_packets || + sa_tl->lifetime_status.error.hard_exp_bytes) { + status->all |= sa_tl->lifetime_status.all; rc = -1; }
@@ -701,30 +750,47 @@ int _odp_ipsec_sa_stats_precheck(ipsec_sa_t *ipsec_sa, int _odp_ipsec_sa_stats_update(ipsec_sa_t *ipsec_sa, uint32_t len, odp_ipsec_op_status_t *status) { - uint64_t bytes = odp_atomic_fetch_add_u64(&ipsec_sa->hot.bytes, len) + len; - uint64_t packets = odp_atomic_fetch_add_u64(&ipsec_sa->hot.packets, 1) + 1; - int rc = 0; + sa_thread_local_t *sa_tl = ipsec_sa_thread_local(ipsec_sa); + uint64_t packets, bytes; + + if (odp_unlikely(sa_tl->packet_quota == 0)) { + packets = odp_atomic_fetch_add_u64(&ipsec_sa->hot.packets, + SA_LIFE_PACKETS_PREALLOC); + packets += SA_LIFE_PACKETS_PREALLOC; + sa_tl->packet_quota += SA_LIFE_PACKETS_PREALLOC; + + if (ipsec_sa->soft_limit_packets > 0 && + packets >= ipsec_sa->soft_limit_packets) + sa_tl->lifetime_status.warn.soft_exp_packets = 1; + + if (ipsec_sa->hard_limit_packets > 0 && + packets >= ipsec_sa->hard_limit_packets) + sa_tl->lifetime_status.error.hard_exp_packets = 1; + } + sa_tl->packet_quota--;
- if (ipsec_sa->soft_limit_bytes > 0 && - bytes > ipsec_sa->soft_limit_bytes) - status->warn.soft_exp_bytes = 1; + if (odp_unlikely(sa_tl->byte_quota < len)) { + bytes = odp_atomic_fetch_add_u64(&ipsec_sa->hot.bytes, + len + SA_LIFE_BYTES_PREALLOC); + bytes += len + SA_LIFE_BYTES_PREALLOC; + sa_tl->byte_quota += len + SA_LIFE_BYTES_PREALLOC;
- if (ipsec_sa->soft_limit_packets > 0 && - packets > ipsec_sa->soft_limit_packets) - status->warn.soft_exp_packets = 1; + if (ipsec_sa->soft_limit_bytes > 0 && + bytes >= ipsec_sa->soft_limit_bytes) + sa_tl->lifetime_status.warn.soft_exp_bytes = 1;
- if (ipsec_sa->hard_limit_bytes > 0 && - bytes > ipsec_sa->hard_limit_bytes) { - status->error.hard_exp_bytes = 1; - rc = -1; - } - if (ipsec_sa->hard_limit_packets > 0 && - packets > ipsec_sa->hard_limit_packets) { - status->error.hard_exp_packets = 1; - rc = -1; + if (ipsec_sa->hard_limit_bytes > 0 && + bytes >= ipsec_sa->hard_limit_bytes) + sa_tl->lifetime_status.error.hard_exp_bytes = 1; } + sa_tl->byte_quota -= len;
- return rc; + status->all |= sa_tl->lifetime_status.all; + + if (sa_tl->lifetime_status.error.hard_exp_packets || + sa_tl->lifetime_status.error.hard_exp_bytes) + return -1; + return 0; }
int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint32_t seq,
commit 93774c205c20951e51b0ed8d9c03bd21f95857bc Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 24 15:54:53 2018 +0300
linux-gen: sched: increase max spread weight
Increase max spread weight. The default value is kept the same.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c index 456704a0..7208200a 100644 --- a/platform/linux-generic/odp_schedule_basic.c +++ b/platform/linux-generic/odp_schedule_basic.c @@ -58,7 +58,7 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
/* A thread polls a non preferred sched queue every this many polls * of the prefer queue. */ -#define MAX_PREFER_WEIGHT 63 +#define MAX_PREFER_WEIGHT 127 #define MIN_PREFER_WEIGHT 1 #define MAX_PREFER_RATIO (MAX_PREFER_WEIGHT + 1)
commit a83e3c5ce2caa4ba4ba92f6e383c2a28d9aa3956 Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 24 15:45:38 2018 +0300
linux-gen: sched: use spread weight from config file
Use the new config file option instead of fixed prefer ratio.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c index 214d42c8..456704a0 100644 --- a/platform/linux-generic/odp_schedule_basic.c +++ b/platform/linux-generic/odp_schedule_basic.c @@ -333,7 +333,7 @@ static inline uint8_t prio_spread_index(uint32_t index) static void sched_local_init(void) { int i; - uint8_t spread; + uint8_t spread, prefer_ratio; uint8_t num_spread = sched->config.num_spread; uint8_t offset = 1;
@@ -344,11 +344,12 @@ static void sched_local_init(void) sched_local.stash.queue = ODP_QUEUE_INVALID;
spread = prio_spread_index(sched_local.thr); + prefer_ratio = sched->config.prefer_ratio;
for (i = 0; i < SPREAD_TBL_SIZE; i++) { sched_local.spread_tbl[i] = spread;
- if (num_spread > 1 && (i % MAX_PREFER_RATIO) == 0) { + if (num_spread > 1 && (i % prefer_ratio) == 0) { sched_local.spread_tbl[i] = prio_spread_index(spread + offset); offset++; @@ -362,6 +363,7 @@ static int schedule_init_global(void) { odp_shm_t shm; int i, j, grp; + int prefer_ratio;
ODP_DBG("Schedule init ... ");
@@ -382,8 +384,10 @@ static int schedule_init_global(void) return -1; }
+ prefer_ratio = sched->config.prefer_ratio; + /* When num_spread == 1, only spread_tbl[0] is used. */ - sched->max_spread = (sched->config.num_spread - 1) * MAX_PREFER_RATIO; + sched->max_spread = (sched->config.num_spread - 1) * prefer_ratio; sched->shm = shm; odp_spinlock_init(&sched->mask_lock);
commit 97eb06b4cd2f57e9033f5d09a5e2b7a7b9d641b9 Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 24 15:20:21 2018 +0300
linux-gen: sched: add spread weight config file option
Add new config file option to control scheduler internal queue preference ratio.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf index af651d7f..2417d23f 100644 --- a/config/odp-linux-generic.conf +++ b/config/odp-linux-generic.conf @@ -16,7 +16,7 @@
# Mandatory fields odp_implementation = "linux-generic" -config_file_version = "0.1.1" +config_file_version = "0.1.2"
# Shared memory options shm: { @@ -80,13 +80,27 @@ queue_basic: { }
sched_basic: { - # Priority level spread. Each priority level is spread into multiple - # scheduler internal queues. A higher spread value typically improves - # parallelism and thus is better for high thread counts, but causes - # uneven service level for low thread counts. Typically, optimal - # value is the number of threads using the scheduler. + # Priority level spread + # + # Each priority level is spread into multiple scheduler internal queues. + # This value defines the number of those queues. Minimum value is 1. + # Each thread prefers one of the queues over other queues. A higher + # spread value typically improves parallelism and thus is better for + # high thread counts, but causes uneven service level for low thread + # counts. Typically, optimal value is the number of threads using + # the scheduler. prio_spread = 4
+ # Weight of the preferred scheduler internal queue + # + # Each thread prefers one of the internal queues over other queues. + # This value controls how many times the preferred queue is polled + # between a poll to another internal queue. Minimum value is 1. A higher + # value typically improves parallelism as threads work mostly on their + # preferred queues, but causes uneven service level for low thread + # counts as non-preferred queues are served less often + prio_spread_weight = 63 + # Burst size configuration per priority. The first array element # represents the highest queue priority. The scheduler tries to get # burst_size_default[prio] events from a queue and stashes those that diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c index 58396293..214d42c8 100644 --- a/platform/linux-generic/odp_schedule_basic.c +++ b/platform/linux-generic/odp_schedule_basic.c @@ -58,10 +58,12 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
/* A thread polls a non preferred sched queue every this many polls * of the prefer queue. */ -#define PREFER_RATIO 64 +#define MAX_PREFER_WEIGHT 63 +#define MIN_PREFER_WEIGHT 1 +#define MAX_PREFER_RATIO (MAX_PREFER_WEIGHT + 1)
/* Spread weight table */ -#define SPREAD_TBL_SIZE ((MAX_SPREAD - 1) * PREFER_RATIO) +#define SPREAD_TBL_SIZE ((MAX_SPREAD - 1) * MAX_PREFER_RATIO)
/* Maximum number of packet IO interfaces */ #define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES @@ -182,6 +184,7 @@ typedef struct { uint8_t burst_default[NUM_PRIO]; uint8_t burst_max[NUM_PRIO]; uint8_t num_spread; + uint8_t prefer_ratio; } config;
uint16_t max_spread; @@ -256,13 +259,29 @@ static int read_config_file(sched_global_t *sched) }
if (val > MAX_SPREAD || val < MIN_SPREAD) { - ODP_ERR("Bad value %s = %u\n", str, val); + ODP_ERR("Bad value %s = %u [min: %u, max: %u]\n", str, val, + MIN_SPREAD, MAX_SPREAD); return -1; }
sched->config.num_spread = val; ODP_PRINT(" %s: %i\n", str, val);
+ str = "sched_basic.prio_spread_weight"; + if (!_odp_libconfig_lookup_int(str, &val)) { + ODP_ERR("Config option '%s' not found.\n", str); + return -1; + } + + if (val > MAX_PREFER_WEIGHT || val < MIN_PREFER_WEIGHT) { + ODP_ERR("Bad value %s = %u [min: %u, max: %u]\n", str, val, + MIN_PREFER_WEIGHT, MAX_PREFER_WEIGHT); + return -1; + } + + sched->config.prefer_ratio = val + 1; + ODP_PRINT(" %s: %i\n", str, val); + str = "sched_basic.burst_size_default"; if (_odp_libconfig_lookup_array(str, burst_val, NUM_PRIO) != NUM_PRIO) { @@ -329,7 +348,7 @@ static void sched_local_init(void) for (i = 0; i < SPREAD_TBL_SIZE; i++) { sched_local.spread_tbl[i] = spread;
- if (num_spread > 1 && (i % PREFER_RATIO) == 0) { + if (num_spread > 1 && (i % MAX_PREFER_RATIO) == 0) { sched_local.spread_tbl[i] = prio_spread_index(spread + offset); offset++; @@ -364,7 +383,7 @@ static int schedule_init_global(void) }
/* When num_spread == 1, only spread_tbl[0] is used. */ - sched->max_spread = (sched->config.num_spread - 1) * PREFER_RATIO; + sched->max_spread = (sched->config.num_spread - 1) * MAX_PREFER_RATIO; sched->shm = shm; odp_spinlock_init(&sched->mask_lock);
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf index d80df25c..7a06544a 100644 --- a/platform/linux-generic/test/process-mode.conf +++ b/platform/linux-generic/test/process-mode.conf @@ -1,6 +1,6 @@ # Mandatory fields odp_implementation = "linux-generic" -config_file_version = "0.1.1" +config_file_version = "0.1.2"
# Shared memory options shm: {
commit 6c0ffbf9689e36cc94d303d5ac30575fef041cbe Author: Maxim Uvarov maxim.uvarov@linaro.org Date: Sun Oct 21 10:19:41 2018 +0300
remove scripts/build-pktio-dpdk
Script was introduced to quickly build ODP with DPDK pktio support for development or testing purposes. Now it is easier to run a docker container with the same parameters as CI does.
Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org Signed-off-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Reviewed-and-tested-by: Matias Elo matias.elo@nokia.com
diff --git a/DEPENDENCIES b/DEPENDENCIES index 6b345b9c..abe6d741 100644 --- a/DEPENDENCIES +++ b/DEPENDENCIES @@ -166,23 +166,42 @@ Prerequisites for building the OpenDataPlane (ODP) API
Use DPDK for ODP packet I/O.
+ Note: only packet I/O is accelerated with DPDK. Use + https://github.com/Linaro/odp-dpdk.git + for fully accelerated odp dpdk platform. + +3.4.1 DPDK pktio requirements + DPDK pktio adds a dependency on the NUMA library. # Debian/Ubuntu $ sudo apt-get install libnuma-dev # CentOS/RedHat/Fedora $ sudo yum install numactl-devel
- Note: only packet I/O is accelerated with DPDK. Use - https://github.com/Linaro/odp-dpdk.git - for fully accelerated odp dpdk platform. +3.4.2 Native DPDK install + # Debian/Ubuntu starting from 18.04 + $ sudo apt-get install dpdk-dev + +3.4.2 Build DPDK from source + git clone --branch=17.11 http://dpdk.org/git/dpdk-stable dpdk + + #Make and edit DPDK configuration + TARGET="x86_64-native-linuxapp-gcc" + make config T=${TARGET} O=${TARGET} + pushd ${TARGET}
-3.4.1 Building DPDK and ODP with DPDK pktio support + #To use I/O without DPDK supported NIC's enable pcap pmd: + sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_PCAP=).*,\1y,' .config + popd
- DPDK packet I/O has been tested to work with DPDK v17.11. + #Build DPDK + make build O=${TARGET} EXTRA_CFLAGS="-fPIC" + make install O=${TARGET} DESTDIR=${TARGET}
- Follow steps in ./scripts/build-pktio-dpdk + #compile ODP + ./configure --with-dpdk-path=`pwd`/dpdk/${TARGET}/usr/local
-3.4.2 Setup system +3.4.3 Setup system
# Load DPDK modules $ sudo /sbin/modprobe uio @@ -194,13 +213,13 @@ Prerequisites for building the OpenDataPlane (ODP) API 512 x 2MB hugepages. All this can be done with the DPDK setup script (<dpdk-dir>/tools/dpdk-setup.sh).
-3.4.3 Running ODP with DPDK pktio +3.4.4 Running ODP with DPDK pktio
ODP applications will try use DPDK for packet I/O by default. If some other I/O type is desired instead, DPDK I/O can be disabled by setting the environment variable ODP_PKTIO_DISABLE_DPDK.
- DPDK interfaces are accessed using indices. For example, two first DPDK + DPDK interfaces are accessed using indexes. For example, two first DPDK interfaces can be used with the odp_l2fwd example as follows: $ cd <odp_dir> $ sudo ./test/performance/odp_l2fwd -i 0,1 -c 2 -m 0 diff --git a/scripts/build-pktio-dpdk b/scripts/build-pktio-dpdk deleted file mode 100755 index b0c0a4d0..00000000 --- a/scripts/build-pktio-dpdk +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -TARGET=${TARGET:-"x86_64-native-linuxapp-gcc"} - -export ROOT_DIR=$(readlink -e $(dirname $0) | sed 's|/scripts||') -pushd ${ROOT_DIR} - -echo '#include "pcap.h"' | cpp -H -o /dev/null 2>&1 -if [ "$?" != "0" ]; then - echo "Error: pcap is not installed. You may need to install libpcap-dev" -fi - -echo '#include "numa.h"' | cpp -H -o /dev/null 2>&1 -if [ "$?" != "0" ]; then - echo "Error: NUMA library is not installed. You need to install libnuma-dev" - exit 1 -fi - -git -c advice.detachedHead=false clone -q --depth=1 --single-branch --branch=17.11 http://dpdk.org/git/dpdk-stable dpdk -pushd dpdk -git log --oneline --decorate - -#Make and edit DPDK configuration -make config T=${TARGET} O=${TARGET} -pushd ${TARGET} -#To use I/O without DPDK supported NIC's enable pcap pmd: -sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_PCAP=).*,\1y,' .config -popd - -#Build DPDK -make build O=${TARGET} EXTRA_CFLAGS="-fPIC" -make install O=${TARGET} DESTDIR=${TARGET} -popd - -#Build ODP -./bootstrap; -./configure --enable-test-vald --enable-test-perf --enable-test-cpp \ - --enable-debug --enable-debug-print \ - --with-dpdk-path=`pwd`/dpdk/${TARGET}/usr/local -make
commit 7d2f7dbf7d97cea348bb0499b684103fd8ad750f Author: Matias Elo matias.elo@nokia.com Date: Wed Oct 17 15:34:53 2018 +0300
linux-gen: timer: run inline timers during queue dequeue operations
Process inline timers (if enabled) during odp_queue_deq() and odp_queue_deq_multi() calls. This fixes inline timers not working if odp_schedule() or odp_schedule_multi() was never called.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c index 73a5536d..4e23fb53 100644 --- a/platform/linux-generic/odp_queue_basic.c +++ b/platform/linux-generic/odp_queue_basic.c @@ -16,6 +16,7 @@ #include <odp_pool_internal.h> #include <odp_init_internal.h> #include <odp_shm_internal.h> +#include <odp_timer_internal.h> #include <odp/api/shared_memory.h> #include <odp/api/schedule.h> #include <odp_schedule_if.h> @@ -28,6 +29,7 @@ #include <odp/api/traffic_mngr.h> #include <odp_libconfig_internal.h> #include <odp/api/plat/queue_inline_types.h> +#include <odp_global_data.h>
#define NUM_INTERNAL_QUEUES 64
@@ -976,6 +978,10 @@ static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num) { queue_entry_t *queue = qentry_from_handle(handle);
+ if (odp_global_rw->inline_timers && + odp_atomic_load_u64(&queue->s.num_timers)) + timer_run(); + if (num > QUEUE_MULTI_MAX) num = QUEUE_MULTI_MAX;
@@ -987,6 +993,10 @@ static odp_event_t queue_api_deq(odp_queue_t handle) { queue_entry_t *queue = qentry_from_handle(handle);
+ if (odp_global_rw->inline_timers && + odp_atomic_load_u64(&queue->s.num_timers)) + timer_run(); + return (odp_event_t)queue->s.dequeue(handle); }
diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c index 6ec7f14b..3acfc084 100644 --- a/platform/linux-generic/odp_queue_scalable.c +++ b/platform/linux-generic/odp_queue_scalable.c @@ -24,9 +24,11 @@ #include <odp_pool_internal.h> #include <odp_queue_scalable_internal.h> #include <odp_schedule_if.h> +#include <odp_timer_internal.h> #include <odp_ishm_internal.h> #include <odp_ishmpool_internal.h> #include <odp/api/plat/queue_inline_types.h> +#include <odp_global_data.h>
#include <string.h> #include <inttypes.h> @@ -845,6 +847,11 @@ static int queue_deq_multi(odp_queue_t handle, odp_event_t ev[], int num) num = QUEUE_MULTI_MAX;
queue = qentry_from_ext(handle); + + if (odp_global_rw->inline_timers && + odp_atomic_load_u64(&queue->s.num_timers)) + timer_run(); + return queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num); }
@@ -853,6 +860,11 @@ static odp_event_t queue_deq(odp_queue_t handle) queue_entry_t *queue;
queue = qentry_from_ext(handle); + + if (odp_global_rw->inline_timers && + odp_atomic_load_u64(&queue->s.num_timers)) + timer_run(); + return (odp_event_t)queue->s.dequeue(handle); }
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 70b43035..1e1eb019 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -55,6 +55,7 @@ #include <odp/api/time.h> #include <odp/api/plat/time_inlines.h> #include <odp/api/timer.h> +#include <odp_queue_if.h> #include <odp_timer_internal.h> #include <odp/api/plat/queue_inlines.h> #include <odp_global_data.h> @@ -454,6 +455,8 @@ static inline odp_timer_t timer_alloc(timer_pool_t *tp, tp->num_alloc, _ODP_MEMMODEL_RLS); hdl = tp_idx_to_handle(tp, idx); + /* Add timer to queue */ + queue_fn->timer_add(queue); } else { __odp_errno = ENFILE; /* Reusing file table overflow */ hdl = ODP_TIMER_INVALID; @@ -473,6 +476,9 @@ static inline odp_buffer_t timer_free(timer_pool_t *tp, uint32_t idx) * grab any timeout buffer */ odp_buffer_t old_buf = timer_set_unused(tp, idx);
+ /* Remove timer from queue */ + queue_fn->timer_rem(tim->queue); + /* Destroy timer */ timer_fini(tim, &tp->tick_buf[idx]);
commit d184c87a690969571734fcd0c135da4f0da8ab55 Author: Matias Elo matias.elo@nokia.com Date: Wed Oct 17 13:29:42 2018 +0300
linux-gen: timer: reduce inline timer overhead
Reduce inline timer overhead by not polling timers until at least one timer pool has been created.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_global_data.h b/platform/linux-generic/include/odp_global_data.h index f1be1502..a12d1297 100644 --- a/platform/linux-generic/include/odp_global_data.h +++ b/platform/linux-generic/include/odp_global_data.h @@ -40,6 +40,7 @@ typedef struct { /* Read-only global data. Members should not be modified after global init * to enable process more support. */ struct odp_global_data_ro_t { + odp_init_t init_param; /* directory for odp mmaped files */ char *shm_dir; /* overload default with env */ @@ -69,6 +70,7 @@ struct odp_global_data_rw_t { pthread_t inotify_thread; int inotify_pcapng_is_running; odp_bool_t dpdk_initialized; + odp_bool_t inline_timers; };
extern struct odp_global_data_ro_t odp_global_ro; diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h index 8dda9e6f..02ba92e0 100644 --- a/platform/linux-generic/include/odp_timer_internal.h +++ b/platform/linux-generic/include/odp_timer_internal.h @@ -19,6 +19,7 @@ #include <odp_buffer_internal.h> #include <odp_pool_internal.h> #include <odp/api/timer.h> +#include <odp_global_data.h>
/* Minimum number of scheduling rounds between checking timer pools. */ #define CONFIG_TIMER_RUN_RATELIMIT_ROUNDS 1 @@ -38,22 +39,12 @@ typedef struct { odp_timer_t timer; } odp_timeout_hdr_t;
-/* - * Whether to run timer pool processing 'inline' (on worker cores) or in - * background threads (thread-per-timerpool). - * - * If the application will use both scheduler and timer this flag is set - * to true, otherwise false. This application conveys this information via - * the 'not_used' bits in odp_init_t which are passed to odp_global_init(). - */ -extern odp_bool_t inline_timers; - unsigned _timer_run(void);
/* Static inline wrapper to minimize modification of schedulers. */ static inline unsigned timer_run(void) { - return inline_timers ? _timer_run() : 0; + return odp_global_rw->inline_timers ? _timer_run() : 0; }
#endif diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c index dca06641..0f7e0df2 100644 --- a/platform/linux-generic/odp_init.c +++ b/platform/linux-generic/odp_init.c @@ -268,6 +268,8 @@ int odp_init_global(odp_instance_t *instance, odp_global_ro.abort_fn = odp_override_abort;
if (params != NULL) { + odp_global_ro.init_param = *params; + if (params->log_fn != NULL) odp_global_ro.log_fn = params->log_fn; if (params->abort_fn != NULL) diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 0d62b152..70b43035 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -68,8 +68,6 @@ * for checking the freshness of received timeouts */ #define TMO_INACTIVE ((uint64_t)0x8000000000000000)
-odp_bool_t inline_timers = false; - /****************************************************************************** * Mutual exclusion in the absence of CAS16 *****************************************************************************/ @@ -345,8 +343,26 @@ static odp_timer_pool_t timer_pool_new(const char *name, odp_spinlock_init(&tp->lock); odp_ticketlock_lock(&timer_global->lock); timer_global->timer_pool[tp_idx] = tp; + + if (timer_global->num_timer_pools == 1) { + odp_bool_t inline_tim; + + /* + * Whether to run timer pool processing 'inline' (on worker + * cores) or in background threads (thread-per-timerpool). + * + * If the application will use scheduler this flag is set to + * true, otherwise false. This application conveys this + * information via the 'not_used' bits in odp_init_t which are + * passed to odp_global_init(). + */ + inline_tim = !odp_global_ro.init_param.not_used.feat.schedule; + + odp_global_rw->inline_timers = inline_tim; + } + odp_ticketlock_unlock(&timer_global->lock); - if (!inline_timers) { + if (!odp_global_rw->inline_timers) { if (tp->param.clk_src == ODP_CLOCK_CPU) itimer_init(tp); } @@ -380,7 +396,7 @@ static void odp_timer_pool_del(timer_pool_t *tp)
odp_spinlock_lock(&tp->lock);
- if (!inline_timers) { + if (!odp_global_rw->inline_timers) { /* Stop POSIX itimer signals */ if (tp->param.clk_src == ODP_CLOCK_CPU) itimer_fini(tp); @@ -402,6 +418,11 @@ static void odp_timer_pool_del(timer_pool_t *tp) timer_global->timer_pool[tp->tp_idx] = NULL; timer_global->timer_pool_used[tp->tp_idx] = 0; timer_global->num_timer_pools--; + + /* Disable inline timer polling */ + if (timer_global->num_timer_pools == 0) + odp_global_rw->inline_timers = false; + odp_ticketlock_unlock(&timer_global->lock);
rc = odp_shm_free(shm); @@ -1110,6 +1131,11 @@ int odp_timer_capability(odp_timer_clk_src_t clk_src, odp_timer_pool_t odp_timer_pool_create(const char *name, const odp_timer_pool_param_t *param) { + if (odp_global_ro.init_param.not_used.feat.timer) { + ODP_ERR("Trying to use disabled ODP feature.\n"); + return ODP_TIMER_POOL_INVALID; + } + if (param->res_ns < timer_global->highest_res_ns) { __odp_errno = EINVAL; return ODP_TIMER_POOL_INVALID; @@ -1322,6 +1348,7 @@ void odp_timeout_free(odp_timeout_t tmo) int odp_timer_init_global(const odp_init_t *params) { odp_shm_t shm; + odp_bool_t inline_timers = false;
shm = odp_shm_reserve("_odp_timer", sizeof(timer_global_t), ODP_CACHE_LINE_SIZE, 0);
commit ae53f9f80ee5fb06c2a3c8512a14bc1e773a58e4 Author: Matias Elo matias.elo@nokia.com Date: Tue Oct 2 14:13:35 2018 +0300
linux-gen: queue: add internal interface for adding/removing inline timers
Add internal queue interface functions queue_timer_add_fn_t and queue_timer_rem_fn_t for adding/removing inline timers.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_queue_basic_internal.h b/platform/linux-generic/include/odp_queue_basic_internal.h index 41ca424c..c8ed978c 100644 --- a/platform/linux-generic/include/odp_queue_basic_internal.h +++ b/platform/linux-generic/include/odp_queue_basic_internal.h @@ -54,6 +54,7 @@ struct queue_entry_s { ring_spsc_t ring_spsc; };
+ odp_atomic_u64_t num_timers; int status;
queue_deq_multi_fn_t orig_dequeue_multi; diff --git a/platform/linux-generic/include/odp_queue_if.h b/platform/linux-generic/include/odp_queue_if.h index 6ec7e24c..5fe28dac 100644 --- a/platform/linux-generic/include/odp_queue_if.h +++ b/platform/linux-generic/include/odp_queue_if.h @@ -39,6 +39,8 @@ typedef void (*queue_set_enq_deq_fn_t)(odp_queue_t queue, queue_enq_multi_fn_t enq_multi, queue_deq_fn_t deq, queue_deq_multi_fn_t deq_multi); +typedef void (*queue_timer_add_fn_t)(odp_queue_t queue); +typedef void (*queue_timer_rem_fn_t)(odp_queue_t queue);
/* Queue functions towards other internal components */ typedef struct { @@ -51,6 +53,8 @@ typedef struct { queue_get_pktin_fn_t get_pktin; queue_set_pktin_fn_t set_pktin; queue_set_enq_deq_fn_t set_enq_deq_fn; + queue_timer_add_fn_t timer_add; + queue_timer_rem_fn_t timer_rem;
/* Original queue dequeue multi function (before override). May be used * by an overriding dequeue function. */ diff --git a/platform/linux-generic/include/odp_queue_scalable_internal.h b/platform/linux-generic/include/odp_queue_scalable_internal.h index 05932a36..3c582076 100644 --- a/platform/linux-generic/include/odp_queue_scalable_internal.h +++ b/platform/linux-generic/include/odp_queue_scalable_internal.h @@ -35,6 +35,7 @@ struct queue_entry_s { sched_elem_t sched_elem;
odp_ticketlock_t ODP_ALIGNED_CACHE lock; + odp_atomic_u64_t num_timers; int status;
queue_enq_fn_t ODP_ALIGNED_CACHE enqueue; diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c index ccd97da1..73a5536d 100644 --- a/platform/linux-generic/odp_queue_basic.c +++ b/platform/linux-generic/odp_queue_basic.c @@ -806,6 +806,7 @@ static int queue_init(queue_entry_t *queue, const char *name, queue->s.param.deq_mode = ODP_QUEUE_OP_DISABLED;
queue->s.type = queue_type; + odp_atomic_init_u64(&queue->s.num_timers, 0);
queue->s.pktin = PKTIN_INVALID; queue->s.pktout = PKTOUT_INVALID; @@ -949,6 +950,20 @@ static int queue_api_enq_multi(odp_queue_t handle, (odp_buffer_hdr_t **)(uintptr_t)ev, num); }
+static void queue_timer_add(odp_queue_t handle) +{ + queue_entry_t *queue = qentry_from_handle(handle); + + odp_atomic_inc_u64(&queue->s.num_timers); +} + +static void queue_timer_rem(odp_queue_t handle) +{ + queue_entry_t *queue = qentry_from_handle(handle); + + odp_atomic_dec_u64(&queue->s.num_timers); +} + static int queue_api_enq(odp_queue_t handle, odp_event_t ev) { queue_entry_t *queue = qentry_from_handle(handle); @@ -1007,5 +1022,7 @@ queue_fn_t queue_basic_fn = { .get_pktin = queue_get_pktin, .set_pktin = queue_set_pktin, .set_enq_deq_fn = queue_set_enq_deq_func, - .orig_deq_multi = queue_orig_multi + .orig_deq_multi = queue_orig_multi, + .timer_add = queue_timer_add, + .timer_rem = queue_timer_rem }; diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c index b7ff2195..6ec7f14b 100644 --- a/platform/linux-generic/odp_queue_scalable.c +++ b/platform/linux-generic/odp_queue_scalable.c @@ -124,6 +124,8 @@ static int queue_init(queue_entry_t *queue, const char *name, ring[ring_idx] = NULL;
queue->s.type = queue->s.param.type; + odp_atomic_init_u64(&queue->s.num_timers, 0); + queue->s.enqueue = _queue_enq; queue->s.dequeue = _queue_deq; queue->s.enqueue_multi = _queue_enq_multi; @@ -958,6 +960,20 @@ static int queue_orig_multi(odp_queue_t handle, buf_hdr, num); }
+static void queue_timer_add(odp_queue_t handle) +{ + queue_entry_t *queue = qentry_from_ext(handle); + + odp_atomic_inc_u64(&queue->s.num_timers); +} + +static void queue_timer_rem(odp_queue_t handle) +{ + queue_entry_t *queue = qentry_from_ext(handle); + + odp_atomic_dec_u64(&queue->s.num_timers); +} + /* API functions */ _odp_queue_api_fn_t queue_scalable_api = { .queue_create = queue_create, @@ -990,5 +1006,7 @@ queue_fn_t queue_scalable_fn = { .get_pktin = queue_get_pktin, .set_pktin = queue_set_pktin, .set_enq_deq_fn = queue_set_enq_deq_func, - .orig_deq_multi = queue_orig_multi + .orig_deq_multi = queue_orig_multi, + .timer_add = queue_timer_add, + .timer_rem = queue_timer_rem };
commit bfb4e078a70e5510c1e3eb9deab5baeadb1c7fc3 Author: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Date: Mon Oct 22 17:01:14 2018 +0300
m4: update ax_prog_doxygen.m4 to latest version
Signed-off-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/m4/ax_prog_doxygen.m4 b/m4/ax_prog_doxygen.m4 index 426ba0df..ed1dc83b 100644 --- a/m4/ax_prog_doxygen.m4 +++ b/m4/ax_prog_doxygen.m4 @@ -1,5 +1,5 @@ # =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_prog_doxygen.html +# https://www.gnu.org/software/autoconf-archive/ax_prog_doxygen.html # =========================================================================== # # SYNOPSIS @@ -21,7 +21,7 @@ # The DX_*_FEATURE macros control the default setting for the given # Doxygen feature. Supported features are 'DOXYGEN' itself, 'DOT' for # generating graphics, 'HTML' for plain HTML, 'CHM' for compressed HTML -# help (for MS users), 'CHI' for generating a seperate .chi file by the +# help (for MS users), 'CHI' for generating a separate .chi file by the # .chm file, and 'MAN', 'RTF', 'XML', 'PDF' and 'PS' for the appropriate # output formats. The environment variable DOXYGEN_PAPER_SIZE may be # specified to override the default 'a4wide' paper size. @@ -97,7 +97,7 @@ # and this notice are preserved. This file is offered as-is, without any # warranty.
-#serial 18 +#serial 24
## ----------## ## Defaults. ## @@ -164,7 +164,7 @@ AC_DEFUN([DX_TEST_FEATURE], [test "$DX_FLAG_$1" = 1]) AC_DEFUN([DX_CHECK_DEPEND], [ test "$DX_FLAG_$1" = "$2" \ || AC_MSG_ERROR([doxygen-DX_CURRENT_FEATURE ifelse([$2], 1, - requires, contradicts) doxygen-DX_CURRENT_FEATURE]) + requires, contradicts) doxygen-$1]) ])
# DX_CLEAR_DEPEND(FEATURE, REQUIRED_FEATURE, REQUIRED_STATE) @@ -265,14 +265,10 @@ m4_define([DX_loop], m4_dquote(m4_if(m4_eval(3 < m4_count($@)), 1, [m4_for([DX_i], 4, m4_count($@), 2, [, m4_eval(DX_i[/2])])], [])))dnl
-# Environment variables used inside Doxyfile: +# Environment variables used inside doxygen.cfg: DX_ENV_APPEND(SRCDIR, $srcdir) -DX_ENV_APPEND(BUILDDIR, $builddir) -DX_ENV_APPEND(VERSION, $VERSION) -DX_ENV_APPEND(WITH_PLATFORM, $with_platform) DX_ENV_APPEND(PROJECT, $DX_PROJECT) DX_ENV_APPEND(VERSION, $PACKAGE_VERSION) -DX_ENV_APPEND(WITH_ARCH, $ARCH_DIR)
# Doxygen itself: DX_ARG_ABLE(doc, [generate any doxygen documentation], @@ -325,8 +321,8 @@ DX_ARG_ABLE(chm, [generate doxygen compressed HTML help documentation], DX_ENV_APPEND(GENERATE_HTMLHELP, YES)], [DX_ENV_APPEND(GENERATE_HTMLHELP, NO)])
-# Seperate CHI file generation. -DX_ARG_ABLE(chi, [generate doxygen seperate compressed HTML help index file], +# Separate CHI file generation. +DX_ARG_ABLE(chi, [generate doxygen separate compressed HTML help index file], [DX_CHECK_DEPEND(chm, 1)], [DX_CLEAR_DEPEND(chm, 1)], [], @@ -382,94 +378,82 @@ a4wide|a4|letter|legal|executive) esac
# Rules: -if test $DX_FLAG_html -eq 1; then - DX_SNIPPET_html="## ------------------------------- ## +AS_IF([[test $DX_FLAG_html -eq 1]], +[[DX_SNIPPET_html="## ------------------------------- ## ## Rules specific for HTML output. ## ## ------------------------------- ##
-DX_CLEAN_HTML = $(DX_DOCDIR)/html[]dnl +DX_CLEAN_HTML = $(DX_DOCDIR)/html]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/html]]) + $(DX_DOCDIR]DX_i[)/html]])[
-" -else - DX_SNIPPET_html="" -fi -if test $DX_FLAG_chi -eq 1; then - DX_SNIPPET_chi=" -DX_CLEAN_CHI = $(DX_DOCDIR)/$(PACKAGE).chi[]dnl +"]], +[[DX_SNIPPET_html=""]]) +AS_IF([[test $DX_FLAG_chi -eq 1]], +[[DX_SNIPPET_chi=" +DX_CLEAN_CHI = $(DX_DOCDIR)/$(PACKAGE).chi]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/$(PACKAGE).chi]])" -else - DX_SNIPPET_chi="" -fi -if test $DX_FLAG_chm -eq 1; then - DX_SNIPPET_chm="## ------------------------------ ## + $(DX_DOCDIR]DX_i[)/$(PACKAGE).chi]])["]], +[[DX_SNIPPET_chi=""]]) +AS_IF([[test $DX_FLAG_chm -eq 1]], +[[DX_SNIPPET_chm="## ------------------------------ ## ## Rules specific for CHM output. ## ## ------------------------------ ##
-DX_CLEAN_CHM = $(DX_DOCDIR)/chm[]dnl +DX_CLEAN_CHM = $(DX_DOCDIR)/chm]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/chm]])\ + $(DX_DOCDIR]DX_i[)/chm]])[\ ${DX_SNIPPET_chi}
-" -else - DX_SNIPPET_chm="" -fi -if test $DX_FLAG_man -eq 1; then - DX_SNIPPET_man="## ------------------------------ ## +"]], +[[DX_SNIPPET_chm=""]]) +AS_IF([[test $DX_FLAG_man -eq 1]], +[[DX_SNIPPET_man="## ------------------------------ ## ## Rules specific for MAN output. ## ## ------------------------------ ##
-DX_CLEAN_MAN = $(DX_DOCDIR)/man[]dnl +DX_CLEAN_MAN = $(DX_DOCDIR)/man]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/man]]) + $(DX_DOCDIR]DX_i[)/man]])[
-" -else - DX_SNIPPET_man="" -fi -if test $DX_FLAG_rtf -eq 1; then - DX_SNIPPET_rtf="## ------------------------------ ## +"]], +[[DX_SNIPPET_man=""]]) +AS_IF([[test $DX_FLAG_rtf -eq 1]], +[[DX_SNIPPET_rtf="## ------------------------------ ## ## Rules specific for RTF output. ## ## ------------------------------ ##
-DX_CLEAN_RTF = $(DX_DOCDIR)/rtf[]dnl +DX_CLEAN_RTF = $(DX_DOCDIR)/rtf]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/rtf]]) + $(DX_DOCDIR]DX_i[)/rtf]])[
-" -else - DX_SNIPPET_rtf="" -fi -if test $DX_FLAG_xml -eq 1; then - DX_SNIPPET_xml="## ------------------------------ ## +"]], +[[DX_SNIPPET_rtf=""]]) +AS_IF([[test $DX_FLAG_xml -eq 1]], +[[DX_SNIPPET_xml="## ------------------------------ ## ## Rules specific for XML output. ## ## ------------------------------ ##
-DX_CLEAN_XML = $(DX_DOCDIR)/xml[]dnl +DX_CLEAN_XML = $(DX_DOCDIR)/xml]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/xml]]) + $(DX_DOCDIR]DX_i[)/xml]])[
-" -else - DX_SNIPPET_xml="" -fi -if test $DX_FLAG_ps -eq 1; then - DX_SNIPPET_ps="## ----------------------------- ## +"]], +[[DX_SNIPPET_xml=""]]) +AS_IF([[test $DX_FLAG_ps -eq 1]], +[[DX_SNIPPET_ps="## ----------------------------- ## ## Rules specific for PS output. ## ## ----------------------------- ##
-DX_CLEAN_PS = $(DX_DOCDIR)/$(PACKAGE).ps[]dnl +DX_CLEAN_PS = $(DX_DOCDIR)/$(PACKAGE).ps]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/$(PACKAGE).ps]]) + $(DX_DOCDIR]DX_i[)/$(PACKAGE).ps]])[
DX_PS_GOAL = doxygen-ps
doxygen-ps: $(DX_CLEAN_PS)
-m4_foreach([DX_i], [DX_loop], +]m4_foreach([DX_i], [DX_loop], [[$(DX_DOCDIR]DX_i[)/$(PACKAGE).ps: $(DX_DOCDIR]DX_i[)/$(PACKAGE).tag $(DX_V_LATEX)cd $(DX_DOCDIR]DX_i[)/latex; \ rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \ @@ -485,25 +469,22 @@ m4_foreach([DX_i], [DX_loop], done; \ $(DX_DVIPS) -o ../$(PACKAGE).ps refman.dvi
-]])dnl -" -else - DX_SNIPPET_ps="" -fi -if test $DX_FLAG_pdf -eq 1; then - DX_SNIPPET_pdf="## ------------------------------ ## +]])["]], +[[DX_SNIPPET_ps=""]]) +AS_IF([[test $DX_FLAG_pdf -eq 1]], +[[DX_SNIPPET_pdf="## ------------------------------ ## ## Rules specific for PDF output. ## ## ------------------------------ ##
-DX_CLEAN_PDF = $(DX_DOCDIR)/$(PACKAGE).pdf[]dnl +DX_CLEAN_PDF = $(DX_DOCDIR)/$(PACKAGE).pdf]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/$(PACKAGE).pdf]]) + $(DX_DOCDIR]DX_i[)/$(PACKAGE).pdf]])[
DX_PDF_GOAL = doxygen-pdf
doxygen-pdf: $(DX_CLEAN_PDF)
-m4_foreach([DX_i], [DX_loop], +]m4_foreach([DX_i], [DX_loop], [[$(DX_DOCDIR]DX_i[)/$(PACKAGE).pdf: $(DX_DOCDIR]DX_i[)/$(PACKAGE).tag $(DX_V_LATEX)cd $(DX_DOCDIR]DX_i[)/latex; \ rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \ @@ -519,31 +500,26 @@ m4_foreach([DX_i], [DX_loop], done; \ mv refman.pdf ../$(PACKAGE).pdf
-]])dnl -" -else - DX_SNIPPET_pdf="" -fi -if test $DX_FLAG_ps -eq 1 -o $DX_FLAG_pdf -eq 1; then - DX_SNIPPET_latex="## ------------------------------------------------- ## +]])["]], +[[DX_SNIPPET_pdf=""]]) +AS_IF([[test $DX_FLAG_ps -eq 1 -o $DX_FLAG_pdf -eq 1]], +[[DX_SNIPPET_latex="## ------------------------------------------------- ## ## Rules specific for LaTeX (shared for PS and PDF). ## ## ------------------------------------------------- ##
DX_V_LATEX = $(_DX_v_LATEX_$(V)) _DX_v_LATEX_ = $(_DX_v_LATEX_$(AM_DEFAULT_VERBOSITY)) -_DX_v_LATEX_0 = @echo " LATEX " $[]][[]@; +_DX_v_LATEX_0 = @echo " LATEX " $][@;
-DX_CLEAN_LATEX = $(DX_DOCDIR)/latex[]dnl +DX_CLEAN_LATEX = $(DX_DOCDIR)/latex]dnl m4_foreach([DX_i], [m4_shift(DX_loop)], [[\ - $(DX_DOCDIR]DX_i[)/latex]]) + $(DX_DOCDIR]DX_i[)/latex]])[
-" -else - DX_SNIPPET_latex="" -fi +"]], +[[DX_SNIPPET_latex=""]])
-if test $DX_FLAG_doc -eq 1; then - DX_SNIPPET_doc="## --------------------------------- ## +AS_IF([[test $DX_FLAG_doc -eq 1]], +[[DX_SNIPPET_doc="## --------------------------------- ## ## Format-independent Doxygen rules. ## ## --------------------------------- ##
@@ -563,23 +539,24 @@ _DX_v_DXGEN_0 = @echo " DXGEN " $<;
.INTERMEDIATE: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
-doxygen-run:[]m4_foreach([DX_i], [DX_loop], - [[ $(DX_DOCDIR]DX_i[)/$(PACKAGE).tag]]) +doxygen-run:]m4_foreach([DX_i], [DX_loop], + [[ $(DX_DOCDIR]DX_i[)/$(PACKAGE).tag]])[
doxygen-doc: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
-m4_foreach([DX_i], [DX_loop], +]m4_foreach([DX_i], [DX_loop], [[$(DX_DOCDIR]DX_i[)/$(PACKAGE).tag: $(DX_CONFIG]DX_i[) $(pkginclude_HEADERS) $(A""M_V_at)rm -rf $(DX_DOCDIR]DX_i[) $(DX_V_DXGEN)$(DX_ENV) DOCDIR=$(DX_DOCDIR]DX_i[) $(DX_DOXYGEN) $(DX_CONFIG]DX_i[) $(A""M_V_at)echo Timestamp >$][@
]])dnl -DX_CLEANFILES = \ +[DX_CLEANFILES = \] m4_foreach([DX_i], [DX_loop], -[[ $(DX_DOCDIR]DX_i[)/$(PACKAGE).tag \ +[[ $(DX_DOCDIR]DX_i[)/doxygen_sqlite3.db \ + $(DX_DOCDIR]DX_i[)/$(PACKAGE).tag \ ]])dnl - -r \ +[ -r \ $(DX_CLEAN_HTML) \ $(DX_CLEAN_CHM) \ $(DX_CLEAN_CHI) \ @@ -588,19 +565,17 @@ m4_foreach([DX_i], [DX_loop], $(DX_CLEAN_XML) \ $(DX_CLEAN_PS) \ $(DX_CLEAN_PDF) \ - $(DX_CLEAN_LATEX)" -else - DX_SNIPPET_doc="" -fi + $(DX_CLEAN_LATEX)"]], +[[DX_SNIPPET_doc=""]]) AC_SUBST([DX_RULES], ["${DX_SNIPPET_doc}"])dnl AM_SUBST_NOTMAKE([DX_RULES])
#For debugging: -echo DX_FLAG_doc=$DX_FLAG_doc +#echo DX_FLAG_doc=$DX_FLAG_doc #echo DX_FLAG_dot=$DX_FLAG_dot #echo DX_FLAG_man=$DX_FLAG_man -echo DX_FLAG_html=$DX_FLAG_html +#echo DX_FLAG_html=$DX_FLAG_html #echo DX_FLAG_chm=$DX_FLAG_chm #echo DX_FLAG_chi=$DX_FLAG_chi #echo DX_FLAG_rtf=$DX_FLAG_rtf
commit 29cf3cb9e63df80849f1b1c59b2174086e7dfe47 Author: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Date: Mon Oct 22 17:00:39 2018 +0300
doc: pass generated include files to Doxygen
Pass generated files to doxygen to let it know about ODP_DEPRECATED()/ODP_VERSION_API_xxx
Signed-off-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/doc/application-api-guide/Doxyfile b/doc/application-api-guide/Doxyfile index 00c18df2..b101153d 100644 --- a/doc/application-api-guide/Doxyfile +++ b/doc/application-api-guide/Doxyfile @@ -4,6 +4,7 @@ PROJECT_NAME = "API Reference Manual" PROJECT_NUMBER = $(VERSION) PROJECT_LOGO = $(SRCDIR)/doc/images/ODP-Logo-HQ.svg INPUT = $(SRCDIR)/doc/application-api-guide \ + include \ $(SRCDIR)/include EXAMPLE_PATH = $(SRCDIR)/example $(SRCDIR) WARNINGS = NO diff --git a/doc/platform-api-guide/Doxyfile b/doc/platform-api-guide/Doxyfile index d716b4a3..02e325cc 100644 --- a/doc/platform-api-guide/Doxyfile +++ b/doc/platform-api-guide/Doxyfile @@ -5,8 +5,8 @@ PROJECT_NUMBER = $(VERSION) PROJECT_LOGO = $(SRCDIR)/doc/images/ODP-Logo-HQ.svg INPUT = $(SRCDIR)/doc/application-api-guide \ $(SRCDIR)/doc/platform-api-guide \ + include/odp/api \ $(SRCDIR)/include/odp/api \ $(SRCDIR)/platform/$(WITH_PLATFORM)/doc \ - $(SRCDIR)/platform/$(WITH_PLATFORM)/include/odp/api \ - $(SRCDIR)/platform/$(WITH_PLATFORM)/arch/$(WITH_ARCH) + $(SRCDIR)/platform/$(WITH_PLATFORM)/include/odp/api EXAMPLE_PATH = $(SRCDIR)/example $(SRCDIR)/platform $(SRCDIR)
commit 0817968af5f7ceebe96c4ca4661da2734304a329 Author: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Date: Mon Oct 22 17:00:29 2018 +0300
configure.ac: set DX environment WITH_PLATFORM from configure.ac
Rather than manually modifying ax_prog_doxygen.m4 set variable from configure.ac.
Signed-off-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/configure.ac b/configure.ac index 3b71ef51..68da20df 100644 --- a/configure.ac +++ b/configure.ac @@ -283,6 +283,8 @@ AM_CONDITIONAL([ARCH_IS_X86_64], [test "x${ARCH_ABI}" = "xx86_64-linux"]) DX_HTML_FEATURE(ON) DX_PDF_FEATURE(OFF) DX_PS_FEATURE(OFF) +DX_ENV_APPEND(WITH_PLATFORM, $with_platform) + DX_INIT_DOXYGEN($PACKAGE_NAME, ${srcdir}/doc/application-api-guide/Doxyfile, ${builddir}/doc/application-api-guide/output,
commit ec1be8784c85b263f52e763b7890f097b2366212 Author: Petri Savolainen petri.savolainen@linaro.org Date: Mon Oct 15 15:30:54 2018 +0300
travis: run all tests with gcc first
Change build matrix format from N x 2 to 2 x N. Run first all tests with GCC, then with clang.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index e0794287..4e29102f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,6 +36,10 @@ cache: - netmap - $HOME/doxygen-install
+compiler: + - gcc + - clang + env: global: # @@ -74,10 +78,6 @@ matrix: - compiler: gcc env: BUILD_ONLY=1 ARCH="i386"
-compiler: - - gcc - - clang - install: - if [ ${NETMAP} -eq 1 ] ; then echo "Installing NETMAP";
commit 45bca0d051dd1f946e2d1ca188c481c1d179ad07 Author: Petri Savolainen petri.savolainen@linaro.org Date: Tue Oct 16 15:57:01 2018 +0300
travis: explicit netmap test cases
Build, install and test with netmap only on a couple of test cases. This saves travis test time and validates that ODP passes tests also without netmap (on x86).
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index 18bf4790..e0794287 100644 --- a/.travis.yml +++ b/.travis.yml @@ -45,9 +45,11 @@ env: - CODECOV_TOKEN=a733c34c-5f5c-4ff1-af4b-e9f5edb1ab5e - UBUNTU_VERS="16.04" - BUILD_ONLY=0 + - NETMAP=0 matrix: - CONF="" - CONF="--disable-abi-compat" + - NETMAP=1 CONF="" - BUILD_ONLY=1 ARCH="arm64" - BUILD_ONLY=1 ARCH="armhf" - BUILD_ONLY=1 ARCH="powerpc" @@ -58,7 +60,7 @@ env: - BUILD_ONLY=1 ARCH="i386" CONF="--disable-abi-compat" - CONF="--enable-deprecated" - CONF="--enable-dpdk-zero-copy --disable-static-applications" - - CONF="--disable-static-applications" + - NETMAP=1 CONF="--disable-static-applications" - CONF="--disable-host-optimization" - CONF="--disable-host-optimization --disable-abi-compat" - BUILD_ONLY=1 ARCH="x86_64" CONF="--enable-pcapng-support" @@ -77,13 +79,13 @@ compiler: - clang
install: - - sudo apt-get install linux-headers-`uname -r` - - if [ -z "${ARCH}" -o "${ARCH}" == "x86_64" ] ; then - echo "compilling netmap"; + - if [ ${NETMAP} -eq 1 ] ; then + echo "Installing NETMAP"; + sudo apt-get install linux-headers-`uname -r` ; CDIR=`pwd` ; git -c advice.detachedHead=false clone -q --depth=1 --single-branch --branch=v11.2 https://github.com/luigirizzo/netmap.git; pushd netmap/LINUX; - ./configure; + ./configure --drivers= ; make -j $(nproc); popd; sudo insmod ./netmap/LINUX/netmap.ko;
commit bb2e2a4b2a538cb437f5b3331c95a7b7dc573cb3 Author: Petri Savolainen petri.savolainen@linaro.org Date: Tue Oct 16 15:28:56 2018 +0300
travis: start coverage test early
Coverage test takes a long time. Start it early, so that other jobs run in parallel with it.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index ffafe650..18bf4790 100644 --- a/.travis.yml +++ b/.travis.yml @@ -106,6 +106,16 @@ script: fi jobs: include: + - stage: test + env: TEST=coverage + compiler: gcc + script: + - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi + - docker run --privileged -i -t + -v `pwd`:/odp --shm-size 8g + -e CODECOV_TOKEN="${CODECOV_TOKEN}" + -e CC="${CC}" + ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/coverage.sh - stage: test env: TEST=scheduler_sp compiler: gcc @@ -142,17 +152,6 @@ jobs: -e ODP_CONFIG_FILE=/odp/platform/linux-generic/test/process-mode.conf -e ODPH_PROC_MODE=1 ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check.sh - - stage: test - env: TEST=coverage - compiler: gcc - script: - - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi - - docker run --privileged -i -t - -v `pwd`:/odp --shm-size 8g - -e CODECOV_TOKEN="${CODECOV_TOKEN}" - -e CC="${CC}" - -e CONF="${CONF}" - ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/coverage.sh - stage: test env: TEST=distcheck compiler: gcc
commit 7c8a226008d9f6fc9cd04f87539ab2715fef522a Author: Petri Savolainen petri.savolainen@linaro.org Date: Tue Oct 16 12:48:30 2018 +0300
travis: exclude duplicate tests
arm64/i386 with GCC build tests are excluded from the matrix as those are executed in the first phase.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index ea26b053..ffafe650 100644 --- a/.travis.yml +++ b/.travis.yml @@ -65,6 +65,13 @@ env: - CONF="--without-openssl" - CONF="" UBUNTU_VERS="18.04"
+matrix: + exclude: + - compiler: gcc + env: BUILD_ONLY=1 ARCH="arm64" + - compiler: gcc + env: BUILD_ONLY=1 ARCH="i386" + compiler: - gcc - clang
commit 594a4a0171ab82512b05a06eb34ff7acb73730ab Author: Petri Savolainen petri.savolainen@linaro.org Date: Tue Oct 16 10:24:53 2018 +0300
travis: change pcapng test to build only
Pcapng does not have specific validation tests, so running 'make check' on it is not important. Change it to a build only test to save travis execution time.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index 8c369c53..ea26b053 100644 --- a/.travis.yml +++ b/.travis.yml @@ -61,7 +61,7 @@ env: - CONF="--disable-static-applications" - CONF="--disable-host-optimization" - CONF="--disable-host-optimization --disable-abi-compat" - - CONF="--enable-pcapng-support" + - BUILD_ONLY=1 ARCH="x86_64" CONF="--enable-pcapng-support" - CONF="--without-openssl" - CONF="" UBUNTU_VERS="18.04"
@@ -85,7 +85,7 @@ install: script: - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi - if [ ${BUILD_ONLY} -eq 1 ] ; then - docker run -i -t -v `pwd`:/odp + docker run -i -t -v `pwd`:/odp --shm-size 8g -e CC="${CC}" -e CONF="${CONF}" ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_${UBUNTU_VERS} /odp/scripts/ci/build_${ARCH}.sh ;
commit 0b5dfae55a5a16e8c6b820f2458e48210cf9d762 Author: Petri Savolainen petri.savolainen@linaro.org Date: Tue Oct 16 10:16:16 2018 +0300
travis: explicit build only tests
Use the new BUILD_ONLY variable to select test cases that are built only, and not checked with 'make check'. This enables marking some x86_64 test cases as "build only".
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index 7b912af2..8c369c53 100644 --- a/.travis.yml +++ b/.travis.yml @@ -44,17 +44,18 @@ env: # you need generated new one at https://codecov.io specific for your repo. - CODECOV_TOKEN=a733c34c-5f5c-4ff1-af4b-e9f5edb1ab5e - UBUNTU_VERS="16.04" + - BUILD_ONLY=0 matrix: - CONF="" - CONF="--disable-abi-compat" - - CROSS_ARCH="arm64" - - CROSS_ARCH="armhf" - - CROSS_ARCH="powerpc" - - CROSS_ARCH="i386" - - CROSS_ARCH="arm64" CONF="--disable-abi-compat" - - CROSS_ARCH="armhf" CONF="--disable-abi-compat" - - CROSS_ARCH="powerpc" CONF="--disable-abi-compat" - - CROSS_ARCH="i386" CONF="--disable-abi-compat" + - BUILD_ONLY=1 ARCH="arm64" + - BUILD_ONLY=1 ARCH="armhf" + - BUILD_ONLY=1 ARCH="powerpc" + - BUILD_ONLY=1 ARCH="i386" + - BUILD_ONLY=1 ARCH="arm64" CONF="--disable-abi-compat" + - BUILD_ONLY=1 ARCH="armhf" CONF="--disable-abi-compat" + - BUILD_ONLY=1 ARCH="powerpc" CONF="--disable-abi-compat" + - BUILD_ONLY=1 ARCH="i386" CONF="--disable-abi-compat" - CONF="--enable-deprecated" - CONF="--enable-dpdk-zero-copy --disable-static-applications" - CONF="--disable-static-applications" @@ -70,7 +71,7 @@ compiler:
install: - sudo apt-get install linux-headers-`uname -r` - - if [ -z "${CROSS_ARCH}" ] ; then + - if [ -z "${ARCH}" -o "${ARCH}" == "x86_64" ] ; then echo "compilling netmap"; CDIR=`pwd` ; git -c advice.detachedHead=false clone -q --depth=1 --single-branch --branch=v11.2 https://github.com/luigirizzo/netmap.git; @@ -83,11 +84,11 @@ install: fi script: - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi - - if [ -n "${CROSS_ARCH}" ] ; then + - if [ ${BUILD_ONLY} -eq 1 ] ; then docker run -i -t -v `pwd`:/odp -e CC="${CC}" -e CONF="${CONF}" - ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_${UBUNTU_VERS} /odp/scripts/ci/build_${CROSS_ARCH}.sh ; + ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_${UBUNTU_VERS} /odp/scripts/ci/build_${ARCH}.sh ; else echo "Running test" ; docker run --privileged -i -t
commit e236795c0941fd7fc01b162a899b5dc42ba1d05e Author: Petri Savolainen petri.savolainen@linaro.org Date: Tue Oct 16 08:55:38 2018 +0300
travis: clean process mode test script
Removed extra environment variable. Use default configure. Explicitly use ubuntu 16.04 as other tests under jobs.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index d39cf93d..7b912af2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -129,12 +129,11 @@ jobs: - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi - docker run --privileged -i -t -v `pwd`:/odp --shm-size 8g - -e CODECOV_TOKEN="${CODECOV_TOKEN}" -e CC="${CC}" - -e CONF="${CONF}" + -e CONF="" -e ODP_CONFIG_FILE=/odp/platform/linux-generic/test/process-mode.conf -e ODPH_PROC_MODE=1 - ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_${UBUNTU_VERS} /odp/scripts/ci/check.sh ; + ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check.sh - stage: test env: TEST=coverage compiler: gcc
commit 9f39ccb68e57fa56a6d5e0b968292a2af0abc812 Author: Petri Savolainen petri.savolainen@linaro.org Date: Tue Oct 16 08:47:50 2018 +0300
travis: split distcheck test
Distcheck test takes over 20 minutes. Split it into two test cases, so that those can run in parallel.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index f6e0e49f..d39cf93d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -154,7 +154,17 @@ jobs: - docker run --privileged -i -t -v `pwd`:/odp --shm-size 8g -e CC="${CC}" - -e CONF="${CONF}" + -e CONF="--enable-user-guides" + ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/distcheck.sh + - stage: test + env: TEST=distcheck_nonabi + compiler: gcc + script: + - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi + - docker run --privileged -i -t + -v `pwd`:/odp --shm-size 8g + -e CC="${CC}" + -e CONF="--enable-user-guides --disable-abi-compat" ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/distcheck.sh - stage: "build only" env: TEST=doxygen diff --git a/scripts/ci/distcheck.sh b/scripts/ci/distcheck.sh index c78349e2..9d45536f 100755 --- a/scripts/ci/distcheck.sh +++ b/scripts/ci/distcheck.sh @@ -7,15 +7,13 @@ fi
cd "$(dirname "$0")"/../.. ./bootstrap -./configure \ - --enable-user-guides +./configure ${CONF}
# Ignore possible failures there because these tests depends on measurements # and systems might differ in performance. export CI="true"
-make distcheck - -make clean +# Additional configure flags for distcheck +export DISTCHECK_CONFIGURE_FLAGS="${CONF}"
-make distcheck DISTCHECK__CONFIGURE_FLAGS=--disable-abi-compat +make distcheck
commit 415e197affb7eb3db5d806911d3e4b8e6ff05779 Author: Petri Savolainen petri.savolainen@linaro.org Date: Mon Oct 15 17:12:10 2018 +0300
travis: add test cases for optional schedulers
Added separate tests for schedulers, so that those can be executed in parallel. Schedulers are tested with default compiler flags. Previously, optional schedulers were tested only without compiler optimizations (-O0) in coverage test.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index 16e3a310..f6e0e49f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -98,6 +98,28 @@ script: fi jobs: include: + - stage: test + env: TEST=scheduler_sp + compiler: gcc + script: + - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi + - docker run --privileged -i -t + -v `pwd`:/odp --shm-size 8g + -e CC="${CC}" + -e CONF="" + -e ODP_SCHEDULER=sp + ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check.sh + - stage: test + env: TEST=scheduler_scalable + compiler: gcc + script: + - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi + - docker run --privileged -i -t + -v `pwd`:/odp --shm-size 8g + -e CC="${CC}" + -e CONF="" + -e ODP_SCHEDULER=scalable + ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check.sh - stage: test env: TEST=process_mode install:
commit 3a817119eae94a2eb0c12d3a7e82eda35133b42c Author: Petri Savolainen petri.savolainen@linaro.org Date: Mon Oct 15 14:42:55 2018 +0300
travis: build ODP on multiple threads
Build ODP on multiple threads with 'make -j $(nproc)'.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/scripts/ci/build.sh b/scripts/ci/build.sh index 995c797b..21922a49 100755 --- a/scripts/ci/build.sh +++ b/scripts/ci/build.sh @@ -9,7 +9,7 @@ cd "$(dirname "$0")"/../.. --prefix=/opt/odp \ ${CONF}
-make -j 8 +make -j $(nproc)
make install
commit 01d1a8db345fed332416b3a6066ae7be1f28f0ad Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 17 16:51:49 2018 +0300
linux-gen: config: maximum pool size 1M
Change maximum pool size back to 1M. Maximum packet pool capability is defined in the config file. This is the upper limit for that config. Also this is max capability of buffer and tmo pools.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index 7a573bd7..65f75197 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -150,7 +150,7 @@ extern "C" { * Maximum number of events in a pool. Power of two minus one results optimal * memory usage for the ring. */ -#define CONFIG_POOL_MAX_NUM ((256 * 1024) - 1) +#define CONFIG_POOL_MAX_NUM ((1024 * 1024) - 1)
/* * Maximum number of events in a thread local pool cache
commit 168d7168447b99097f3bbe397ca76b6cb87d34da Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 17 16:41:52 2018 +0300
linux-gen: pool: add max num packets in config file
This config is used for the maximum capability. The default capability needs to be modest so that the system memory limit is not exceeded. The user may increase the maximum number of packets when system memory size allows (and SHM single VA is not used).
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf index 4db9ed48..af651d7f 100644 --- a/config/odp-linux-generic.conf +++ b/config/odp-linux-generic.conf @@ -16,7 +16,7 @@
# Mandatory fields odp_implementation = "linux-generic" -config_file_version = "0.1.0" +config_file_version = "0.1.1"
# Shared memory options shm: { @@ -39,6 +39,16 @@ shm: { single_va = 0 }
+# Pool options +pool: { + # Packet pool options + pkt: { + # Maximum number of packets per pool. Power of two minus one + # results optimal memory usage (e.g. (256 * 1024) - 1). + max_num = 262143 + } +} + # DPDK pktio options pktio_dpdk: { # Default options diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index 2696e8ee..94f859de 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -89,6 +89,11 @@ typedef struct pool_t { typedef struct pool_table_t { pool_t pool[ODP_CONFIG_POOLS]; odp_shm_t shm; + + struct { + uint32_t pkt_max_num; + } config; + } pool_table_t;
extern pool_table_t *pool_tbl; diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index db7a8da3..d08be437 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -22,6 +22,7 @@ #include <odp_ring_internal.h> #include <odp_shm_internal.h> #include <odp_global_data.h> +#include <odp_libconfig_internal.h>
#include <string.h> #include <stdio.h> @@ -33,8 +34,9 @@ #define UNLOCK(a) odp_ticketlock_unlock(a) #define LOCK_INIT(a) odp_ticketlock_init(a)
-#define CACHE_BURST 32 -#define RING_SIZE_MIN (2 * CACHE_BURST) +#define CACHE_BURST 32 +#define RING_SIZE_MIN (2 * CACHE_BURST) +#define POOL_MAX_NUM_MIN RING_SIZE_MIN
/* Make sure packet buffers don't cross huge page boundaries starting from this * page size. 2MB is typically the smallest used huge page size. */ @@ -83,6 +85,32 @@ static inline pool_t *pool_from_buf(odp_buffer_t buf) return buf_hdr->pool_ptr; }
+static int read_config_file(pool_table_t *pool_tbl) +{ + const char *str; + int val = 0; + + ODP_PRINT("Pool config:\n"); + + str = "pool.pkt.max_num"; + if (!_odp_libconfig_lookup_int(str, &val)) { + ODP_ERR("Config option '%s' not found.\n", str); + return -1; + } + + if (val > CONFIG_POOL_MAX_NUM || val < POOL_MAX_NUM_MIN) { + ODP_ERR("Bad value %s = %u\n", str, val); + return -1; + } + + pool_tbl->config.pkt_max_num = val; + ODP_PRINT(" %s: %i\n", str, val); + + ODP_PRINT("\n"); + + return 0; +} + int odp_pool_init_global(void) { uint32_t i; @@ -101,6 +129,11 @@ int odp_pool_init_global(void) memset(pool_tbl, 0, sizeof(pool_table_t)); pool_tbl->shm = shm;
+ if (read_config_file(pool_tbl)) { + odp_shm_free(shm); + return -1; + } + for (i = 0; i < ODP_CONFIG_POOLS; i++) { pool_t *pool = pool_entry(i);
@@ -950,7 +983,7 @@ int odp_pool_capability(odp_pool_capability_t *capa) /* Packet pools */ capa->pkt.max_pools = ODP_CONFIG_POOLS; capa->pkt.max_len = CONFIG_PACKET_MAX_LEN; - capa->pkt.max_num = CONFIG_POOL_MAX_NUM; + capa->pkt.max_num = pool_tbl->config.pkt_max_num; capa->pkt.min_headroom = CONFIG_PACKET_HEADROOM; capa->pkt.max_headroom = CONFIG_PACKET_HEADROOM; capa->pkt.min_tailroom = CONFIG_PACKET_TAILROOM; diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf index 8618f2fd..d80df25c 100644 --- a/platform/linux-generic/test/process-mode.conf +++ b/platform/linux-generic/test/process-mode.conf @@ -1,6 +1,6 @@ # Mandatory fields odp_implementation = "linux-generic" -config_file_version = "0.1.0" +config_file_version = "0.1.1"
# Shared memory options shm: {
commit ebff1c15f1a0ccad57d26720ba7357a6b194d7fe Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 17 14:43:43 2018 +0300
linux-gen: pool: add packet param checks
Do also checks against maximum number of packets and max packet headroom size.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 54434818..db7a8da3 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -572,6 +572,17 @@ static int check_params(odp_pool_param_t *params) break;
case ODP_POOL_PACKET: + if (params->pkt.num > capa.pkt.max_num) { + ODP_ERR("pkt.num too large %u\n", params->pkt.num); + return -1; + } + + if (params->pkt.max_num > capa.pkt.max_num) { + ODP_ERR("pkt.max_num too large %u\n", + params->pkt.max_num); + return -1; + } + if (params->pkt.len > capa.pkt.max_len) { ODP_ERR("pkt.len too large %u\n", params->pkt.len); return -1; @@ -595,6 +606,12 @@ static int check_params(odp_pool_param_t *params) return -1; }
+ if (params->pkt.headroom > capa.pkt.max_headroom) { + ODP_ERR("pkt.headroom too large %u\n", + params->pkt.headroom); + return -1; + } + break;
case ODP_POOL_TIMEOUT:
commit 07b1c56979139118d47d15ff969365c08814dfd5 Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 17 14:38:16 2018 +0300
linux-gen: pool: output error on pool create
Change from debug to error message on pool create checks. These are slow path errors and it's helpful to see those also when not debugging.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 7a4a9eb9..54434818 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -555,17 +555,17 @@ static int check_params(odp_pool_param_t *params) switch (params->type) { case ODP_POOL_BUFFER: if (params->buf.num > capa.buf.max_num) { - ODP_DBG("buf.num too large %u\n", params->buf.num); + ODP_ERR("buf.num too large %u\n", params->buf.num); return -1; }
if (params->buf.size > capa.buf.max_size) { - ODP_DBG("buf.size too large %u\n", params->buf.size); + ODP_ERR("buf.size too large %u\n", params->buf.size); return -1; }
if (params->buf.align > capa.buf.max_align) { - ODP_DBG("buf.align too large %u\n", params->buf.align); + ODP_ERR("buf.align too large %u\n", params->buf.align); return -1; }
@@ -573,24 +573,24 @@ static int check_params(odp_pool_param_t *params)
case ODP_POOL_PACKET: if (params->pkt.len > capa.pkt.max_len) { - ODP_DBG("pkt.len too large %u\n", params->pkt.len); + ODP_ERR("pkt.len too large %u\n", params->pkt.len); return -1; }
if (params->pkt.max_len > capa.pkt.max_len) { - ODP_DBG("pkt.max_len too large %u\n", + ODP_ERR("pkt.max_len too large %u\n", params->pkt.max_len); return -1; }
if (params->pkt.seg_len > capa.pkt.max_seg_len) { - ODP_DBG("pkt.seg_len too large %u\n", + ODP_ERR("pkt.seg_len too large %u\n", params->pkt.seg_len); return -1; }
if (params->pkt.uarea_size > capa.pkt.max_uarea_size) { - ODP_DBG("pkt.uarea_size too large %u\n", + ODP_ERR("pkt.uarea_size too large %u\n", params->pkt.uarea_size); return -1; } @@ -599,13 +599,13 @@ static int check_params(odp_pool_param_t *params)
case ODP_POOL_TIMEOUT: if (params->tmo.num > capa.tmo.max_num) { - ODP_DBG("tmo.num too large %u\n", params->tmo.num); + ODP_ERR("tmo.num too large %u\n", params->tmo.num); return -1; } break;
default: - ODP_DBG("bad pool type %i\n", params->type); + ODP_ERR("bad pool type %i\n", params->type); return -1; }
commit fa9fd355c5404b46c3cfaf375666796cc5333aca Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 17 13:13:20 2018 +0300
linux-gen: config: move queue size config to scalable
Only scalable queues use build time the queue size config. Move it to scalable config file.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index 8ceb325b..7a573bd7 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -26,13 +26,6 @@ extern "C" { */ #define ODP_CONFIG_QUEUES 1024
-/* - * Maximum queue depth. Maximum number of elements that can be stored in a - * queue. This value is used only when the size is not explicitly provided - * during queue creation. - */ -#define CONFIG_QUEUE_SIZE 4096 - /* * Maximum number of ordered locks per queue */ diff --git a/platform/linux-generic/include/odp_schedule_scalable_config.h b/platform/linux-generic/include/odp_schedule_scalable_config.h index a84dc072..3462d047 100644 --- a/platform/linux-generic/include/odp_schedule_scalable_config.h +++ b/platform/linux-generic/include/odp_schedule_scalable_config.h @@ -9,6 +9,9 @@ #ifndef ODP_SCHEDULE_SCALABLE_CONFIG_H_ #define ODP_SCHEDULE_SCALABLE_CONFIG_H_
+/* Maximum number of events that can be stored in a queue */ +#define CONFIG_SCAL_QUEUE_SIZE 4096 + /* * Default scaling factor for the scheduler group * diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c index bbc57e44..b7ff2195 100644 --- a/platform/linux-generic/odp_queue_scalable.c +++ b/platform/linux-generic/odp_queue_scalable.c @@ -110,7 +110,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
sched_elem = &queue->s.sched_elem; ring_size = param->size > 0 ? - ROUNDUP_POWER2_U32(param->size) : CONFIG_QUEUE_SIZE; + ROUNDUP_POWER2_U32(param->size) : CONFIG_SCAL_QUEUE_SIZE; strncpy(queue->s.name, name ? name : "", ODP_QUEUE_NAME_LEN - 1); queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0; memcpy(&queue->s.param, param, sizeof(odp_queue_param_t)); @@ -212,15 +212,16 @@ static int queue_init_global(void) /* Add size of the array holding the queues */ pool_size = sizeof(queue_table_t); /* Add storage required for queues */ - pool_size += (CONFIG_QUEUE_SIZE * sizeof(odp_buffer_hdr_t *)) * - ODP_CONFIG_QUEUES; + pool_size += (CONFIG_SCAL_QUEUE_SIZE * + sizeof(odp_buffer_hdr_t *)) * ODP_CONFIG_QUEUES; + /* Add the reorder window size */ pool_size += sizeof(reorder_window_t) * ODP_CONFIG_QUEUES; /* Choose min_alloc and max_alloc such that buddy allocator is * is selected. */ min_alloc = 0; - max_alloc = CONFIG_QUEUE_SIZE * sizeof(odp_buffer_hdr_t *); + max_alloc = CONFIG_SCAL_QUEUE_SIZE * sizeof(odp_buffer_hdr_t *); queue_shm_pool = _odp_ishm_pool_create("queue_shm_pool", pool_size, min_alloc, max_alloc,
commit 173ef79e5306807d994869c7fb62c66ee82e4beb Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 18 09:37:47 2018 +0300
linux-gen: config: improve config file check error output
It was hard to notice from the log that the config file version has a mismatch. This is a common error when the config file version has been updated, but a user has not updated his own config file. Improve error check output, so that the failure reason is better highlighted.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_libconfig.c b/platform/linux-generic/odp_libconfig.c index baf825bb..014409e2 100644 --- a/platform/linux-generic/odp_libconfig.c +++ b/platform/linux-generic/odp_libconfig.c @@ -21,10 +21,12 @@ int _odp_libconfig_init_global(void) const char *filename; const char *vers; const char *vers_rt; - const char *ipml; - const char *ipml_rt; + const char *impl; + const char *impl_rt; config_t *config = &odp_global_ro.libconfig_default; config_t *config_rt = &odp_global_ro.libconfig_runtime; + const char *impl_field = "odp_implementation"; + const char *vers_field = "config_file_version";
config_init(config); config_init(config_rt); @@ -40,34 +42,45 @@ int _odp_libconfig_init_global(void) if (filename == NULL) return 0;
- ODP_PRINT("CONFIG FILE: %s\n", filename); + ODP_PRINT("ODP CONFIG FILE: %s\n", filename);
if (!config_read_file(config_rt, filename)) { - ODP_ERR("Failed to read config file: %s(%d): %s\n", - config_error_file(config_rt), - config_error_line(config_rt), - config_error_text(config_rt)); + ODP_PRINT(" ERROR: failed to read config file: %s(%d): %s\n\n", + config_error_file(config_rt), + config_error_line(config_rt), + config_error_text(config_rt)); goto fail; }
/* Check runtime configuration's implementation name and version */ - if (!config_lookup_string(config, "odp_implementation", &ipml) || - !config_lookup_string(config_rt, "odp_implementation", &ipml_rt)) { - ODP_ERR("Configuration missing 'odp_implementation' field\n"); + if (!config_lookup_string(config, impl_field, &impl) || + !config_lookup_string(config_rt, impl_field, &impl_rt)) { + ODP_PRINT(" ERROR: missing mandatory field: %s\n\n", + impl_field); goto fail; } - if (!config_lookup_string(config, "config_file_version", &vers) || - !config_lookup_string(config_rt, "config_file_version", &vers_rt)) { - ODP_ERR("Configuration missing 'config_file_version' field\n"); + if (!config_lookup_string(config, vers_field, &vers) || + !config_lookup_string(config_rt, vers_field, &vers_rt)) { + ODP_PRINT(" ERROR: missing mandatory field: %s\n\n", + vers_field); goto fail; } - if (strcmp(vers, vers_rt) || strcmp(ipml, ipml_rt)) { - ODP_ERR("Runtime configuration mismatch\n"); + if (strcmp(impl, impl_rt)) { + ODP_PRINT(" ERROR: ODP implementation name mismatch:\n" + " Expected: "%s"\n" + " Found: "%s"\n\n", impl, impl_rt); + goto fail; + } + if (strcmp(vers, vers_rt)) { + ODP_PRINT(" ERROR: config file version number mismatch:\n" + " Expected: "%s"\n" + " Found: "%s"\n\n", vers, vers_rt); goto fail; }
return 0; fail: + ODP_ERR("Config file failure\n"); config_destroy(config); config_destroy(config_rt); return -1;
commit 055da43cde1c7a9acc674a2db3d9d2a7a3d1ff8f Author: Matias Elo matias.elo@nokia.com Date: Thu Oct 18 09:49:01 2018 +0300
linux-gen: pool: increase minimum packet segment length
Some DPDK NICs need at least 2176 byte buffers (2048B + headroom) to not segment standard ethernet frames. Increase minimum segment length to avoid this and add matching check to zero-copy dpdk pktio pool create.
Reported-by: P. Gyanesh Kumar Patra pgyanesh.patra@gmail.com Signed-off-by: Matias Elo matias.elo@nokia.com Tested-by: P. Gyanesh Kumar Patra pgyanesh.patra@gmail.com Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index a94012ac..8ceb325b 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -125,8 +125,8 @@ extern "C" { * defined segment length (seg_len in odp_pool_param_t) will be rounded up into * this value. */ -#define CONFIG_PACKET_SEG_LEN_MIN ((2 * 1024) - \ - CONFIG_PACKET_HEADROOM - \ +#define CONFIG_PACKET_SEG_LEN_MIN ((2 * 1024) + \ + CONFIG_PACKET_HEADROOM + \ CONFIG_PACKET_TAILROOM)
/* Maximum number of shared memory blocks. diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c index 23f39445..4253451f 100644 --- a/platform/linux-generic/pktio/dpdk.c +++ b/platform/linux-generic/pktio/dpdk.c @@ -327,6 +327,13 @@ static struct rte_mempool *mbuf_pool_create(const char *name, goto fail; }
+ if (pool_entry->seg_len < RTE_MBUF_DEFAULT_BUF_SIZE) { + ODP_ERR("Some NICs need at least %dB buffers to not segment " + "standard ethernet frames. Increase pool seg_len.\n", + RTE_MBUF_DEFAULT_BUF_SIZE); + goto fail; + } + total_size = rte_mempool_calc_obj_size(elt_size, MEMPOOL_F_NO_SPREAD, &sz); if (total_size != pool_entry->block_size) {
commit 0b880228e5e19bf2446f31f50e6df41c64e9502b Author: Matias Elo matias.elo@nokia.com Date: Thu Oct 18 13:23:56 2018 +0300
example: generator: remove print from packet tx loop
The printf() would flood output when using small tx interval. Increase global stats print interval instead.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c index 8544da55..54194c66 100644 --- a/example/generator/odp_generator.c +++ b/example/generator/odp_generator.c @@ -31,6 +31,7 @@ #define MAX_UDP_TX_BURST 512 #define DEFAULT_RX_BURST 32 #define MAX_RX_BURST 512 +#define STATS_INTERVAL 10 /* Interval between stats prints (sec) */
#define APPL_MODE_UDP 0 /**< UDP mode */ #define APPL_MODE_PING 1 /**< ping mode */ @@ -775,14 +776,9 @@ static int gen_send_thread(void *arg)
counters->ctr_pkt_snd += pkt_array_size - burst_size;
- if (args->appl.interval != 0) { - printf(" [%02i] send pkt no:%ju seq %ju\n", - thr, - counters->ctr_seq, - counters->ctr_seq % 0xffff); + if (args->appl.interval != 0) odp_time_wait_ns((uint64_t)args->appl.interval * ODP_TIME_MSEC_IN_NS); - } counters->ctr_seq += seq_step; }
@@ -1014,7 +1010,7 @@ static void print_global_stats(int num_workers) uint64_t pkts_rcv = 0, pkts_rcv_prev = 0; uint64_t pps_rcv = 0, maximum_pps_rcv = 0; uint64_t stall, pkts_snd_drop; - int verbose_interval = 20, i; + int verbose_interval = STATS_INTERVAL, i; odp_thrmask_t thrd_mask;
odp_barrier_wait(&args->barrier);
commit fb41b3eb2075505cf2f77fad48a8b6b5ed5da302 Author: Matias Elo matias.elo@nokia.com Date: Mon Oct 15 12:49:47 2018 +0300
example: generator: use odp_time_wait_ns() instead of timers
Simplify code by using odp_time_wait_ns() instead of timers.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c index 4e06115c..8544da55 100644 --- a/example/generator/odp_generator.c +++ b/example/generator/odp_generator.c @@ -132,10 +132,6 @@ typedef struct { } rx; }; odp_pool_t pool; /**< Pool for packet IO */ - odp_timer_pool_t tp; /**< Timer pool handle */ - odp_queue_t tq; /**< Queue for timeouts */ - odp_timer_t tim; /**< Timer handle */ - odp_timeout_t tmo_ev; /**< Timeout event */ int mode; /**< Thread mode */ } thread_args_t;
@@ -171,27 +167,6 @@ static void usage(char *progname); static int scan_ip(char *buf, unsigned int *paddr); static void print_global_stats(int num_workers);
-/** - * Sleep for the specified amount of milliseconds - * Use ODP timer, busy wait until timer expired and timeout event received - */ -static void millisleep(uint32_t ms, - odp_timer_pool_t tp, - odp_timer_t tim, - odp_queue_t q, - odp_timeout_t tmo) -{ - uint64_t ticks = odp_timer_ns_to_tick(tp, 1000000ULL * ms); - odp_event_t ev = odp_timeout_to_event(tmo); - int rc = odp_timer_set_rel(tim, ticks, &ev); - - if (rc != ODP_TIMER_SUCCESS) - EXAMPLE_ABORT("odp_timer_set_rel() failed\n"); - /* Spin waiting for timeout event */ - while ((ev = odp_queue_deq(q)) == ODP_EVENT_INVALID) - (void)0; -} - /** * Scan ip * Parse ip address. @@ -805,11 +780,8 @@ static int gen_send_thread(void *arg) thr, counters->ctr_seq, counters->ctr_seq % 0xffff); - millisleep(args->appl.interval, - thr_args->tp, - thr_args->tim, - thr_args->tq, - thr_args->tmo_ev); + odp_time_wait_ns((uint64_t)args->appl.interval * + ODP_TIME_MSEC_IN_NS); } counters->ctr_seq += seq_step; } @@ -1131,15 +1103,9 @@ int main(int argc, char *argv[]) odp_cpumask_t cpumask; char cpumaskstr[ODP_CPUMASK_STR_SIZE]; odp_pool_param_t params; - odp_timer_pool_param_t tparams; - odp_timer_pool_t tp; - odp_pool_t tmop; - odp_queue_t tq; - odp_event_t ev; interface_t *ifs; odp_instance_t instance; odph_odpthread_params_t thr_params; - odp_timer_capability_t timer_capa;
/* Init ODP before calling anything else */ if (odp_init_global(&instance, NULL, NULL)) { @@ -1227,36 +1193,6 @@ int main(int argc, char *argv[]) } odp_pool_print(pool);
- /* Create timer pool */ - if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)) { - EXAMPLE_ERR("Error: get timer capacity failed.\n"); - exit(EXIT_FAILURE); - } - tparams.res_ns = MAX(1 * ODP_TIME_MSEC_IN_NS, - timer_capa.highest_res_ns); - tparams.min_tmo = 0; - tparams.max_tmo = 10000 * ODP_TIME_SEC_IN_NS; - tparams.num_timers = num_workers; /* One timer per worker */ - tparams.priv = 0; /* Shared */ - tparams.clk_src = ODP_CLOCK_CPU; - tp = odp_timer_pool_create("timer_pool", &tparams); - if (tp == ODP_TIMER_POOL_INVALID) { - EXAMPLE_ERR("Timer pool create failed.\n"); - exit(EXIT_FAILURE); - } - odp_timer_pool_start(); - - /* Create timeout pool */ - odp_pool_param_init(¶ms); - params.tmo.num = tparams.num_timers; /* One timeout per timer */ - params.type = ODP_POOL_TIMEOUT; - - tmop = odp_pool_create("timeout_pool", ¶ms); - if (tmop == ODP_POOL_INVALID) { - EXAMPLE_ERR("Error: timeout pool create failed.\n"); - exit(EXIT_FAILURE); - } - ifs = malloc(sizeof(interface_t) * args->appl.if_count);
for (i = 0; i < args->appl.if_count; ++i) { @@ -1303,27 +1239,10 @@ int main(int argc, char *argv[]) cpu_first = odp_cpumask_first(&cpumask); odp_cpumask_set(&cpu_mask, cpu_first);
- tq = odp_queue_create("", NULL); - if (tq == ODP_QUEUE_INVALID) { - EXAMPLE_ERR("queue_create failed\n"); - abort(); - } thr_args = &args->thread[PING_THR_RX]; if (!args->appl.sched) thr_args->rx.pktin = ifs[0].pktin[0]; thr_args->pool = pool; - thr_args->tp = tp; - thr_args->tq = tq; - thr_args->tim = odp_timer_alloc(tp, tq, NULL); - if (thr_args->tim == ODP_TIMER_INVALID) { - EXAMPLE_ERR("timer_alloc failed\n"); - abort(); - } - thr_args->tmo_ev = odp_timeout_alloc(tmop); - if (thr_args->tmo_ev == ODP_TIMEOUT_INVALID) { - EXAMPLE_ERR("timeout_alloc failed\n"); - abort(); - } thr_args->mode = args->appl.mode;
memset(&thr_params, 0, sizeof(thr_params)); @@ -1338,27 +1257,10 @@ int main(int argc, char *argv[]) odph_odpthreads_create(&thread_tbl[PING_THR_RX], &cpu_mask, &thr_params);
- tq = odp_queue_create("", NULL); - if (tq == ODP_QUEUE_INVALID) { - EXAMPLE_ERR("queue_create failed\n"); - abort(); - } thr_args = &args->thread[PING_THR_TX]; thr_args->tx.pktout = ifs[0].pktout[0]; thr_args->tx.pktout_cfg = &ifs[0].config.pktout; thr_args->pool = pool; - thr_args->tp = tp; - thr_args->tq = tq; - thr_args->tim = odp_timer_alloc(tp, tq, NULL); - if (thr_args->tim == ODP_TIMER_INVALID) { - EXAMPLE_ERR("timer_alloc failed\n"); - abort(); - } - thr_args->tmo_ev = odp_timeout_alloc(tmop); - if (thr_args->tmo_ev == ODP_TIMEOUT_INVALID) { - EXAMPLE_ERR("timeout_alloc failed\n"); - abort(); - } thr_args->mode = args->appl.mode; cpu_next = odp_cpumask_next(&cpumask, cpu_first); odp_cpumask_zero(&cpu_mask); @@ -1427,24 +1329,7 @@ int main(int argc, char *argv[])
args->thread[i].counters.ctr_seq = start_seq; } - tq = odp_queue_create("", NULL); - if (tq == ODP_QUEUE_INVALID) { - EXAMPLE_ERR("queue_create failed\n"); - abort(); - } args->thread[i].pool = pool; - args->thread[i].tp = tp; - args->thread[i].tq = tq; - args->thread[i].tim = odp_timer_alloc(tp, tq, NULL); - if (args->thread[i].tim == ODP_TIMER_INVALID) { - EXAMPLE_ERR("timer_alloc failed\n"); - abort(); - } - args->thread[i].tmo_ev = odp_timeout_alloc(tmop); - if (args->thread[i].tmo_ev == ODP_TIMEOUT_INVALID) { - EXAMPLE_ERR("timeout_alloc failed\n"); - abort(); - } args->thread[i].mode = args->appl.mode;
if (args->appl.mode == APPL_MODE_UDP) { @@ -1484,22 +1369,6 @@ int main(int argc, char *argv[]) for (i = 0; i < args->appl.if_count; ++i) odp_pktio_stop(ifs[i].pktio);
- for (i = 0; i < num_workers; ++i) { - odp_timer_cancel(args->thread[i].tim, &ev); - odp_timer_free(args->thread[i].tim); - odp_timeout_free(args->thread[i].tmo_ev); - } - - for (i = 0; i < num_workers; ++i) { - while (1) { - ev = odp_queue_deq(args->thread[i].tq); - if (ev == ODP_EVENT_INVALID) - break; - odp_event_free(ev); - } - odp_queue_destroy(args->thread[i].tq); - } - for (i = 0; i < args->appl.if_count; ++i) odp_pktio_close(ifs[i].pktio); free(ifs); @@ -1507,9 +1376,6 @@ int main(int argc, char *argv[]) free(args->appl.if_str); if (0 != odp_pool_destroy(pool)) fprintf(stderr, "unable to destroy pool "pool"\n"); - odp_timer_pool_destroy(tp); - if (0 != odp_pool_destroy(tmop)) - fprintf(stderr, "unable to destroy pool "tmop"\n"); if (0 != odp_shm_free(shm)) fprintf(stderr, "unable to free "shm"\n"); odp_term_local();
commit d3d950ab1af5d1823a44950ea5a78e41dbe44dde Author: Matias Elo matias.elo@nokia.com Date: Wed Oct 17 10:40:53 2018 +0300
example: stop and close pktio devices on exit
Stop and close the pktio devices in use on exit to free their resources.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Dmitry Eremin-Solenikov dmitry.ereminsolenikov@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c index ee63451c..b45a82d7 100644 --- a/example/ipsec/odp_ipsec.c +++ b/example/ipsec/odp_ipsec.c @@ -1325,6 +1325,20 @@ main(int argc, char *argv[]) odph_odpthreads_join(thread_tbl); }
+ /* Stop and close used pktio devices */ + for (i = 0; i < global->appl.if_count; i++) { + odp_pktio_t pktio = odp_pktio_lookup(global->appl.if_names[i]); + + if (pktio == ODP_PKTIO_INVALID) + continue; + + if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) { + EXAMPLE_ERR("Error: failed to close pktio %s\n", + global->appl.if_names[i]); + exit(EXIT_FAILURE); + } + } + free(global->appl.if_names); free(global->appl.if_str);
diff --git a/example/ipsec_api/odp_ipsec.c b/example/ipsec_api/odp_ipsec.c index b402002c..d72109cf 100644 --- a/example/ipsec_api/odp_ipsec.c +++ b/example/ipsec_api/odp_ipsec.c @@ -1025,6 +1025,20 @@ main(int argc, char *argv[]) odph_odpthreads_join(thread_tbl); }
+ /* Stop and close used pktio devices */ + for (i = 0; i < global->appl.if_count; i++) { + odp_pktio_t pktio = odp_pktio_lookup(global->appl.if_names[i]); + + if (pktio == ODP_PKTIO_INVALID) + continue; + + if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) { + EXAMPLE_ERR("Error: failed to close pktio %s\n", + global->appl.if_names[i]); + exit(EXIT_FAILURE); + } + } + free(global->appl.if_names); free(global->appl.if_str);
diff --git a/example/ipsec_offload/odp_ipsec_offload.c b/example/ipsec_offload/odp_ipsec_offload.c index 89b9dddf..90b3f640 100644 --- a/example/ipsec_offload/odp_ipsec_offload.c +++ b/example/ipsec_offload/odp_ipsec_offload.c @@ -627,6 +627,20 @@ main(int argc, char *argv[]) &thr_params); odph_odpthreads_join(thread_tbl);
+ /* Stop and close used pktio devices */ + for (i = 0; i < global->appl.if_count; i++) { + odp_pktio_t pktio = odp_pktio_lookup(global->appl.if_names[i]); + + if (pktio == ODP_PKTIO_INVALID) + continue; + + if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) { + EXAMPLE_ERR("Error: failed to close pktio %s\n", + global->appl.if_names[i]); + exit(EXIT_FAILURE); + } + } + free(global->appl.if_names); free(global->appl.if_str);
diff --git a/example/l2fwd_simple/odp_l2fwd_simple.c b/example/l2fwd_simple/odp_l2fwd_simple.c index 6835e5bb..8daeec4c 100644 --- a/example/l2fwd_simple/odp_l2fwd_simple.c +++ b/example/l2fwd_simple/odp_l2fwd_simple.c @@ -240,6 +240,15 @@ int main(int argc, char **argv) odph_odpthreads_create(thd, &cpumask, &thr_params); odph_odpthreads_join(thd);
+ if (odp_pktio_stop(global->if0) || odp_pktio_close(global->if0)) { + printf("Error: failed to close interface %s\n", argv[1]); + exit(EXIT_FAILURE); + } + if (odp_pktio_stop(global->if1) || odp_pktio_close(global->if1)) { + printf("Error: failed to close interface %s\n", argv[2]); + exit(EXIT_FAILURE); + } + if (odp_pool_destroy(pool)) { printf("Error: pool destroy\n"); exit(EXIT_FAILURE); diff --git a/example/l3fwd/odp_l3fwd.c b/example/l3fwd/odp_l3fwd.c index f72cf373..708c4df9 100644 --- a/example/l3fwd/odp_l3fwd.c +++ b/example/l3fwd/odp_l3fwd.c @@ -1126,6 +1126,16 @@ int main(int argc, char **argv) for (i = 0; i < nb_worker; i++) odph_odpthreads_join(&thread_tbl[i]);
+ /* Stop and close used pktio devices */ + for (i = 0; i < args->if_count; i++) { + odp_pktio_t pktio = global->l3fwd_pktios[i].pktio; + + if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) { + printf("Error: failed to close pktio\n"); + exit(EXIT_FAILURE); + } + } + /* if_names share a single buffer, so only one free */ free(args->if_names[0]);
diff --git a/example/switch/odp_switch.c b/example/switch/odp_switch.c index dab60e83..a67fa180 100644 --- a/example/switch/odp_switch.c +++ b/example/switch/odp_switch.c @@ -1019,6 +1019,16 @@ int main(int argc, char **argv) for (i = 0; i < num_workers; ++i) odph_odpthreads_join(&thread_tbl[i]);
+ /* Stop and close used pktio devices */ + for (i = 0; i < if_count; i++) { + odp_pktio_t pktio = gbl_args->pktios[i].pktio; + + if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) { + printf("Error: failed to close pktio\n"); + exit(EXIT_FAILURE); + } + } + free(gbl_args->appl.if_names); free(gbl_args->appl.if_str);
diff --git a/platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c b/platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c index 5066eede..faa51dad 100644 --- a/platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c +++ b/platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c @@ -203,6 +203,15 @@ int main(int argc, char **argv)
ret = global->g_ret;
+ if (odp_pktio_stop(global->if0) || odp_pktio_close(global->if0)) { + printf("Error: failed to close interface %s\n", argv[1]); + exit(EXIT_FAILURE); + } + if (odp_pktio_stop(global->if1) || odp_pktio_close(global->if1)) { + printf("Error: failed to close interface %s\n", argv[2]); + exit(EXIT_FAILURE); + } + if (odp_pool_destroy(pool)) { printf("Error: pool destroy\n"); exit(EXIT_FAILURE);
commit c97618bb548f6b7c5f27f4de497f72f705e184f6 Author: Matias Elo matias.elo@nokia.com Date: Tue Oct 2 14:13:35 2018 +0300
linux-gen: ring: allocate global data from shm
Enables using ipc in process mode.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_packet_io_ring_internal.h b/platform/linux-generic/include/odp_packet_io_ring_internal.h index 889a6559..6b4e06a4 100644 --- a/platform/linux-generic/include/odp_packet_io_ring_internal.h +++ b/platform/linux-generic/include/odp_packet_io_ring_internal.h @@ -398,9 +398,14 @@ int _ring_mc_dequeue_burst(_ring_t *r, void **obj_table, unsigned n); void _ring_list_dump(void);
/** - * initialise ring tailq + * Initialize ring tailq */ -void _ring_tailq_init(void); +int _ring_tailq_init(void); + +/** + * Terminate ring tailq + */ +int _ring_tailq_term(void);
#ifdef __cplusplus } diff --git a/platform/linux-generic/pktio/ipc.c b/platform/linux-generic/pktio/ipc.c index 74af0b97..fc565443 100644 --- a/platform/linux-generic/pktio/ipc.c +++ b/platform/linux-generic/pktio/ipc.c @@ -826,9 +826,13 @@ static int ipc_close(pktio_entry_t *pktio_entry)
static int ipc_pktio_init_global(void) { - _ring_tailq_init(); - ODP_PRINT("PKTIO: initialized ipc interface.\n"); - return 0; + ODP_DBG("PKTIO: initializing ipc interface.\n"); + return _ring_tailq_init(); +} + +static int ipc_pktio_term_global(void) +{ + return _ring_tailq_term(); }
const pktio_if_ops_t ipc_pktio_ops = { @@ -836,7 +840,7 @@ const pktio_if_ops_t ipc_pktio_ops = { .print = NULL, .init_global = ipc_pktio_init_global, .init_local = NULL, - .term = NULL, + .term = ipc_pktio_term_global, .open = ipc_pktio_open, .close = ipc_close, .recv = ipc_pktio_recv, diff --git a/platform/linux-generic/pktio/ring.c b/platform/linux-generic/pktio/ring.c index 488dc1a1..6a354cb1 100644 --- a/platform/linux-generic/pktio/ring.c +++ b/platform/linux-generic/pktio/ring.c @@ -80,13 +80,21 @@ #include <odp_packet_io_ring_internal.h> #include <odp_errno_define.h> #include <odp_global_data.h> +#include <odp_shm_internal.h>
#include <odp/api/plat/cpu_inlines.h>
-static TAILQ_HEAD(, _ring) odp_ring_list; - #define RING_VAL_IS_POWER_2(x) ((((x) - 1) & (x)) == 0)
+typedef struct { + TAILQ_HEAD(, _ring) ring_list; + /* Rings tailq lock */ + odp_rwlock_t qlock; + odp_shm_t shm; +} global_data_t; + +static global_data_t *global; + /* * the enqueue of pointers on the ring. */ @@ -149,13 +157,37 @@ static TAILQ_HEAD(, _ring) odp_ring_list; } \ } while (0)
-static odp_rwlock_t qlock; /* rings tailq lock */
-/* init tailq_ring */ -void _ring_tailq_init(void) +/* Initialize tailq_ring */ +int _ring_tailq_init(void) +{ odp_shm_t shm; + + /* Allocate globally shared memory */ + shm = odp_shm_reserve("_odp_ring_global", sizeof(global_data_t), + ODP_CACHE_LINE_SIZE, _ODP_SHM_NO_HP); + if (ODP_SHM_INVALID == shm) { + ODP_ERR("Shm reserve failed for pktio ring\n"); + return -1; + } + + global = odp_shm_addr(shm); + memset(global, 0, sizeof(global_data_t)); + global->shm = shm; + + TAILQ_INIT(&global->ring_list); + odp_rwlock_init(&global->qlock); + + return 0; +} + +/* Terminate tailq_ring */ +int _ring_tailq_term(void) { - TAILQ_INIT(&odp_ring_list); - odp_rwlock_init(&qlock); + if (odp_shm_free(global->shm)) { + ODP_ERR("Shm free failed for pktio ring\n"); + return -1; + } + return 0; }
/* create the ring */ @@ -187,7 +219,7 @@ _ring_create(const char *name, unsigned count, unsigned flags) snprintf(ring_name, sizeof(ring_name), "%s", name); ring_size = count * sizeof(void *) + sizeof(_ring_t);
- odp_rwlock_write_lock(&qlock); + odp_rwlock_write_lock(&global->qlock); /* reserve a memory zone for this ring.*/ shm = odp_shm_reserve(ring_name, ring_size, ODP_CACHE_LINE_SIZE, shm_flag); @@ -208,13 +240,13 @@ _ring_create(const char *name, unsigned count, unsigned flags) r->cons.tail = 0;
if (!(flags & _RING_NO_LIST)) - TAILQ_INSERT_TAIL(&odp_ring_list, r, next); + TAILQ_INSERT_TAIL(&global->ring_list, r, next); } else { __odp_errno = ENOMEM; ODP_ERR("Cannot reserve memory\n"); }
- odp_rwlock_write_unlock(&qlock); + odp_rwlock_write_unlock(&global->qlock); return r; }
@@ -225,10 +257,10 @@ int _ring_destroy(const char *name) if (shm != ODP_SHM_INVALID) { _ring_t *r = odp_shm_addr(shm);
- odp_rwlock_write_lock(&qlock); + odp_rwlock_write_lock(&global->qlock); if (!(r->flags & _RING_NO_LIST)) - TAILQ_REMOVE(&odp_ring_list, r, next); - odp_rwlock_write_unlock(&qlock); + TAILQ_REMOVE(&global->ring_list, r, next); + odp_rwlock_write_unlock(&global->qlock);
return odp_shm_free(shm); } @@ -442,13 +474,13 @@ void _ring_list_dump(void) { const _ring_t *mp = NULL;
- odp_rwlock_read_lock(&qlock); + odp_rwlock_read_lock(&global->qlock);
- TAILQ_FOREACH(mp, &odp_ring_list, next) { + TAILQ_FOREACH(mp, &global->ring_list, next) { _ring_dump(mp); }
- odp_rwlock_read_unlock(&qlock); + odp_rwlock_read_unlock(&global->qlock); }
/* search a ring from its name */ @@ -456,12 +488,12 @@ _ring_t *_ring_lookup(const char *name) { _ring_t *r;
- odp_rwlock_read_lock(&qlock); - TAILQ_FOREACH(r, &odp_ring_list, next) { + odp_rwlock_read_lock(&global->qlock); + TAILQ_FOREACH(r, &global->ring_list, next) { if (strncmp(name, r->name, _RING_NAMESIZE) == 0) break; } - odp_rwlock_read_unlock(&qlock); + odp_rwlock_read_unlock(&global->qlock);
return r; }
commit eee3800f367d61ccdb9051d484e133f618aad9e4 Author: Matias Elo matias.elo@nokia.com Date: Tue Oct 2 11:20:23 2018 +0300
linux-gen: sched scalable: allocate global data from shm
Enables using scalable scheduler in process mode.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c index 9acb997a..a21728d8 100644 --- a/platform/linux-generic/odp_schedule_scalable.c +++ b/platform/linux-generic/odp_schedule_scalable.c @@ -32,6 +32,7 @@ #include <odp_schedule_if.h> #include <odp_bitset.h> #include <odp_packet_io_internal.h> +#include <odp_shm_internal.h> #include <odp_timer_internal.h>
#include <limits.h> @@ -46,8 +47,6 @@
#define FLAG_PKTIN 0x80
-static _odp_ishm_pool_t *sched_shm_pool; - ODP_STATIC_ASSERT(ODP_SCHED_PRIO_LOWEST == (ODP_SCHED_PRIO_NUM - 2), "lowest_prio_does_not_match_with_num_prios");
@@ -58,22 +57,24 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) && ODP_STATIC_ASSERT(CHECK_IS_POWER2(ODP_CONFIG_QUEUES), "Number_of_queues_is_not_power_of_two");
-/* - * Scheduler group related variables. - */ -/* Currently used scheduler groups */ -static sched_group_mask_t sg_free; -static sched_group_t *sg_vec[MAX_SCHED_GROUP]; -/* Group lock for MT-safe APIs */ -static odp_spinlock_t sched_grp_lock; - #define SCHED_GROUP_JOIN 0 #define SCHED_GROUP_LEAVE 1
-/* - * Per thread state - */ -static sched_scalable_thread_state_t thread_state[MAXTHREADS]; +typedef struct { + odp_shm_t shm; + _odp_ishm_pool_t *sched_shm_pool; + /** Currently used scheduler groups */ + sched_group_mask_t sg_free; + sched_group_t *sg_vec[MAX_SCHED_GROUP]; + /** Group lock for MT-safe APIs */ + odp_spinlock_t sched_grp_lock; + /** Per thread state */ + sched_scalable_thread_state_t thread_state[MAXTHREADS]; + uint16_t poll_count[ODP_CONFIG_PKTIO_ENTRIES]; +} sched_global_t; + +static sched_global_t *global; + __thread sched_scalable_thread_state_t *sched_ts;
static int thread_state_init(int tidx) @@ -82,7 +83,7 @@ static int thread_state_init(int tidx) uint32_t i;
ODP_ASSERT(tidx < MAXTHREADS); - ts = &thread_state[tidx]; + ts = &global->thread_state[tidx]; ts->atomq = NULL; ts->src_schedq = NULL; ts->rctx = NULL; @@ -523,9 +524,9 @@ static void signal_threads_add(sched_group_t *sg, uint32_t sgi, uint32_t prio) /* Notify the thread about membership in this * group/priority. */ - atom_bitset_set(&thread_state[thr].sg_wanted[prio], + atom_bitset_set(&global->thread_state[thr].sg_wanted[prio], sgi, __ATOMIC_RELEASE); - __atomic_store_n(&thread_state[thr].sg_sem, 1, + __atomic_store_n(&global->thread_state[thr].sg_sem, 1, __ATOMIC_RELEASE); } } @@ -537,11 +538,11 @@ sched_queue_t *sched_queue_add(odp_schedule_group_t grp, uint32_t prio) uint32_t x;
ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP); - ODP_ASSERT((sg_free & (1ULL << grp)) == 0); + ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0); ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);
sgi = grp; - sg = sg_vec[sgi]; + sg = global->sg_vec[sgi];
/* Use xcount to spread queues over the xfactor schedq's * per priority. @@ -563,11 +564,11 @@ static uint32_t sched_pktin_add(odp_schedule_group_t grp, uint32_t prio) sched_group_t *sg;
ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP); - ODP_ASSERT((sg_free & (1ULL << grp)) == 0); + ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0); ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);
sgi = grp; - sg = sg_vec[sgi]; + sg = global->sg_vec[sgi];
(void)sched_queue_add(grp, ODP_SCHED_PRIO_PKTIN); return (ODP_SCHED_PRIO_PKTIN - prio) * sg->xfactor; @@ -584,9 +585,9 @@ static void signal_threads_rem(sched_group_t *sg, uint32_t sgi, uint32_t prio) /* Notify the thread about membership in this * group/priority. */ - atom_bitset_clr(&thread_state[thr].sg_wanted[prio], + atom_bitset_clr(&global->thread_state[thr].sg_wanted[prio], sgi, __ATOMIC_RELEASE); - __atomic_store_n(&thread_state[thr].sg_sem, 1, + __atomic_store_n(&global->thread_state[thr].sg_sem, 1, __ATOMIC_RELEASE); } } @@ -598,11 +599,11 @@ void sched_queue_rem(odp_schedule_group_t grp, uint32_t prio) uint32_t x;
ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP); - ODP_ASSERT((sg_free & (1ULL << grp)) == 0); + ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0); ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);
sgi = grp; - sg = sg_vec[sgi]; + sg = global->sg_vec[sgi];
x = __atomic_sub_fetch(&sg->xcount[prio], 1, __ATOMIC_RELAXED); if (x == 0) { @@ -631,7 +632,7 @@ static void update_sg_add(sched_scalable_thread_state_t *ts, added = bitset_andn(sg_wanted, ts->sg_actual[p]); while (!bitset_is_null(added)) { sgi = bitset_ffs(added) - 1; - sg = sg_vec[sgi]; + sg = global->sg_vec[sgi]; for (x = 0; x < sg->xfactor; x++) { /* Include our thread index to shift * (rotate) the order of schedq's @@ -657,7 +658,7 @@ static void update_sg_rem(sched_scalable_thread_state_t *ts, removed = bitset_andn(ts->sg_actual[p], sg_wanted); while (!bitset_is_null(removed)) { sgi = bitset_ffs(removed) - 1; - sg = sg_vec[sgi]; + sg = global->sg_vec[sgi]; for (x = 0; x < sg->xfactor; x++) { remove_schedq_from_list(ts, &sg->schedq[p * @@ -710,8 +711,6 @@ static inline void _schedule_release_ordered(sched_scalable_thread_state_t *ts) ts->rctx = NULL; }
-static uint16_t poll_count[ODP_CONFIG_PKTIO_ENTRIES]; - static void pktio_start(int pktio_idx, int num_in_queue, int in_queue_idx[], @@ -725,7 +724,8 @@ static void pktio_start(int pktio_idx, for (i = 0; i < num_in_queue; i++) { rxq = in_queue_idx[i]; ODP_ASSERT(rxq < PKTIO_MAX_QUEUES); - __atomic_fetch_add(&poll_count[pktio_idx], 1, __ATOMIC_RELAXED); + __atomic_fetch_add(&global->poll_count[pktio_idx], 1, + __ATOMIC_RELAXED); qentry = qentry_from_ext(odpq[i]); elem = &qentry->s.sched_elem; elem->cons_type |= FLAG_PKTIN; /* Set pktin queue flag */ @@ -742,7 +742,7 @@ static void pktio_stop(sched_elem_t *elem) { elem->cons_type &= ~FLAG_PKTIN; /* Clear pktin queue flag */ sched_pktin_rem(elem->sched_grp); - if (__atomic_sub_fetch(&poll_count[elem->pktio_idx], + if (__atomic_sub_fetch(&global->poll_count[elem->pktio_idx], 1, __ATOMIC_RELAXED) == 0) { /* Call stop_finalize when all queues * of the pktio have been removed */ @@ -1389,22 +1389,21 @@ static int schedule_group_update(sched_group_t *sg, atom_bitset_clr(&sg->thr_wanted, thr, __ATOMIC_RELAXED); for (p = 0; p < ODP_SCHED_PRIO_NUM; p++) { if (sg->xcount[p] != 0) { + sched_scalable_thread_state_t *state; + + state = &global->thread_state[thr]; + /* This priority level has ODP queues * Notify the thread about membership in * this group/priority */ if (join_leave == SCHED_GROUP_JOIN) - atom_bitset_set( - &thread_state[thr].sg_wanted[p], - sgi, - __ATOMIC_RELEASE); + atom_bitset_set(&state->sg_wanted[p], + sgi, __ATOMIC_RELEASE); else - atom_bitset_clr( - &thread_state[thr].sg_wanted[p], - sgi, - __ATOMIC_RELEASE); - __atomic_store_n(&thread_state[thr].sg_sem, - 1, + atom_bitset_clr(&state->sg_wanted[p], + sgi, __ATOMIC_RELEASE); + __atomic_store_n(&state->sg_sem, 1, __ATOMIC_RELEASE); } } @@ -1447,10 +1446,10 @@ static odp_schedule_group_t schedule_group_create(const char *name, if (mask == NULL) ODP_ABORT("mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock); + odp_spinlock_lock(&global->sched_grp_lock);
/* Allocate a scheduler group */ - free = atom_bitset_load(&sg_free, __ATOMIC_RELAXED); + free = atom_bitset_load(&global->sg_free, __ATOMIC_RELAXED); do { /* All sched_groups in use */ if (bitset_is_null(free)) @@ -1460,7 +1459,7 @@ static odp_schedule_group_t schedule_group_create(const char *name, /* All sched_groups in use */ if (sgi >= MAX_SCHED_GROUP) goto no_free_sched_group; - } while (!atom_bitset_cmpxchg(&sg_free, + } while (!atom_bitset_cmpxchg(&global->sg_free, &free, bitset_clr(free, sgi), true, @@ -1477,12 +1476,13 @@ static odp_schedule_group_t schedule_group_create(const char *name,
size = sizeof(sched_group_t) + (ODP_SCHED_PRIO_NUM * xfactor - 1) * sizeof(sched_queue_t); - sg = (sched_group_t *)shm_pool_alloc_align(sched_shm_pool, size); + sg = (sched_group_t *)shm_pool_alloc_align(global->sched_shm_pool, + size); if (sg == NULL) goto shm_pool_alloc_failed;
strncpy(sg->name, name ? name : "", ODP_SCHED_GROUP_NAME_LEN - 1); - sg_vec[sgi] = sg; + global->sg_vec[sgi] = sg; memset(sg->thr_actual, 0, sizeof(sg->thr_actual)); sg->thr_wanted = bitset_null(); sg->xfactor = xfactor; @@ -1494,16 +1494,16 @@ static odp_schedule_group_t schedule_group_create(const char *name, if (odp_thrmask_count(mask) != 0) schedule_group_update(sg, sgi, mask, SCHED_GROUP_JOIN);
- odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
return (odp_schedule_group_t)(sgi);
shm_pool_alloc_failed: /* Free the allocated group index */ - atom_bitset_set(&sg_free, sgi, __ATOMIC_RELAXED); + atom_bitset_set(&global->sg_free, sgi, __ATOMIC_RELAXED);
no_free_sched_group: - odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
return ODP_SCHED_GROUP_INVALID; } @@ -1529,15 +1529,15 @@ static int schedule_group_destroy(odp_schedule_group_t group) sched_ts->sg_sem = 0; update_sg_membership(sched_ts); } - odp_spinlock_lock(&sched_grp_lock); + odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group; - if (bitset_is_set(sg_free, sgi)) { + if (bitset_is_set(global->sg_free, sgi)) { ret = -1; goto group_not_found; }
- sg = sg_vec[sgi]; + sg = global->sg_vec[sgi]; /* First ensure all threads have processed group_join/group_leave * requests. */ @@ -1570,18 +1570,18 @@ static int schedule_group_destroy(odp_schedule_group_t group) } }
- _odp_ishm_pool_free(sched_shm_pool, sg); - sg_vec[sgi] = NULL; - atom_bitset_set(&sg_free, sgi, __ATOMIC_RELEASE); + _odp_ishm_pool_free(global->sched_shm_pool, sg); + global->sg_vec[sgi] = NULL; + atom_bitset_set(&global->sg_free, sgi, __ATOMIC_RELEASE);
- odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
thrd_q_present_in_group:
group_not_found: - odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
invalid_group:
@@ -1599,19 +1599,19 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
group = ODP_SCHED_GROUP_INVALID;
- odp_spinlock_lock(&sched_grp_lock); + odp_spinlock_lock(&global->sched_grp_lock);
/* Scan through the schedule group array */ for (sgi = 0; sgi < MAX_SCHED_GROUP; sgi++) { - if ((sg_vec[sgi] != NULL) && - (strncmp(name, sg_vec[sgi]->name, + if ((global->sg_vec[sgi] != NULL) && + (strncmp(name, global->sg_vec[sgi]->name, ODP_SCHED_GROUP_NAME_LEN) == 0)) { group = (odp_schedule_group_t)sgi; break; } }
- odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
return group; } @@ -1630,18 +1630,18 @@ static int schedule_group_join(odp_schedule_group_t group, if (mask == NULL) ODP_ABORT("name or mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock); + odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group; - if (bitset_is_set(sg_free, sgi)) { - odp_spinlock_unlock(&sched_grp_lock); + if (bitset_is_set(global->sg_free, sgi)) { + odp_spinlock_unlock(&global->sched_grp_lock); return -1; }
- sg = sg_vec[sgi]; + sg = global->sg_vec[sgi]; ret = schedule_group_update(sg, sgi, mask, SCHED_GROUP_JOIN);
- odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
return ret; } @@ -1662,24 +1662,24 @@ static int schedule_group_leave(odp_schedule_group_t group, if (mask == NULL) ODP_ABORT("name or mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock); + odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group; - if (bitset_is_set(sg_free, sgi)) { + if (bitset_is_set(global->sg_free, sgi)) { ret = -1; goto group_not_found; }
- sg = sg_vec[sgi]; + sg = global->sg_vec[sgi];
ret = schedule_group_update(sg, sgi, mask, SCHED_GROUP_LEAVE);
- odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
group_not_found: - odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
invalid_group: return ret; @@ -1701,23 +1701,23 @@ static int schedule_group_thrmask(odp_schedule_group_t group, if (mask == NULL) ODP_ABORT("name or mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock); + odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group; - if (bitset_is_set(sg_free, sgi)) { + if (bitset_is_set(global->sg_free, sgi)) { ret = -1; goto group_not_found; }
- sg = sg_vec[sgi]; + sg = global->sg_vec[sgi]; ret = _schedule_group_thrmask(sg, mask);
- odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
group_not_found: - odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
invalid_group: return ret; @@ -1739,26 +1739,26 @@ static int schedule_group_info(odp_schedule_group_t group, if (info == NULL) ODP_ABORT("name or mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock); + odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group; - if (bitset_is_set(sg_free, sgi)) { + if (bitset_is_set(global->sg_free, sgi)) { ret = -1; goto group_not_found; }
- sg = sg_vec[sgi]; + sg = global->sg_vec[sgi];
ret = _schedule_group_thrmask(sg, &info->thrmask);
info->name = sg->name;
- odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
group_not_found: - odp_spinlock_unlock(&sched_grp_lock); + odp_spinlock_unlock(&global->sched_grp_lock);
invalid_group: return ret; @@ -1770,14 +1770,30 @@ static int schedule_init_global(void) odp_schedule_group_t tmp_all; odp_schedule_group_t tmp_wrkr; odp_schedule_group_t tmp_ctrl; + odp_shm_t shm; uint32_t bits; uint32_t pool_size; uint64_t min_alloc; uint64_t max_alloc;
+ shm = odp_shm_reserve("_odp_sched_scalable", + sizeof(sched_global_t), + ODP_CACHE_LINE_SIZE, _ODP_SHM_NO_HP); + + global = odp_shm_addr(shm); + if (global == NULL) { + ODP_ERR("Schedule init: Shm reserve failed.\n"); + return -1; + } + + memset(global, 0, sizeof(sched_global_t)); + global->shm = shm; + /* Attach to the pool if it exists */ - sched_shm_pool = _odp_ishm_pool_lookup("sched_shm_pool"); - if (sched_shm_pool == NULL) { + global->sched_shm_pool = _odp_ishm_pool_lookup("sched_shm_pool"); + if (global->sched_shm_pool == NULL) { + _odp_ishm_pool_t *pool; + /* Add storage required for sched groups. Assume worst case * xfactor of MAXTHREADS. */ @@ -1791,31 +1807,31 @@ static int schedule_init_global(void) (ODP_SCHED_PRIO_NUM * MAXTHREADS - 1) * sizeof(sched_queue_t); max_alloc = min_alloc; - sched_shm_pool = _odp_ishm_pool_create("sched_shm_pool", - pool_size, - min_alloc, max_alloc, - _ODP_ISHM_SINGLE_VA); - if (sched_shm_pool == NULL) { + pool = _odp_ishm_pool_create("sched_shm_pool", pool_size, + min_alloc, max_alloc, + _ODP_ISHM_SINGLE_VA); + if (pool == NULL) { ODP_ERR("Failed to allocate shared memory pool " "for sched\n"); goto failed_sched_shm_pool_create; } + global->sched_shm_pool = pool; }
- odp_spinlock_init(&sched_grp_lock); + odp_spinlock_init(&global->sched_grp_lock);
bits = MAX_SCHED_GROUP; - if (MAX_SCHED_GROUP == sizeof(sg_free) * CHAR_BIT) - sg_free = ~0; + if (MAX_SCHED_GROUP == sizeof(global->sg_free) * CHAR_BIT) + global->sg_free = ~0; else - sg_free = (1 << bits) - 1; + global->sg_free = (1 << bits) - 1;
for (uint32_t i = 0; i < MAX_SCHED_GROUP; i++) - sg_vec[i] = NULL; + global->sg_vec[i] = NULL; for (uint32_t i = 0; i < MAXTHREADS; i++) { - thread_state[i].sg_sem = 0; + global->thread_state[i].sg_sem = 0; for (uint32_t j = 0; j < ODP_SCHED_PRIO_NUM; j++) - thread_state[i].sg_wanted[j] = bitset_null(); + global->thread_state[i].sg_wanted[j] = bitset_null(); }
/* Create sched groups for default GROUP_ALL, GROUP_WORKER and @@ -1871,7 +1887,12 @@ static int schedule_term_global(void) if (odp_schedule_group_destroy(ODP_SCHED_GROUP_CONTROL) != 0) ODP_ERR("Failed to destroy ODP_SCHED_GROUP_CONTROL\n");
- _odp_ishm_pool_destroy(sched_shm_pool); + _odp_ishm_pool_destroy(global->sched_shm_pool); + + if (odp_shm_free(global->shm)) { + ODP_ERR("Shm free failed for scalable scheduler"); + return -1; + }
return 0; }
commit d32bca1d3bd13efdef8b01a459fa1ce13042975b Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 10 09:50:05 2018 +0300
validation: cls: interleave tcp test flows
For better test coverage, send two packet flows interleaved (in two/three packet batches). Also check that a received packet is from the correct queue.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c index dfda1944..55a86f5b 100644 --- a/test/validation/api/classification/odp_classification_test_pmr.c +++ b/test/validation/api/classification/odp_classification_test_pmr.c @@ -191,7 +191,7 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt) uint32_t seqno[num_pkt]; uint16_t val; uint16_t mask; - int retval, i, num_queue, num_default; + int retval, i, sent_queue, recv_queue, sent_default, recv_default; odp_pktio_t pktio; odp_queue_t queue; odp_queue_t retqueue; @@ -295,6 +295,9 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt) odp_packet_free(pkt); }
+ sent_queue = 0; + sent_default = 0; + /* Both queues simultaneously */ for (i = 0; i < 2 * num_pkt; i++) { pkt = create_packet(default_pkt_info); @@ -305,32 +308,41 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt)
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- if (i < num_pkt) + if ((i % 5) < 2) { + sent_queue++; tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT); - else + } else { + sent_default++; tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1); + }
enqueue_pktio_interface(pkt, pktio); }
- num_queue = 0; - num_default = 0; + recv_queue = 0; + recv_default = 0;
for (i = 0; i < 2 * num_pkt; i++) { pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS); CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID); CU_ASSERT(retqueue == queue || retqueue == default_queue);
- if (retqueue == queue) - num_queue++; - else if (retqueue == default_queue) - num_default++; + tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ if (retqueue == queue) { + recv_queue++; + CU_ASSERT(tcp->dst_port == + odp_cpu_to_be_16(CLS_DEFAULT_DPORT)); + } else if (retqueue == default_queue) { + recv_default++; + CU_ASSERT(tcp->dst_port == + odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1)); + } odp_packet_free(pkt); }
- CU_ASSERT(num_queue == num_pkt); - CU_ASSERT(num_default == num_pkt); + CU_ASSERT(sent_queue == recv_queue); + CU_ASSERT(sent_default == recv_default);
odp_cos_destroy(cos); odp_cos_destroy(default_cos); @@ -1988,7 +2000,7 @@ static void classification_test_pmr_term_ipv6saddr(void)
static void classification_test_pmr_term_tcp_dport(void) { - _classification_test_pmr_term_tcp_dport(1); + _classification_test_pmr_term_tcp_dport(2); }
static void classification_test_pmr_term_tcp_dport_multi(void)
commit 3cd53f15344c8ccf05dcf7812b4e2ff6a0b20961 Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 10 10:12:37 2018 +0300
linux-gen: pktio: fix index calculation of multiple dest_queue
Interleaved packets to different destination queues resulted in bad indexes when pktin_recv_buf() was recording which packets belong to the same dest_queue.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c index 4acce618..a473a3c9 100644 --- a/platform/linux-generic/odp_packet_io.c +++ b/platform/linux-generic/odp_packet_io.c @@ -614,7 +614,7 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index, odp_packet_t packets[num]; odp_packet_hdr_t *pkt_hdr; odp_buffer_hdr_t *buf_hdr; - int i, pkts, num_rx, num_ev, num_dst, num_cur, cur_dst; + int i, pkts, num_rx, num_ev, num_dst; odp_queue_t cur_queue; odp_event_t ev[num]; odp_queue_t dst[num]; @@ -622,6 +622,10 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
num_rx = 0; num_dst = 0; + num_ev = 0; + + /* Some compilers need this dummy initialization */ + cur_queue = ODP_QUEUE_INVALID;
pkts = entry->s.ops->recv(entry, pktin_index, packets, num);
@@ -632,29 +636,23 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) { /* Sort events for enqueue multi operation(s) */ - if (num_dst == 0) { - num_ev = 0; + if (odp_unlikely(num_dst == 0)) { num_dst = 1; - num_cur = 0; cur_queue = pkt_hdr->dst_queue; dst[0] = cur_queue; dst_idx[0] = 0; }
ev[num_ev] = odp_packet_to_event(pkt); - num_ev++;
if (cur_queue != pkt_hdr->dst_queue) { - cur_dst = num_dst; - num_dst++; cur_queue = pkt_hdr->dst_queue; - dst[cur_dst] = cur_queue; - dst_idx[cur_dst] = num_cur; - num_cur = 0; + dst[num_dst] = cur_queue; + dst_idx[num_dst] = num_ev; + num_dst++; }
- num_cur++; - + num_ev++; continue; } buffer_hdrs[num_rx++] = buf_hdr;
commit e40699a6950fedea5ebce172299a9a6aa2dec424 Author: Petri Savolainen petri.savolainen@linaro.org Date: Fri Oct 12 10:15:18 2018 +0300
validation: pool: add max num pool tests
Test that pools can be created with the maximum number of events defined in pool capability. Test that all events can be allocated and freed. Event size is small.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c index 2795e857..71a1a284 100644 --- a/test/validation/api/pool/pool.c +++ b/test/validation/api/pool/pool.c @@ -11,6 +11,7 @@
#define PKT_LEN 400 #define PKT_NUM 500 +#define MAX_NUM_DEFAULT (10 * 1024 * 1024)
static const int default_buffer_size = 1500; static const int default_buffer_num = 1000; @@ -32,7 +33,7 @@ static void pool_test_create_destroy_buffer(void)
odp_pool_param_init(¶m);
- param.type = ODP_POOL_BUFFER, + param.type = ODP_POOL_BUFFER; param.buf.size = default_buffer_size; param.buf.align = ODP_CACHE_LINE_SIZE; param.buf.num = default_buffer_num; @@ -106,7 +107,7 @@ static void pool_test_alloc_packet(void)
odp_pool_param_init(¶m);
- param.type = ODP_POOL_PACKET, + param.type = ODP_POOL_PACKET; param.pkt.num = PKT_NUM; param.pkt.len = PKT_LEN;
@@ -145,7 +146,7 @@ static void pool_test_alloc_packet_subparam(void)
odp_pool_param_init(¶m);
- param.type = ODP_POOL_PACKET, + param.type = ODP_POOL_PACKET; param.pkt.num = PKT_NUM; param.pkt.len = PKT_LEN; param.pkt.num_subparam = num_sub; @@ -270,6 +271,159 @@ static void pool_test_info_data_range(void) CU_ASSERT(odp_pool_destroy(pool) == 0); }
+static void pool_test_buf_max_num(void) +{ + odp_pool_t pool; + odp_pool_param_t param; + odp_pool_capability_t capa; + uint32_t max_num, num, i; + odp_shm_t shm; + odp_buffer_t *buf; + + CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0); + + max_num = MAX_NUM_DEFAULT; + if (capa.buf.max_num) + max_num = capa.buf.max_num; + + odp_pool_param_init(¶m); + + param.type = ODP_POOL_BUFFER; + param.buf.num = max_num; + param.buf.size = 10; + + pool = odp_pool_create("test_buf_max_num", ¶m); + + CU_ASSERT_FATAL(pool != ODP_POOL_INVALID); + + shm = odp_shm_reserve("test_max_num_shm", + max_num * sizeof(odp_buffer_t), + sizeof(odp_buffer_t), 0); + + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); + + buf = odp_shm_addr(shm); + + num = 0; + for (i = 0; i < max_num; i++) { + buf[num] = odp_buffer_alloc(pool); + + if (buf[num] != ODP_BUFFER_INVALID) + num++; + } + + CU_ASSERT(num == max_num); + + for (i = 0; i < num; i++) + odp_buffer_free(buf[i]); + + CU_ASSERT(odp_shm_free(shm) == 0); + CU_ASSERT(odp_pool_destroy(pool) == 0); +} + +static void pool_test_pkt_max_num(void) +{ + odp_pool_t pool; + odp_pool_param_t param; + odp_pool_capability_t capa; + uint32_t max_num, num, i; + odp_shm_t shm; + odp_packet_t *pkt; + uint32_t len = 10; + + CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0); + + max_num = MAX_NUM_DEFAULT; + if (capa.pkt.max_num) + max_num = capa.pkt.max_num; + + odp_pool_param_init(¶m); + + param.type = ODP_POOL_PACKET; + param.pkt.num = max_num; + param.pkt.max_num = max_num; + param.pkt.len = len; + param.pkt.max_len = len; + param.pkt.headroom = 0; + + pool = odp_pool_create("test_packet_max_num", ¶m); + + CU_ASSERT_FATAL(pool != ODP_POOL_INVALID); + + shm = odp_shm_reserve("test_max_num_shm", + max_num * sizeof(odp_packet_t), + sizeof(odp_packet_t), 0); + + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); + + pkt = odp_shm_addr(shm); + + num = 0; + for (i = 0; i < max_num; i++) { + pkt[num] = odp_packet_alloc(pool, len); + + if (pkt[num] != ODP_PACKET_INVALID) + num++; + } + 
+ CU_ASSERT(num == max_num); + + for (i = 0; i < num; i++) + odp_packet_free(pkt[i]); + + CU_ASSERT(odp_shm_free(shm) == 0); + CU_ASSERT(odp_pool_destroy(pool) == 0); +} + +static void pool_test_tmo_max_num(void) +{ + odp_pool_t pool; + odp_pool_param_t param; + odp_pool_capability_t capa; + uint32_t max_num, num, i; + odp_shm_t shm; + odp_timeout_t *tmo; + + CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0); + + max_num = MAX_NUM_DEFAULT; + if (capa.tmo.max_num) + max_num = capa.tmo.max_num; + + odp_pool_param_init(¶m); + + param.type = ODP_POOL_TIMEOUT; + param.tmo.num = max_num; + + pool = odp_pool_create("test_tmo_max_num", ¶m); + + CU_ASSERT_FATAL(pool != ODP_POOL_INVALID); + + shm = odp_shm_reserve("test_max_num_shm", + max_num * sizeof(odp_packet_t), + sizeof(odp_packet_t), 0); + + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); + + tmo = odp_shm_addr(shm); + + num = 0; + for (i = 0; i < max_num; i++) { + tmo[num] = odp_timeout_alloc(pool); + + if (tmo[num] != ODP_TIMEOUT_INVALID) + num++; + } + + CU_ASSERT(num == max_num); + + for (i = 0; i < num; i++) + odp_timeout_free(tmo[i]); + + CU_ASSERT(odp_shm_free(shm) == 0); + CU_ASSERT(odp_pool_destroy(pool) == 0); +} + odp_testinfo_t pool_suite[] = { ODP_TEST_INFO(pool_test_create_destroy_buffer), ODP_TEST_INFO(pool_test_create_destroy_packet), @@ -279,6 +433,9 @@ odp_testinfo_t pool_suite[] = { ODP_TEST_INFO(pool_test_info_packet), ODP_TEST_INFO(pool_test_lookup_info_print), ODP_TEST_INFO(pool_test_info_data_range), + ODP_TEST_INFO(pool_test_buf_max_num), + ODP_TEST_INFO(pool_test_pkt_max_num), + ODP_TEST_INFO(pool_test_tmo_max_num), ODP_TEST_INFO_NULL, };
commit ac4ac579bba217043604c3f4ad21ed0446214572 Author: Petri Savolainen petri.savolainen@linaro.org Date: Fri Oct 12 14:43:38 2018 +0300
linux-gen: pool: reduce max pool size
Reduce maximum pool size, so that a maximum size (packet) pool requires less than 1GB of SHM memory. The limit of 1GB (for the default configuration) comes from the maximum SHM reserve size in process mode (single VA) and the limited memory size of CI virtual machines.
This define can be increased when pool size options are added into the configuration file.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index d87c457b..a94012ac 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -157,7 +157,7 @@ extern "C" { * Maximum number of events in a pool. Power of two minus one results optimal * memory usage for the ring. */ -#define CONFIG_POOL_MAX_NUM ((1 * 1024 * 1024) - 1) +#define CONFIG_POOL_MAX_NUM ((256 * 1024) - 1)
/* * Maximum number of events in a thread local pool cache
commit a0857f0d63bccdc8a16eab5068b63029ea84f005 Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 11 15:03:01 2018 +0300
linux-gen: ring: add reader tail check
Reader tail index is needed to detect if a reader is so much behind that a writer is going to overwrite the data it is reading.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h index 9a637afb..ad2f37ef 100644 --- a/platform/linux-generic/include/odp_ring_internal.h +++ b/platform/linux-generic/include/odp_ring_internal.h @@ -23,8 +23,9 @@ extern "C" { * Ring stores head and tail counters. Ring indexes are formed from these * counters with a mask (mask = ring_size - 1), which requires that ring size * must be a power of two. Also ring size must be larger than the maximum - * number of data items that will be stored on it (there's no check against - * overwriting). */ + * number of data items that will be stored on it as write operations are + * assumed to succeed eventually (after readers complete their current + * operations). */ typedef struct ODP_ALIGNED_CACHE { /* Writer head and tail */ odp_atomic_u32_t w_head; @@ -33,6 +34,7 @@ typedef struct ODP_ALIGNED_CACHE {
/* Reader head and tail */ odp_atomic_u32_t r_head; + odp_atomic_u32_t r_tail;
uint32_t data[0]; } ring_t; @@ -53,6 +55,7 @@ static inline void ring_init(ring_t *ring) odp_atomic_init_u32(&ring->w_head, 0); odp_atomic_init_u32(&ring->w_tail, 0); odp_atomic_init_u32(&ring->r_head, 0); + odp_atomic_init_u32(&ring->r_tail, 0); }
/* Dequeue data from the ring head */ @@ -75,12 +78,19 @@ static inline uint32_t ring_deq(ring_t *ring, uint32_t mask, uint32_t *data) new_head = head + 1;
} while (odp_unlikely(cas_mo_u32(&ring->r_head, &head, new_head, - __ATOMIC_ACQ_REL, + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0));
- /* Read data. CAS acquire-release ensures that data read - * does not move above from here. */ + /* Read data. */ *data = ring->data[new_head & mask]; + + /* Wait until other readers have updated the tail */ + while (odp_unlikely(odp_atomic_load_u32(&ring->r_tail) != head)) + odp_cpu_pause(); + + /* Update the tail. Writers acquire it. */ + odp_atomic_store_rel_u32(&ring->r_tail, new_head); + return 1; }
@@ -110,14 +120,20 @@ static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask, new_head = head + num;
} while (odp_unlikely(cas_mo_u32(&ring->r_head, &head, new_head, - __ATOMIC_ACQ_REL, + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0));
- /* Read data. CAS acquire-release ensures that data read - * does not move above from here. */ + /* Read data. */ for (i = 0; i < num; i++) data[i] = ring->data[(head + 1 + i) & mask];
+ /* Wait until other readers have updated the tail */ + while (odp_unlikely(odp_atomic_load_u32(&ring->r_tail) != head)) + odp_cpu_pause(); + + /* Update the tail. Writers acquire it. */ + odp_atomic_store_rel_u32(&ring->r_tail, new_head); + return num; }
@@ -125,16 +141,24 @@ static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask, static inline void ring_enq(ring_t *ring, uint32_t mask, uint32_t data) { uint32_t old_head, new_head; + uint32_t size = mask + 1;
/* Reserve a slot in the ring for writing */ old_head = odp_atomic_fetch_inc_u32(&ring->w_head); new_head = old_head + 1;
+ /* Wait for the last reader to finish. This prevents overwrite when + * a reader has been left behind (e.g. due to an interrupt) and is + * still reading the same slot. */ + while (odp_unlikely(new_head - odp_atomic_load_acq_u32(&ring->r_tail) + >= size)) + odp_cpu_pause(); + /* Write data */ ring->data[new_head & mask] = data;
/* Wait until other writers have updated the tail */ - while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head)) + while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head)) odp_cpu_pause();
/* Release the new writer tail, readers acquire it. */ @@ -146,17 +170,25 @@ static inline void ring_enq_multi(ring_t *ring, uint32_t mask, uint32_t data[], uint32_t num) { uint32_t old_head, new_head, i; + uint32_t size = mask + 1;
/* Reserve a slot in the ring for writing */ old_head = odp_atomic_fetch_add_u32(&ring->w_head, num); new_head = old_head + 1;
+ /* Wait for the last reader to finish. This prevents overwrite when + * a reader has been left behind (e.g. due to an interrupt) and is + * still reading these slots. */ + while (odp_unlikely(new_head - odp_atomic_load_acq_u32(&ring->r_tail) + >= size)) + odp_cpu_pause(); + /* Write data */ for (i = 0; i < num; i++) ring->data[(new_head + i) & mask] = data[i];
/* Wait until other writers have updated the tail */ - while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head)) + while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head)) odp_cpu_pause();
/* Release the new writer tail, readers acquire it. */
commit cb97c894c05ef5a43dce262cf178ce5f54b1c806 Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 11 15:01:46 2018 +0300
linux-gen: pool: ring size must be larger than num items
Ensure that the ring size is larger than the number of events to be stored in it.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index bfe203bf..d87c457b 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -154,9 +154,10 @@ extern "C" { #define CONFIG_BURST_SIZE 32
/* - * Maximum number of events in a pool + * Maximum number of events in a pool. Power of two minus one results optimal + * memory usage for the ring. */ -#define CONFIG_POOL_MAX_NUM (1 * 1024 * 1024) +#define CONFIG_POOL_MAX_NUM ((1 * 1024 * 1024) - 1)
/* * Maximum number of events in a thread local pool cache diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index df02fdaf..2696e8ee 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -38,7 +38,7 @@ typedef struct ODP_ALIGNED_CACHE { ring_t hdr;
/* Ring data: buffer handles */ - uint32_t buf[CONFIG_POOL_MAX_NUM]; + uint32_t buf[CONFIG_POOL_MAX_NUM + 1];
} pool_ring_t;
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 5bb5bc63..7a4a9eb9 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -471,10 +471,11 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params, FIRST_HP_SIZE - 1) / FIRST_HP_SIZE); }
- if (num <= RING_SIZE_MIN) + /* Ring size must be larger than the number of items stored */ + if (num + 1 <= RING_SIZE_MIN) ring_size = RING_SIZE_MIN; else - ring_size = ROUNDUP_POWER2_U32(num); + ring_size = ROUNDUP_POWER2_U32(num + 1);
pool->ring_mask = ring_size - 1; pool->num = num;
commit 81b8a6b38d3f71102527675529edbe5293bcd30b Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Oct 11 09:33:49 2018 +0300
linux-gen: sched: remove unnecessary queue null index
The ring no longer uses a special null index, so initializing queue indexes to the null index is not needed anymore.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c index a2eea6b1..58396293 100644 --- a/platform/linux-generic/odp_schedule_basic.c +++ b/platform/linux-generic/odp_schedule_basic.c @@ -69,17 +69,11 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) && /* Maximum pktin index. Needs to fit into 8 bits. */ #define MAX_PKTIN_INDEX 255
-/* Not a valid index */ -#define NULL_INDEX ((uint32_t)-1) - /* Maximum priority queue ring size. A ring must be large enough to store all * queues in the worst case (all queues are scheduled, have the same priority * and no spreading). */ #define MAX_RING_SIZE ODP_CONFIG_QUEUES
-/* Priority queue empty, not a valid queue index. */ -#define PRIO_QUEUE_EMPTY NULL_INDEX - /* For best performance, the number of queues should be a power of two. */ ODP_STATIC_ASSERT(CHECK_IS_POWER2(ODP_CONFIG_QUEUES), "Number_of_queues_is_not_power_of_two"); @@ -329,8 +323,6 @@ static void sched_local_init(void) sched_local.thr = odp_thread_id(); sched_local.sync_ctx = NO_SYNC_CONTEXT; sched_local.stash.queue = ODP_QUEUE_INVALID; - sched_local.stash.qi = PRIO_QUEUE_EMPTY; - sched_local.ordered.src_queue = NULL_INDEX;
spread = prio_spread_index(sched_local.thr);
@@ -380,15 +372,9 @@ static int schedule_init_global(void) for (i = 0; i < NUM_PRIO; i++) { for (j = 0; j < MAX_SPREAD; j++) { prio_queue_t *prio_q; - int k;
prio_q = &sched->prio_q[grp][i][j]; ring_init(&prio_q->ring); - - for (k = 0; k < MAX_RING_SIZE; k++) { - prio_q->queue_index[k] = - PRIO_QUEUE_EMPTY; - } } } } @@ -1177,8 +1163,7 @@ static void schedule_order_lock(uint32_t lock_index)
queue_index = sched_local.ordered.src_queue;
- ODP_ASSERT(queue_index != NULL_INDEX && - lock_index <= sched->queue[queue_index].order_lock_count && + ODP_ASSERT(lock_index <= sched->queue[queue_index].order_lock_count && !sched_local.ordered.lock_called.u8[lock_index]);
ord_lock = &sched->order[queue_index].lock[lock_index]; @@ -1207,8 +1192,7 @@ static void schedule_order_unlock(uint32_t lock_index)
queue_index = sched_local.ordered.src_queue;
- ODP_ASSERT(queue_index != NULL_INDEX && - lock_index <= sched->queue[queue_index].order_lock_count); + ODP_ASSERT(lock_index <= sched->queue[queue_index].order_lock_count);
ord_lock = &sched->order[queue_index].lock[lock_index];
commit f11e8bfd0599a717c457f2afd17e1a89febd5f8d Author: Petri Savolainen petri.savolainen@linaro.org Date: Fri Oct 12 10:19:23 2018 +0300
test: sched_latency: honor pool capability limits
Check maximum pool size from pool capability.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c index 64a21983..873738ef 100644 --- a/test/performance/odp_sched_latency.c +++ b/test/performance/odp_sched_latency.c @@ -641,11 +641,13 @@ int main(int argc, char *argv[]) odph_odpthread_params_t thr_params; odp_cpumask_t cpumask; odp_pool_t pool; + odp_pool_capability_t pool_capa; odp_pool_param_t params; odp_shm_t shm; test_globals_t *globals; test_args_t args; char cpumaskstr[ODP_CPUMASK_STR_SIZE]; + uint32_t pool_size; int i, j; int ret = 0; int num_workers = 0; @@ -706,10 +708,19 @@ int main(int argc, char *argv[]) /* * Create event pool */ + if (odp_pool_capability(&pool_capa)) { + LOG_ERR("pool capa failed\n"); + return -1; + } + + pool_size = EVENT_POOL_SIZE; + if (pool_capa.buf.max_num && pool_capa.buf.max_num < EVENT_POOL_SIZE) + pool_size = pool_capa.buf.max_num; + odp_pool_param_init(¶ms); params.buf.size = sizeof(test_event_t); params.buf.align = 0; - params.buf.num = EVENT_POOL_SIZE; + params.buf.num = pool_size; params.type = ODP_POOL_BUFFER;
pool = odp_pool_create("event_pool", ¶ms);
commit aab53b9cd951dc5b4f76e4acee8aa602ddd9ad99 Author: Petri Savolainen petri.savolainen@linaro.org Date: Fri Oct 12 16:02:50 2018 +0300
test: scheduling: honor pool capability
Limit pool size to maximum pool capability.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/performance/odp_scheduling.c b/test/performance/odp_scheduling.c index 3b75f635..5eeb6926 100644 --- a/test/performance/odp_scheduling.c +++ b/test/performance/odp_scheduling.c @@ -30,7 +30,7 @@ /* GNU lib C */ #include <getopt.h>
-#define NUM_MSG (512 * 1024) /**< Number of msg in pool */ +#define MAX_BUF (512 * 1024) /**< Maximum pool size */ #define MAX_ALLOCS 32 /**< Alloc burst size */ #define QUEUES_PER_PRIO 64 /**< Queue per priority */ #define NUM_PRIOS 2 /**< Number of tested priorities */ @@ -813,7 +813,8 @@ int main(int argc, char *argv[]) odp_instance_t instance; odph_odpthread_params_t thr_params; odp_queue_capability_t capa; - uint32_t num_queues; + odp_pool_capability_t pool_capa; + uint32_t num_queues, num_buf;
printf("\nODP example starts\n\n");
@@ -869,11 +870,19 @@ int main(int argc, char *argv[]) /* * Create message pool */ + if (odp_pool_capability(&pool_capa)) { + LOG_ERR("Pool capabilities failed.\n"); + return -1; + } + + num_buf = MAX_BUF; + if (pool_capa.buf.max_num && pool_capa.buf.max_num < MAX_BUF) + num_buf = pool_capa.buf.max_num;
odp_pool_param_init(¶ms); params.buf.size = sizeof(test_message_t); params.buf.align = 0; - params.buf.num = NUM_MSG; + params.buf.num = num_buf; params.type = ODP_POOL_BUFFER;
pool = odp_pool_create("msg_pool", ¶ms);
commit e1656a13f7a4a69dd743fd80c37a461bbdbf7d3f Author: Petri Savolainen petri.savolainen@linaro.org Date: Fri Oct 12 15:56:14 2018 +0300
test: scheduling: fix script to exit with failure status
It seems that some shells did not notice failure status. Simplified the script to exit on the first failure.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/performance/odp_scheduling_run.sh b/test/performance/odp_scheduling_run.sh index ae3d1c8f..57792276 100755 --- a/test/performance/odp_scheduling_run.sh +++ b/test/performance/odp_scheduling_run.sh @@ -17,7 +17,12 @@ run() echo odp_scheduling_run starts requesting $1 worker threads echo ===============================================
- $TEST_DIR/odp_scheduling${EXEEXT} -c $1 || ret=1 + $TEST_DIR/odp_scheduling${EXEEXT} -c $1 + + if [ $? -ne 0 ]; then + echo odp_scheduling FAILED + exit $? + fi }
run 1 @@ -26,4 +31,4 @@ run 8 run 11 run $ALL
-exit $ret +exit 0
commit 42146102091d6201399a39eadbb4a897768c27ab Author: Petri Savolainen petri.savolainen@linaro.org Date: Fri Oct 12 11:06:19 2018 +0300
helper: iplookup: check capabilities
Check pool and queue capabilities instead of assuming e.g. that 1M events can be stored in a queue. Reduced the table defines (pool / queue size requirements) as an easy fix to make the tests pass.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/helper/iplookuptable.c b/helper/iplookuptable.c index 7102eb54..84b4e2cb 100644 --- a/helper/iplookuptable.c +++ b/helper/iplookuptable.c @@ -83,9 +83,9 @@ typedef struct trie_node { } trie_node_t;
/** Number of L2\L3 entries(subtrees) per cache cube. */ -#define CACHE_NUM_SUBTREE (1 << 13) +#define CACHE_NUM_SUBTREE (4 * 1024) /** Number of trie nodes per cache cube. */ -#define CACHE_NUM_TRIE (1 << 20) +#define CACHE_NUM_TRIE (4 * 1024)
/** @typedef cache_type_t * Cache node type @@ -187,12 +187,34 @@ cache_alloc_new_pool( { odp_pool_t pool; odp_pool_param_t param; + odp_pool_capability_t pool_capa; odp_queue_t queue = tbl->free_slots[type];
odp_buffer_t buffer; char pool_name[ODPH_TABLE_NAME_LEN + 8]; uint32_t size = 0, num = 0;
+ if (odp_pool_capability(&pool_capa)) { + ODPH_ERR("pool capa failed\n"); + return -1; + } + + if (pool_capa.buf.max_num) { + if (pool_capa.buf.max_num < CACHE_NUM_TRIE || + pool_capa.buf.max_num < CACHE_NUM_SUBTREE) { + ODPH_ERR("pool size too small\n"); + return -1; + } + } + + if (pool_capa.buf.max_size) { + if (pool_capa.buf.max_size < ENTRY_SIZE * ENTRY_NUM_SUBTREE || + pool_capa.buf.max_size < sizeof(trie_node_t)) { + ODPH_ERR("buffer size too small\n"); + return -1; + } + } + /* Create new pool (new free buffers). */ odp_pool_param_init(¶m); param.type = ODP_POOL_BUFFER; @@ -223,7 +245,11 @@ cache_alloc_new_pool( while ((buffer = odp_buffer_alloc(pool)) != ODP_BUFFER_INVALID) { cache_init_buffer(buffer, type, size); - odp_queue_enq(queue, odp_buffer_to_event(buffer)); + if (odp_queue_enq(queue, odp_buffer_to_event(buffer))) { + ODPH_DBG("queue enqueue failed\n"); + odp_buffer_free(buffer); + break; + } }
tbl->cache_count[type]++; @@ -449,10 +475,28 @@ odph_table_t odph_iplookup_table_create(const char *name, odp_shm_t shm_tbl; odp_queue_t queue; odp_queue_param_t qparam; + odp_queue_capability_t queue_capa; unsigned i; - uint32_t impl_size, l1_size; + uint32_t impl_size, l1_size, queue_size; char queue_name[ODPH_TABLE_NAME_LEN + 2];
+ if (odp_queue_capability(&queue_capa)) { + ODPH_ERR("queue capa failed\n"); + return NULL; + } + + if (queue_capa.plain.max_size) { + if (queue_capa.plain.max_size < CACHE_NUM_TRIE || + queue_capa.plain.max_size < CACHE_NUM_SUBTREE) { + ODPH_ERR("queue size too small\n"); + return NULL; + } + } + + queue_size = CACHE_NUM_TRIE; + if (CACHE_NUM_SUBTREE > CACHE_NUM_TRIE) + queue_size = CACHE_NUM_SUBTREE; + /* Check for valid parameters */ if (strlen(name) == 0) { ODPH_DBG("invalid parameters\n"); @@ -502,6 +546,7 @@ odph_table_t odph_iplookup_table_create(const char *name,
odp_queue_param_init(&qparam); qparam.type = ODP_QUEUE_TYPE_PLAIN; + qparam.size = queue_size; sprintf(queue_name, "%s_%d", name, i); queue = odp_queue_create(queue_name, &qparam); if (queue == ODP_QUEUE_INVALID) {
commit 84f5ac969eb50e83cfa87a529e5a59a94196bcba Author: Matias Elo matias.elo@nokia.com Date: Wed Oct 3 13:38:24 2018 +0300
linux-gen: dpdk: improved zero-copy implementation
Improved zero-copy DPDK pktio implementation which better adheres to DPDK APIs. The new implementation reduces overhead by moving mbuf initialization to ODP pool create and by using offsets instead of saved pointers to do ODP packet / DPDK mbuf conversion.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_packet_dpdk.h b/platform/linux-generic/include/odp_packet_dpdk.h index 0dac296a..1b660bab 100644 --- a/platform/linux-generic/include/odp_packet_dpdk.h +++ b/platform/linux-generic/include/odp_packet_dpdk.h @@ -7,11 +7,25 @@ #ifndef ODP_PACKET_DPDK_H #define ODP_PACKET_DPDK_H
+#include <stdint.h> + #include <odp/api/packet_io.h>
+#include <odp_packet_internal.h> +#include <odp_pool_internal.h> + struct rte_mbuf;
-/** Cache for storing packets */ +/** + * Calculate size of zero-copy DPDK packet pool object + */ +uint32_t _odp_dpdk_pool_obj_size(pool_t *pool, uint32_t block_size); + +/** + * Create zero-copy DPDK packet pool + */ +int _odp_dpdk_pool_create(pool_t *pool); + /** Packet parser using DPDK interface */ int _odp_dpdk_packet_parse_common(packet_parser_t *pkt_hdr, const uint8_t *ptr, diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h index 380463f4..b9b11770 100644 --- a/platform/linux-generic/include/odp_packet_internal.h +++ b/platform/linux-generic/include/odp_packet_internal.h @@ -95,9 +95,6 @@ typedef struct { * Members below are not initialized by packet_init() */
- /* Type of extra data */ - uint8_t extra_type; - /* Flow hash value */ uint32_t flow_hash;
@@ -113,11 +110,6 @@ typedef struct { /* Context for IPsec */ odp_ipsec_packet_result_t ipsec_ctx;
-#ifdef ODP_PKTIO_DPDK - /* Extra space for packet descriptors. E.g. DPDK mbuf. Keep as the last - * member before data. */ - uint8_t ODP_ALIGNED_CACHE extra[PKT_EXTRA_LEN]; -#endif /* Packet data storage */ uint8_t data[0]; } odp_packet_hdr_t; diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index e8a04614..df02fdaf 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -67,11 +67,15 @@ typedef struct pool_t { uint32_t max_len; uint32_t uarea_size; uint32_t block_size; + uint32_t block_offset; uint8_t *base_addr; uint8_t *uarea_base_addr;
/* Used by DPDK zero-copy pktio */ - uint8_t mem_from_huge_pages; + uint32_t dpdk_elt_size; + uint32_t skipped_blocks; + uint8_t pool_in_use; + uint8_t mem_from_huge_pages; pool_destroy_cb_fn ext_destroy; void *ext_desc;
@@ -110,7 +114,8 @@ static inline odp_buffer_hdr_t *buf_hdr_from_index(pool_t *pool, uint64_t block_offset; odp_buffer_hdr_t *buf_hdr;
- block_offset = buffer_idx * (uint64_t)pool->block_size; + block_offset = (buffer_idx * (uint64_t)pool->block_size) + + pool->block_offset;
/* clang requires cast to uintptr_t */ buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset]; diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 8ae0e4e3..5bb5bc63 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -16,6 +16,7 @@ #include <odp_pool_internal.h> #include <odp_init_internal.h> #include <odp_packet_internal.h> +#include <odp_packet_dpdk.h> #include <odp_config_internal.h> #include <odp_debug_internal.h> #include <odp_ring_internal.h> @@ -248,7 +249,10 @@ static void init_buffers(pool_t *pool) type = pool->params.type;
for (i = 0; i < pool->num + skipped_blocks ; i++) { - addr = &pool->base_addr[i * pool->block_size]; + int skip = 0; + + addr = &pool->base_addr[(i * pool->block_size) + + pool->block_offset]; buf_hdr = addr; pkt_hdr = addr; /* Skip packet buffers which cross huge page boundaries. Some @@ -265,7 +269,7 @@ static void init_buffers(pool_t *pool) ~(page_size - 1)); if (last_page != first_page) { skipped_blocks++; - continue; + skip = 1; } } if (pool->uarea_size) @@ -310,8 +314,10 @@ static void init_buffers(pool_t *pool) pool->tailroom];
/* Store buffer index into the global pool */ - ring_enq(ring, mask, i); + if (!skip) + ring_enq(ring, mask, i); } + pool->skipped_blocks = skipped_blocks; }
static bool shm_is_from_huge_pages(odp_shm_t shm) @@ -445,6 +451,17 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params, block_size = ROUNDUP_CACHE_LINE(hdr_size + align + headroom + seg_len + tailroom);
+ /* Calculate extra space required for storing DPDK objects and mbuf + * headers. NOP if zero-copy is disabled. */ + pool->block_offset = 0; + if (params->type == ODP_POOL_PACKET) { + block_size = _odp_dpdk_pool_obj_size(pool, block_size); + if (!block_size) { + ODP_ERR("Calculating DPDK mempool obj size failed\n"); + return ODP_POOL_INVALID; + } + } + /* Allocate extra memory for skipping packet buffers which cross huge * page boundaries. */ if (params->type == ODP_POOL_PACKET) { @@ -506,6 +523,12 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params, ring_init(&pool->ring->hdr); init_buffers(pool);
+ /* Create zero-copy DPDK memory pool. NOP if zero-copy is disabled. */ + if (params->type == ODP_POOL_PACKET && _odp_dpdk_pool_create(pool)) { + ODP_ERR("Creating DPDK packet pool failed\n"); + goto error; + } + return pool->pool_hdl;
error: diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c index 34c23440..23f39445 100644 --- a/platform/linux-generic/pktio/dpdk.c +++ b/platform/linux-generic/pktio/dpdk.c @@ -68,8 +68,6 @@ #if ODP_DPDK_ZERO_COPY ODP_STATIC_ASSERT(CONFIG_PACKET_HEADROOM == RTE_PKTMBUF_HEADROOM, "ODP and DPDK headroom sizes not matching!"); -ODP_STATIC_ASSERT(PKT_EXTRA_LEN >= sizeof(struct rte_mbuf), - "DPDK rte_mbuf won't fit in odp_packet_hdr_t.extra!"); #endif
/* DPDK poll mode drivers requiring minimum RX burst size DPDK_MIN_RX_BURST */ @@ -81,6 +79,8 @@ ODP_STATIC_ASSERT(PKT_EXTRA_LEN >= sizeof(struct rte_mbuf), #define DPDK_MBUF_BUF_SIZE RTE_MBUF_DEFAULT_BUF_SIZE #define DPDK_MEMPOOL_CACHE_SIZE 64
+#define MBUF_OFFSET (ROUNDUP_CACHE_LINE(sizeof(struct rte_mbuf))) + ODP_STATIC_ASSERT((DPDK_NB_MBUF % DPDK_MEMPOOL_CACHE_SIZE == 0) && (DPDK_MEMPOOL_CACHE_SIZE <= RTE_MEMPOOL_CACHE_MAX_SIZE) && (DPDK_MEMPOOL_CACHE_SIZE <= DPDK_MBUF_BUF_SIZE * 10 / 15) @@ -165,6 +165,8 @@ void refer_constructors(void) } #endif
+static int dpdk_pktio_init(void); + static int pool_alloc(struct rte_mempool *mp);
static int lookup_opt(const char *opt_name, const char *drv_name, int *val) @@ -272,34 +274,39 @@ static inline void mbuf_update(struct rte_mbuf *mbuf, odp_packet_hdr_t *pkt_hdr, }
/** - * Initialize mbuf - * - * Called once per ODP packet. + * Initialize packet mbuf. Modified version of standard rte_pktmbuf_init() + * function. */ -static void mbuf_init(struct rte_mempool *mp, struct rte_mbuf *mbuf, - odp_packet_hdr_t *pkt_hdr) +static void pktmbuf_init(struct rte_mempool *mp, void *opaque_arg ODP_UNUSED, + void *_m, unsigned i ODP_UNUSED) { - void *buf_addr = pkt_hdr->buf_hdr.base_data - RTE_PKTMBUF_HEADROOM; - - memset(mbuf, 0, sizeof(struct rte_mbuf)); - - mbuf->priv_size = 0; - mbuf->buf_addr = buf_addr; - mbuf->buf_physaddr = rte_mem_virt2phy(buf_addr); - if (odp_unlikely(mbuf->buf_physaddr == RTE_BAD_PHYS_ADDR || - mbuf->buf_physaddr == 0)) - ODP_ABORT("Failed to map virt addr to phy"); - - mbuf->buf_len = (uint16_t)rte_pktmbuf_data_room_size(mp); - mbuf->data_off = RTE_PKTMBUF_HEADROOM; - mbuf->pool = mp; - mbuf->refcnt = 1; - mbuf->nb_segs = 1; - mbuf->port = 0xff; - - /* Store ODP packet handle inside rte_mbuf */ - mbuf->userdata = packet_handle(pkt_hdr); - pkt_hdr->extra_type = PKT_EXTRA_TYPE_DPDK; + struct rte_mbuf *m = _m; + uint32_t mbuf_size, buf_len; + odp_packet_hdr_t *pkt_hdr; + void *buf_addr; + + pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)((uint8_t *)m + MBUF_OFFSET); + buf_addr = pkt_hdr->buf_hdr.base_data - RTE_PKTMBUF_HEADROOM; + + mbuf_size = sizeof(struct rte_mbuf); + buf_len = rte_pktmbuf_data_room_size(mp); + + memset(m, 0, mbuf_size); + m->priv_size = 0; + m->buf_addr = buf_addr; + m->buf_iova = rte_mem_virt2iova(buf_addr); + m->buf_len = (uint16_t)buf_len; + m->data_off = RTE_PKTMBUF_HEADROOM; + + if (odp_unlikely(m->buf_iova == RTE_BAD_IOVA || m->buf_iova == 0)) + ODP_ABORT("Failed to map virt addr to iova\n"); + + /* Init some constant fields */ + m->pool = mp; + m->nb_segs = 1; + m->port = MBUF_INVALID_PORT; + rte_mbuf_refcnt_set(m, 1); + m->next = NULL; }
/** @@ -308,60 +315,87 @@ static void mbuf_init(struct rte_mempool *mp, struct rte_mbuf *mbuf, static struct rte_mempool *mbuf_pool_create(const char *name, pool_t *pool_entry) { - struct rte_mempool *mp; + struct rte_mempool *mp = NULL; struct rte_pktmbuf_pool_private mbp_priv; - unsigned elt_size; - unsigned num; - uint16_t data_room_size; + struct rte_mempool_objsz sz; + unsigned int elt_size = pool_entry->dpdk_elt_size; + unsigned int num = pool_entry->num; + uint32_t total_size;
if (!(pool_entry->mem_from_huge_pages)) { ODP_ERR("DPDK requires memory is allocated from huge pages\n"); - return NULL; + goto fail; }
- num = pool_entry->num; - data_room_size = pool_entry->seg_len + CONFIG_PACKET_HEADROOM; - elt_size = sizeof(struct rte_mbuf) + (unsigned)data_room_size; - mbp_priv.mbuf_data_room_size = data_room_size; - mbp_priv.mbuf_priv_size = 0; + total_size = rte_mempool_calc_obj_size(elt_size, MEMPOOL_F_NO_SPREAD, + &sz); + if (total_size != pool_entry->block_size) { + ODP_ERR("DPDK pool block size not matching to ODP pool: " + "%" PRIu32 "/%" PRIu32 "\n", total_size, + pool_entry->block_size); + goto fail; + }
- mp = rte_mempool_create_empty(name, num, elt_size, cache_size(num), + /* Skipped buffers have to be taken into account to populate pool + * properly. */ + mp = rte_mempool_create_empty(name, num + pool_entry->skipped_blocks, + elt_size, cache_size(num), sizeof(struct rte_pktmbuf_pool_private), - rte_socket_id(), 0); + rte_socket_id(), MEMPOOL_F_NO_SPREAD); if (mp == NULL) { ODP_ERR("Failed to create empty DPDK packet pool\n"); - return NULL; + goto fail; }
if (rte_mempool_set_ops_byname(mp, "odp_pool", pool_entry)) { ODP_ERR("Failed setting mempool operations\n"); - return NULL; + goto fail; }
+ mbp_priv.mbuf_data_room_size = pool_entry->seg_len; + mbp_priv.mbuf_priv_size = 0; rte_pktmbuf_pool_init(mp, &mbp_priv);
if (pool_alloc(mp)) { ODP_ERR("Failed allocating mempool\n"); - return NULL; + goto fail; + } + + num = rte_mempool_populate_iova(mp, (char *)pool_entry->base_addr, + RTE_BAD_IOVA, pool_entry->shm_size, + NULL, NULL); + if (num <= 0) { + ODP_ERR("Failed to populate mempool: %d\n", num); + goto fail; }
+ rte_mempool_obj_iter(mp, pktmbuf_init, NULL); + return mp; + +fail: + if (mp) + rte_mempool_free(mp); + return NULL; }
/* DPDK external memory pool operations */
-static int pool_enqueue(struct rte_mempool *mp ODP_UNUSED, +static int pool_enqueue(struct rte_mempool *mp, void * const *obj_table, unsigned num) { odp_packet_t pkt_tbl[num]; + pool_t *pool_entry = (pool_t *)mp->pool_config; unsigned i;
- if (odp_unlikely(num == 0)) + if (odp_unlikely(num == 0 || !pool_entry->pool_in_use)) return 0;
- for (i = 0; i < num; i++) - pkt_tbl[i] = (odp_packet_t)((struct rte_mbuf *) - obj_table[i])->userdata; + for (i = 0; i < num; i++) { + odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)(uintptr_t) + ((uint8_t *)obj_table[i] + MBUF_OFFSET); + pkt_tbl[i] = packet_handle(pkt_hdr); + }
odp_packet_free_multi(pkt_tbl, num);
@@ -387,13 +421,10 @@ static int pool_dequeue_bulk(struct rte_mempool *mp, void **obj_table, }
for (i = 0; i < pkts; i++) { - odp_packet_t pkt = packet_tbl[i]; - odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt); - struct rte_mbuf *mbuf = (struct rte_mbuf *) - (uintptr_t)pkt_hdr->extra; - if (pkt_hdr->extra_type != PKT_EXTRA_TYPE_DPDK) - mbuf_init(mp, mbuf, pkt_hdr); - obj_table[i] = mbuf; + odp_packet_hdr_t *pkt_hdr = packet_hdr(packet_tbl[i]); + + obj_table[i] = (struct rte_mbuf *)(uintptr_t) + ((uint8_t *)pkt_hdr - MBUF_OFFSET); }
return 0; @@ -438,21 +469,23 @@ static void pool_destroy(void *pool) { struct rte_mempool *mp = (struct rte_mempool *)pool;
- if (mp != NULL) + if (mp != NULL) { + pool_t *pool_entry = (pool_t *)mp->pool_config; + + pool_entry->pool_in_use = 0; rte_mempool_free(mp); + } }
-static struct rte_mempool *pool_create(pool_t *pool) +int _odp_dpdk_pool_create(pool_t *pool) { struct rte_mempool *pkt_pool; char pool_name[RTE_MEMPOOL_NAMESIZE];
- odp_ticketlock_lock(&pool->lock); + if (!ODP_DPDK_ZERO_COPY) + return 0;
- if (pool->ext_desc != NULL) { - odp_ticketlock_unlock(&pool->lock); - return (struct rte_mempool *)pool->ext_desc; - } + pool->pool_in_use = 0;
snprintf(pool_name, sizeof(pool_name), "dpdk_pktpool_%" PRIu32 "_%" PRIu32 "", odp_global_ro.main_pid, @@ -460,17 +493,41 @@ static struct rte_mempool *pool_create(pool_t *pool) pkt_pool = mbuf_pool_create(pool_name, pool);
if (pkt_pool == NULL) { - odp_ticketlock_unlock(&pool->lock); ODP_ERR("Creating external DPDK pool failed\n"); - return NULL; + return -1; }
pool->ext_desc = pkt_pool; pool->ext_destroy = pool_destroy; + pool->pool_in_use = 1; + + return 0; +} + +uint32_t _odp_dpdk_pool_obj_size(pool_t *pool, uint32_t block_size) +{ + struct rte_mempool_objsz sz; + uint32_t total_size; + + if (!ODP_DPDK_ZERO_COPY) + return block_size;
- odp_ticketlock_unlock(&pool->lock); + if (odp_global_rw->dpdk_initialized == 0) { + if (dpdk_pktio_init()) { + ODP_ERR("Initializing DPDK failed\n"); + return 0; + } + odp_global_rw->dpdk_initialized = 1; + } + + block_size += MBUF_OFFSET; + total_size = rte_mempool_calc_obj_size(block_size, MEMPOOL_F_NO_SPREAD, + &sz);
- return pkt_pool; + pool->dpdk_elt_size = sz.elt_size; + pool->block_offset = sz.header_size + MBUF_OFFSET; + + return total_size; }
static struct rte_mempool_ops ops_stack = { @@ -748,7 +805,8 @@ fail:
static inline void prefetch_pkt(struct rte_mbuf *mbuf) { - odp_packet_hdr_t *pkt_hdr = mbuf->userdata; + odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)(uintptr_t) + ((uint8_t *)mbuf + MBUF_OFFSET); void *data = rte_pktmbuf_mtod(mbuf, char *);
odp_prefetch(pkt_hdr); @@ -761,7 +819,6 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry, struct rte_mbuf *mbuf_table[], uint16_t mbuf_num, odp_time_t *ts) { - odp_packet_t pkt; odp_packet_hdr_t *pkt_hdr; uint16_t pkt_len; struct rte_mbuf *mbuf; @@ -799,8 +856,8 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry, data = rte_pktmbuf_mtod(mbuf, char *); pkt_len = rte_pktmbuf_pkt_len(mbuf);
- pkt = (odp_packet_t)mbuf->userdata; - pkt_hdr = packet_hdr(pkt); + pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)((uint8_t *)mbuf + + MBUF_OFFSET);
if (pktio_cls_enabled(pktio_entry)) { packet_parse_reset(&parsed_hdr); @@ -845,7 +902,7 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
packet_set_ts(pkt_hdr, ts);
- pkt_table[nb_pkts++] = pkt; + pkt_table[nb_pkts++] = packet_handle(pkt_hdr); }
return nb_pkts; @@ -866,45 +923,25 @@ static inline int pkt_to_mbuf_zero(pktio_entry_t *pktio_entry, for (i = 0; i < num; i++) { odp_packet_t pkt = pkt_table[i]; odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt); - struct rte_mbuf *mbuf = (struct rte_mbuf *) - (uintptr_t)pkt_hdr->extra; + struct rte_mbuf *mbuf = (struct rte_mbuf *)(uintptr_t) + ((uint8_t *)pkt_hdr - MBUF_OFFSET); uint16_t pkt_len = odp_packet_len(pkt);
if (odp_unlikely(pkt_len > pkt_dpdk->mtu)) goto fail;
- if (odp_likely(pkt_hdr->buf_hdr.segcount == 1 && - pkt_hdr->extra_type == PKT_EXTRA_TYPE_DPDK)) { + if (odp_likely(pkt_hdr->buf_hdr.segcount == 1)) { mbuf_update(mbuf, pkt_hdr, pkt_len);
if (odp_unlikely(pktio_entry->s.chksum_insert_ena)) pkt_set_ol_tx(pktout_cfg, pktout_capa, pkt_hdr, mbuf, odp_packet_data(pkt)); } else { - pool_t *pool_entry = pkt_hdr->buf_hdr.pool_ptr; - - if (odp_unlikely(pool_entry->ext_desc == NULL)) { - if (pool_create(pool_entry) == NULL) - ODP_ABORT("Creating DPDK pool failed"); - } - - if (pkt_hdr->buf_hdr.segcount != 1 || - !pool_entry->mem_from_huge_pages) { - /* Fall back to packet copy */ - if (odp_unlikely(pkt_to_mbuf(pktio_entry, &mbuf, - &pkt, 1) != 1)) - goto fail; - (*copy_count)++; - - } else { - mbuf_init((struct rte_mempool *) - pool_entry->ext_desc, mbuf, pkt_hdr); - mbuf_update(mbuf, pkt_hdr, pkt_len); - if (pktio_entry->s.chksum_insert_ena) - pkt_set_ol_tx(pktout_cfg, pktout_capa, - pkt_hdr, mbuf, - odp_packet_data(pkt)); - } + /* Fall back to packet copy */ + if (odp_unlikely(pkt_to_mbuf(pktio_entry, &mbuf, + &pkt, 1) != 1)) + goto fail; + (*copy_count)++; } mbuf_table[i] = mbuf; } @@ -1486,10 +1523,7 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED, pkt_dpdk->min_rx_burst = 0;
if (ODP_DPDK_ZERO_COPY) { - if (pool_entry->ext_desc != NULL) - pkt_pool = (struct rte_mempool *)pool_entry->ext_desc; - else - pkt_pool = pool_create(pool_entry); + pkt_pool = (struct rte_mempool *)pool_entry->ext_desc; } else { snprintf(pool_name, sizeof(pool_name), "pktpool_%s", netdev); /* Check if the pool exists already */ @@ -1879,4 +1913,27 @@ const pktio_if_ops_t dpdk_pktio_ops = { .output_queues_config = dpdk_output_queues_config };
+#else + +#include <stdint.h> + +#include <odp/api/hints.h> + +#include <odp_packet_dpdk.h> +#include <odp_pool_internal.h> + +/* + * Dummy functions for pool_create() + */ + +uint32_t _odp_dpdk_pool_obj_size(pool_t *pool ODP_UNUSED, uint32_t block_size) +{ + return block_size; +} + +int _odp_dpdk_pool_create(pool_t *pool ODP_UNUSED) +{ + return 0; +} + #endif /* ODP_PKTIO_DPDK */
commit 9c15202e04ce97601d9910ce013da22fc5a3a2b0 Author: Matias Elo matias.elo@nokia.com Date: Tue Oct 9 14:26:47 2018 +0300
linux-gen: dpdk: prefix visible internal parse functions with _odp_
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_packet_dpdk.h b/platform/linux-generic/include/odp_packet_dpdk.h index d457cfa3..0dac296a 100644 --- a/platform/linux-generic/include/odp_packet_dpdk.h +++ b/platform/linux-generic/include/odp_packet_dpdk.h @@ -13,23 +13,24 @@ struct rte_mbuf;
/** Cache for storing packets */ /** Packet parser using DPDK interface */ -int dpdk_packet_parse_common(packet_parser_t *pkt_hdr, - const uint8_t *ptr, - uint32_t pkt_len, - uint32_t seg_len, - struct rte_mbuf *mbuf, - int layer, - odp_pktin_config_opt_t pktin_cfg); +int _odp_dpdk_packet_parse_common(packet_parser_t *pkt_hdr, + const uint8_t *ptr, + uint32_t pkt_len, + uint32_t seg_len, + struct rte_mbuf *mbuf, + int layer, + odp_pktin_config_opt_t pktin_cfg);
-static inline int dpdk_packet_parse_layer(odp_packet_hdr_t *pkt_hdr, - struct rte_mbuf *mbuf, - odp_pktio_parser_layer_t layer, - odp_pktin_config_opt_t pktin_cfg) +static inline int _odp_dpdk_packet_parse_layer(odp_packet_hdr_t *pkt_hdr, + struct rte_mbuf *mbuf, + odp_pktio_parser_layer_t layer, + odp_pktin_config_opt_t pktin_cfg) { uint32_t seg_len = pkt_hdr->buf_hdr.seg[0].len; void *base = pkt_hdr->buf_hdr.seg[0].data;
- return dpdk_packet_parse_common(&pkt_hdr->p, base, pkt_hdr->frame_len, - seg_len, mbuf, layer, pktin_cfg); + return _odp_dpdk_packet_parse_common(&pkt_hdr->p, base, + pkt_hdr->frame_len, seg_len, mbuf, + layer, pktin_cfg); } #endif diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c index 3e34b247..34c23440 100644 --- a/platform/linux-generic/pktio/dpdk.c +++ b/platform/linux-generic/pktio/dpdk.c @@ -530,10 +530,11 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry, if (pktio_cls_enabled(pktio_entry)) { packet_parse_reset(&parsed_hdr); packet_set_len(&parsed_hdr, pkt_len); - if (dpdk_packet_parse_common(&parsed_hdr.p, data, - pkt_len, pkt_len, mbuf, - ODP_PROTO_LAYER_ALL, - pktin_cfg)) { + if (_odp_dpdk_packet_parse_common(&parsed_hdr.p, data, + pkt_len, pkt_len, + mbuf, + ODP_PROTO_LAYER_ALL, + pktin_cfg)) { odp_packet_free(pkt_table[i]); rte_pktmbuf_free(mbuf); continue; @@ -557,8 +558,9 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry, if (pktio_cls_enabled(pktio_entry)) copy_packet_cls_metadata(&parsed_hdr, pkt_hdr); else if (parse_layer != ODP_PROTO_LAYER_NONE) - if (dpdk_packet_parse_layer(pkt_hdr, mbuf, parse_layer, - pktin_cfg)) { + if (_odp_dpdk_packet_parse_layer(pkt_hdr, mbuf, + parse_layer, + pktin_cfg)) { odp_packet_free(pkt); rte_pktmbuf_free(mbuf); continue; @@ -803,10 +805,11 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry, if (pktio_cls_enabled(pktio_entry)) { packet_parse_reset(&parsed_hdr); packet_set_len(&parsed_hdr, pkt_len); - if (dpdk_packet_parse_common(&parsed_hdr.p, data, - pkt_len, pkt_len, mbuf, - ODP_PROTO_LAYER_ALL, - pktin_cfg)) { + if (_odp_dpdk_packet_parse_common(&parsed_hdr.p, data, + pkt_len, pkt_len, + mbuf, + ODP_PROTO_LAYER_ALL, + pktin_cfg)) { rte_pktmbuf_free(mbuf); continue; } @@ -830,8 +833,9 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry, if (pktio_cls_enabled(pktio_entry)) copy_packet_cls_metadata(&parsed_hdr, pkt_hdr); else if 
(parse_layer != ODP_PROTO_LAYER_NONE) - if (dpdk_packet_parse_layer(pkt_hdr, mbuf, parse_layer, - pktin_cfg)) { + if (_odp_dpdk_packet_parse_layer(pkt_hdr, mbuf, + parse_layer, + pktin_cfg)) { rte_pktmbuf_free(mbuf); continue; } diff --git a/platform/linux-generic/pktio/dpdk_parse.c b/platform/linux-generic/pktio/dpdk_parse.c index e9de0756..5f2b31d0 100644 --- a/platform/linux-generic/pktio/dpdk_parse.c +++ b/platform/linux-generic/pktio/dpdk_parse.c @@ -457,10 +457,10 @@ int dpdk_packet_parse_common_l3_l4(packet_parser_t *prs, /** * DPDK packet parser */ -int dpdk_packet_parse_common(packet_parser_t *prs, const uint8_t *ptr, - uint32_t frame_len, uint32_t seg_len, - struct rte_mbuf *mbuf, int layer, - odp_pktin_config_opt_t pktin_cfg) +int _odp_dpdk_packet_parse_common(packet_parser_t *prs, const uint8_t *ptr, + uint32_t frame_len, uint32_t seg_len, + struct rte_mbuf *mbuf, int layer, + odp_pktin_config_opt_t pktin_cfg) { uint32_t offset; uint16_t ethtype;
commit 177fc4cce6485a2bb80b99309eb7947e634d37fc Author: Matias Elo matias.elo@nokia.com Date: Mon Oct 8 16:56:15 2018 +0300
linux-gen: dpdk: fix running multiple odp instances simultaneously
Prefix DPDK packet pool names and huge page mappings with PID to avoid name conflicts. Also, let DPDK detect process type automatically.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c index 578a94b0..3e34b247 100644 --- a/platform/linux-generic/pktio/dpdk.c +++ b/platform/linux-generic/pktio/dpdk.c @@ -455,7 +455,8 @@ static struct rte_mempool *pool_create(pool_t *pool) }
snprintf(pool_name, sizeof(pool_name), - "dpdk_pktpool_%" PRIu32 "", pool->pool_idx); + "dpdk_pktpool_%" PRIu32 "_%" PRIu32 "", odp_global_ro.main_pid, + pool->pool_idx); pkt_pool = mbuf_pool_create(pool_name, pool);
if (pkt_pool == NULL) { @@ -1174,15 +1175,17 @@ static int dpdk_pktio_init(void) cmdline = "";
/* masklen includes the terminating null as well */ - cmd_len = strlen("odpdpdk -c --socket-mem ") + masklen + - strlen(mem_str) + strlen(cmdline) + strlen(" "); + cmd_len = snprintf(NULL, 0, "odpdpdk --file-prefix %" PRIu32 "_ " + "--proc-type auto -c %s --socket-mem %s %s ", + odp_global_ro.main_pid, mask_str, mem_str, cmdline);
char full_cmd[cmd_len];
/* first argument is facility log, simply bind it to odpdpdk for now.*/ cmd_len = snprintf(full_cmd, cmd_len, - "odpdpdk -c %s --socket-mem %s %s", mask_str, - mem_str, cmdline); + "odpdpdk --file-prefix %" PRIu32 "_ " + "--proc-type auto -c %s --socket-mem %s %s ", + odp_global_ro.main_pid, mask_str, mem_str, cmdline);
for (i = 0, dpdk_argc = 1; i < cmd_len; ++i) { if (isspace(full_cmd[i]))
commit 386f6f8932a7897fdf8adc60aed8de7ee0174ad4 Author: Petri Savolainen petri.savolainen@linaro.org Date: Wed Oct 10 11:28:04 2018 +0300
validation: packet: add packet reset test
Added test for odp_packet_reset().
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/validation/api/packet/packet.c b/test/validation/api/packet/packet.c index 0c86b051..0f019f6c 100644 --- a/test/validation/api/packet/packet.c +++ b/test/validation/api/packet/packet.c @@ -31,7 +31,7 @@ static uint32_t packet_len; static uint32_t segmented_packet_len; static odp_bool_t segmentation_supported = true;
-odp_packet_t test_packet, segmented_test_packet; +odp_packet_t test_packet, segmented_test_packet, test_reset_packet;
static struct udata_struct { uint64_t u64; @@ -213,11 +213,23 @@ static int packet_suite_init(void)
test_packet = odp_packet_alloc(packet_pool, packet_len);
+ if (test_packet == ODP_PACKET_INVALID) { + printf("test_packet alloc failed\n"); + return -1; + } + for (i = 0; i < packet_len; i++) { odp_packet_copy_from_mem(test_packet, i, 1, &data); data++; }
+ test_reset_packet = odp_packet_alloc(packet_pool, packet_len); + + if (test_reset_packet == ODP_PACKET_INVALID) { + printf("test_reset_packet alloc failed\n"); + return -1; + } + /* Try to allocate PACKET_POOL_NUM_SEG largest possible packets to see * if segmentation is supported */ do { @@ -278,6 +290,7 @@ static int packet_suite_init(void) static int packet_suite_term(void) { odp_packet_free(test_packet); + odp_packet_free(test_reset_packet); odp_packet_free(segmented_test_packet);
if (odp_pool_destroy(packet_pool_double_uarea) != 0 || @@ -661,6 +674,70 @@ static void packet_test_length(void) CU_ASSERT(buf_len >= packet_len + headroom + tailroom); }
+static void packet_test_reset(void) +{ + uint32_t len, headroom; + uintptr_t ptr_len; + void *data, *new_data, *tail, *new_tail; + odp_packet_t pkt = test_reset_packet; + + len = odp_packet_len(pkt); + headroom = odp_packet_headroom(pkt); + CU_ASSERT(len > 1); + + if (headroom) { + data = odp_packet_data(pkt); + new_data = odp_packet_push_head(pkt, 1); + CU_ASSERT(odp_packet_len(pkt) == len + 1); + CU_ASSERT((uintptr_t)new_data == ((uintptr_t)data - 1)); + CU_ASSERT(odp_packet_headroom(pkt) == headroom - 1); + ptr_len = (uintptr_t)odp_packet_data(pkt) - + (uintptr_t)odp_packet_head(pkt); + CU_ASSERT(ptr_len == (headroom - 1)); + CU_ASSERT(odp_packet_reset(pkt, len) == 0); + CU_ASSERT(odp_packet_len(pkt) == len); + CU_ASSERT(odp_packet_headroom(pkt) == headroom); + ptr_len = (uintptr_t)odp_packet_data(pkt) - + (uintptr_t)odp_packet_head(pkt); + CU_ASSERT(ptr_len == headroom); + } + + data = odp_packet_data(pkt); + new_data = odp_packet_pull_head(pkt, 1); + CU_ASSERT(odp_packet_len(pkt) == len - 1); + CU_ASSERT((uintptr_t)new_data == ((uintptr_t)data + 1)); + CU_ASSERT(odp_packet_headroom(pkt) == headroom + 1); + ptr_len = (uintptr_t)odp_packet_data(pkt) - + (uintptr_t)odp_packet_head(pkt); + CU_ASSERT(ptr_len == (headroom + 1)); + CU_ASSERT(odp_packet_reset(pkt, len) == 0); + CU_ASSERT(odp_packet_len(pkt) == len); + CU_ASSERT(odp_packet_headroom(pkt) == headroom); + ptr_len = (uintptr_t)odp_packet_data(pkt) - + (uintptr_t)odp_packet_head(pkt); + CU_ASSERT(ptr_len == headroom); + + tail = odp_packet_tail(pkt); + new_tail = odp_packet_pull_tail(pkt, 1); + CU_ASSERT(odp_packet_len(pkt) == len - 1); + CU_ASSERT((uintptr_t)new_tail == ((uintptr_t)tail - 1)); + CU_ASSERT(odp_packet_reset(pkt, len) == 0); + CU_ASSERT(odp_packet_len(pkt) == len); + + CU_ASSERT(odp_packet_has_udp(pkt) == 0); + odp_packet_has_udp_set(pkt, 1); + CU_ASSERT(odp_packet_has_udp(pkt) != 0); + CU_ASSERT(odp_packet_reset(pkt, len) == 0); + CU_ASSERT(odp_packet_has_udp(pkt) == 0); + + 
CU_ASSERT(odp_packet_reset(pkt, len - 1) == 0); + CU_ASSERT(odp_packet_len(pkt) == (len - 1)); + + len = len - len / 2; + CU_ASSERT(odp_packet_reset(pkt, len) == 0); + CU_ASSERT(odp_packet_len(pkt) == len); +} + static void packet_test_prefetch(void) { odp_packet_prefetch(test_packet, 0, odp_packet_len(test_packet)); @@ -3423,6 +3500,7 @@ odp_testinfo_t packet_suite[] = { ODP_TEST_INFO(packet_test_debug), ODP_TEST_INFO(packet_test_segments), ODP_TEST_INFO(packet_test_length), + ODP_TEST_INFO(packet_test_reset), ODP_TEST_INFO(packet_test_prefetch), ODP_TEST_INFO(packet_test_headroom), ODP_TEST_INFO(packet_test_tailroom),
-----------------------------------------------------------------------
Summary of changes: .travis.yml | 98 +++++-- DEPENDENCIES | 37 ++- config/odp-linux-generic.conf | 47 +++- configure.ac | 2 + doc/application-api-guide/Doxyfile | 1 + doc/platform-api-guide/Doxyfile | 4 +- example/generator/odp_generator.c | 148 +--------- example/ipsec/odp_ipsec.c | 14 + example/ipsec_api/odp_ipsec.c | 14 + example/ipsec_offload/odp_ipsec_offload.c | 14 + example/l2fwd_simple/odp_l2fwd_simple.c | 9 + example/l3fwd/odp_l3fwd.c | 10 + example/switch/odp_switch.c | 10 + helper/iplookuptable.c | 53 +++- m4/ax_prog_doxygen.m4 | 177 +++++------- .../linux-generic/include/odp_config_internal.h | 16 +- platform/linux-generic/include/odp_global_data.h | 2 + platform/linux-generic/include/odp_packet_dpdk.h | 45 ++- .../linux-generic/include/odp_packet_internal.h | 8 - .../include/odp_packet_io_ring_internal.h | 9 +- platform/linux-generic/include/odp_pool_internal.h | 16 +- .../include/odp_queue_basic_internal.h | 1 + platform/linux-generic/include/odp_queue_if.h | 4 + .../include/odp_queue_scalable_internal.h | 1 + platform/linux-generic/include/odp_ring_internal.h | 52 +++- platform/linux-generic/include/odp_schedule_if.h | 13 +- .../include/odp_schedule_scalable_config.h | 3 + .../linux-generic/include/odp_timer_internal.h | 13 +- platform/linux-generic/odp_init.c | 2 + platform/linux-generic/odp_ipsec_sad.c | 124 ++++++-- platform/linux-generic/odp_libconfig.c | 43 ++- platform/linux-generic/odp_packet_io.c | 70 +---- platform/linux-generic/odp_pool.c | 108 +++++-- platform/linux-generic/odp_queue_basic.c | 29 +- platform/linux-generic/odp_queue_scalable.c | 41 ++- platform/linux-generic/odp_schedule_basic.c | 98 +++++-- platform/linux-generic/odp_schedule_scalable.c | 215 +++++++------- platform/linux-generic/odp_schedule_sp.c | 60 +++- platform/linux-generic/odp_thread.c | 44 ++- platform/linux-generic/odp_timer.c | 41 ++- platform/linux-generic/pktio/dpdk.c | 311 +++++++++++++-------- platform/linux-generic/pktio/dpdk_parse.c | 8 +- 
platform/linux-generic/pktio/ipc.c | 12 +- platform/linux-generic/pktio/ring.c | 70 +++-- .../test/mmap_vlan_ins/mmap_vlan_ins.c | 9 + platform/linux-generic/test/process-mode.conf | 2 +- scripts/build-pktio-dpdk | 40 --- scripts/ci/build.sh | 2 +- scripts/ci/distcheck.sh | 10 +- test/performance/odp_sched_latency.c | 13 +- test/performance/odp_sched_pktio.c | 32 ++- test/performance/odp_scheduling.c | 15 +- test/performance/odp_scheduling_run.sh | 9 +- .../classification/odp_classification_test_pmr.c | 36 ++- test/validation/api/packet/packet.c | 80 +++++- test/validation/api/pool/pool.c | 163 ++++++++++- 56 files changed, 1656 insertions(+), 852 deletions(-) delete mode 100755 scripts/build-pktio-dpdk
hooks/post-receive