This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, api-next has been updated via ab531245e61a6a3f4a36dbebba4bb42f2ee65ae9 (commit) via e28f851c8b5ec257102e63eac0cfb59ad9b30bd5 (commit) via d551362cbdb427daec14f065259826c2f51784f5 (commit) via 7b369d43f432053d03077c1ad30f216d2148b009 (commit) via 39dd766513a0c51ff8747c2a53a0e0ac0f53f434 (commit) via 3e208bb64df7c2811bb4d04359c35eb7c0a51ddb (commit) via 8cc2ea83ca6318421bfa67044f0dc39b1a510cda (commit) via f4d83d4e5b86f655809f97d278f1f6b5bb488849 (commit) via ce3f91503e397b70e8470b4b61c074756d541ef2 (commit) via 7daaf31cb033e044f5b241bc00df7d708cbee53f (commit) via dab3168796ed7809e717d40c12dabd9b24778168 (commit) via c09d396bd393a6cbc09f668389b6c9e172a53f7f (commit) via 8f0af9b08006290e35b117a02212b6881987adef (commit) via 6301ce94a988b2dee431ebbd791cad49cf6772a2 (commit) via 0e67e186489081513b1fa141461ff44fb74ac7f1 (commit) via 48681b60b4ccc6fc39b6fd02baec82eefe300830 (commit) via c8cf1d87783d4b4c628f219803b78731b8d4ade4 (commit) via 0948333e6bee32cb3de7e872ebb852bbed06e094 (commit) via 1220a970be53403d86cbdf0be97bad7d54cdc335 (commit) via b2ce189680ee17d6019b199c0905cb3f608a71a5 (commit) via c3ab55dc80882b8a1309fdc198abb1ac8f02437d (commit) via 1ecf0ad51eac59ffb71352573aa99146da2c0649 (commit) via 64c06865788ad5c58af3c3d42d857a7ceb9f6ab9 (commit) from eb6efdf362305c0e3045e7fae78fe3558ed3df6f (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log ----------------------------------------------------------------- commit ab531245e61a6a3f4a36dbebba4bb42f2ee65ae9 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:45 2016 +0200
linux-gen: packet: enable multi-segment packets
Enable segmentation support with the CONFIG_PACKET_MAX_SEGS configuration option.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
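For reference, an application does not read CONFIG_PACKET_MAX_SEGS directly; the resulting limits are visible through the pool capability API. A minimal sketch (hypothetical helper name, assumes ODP has already been initialized with odp_init_global()/odp_init_local()):

#include <stdio.h>
#include <odp_api.h>

/* Print the packet segmentation limits reported by the implementation.
 * With CONFIG_PACKET_MAX_SEGS == 2 and CONFIG_PACKET_SEG_SIZE == 8 kB,
 * max_segs_per_pkt and max_seg_len reflect those build-time values. */
static int print_seg_limits(void)
{
	odp_pool_capability_t capa;

	if (odp_pool_capability(&capa) != 0)
		return -1;

	printf("max segs per packet: %u\n", capa.pkt.max_segs_per_pkt);
	printf("max segment length:  %u\n", capa.pkt.max_seg_len);
	printf("max packet length:   %u\n", capa.pkt.max_len);

	return 0;
}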
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index 9a4e6eb..8818cda 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -70,12 +70,12 @@ extern "C" { /* * Maximum number of segments per packet */ -#define CONFIG_PACKET_MAX_SEGS 1 +#define CONFIG_PACKET_MAX_SEGS 2
/* * Maximum packet segment size including head- and tailrooms */ -#define CONFIG_PACKET_SEG_SIZE (64 * 1024) +#define CONFIG_PACKET_SEG_SIZE (8 * 1024)
/* Maximum data length in a segment *
commit e28f851c8b5ec257102e63eac0cfb59ad9b30bd5 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:44 2016 +0200
linux-gen: pool: check pool parameters
Check pool parameters against maximum capabilities. Also define a limit for the maximum buffer and user area sizes. 10 MB was chosen as the limit since it is small enough to be available on all Linux systems and should be more than enough for normal pool usage.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
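The same kind of validation can be done on the application side before calling odp_pool_create(), so that oversized requests are clamped rather than rejected. A minimal sketch, not part of the patch (hypothetical helper, assumes ODP is already initialized):

#include <odp_api.h>

/* Create a packet pool, clamping the requested lengths to the
 * reported pool capabilities. */
static odp_pool_t create_pkt_pool(uint32_t num, uint32_t len, uint32_t seg_len)
{
	odp_pool_capability_t capa;
	odp_pool_param_t params;

	if (odp_pool_capability(&capa) != 0)
		return ODP_POOL_INVALID;

	/* max_len == 0 means the implementation sets no packet length limit */
	if (capa.pkt.max_len && len > capa.pkt.max_len)
		len = capa.pkt.max_len;

	if (seg_len > capa.pkt.max_seg_len)
		seg_len = capa.pkt.max_seg_len;

	odp_pool_param_init(&params);
	params.type        = ODP_POOL_PACKET;
	params.pkt.num     = num;
	params.pkt.len     = len;
	params.pkt.seg_len = seg_len;

	return odp_pool_create("pkt_pool", &params);
}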
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 7c462e5..4be3827 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -29,6 +29,9 @@ #define CACHE_BURST 32 #define RING_SIZE_MIN (2 * CACHE_BURST)
+/* Define a practical limit for contiguous memory allocations */ +#define MAX_SIZE (10 * 1024 * 1024) + ODP_STATIC_ASSERT(CONFIG_POOL_CACHE_SIZE > (2 * CACHE_BURST), "cache_burst_size_too_large_compared_to_cache_size");
@@ -426,6 +429,71 @@ error: return ODP_POOL_INVALID; }
+static int check_params(odp_pool_param_t *params) +{ + odp_pool_capability_t capa; + + odp_pool_capability(&capa); + + switch (params->type) { + case ODP_POOL_BUFFER: + if (params->buf.num > capa.buf.max_num) { + printf("buf.num too large %u\n", params->buf.num); + return -1; + } + + if (params->buf.size > capa.buf.max_size) { + printf("buf.size too large %u\n", params->buf.size); + return -1; + } + + if (params->buf.align > capa.buf.max_align) { + printf("buf.align too large %u\n", params->buf.align); + return -1; + } + + break; + + case ODP_POOL_PACKET: + if (params->pkt.len > capa.pkt.max_len) { + printf("pkt.len too large %u\n", params->pkt.len); + return -1; + } + + if (params->pkt.max_len > capa.pkt.max_len) { + printf("pkt.max_len too large %u\n", + params->pkt.max_len); + return -1; + } + + if (params->pkt.seg_len > capa.pkt.max_seg_len) { + printf("pkt.seg_len too large %u\n", + params->pkt.seg_len); + return -1; + } + + if (params->pkt.uarea_size > capa.pkt.max_uarea_size) { + printf("pkt.uarea_size too large %u\n", + params->pkt.uarea_size); + return -1; + } + + break; + + case ODP_POOL_TIMEOUT: + if (params->tmo.num > capa.tmo.max_num) { + printf("tmo.num too large %u\n", params->tmo.num); + return -1; + } + break; + + default: + printf("bad pool type %i\n", params->type); + return -1; + } + + return 0; +}
odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params) { @@ -433,6 +501,9 @@ odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params) if (params && (params->type == ODP_POOL_PACKET)) return pool_create(name, params, ODP_SHM_PROC); #endif + if (check_params(params)) + return ODP_POOL_INVALID; + return pool_create(name, params, 0); }
@@ -718,7 +789,7 @@ int odp_pool_capability(odp_pool_capability_t *capa) /* Buffer pools */ capa->buf.max_pools = ODP_CONFIG_POOLS; capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX; - capa->buf.max_size = 0; + capa->buf.max_size = MAX_SIZE; capa->buf.max_num = CONFIG_POOL_MAX_NUM;
/* Packet pools */ @@ -730,7 +801,7 @@ int odp_pool_capability(odp_pool_capability_t *capa) capa->pkt.max_segs_per_pkt = CONFIG_PACKET_MAX_SEGS; capa->pkt.min_seg_len = max_seg_len; capa->pkt.max_seg_len = max_seg_len; - capa->pkt.max_uarea_size = 0; + capa->pkt.max_uarea_size = MAX_SIZE;
/* Timeout pools */ capa->tmo.max_pools = ODP_CONFIG_POOLS;
commit d551362cbdb427daec14f065259826c2f51784f5 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:43 2016 +0200
validation: pktio: honour pool capability limits
Check the pool capability limits for packet length and segment length, and do not exceed them.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/common_plat/validation/api/pktio/pktio.c b/test/common_plat/validation/api/pktio/pktio.c index befaa7e..97db626 100644 --- a/test/common_plat/validation/api/pktio/pktio.c +++ b/test/common_plat/validation/api/pktio/pktio.c @@ -120,8 +120,12 @@ static inline void _pktio_wait_linkup(odp_pktio_t pktio) } }
-static void set_pool_len(odp_pool_param_t *params) +static void set_pool_len(odp_pool_param_t *params, odp_pool_capability_t *capa) { + uint32_t seg_len; + + seg_len = capa->pkt.max_seg_len; + switch (pool_segmentation) { case PKT_POOL_SEGMENTED: /* Force segment to minimum size */ @@ -130,7 +134,7 @@ static void set_pool_len(odp_pool_param_t *params) break; case PKT_POOL_UNSEGMENTED: default: - params->pkt.seg_len = PKT_BUF_SIZE; + params->pkt.seg_len = seg_len; params->pkt.len = PKT_BUF_SIZE; break; } @@ -305,13 +309,17 @@ static int pktio_fixup_checksums(odp_packet_t pkt) static int default_pool_create(void) { odp_pool_param_t params; + odp_pool_capability_t pool_capa; char pool_name[ODP_POOL_NAME_LEN];
+ if (odp_pool_capability(&pool_capa) != 0) + return -1; + if (default_pkt_pool != ODP_POOL_INVALID) return -1;
odp_pool_param_init(¶ms); - set_pool_len(¶ms); + set_pool_len(¶ms, &pool_capa); params.pkt.num = PKT_BUF_NUM; params.type = ODP_POOL_PACKET;
@@ -594,6 +602,7 @@ static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b, int i, ret, num_rx;
if (packet_len == USE_MTU) { + odp_pool_capability_t pool_capa; uint32_t mtu;
mtu = odp_pktio_mtu(pktio_a->id); @@ -603,6 +612,11 @@ static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b, packet_len = mtu; if (packet_len > PKT_LEN_MAX) packet_len = PKT_LEN_MAX; + + CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0); + + if (packet_len > pool_capa.pkt.max_len) + packet_len = pool_capa.pkt.max_len; }
/* generate test packets to send */ @@ -2004,9 +2018,13 @@ static int create_pool(const char *iface, int num) { char pool_name[ODP_POOL_NAME_LEN]; odp_pool_param_t params; + odp_pool_capability_t pool_capa; + + if (odp_pool_capability(&pool_capa) != 0) + return -1;
odp_pool_param_init(¶ms); - set_pool_len(¶ms); + set_pool_len(¶ms, &pool_capa); params.pkt.num = PKT_BUF_NUM; params.type = ODP_POOL_PACKET;
commit 7b369d43f432053d03077c1ad30f216d2148b009 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:42 2016 +0200
validation: crypto: honour pool capability limits
Reduce the oversized packet length and segment length requirements from 32 kB to 1 kB (only tens of bytes are actually used). Also check that the requested lengths do not exceed the pool capabilities.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/common_plat/validation/api/crypto/crypto.c b/test/common_plat/validation/api/crypto/crypto.c index 9c9a00d..2089016 100644 --- a/test/common_plat/validation/api/crypto/crypto.c +++ b/test/common_plat/validation/api/crypto/crypto.c @@ -9,11 +9,8 @@ #include "odp_crypto_test_inp.h" #include "crypto.h"
-#define SHM_PKT_POOL_SIZE (512 * 2048 * 2) -#define SHM_PKT_POOL_BUF_SIZE (1024 * 32) - -#define SHM_COMPL_POOL_SIZE (128 * 1024) -#define SHM_COMPL_POOL_BUF_SIZE 128 +#define PKT_POOL_NUM 64 +#define PKT_POOL_LEN (1 * 1024)
odp_suiteinfo_t crypto_suites[] = { {ODP_CRYPTO_SYNC_INP, crypto_suite_sync_init, NULL, crypto_suite}, @@ -44,13 +41,20 @@ int crypto_init(odp_instance_t *inst) }
odp_pool_param_init(¶ms); - params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE; - params.pkt.len = SHM_PKT_POOL_BUF_SIZE; - params.pkt.num = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE; + params.pkt.seg_len = PKT_POOL_LEN; + params.pkt.len = PKT_POOL_LEN; + params.pkt.num = PKT_POOL_NUM; params.type = ODP_POOL_PACKET;
- if (SHM_PKT_POOL_BUF_SIZE > pool_capa.pkt.max_len) - params.pkt.len = pool_capa.pkt.max_len; + if (PKT_POOL_LEN > pool_capa.pkt.max_seg_len) { + fprintf(stderr, "Warning: small packet segment length\n"); + params.pkt.seg_len = pool_capa.pkt.max_seg_len; + } + + if (PKT_POOL_LEN > pool_capa.pkt.max_len) { + fprintf(stderr, "Pool max packet length too small\n"); + return -1; + }
pool = odp_pool_create("packet_pool", ¶ms);
commit 39dd766513a0c51ff8747c2a53a0e0ac0f53f434 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:41 2016 +0200
linux-gen: socket: use trunc instead of pull tail
This is a bug fix for multi-segment packet handling. Packet pull tail cannot decrement the packet length by more than the amount of data in the last segment. Trunc tail must be used instead.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
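The distinction matters because odp_packet_pull_tail() operates only within the last segment, while odp_packet_trunc_tail() can remove data across segment boundaries and may return a new packet handle. A hedged sketch of the pattern used in the patch (hypothetical helper name):

#include <odp_api.h>

/* Trim a received packet down to 'actual_len' bytes. With multi-segment
 * packets the amount to remove may exceed the last segment, so
 * odp_packet_trunc_tail() is used instead of odp_packet_pull_tail().
 * Returns 0 on success; on failure the packet is freed. */
static int trim_to_len(odp_packet_t *pkt, uint32_t actual_len)
{
	uint32_t cur_len = odp_packet_len(*pkt);

	if (actual_len >= cur_len)
		return 0;

	/* Note: trunc_tail may change the packet handle */
	if (odp_packet_trunc_tail(pkt, cur_len - actual_len, NULL, NULL) < 0) {
		odp_packet_free(*pkt);
		return -1;
	}

	return 0;
}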
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c index 9fe4a7e..7d23968 100644 --- a/platform/linux-generic/pktio/socket.c +++ b/platform/linux-generic/pktio/socket.c @@ -674,6 +674,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED, if (cls_classify_packet(pktio_entry, base, pkt_len, pkt_len, &pool, &parsed_hdr)) continue; + num = packet_alloc_multi(pool, pkt_len, &pkt, 1); if (num != 1) continue; @@ -700,6 +701,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
num = packet_alloc_multi(pkt_sock->pool, pkt_sock->mtu, &pkt_table[i], 1); + if (odp_unlikely(num != 1)) { pkt_table[i] = ODP_PACKET_INVALID; break; @@ -724,23 +726,34 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED, void *base = msgvec[i].msg_hdr.msg_iov->iov_base; struct ethhdr *eth_hdr = base; odp_packet_hdr_t *pkt_hdr; + odp_packet_t pkt; + int ret; + + pkt = pkt_table[i];
/* Don't receive packets sent by ourselves */ if (odp_unlikely(ethaddrs_equal(pkt_sock->if_mac, eth_hdr->h_source))) { - odp_packet_free(pkt_table[i]); + odp_packet_free(pkt); continue; } - pkt_hdr = odp_packet_hdr(pkt_table[i]); + /* Parse and set packet header data */ - odp_packet_pull_tail(pkt_table[i], - odp_packet_len(pkt_table[i]) - - msgvec[i].msg_len); + ret = odp_packet_trunc_tail(&pkt, odp_packet_len(pkt) - + msgvec[i].msg_len, + NULL, NULL); + if (ret < 0) { + ODP_ERR("trunk_tail failed"); + odp_packet_free(pkt); + continue; + } + + pkt_hdr = odp_packet_hdr(pkt); packet_parse_l2(&pkt_hdr->p, pkt_hdr->frame_len); packet_set_ts(pkt_hdr, ts); pkt_hdr->input = pktio_entry->s.handle;
- pkt_table[nb_rx] = pkt_table[i]; + pkt_table[nb_rx] = pkt; nb_rx++; }
commit 3e208bb64df7c2811bb4d04359c35eb7c0a51ddb Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:40 2016 +0200
linux-gen: packet: remove zero len support from alloc
Remove support for zero-length allocations, which were never required by the API specification or tested by the validation suite.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index a5c6ff4..0d3fd05 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -478,7 +478,6 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) pool_t *pool = pool_entry_from_hdl(pool_hdl); odp_packet_t pkt; int num, num_seg; - int zero_len = 0;
if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; @@ -488,23 +487,12 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) if (odp_unlikely(len > pool->max_len)) return ODP_PACKET_INVALID;
- if (odp_unlikely(len == 0)) { - len = pool->data_size; - zero_len = 1; - } - num_seg = num_segments(len); num = packet_alloc(pool, len, 1, num_seg, &pkt, 0);
if (odp_unlikely(num == 0)) return ODP_PACKET_INVALID;
- if (odp_unlikely(zero_len)) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); - - pull_tail(pkt_hdr, len); - } - return pkt; }
@@ -513,7 +501,6 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, { pool_t *pool = pool_entry_from_hdl(pool_hdl); int num, num_seg; - int zero_len = 0;
if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; @@ -523,24 +510,9 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, if (odp_unlikely(len > pool->max_len)) return -1;
- if (odp_unlikely(len == 0)) { - len = pool->data_size; - zero_len = 1; - } - num_seg = num_segments(len); num = packet_alloc(pool, len, max_num, num_seg, pkt, 0);
- if (odp_unlikely(zero_len)) { - int i; - - for (i = 0; i < num; i++) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt[i]); - - pull_tail(pkt_hdr, len); - } - } - return num; }
commit 8cc2ea83ca6318421bfa67044f0dc39b1a510cda Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:39 2016 +0200
api: packet: added limits for packet len on alloc
There is no use case for an application to allocate zero-length packets. An application should always have some knowledge of the new packet data length before allocation. Implementations are also more efficient when the check for zero length is avoided.
Also add a pool parameter to specify the maximum packet length to be allocated from the pool. Implementations may use this information to optimize e.g. memory usage. The application must not exceed the max_len parameter value in alloc calls. Pool capabilities already define max_len.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
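A small sketch of how the new parameter is intended to be used (hypothetical helper and pool sizes, assumes ODP is initialized):

#include <odp_api.h>

/* Pool sized for packets of at most 2048 bytes. Allocations must request
 * between 1 and max_len bytes; max_len == 0 would mean the pool maximum. */
static odp_packet_t alloc_example(void)
{
	odp_pool_param_t params;
	odp_pool_t pool;

	odp_pool_param_init(&params);
	params.type        = ODP_POOL_PACKET;
	params.pkt.num     = 512;
	params.pkt.len     = 1518;   /* typical packet length */
	params.pkt.max_len = 2048;   /* never allocate more than this */

	pool = odp_pool_create("bounded_pool", &params);
	if (pool == ODP_POOL_INVALID)
		return ODP_PACKET_INVALID;

	/* Valid: 1 <= len <= max_len. Zero or oversized lengths are not allowed. */
	return odp_packet_alloc(pool, 1518);
}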
diff --git a/include/odp/api/spec/packet.h b/include/odp/api/spec/packet.h index 4a14f2d..faf62e2 100644 --- a/include/odp/api/spec/packet.h +++ b/include/odp/api/spec/packet.h @@ -82,13 +82,14 @@ extern "C" { * Allocate a packet from a packet pool * * Allocates a packet of the requested length from the specified packet pool. - * Pool must have been created with ODP_POOL_PACKET type. The + * The pool must have been created with ODP_POOL_PACKET type. The * packet is initialized with data pointers and lengths set according to the * specified len, and the default headroom and tailroom length settings. All - * other packet metadata are set to their default values. + * other packet metadata are set to their default values. Packet length must + * be greater than zero and not exceed packet pool parameter 'max_len' value. * * @param pool Pool handle - * @param len Packet data length + * @param len Packet data length (1 ... pool max_len) * * @return Handle of allocated packet * @retval ODP_PACKET_INVALID Packet could not be allocated @@ -105,7 +106,7 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool, uint32_t len); * packets from a pool. * * @param pool Pool handle - * @param len Packet data length + * @param len Packet data length (1 ... pool max_len) * @param[out] pkt Array of packet handles for output * @param num Maximum number of packets to allocate * diff --git a/include/odp/api/spec/pool.h b/include/odp/api/spec/pool.h index a1331e3..041f4af 100644 --- a/include/odp/api/spec/pool.h +++ b/include/odp/api/spec/pool.h @@ -192,6 +192,12 @@ typedef struct odp_pool_param_t { pkt.max_len. Use 0 for default. */ uint32_t len;
+ /** Maximum packet length that will be allocated from + the pool. The maximum value is defined by pool + capability pkt.max_len. Use 0 for default (the + pool maximum). */ + uint32_t max_len; + /** Minimum number of packet data bytes that are stored in the first segment of a packet. The maximum value is defined by pool capability pkt.max_seg_len.
commit f4d83d4e5b86f655809f97d278f1f6b5bb488849 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:38 2016 +0200
test: validation: packet: improved multi-segment alloc test
Added test cases to allocate and free multiple multi-segment packets.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/common_plat/validation/api/packet/packet.c b/test/common_plat/validation/api/packet/packet.c index 8b31872..bf7eab2 100644 --- a/test/common_plat/validation/api/packet/packet.c +++ b/test/common_plat/validation/api/packet/packet.c @@ -273,23 +273,86 @@ void packet_test_alloc_free_multi(void)
void packet_test_alloc_segmented(void) { + const int num = 5; + odp_packet_t pkts[num]; odp_packet_t pkt; - uint32_t len; + uint32_t max_len; + odp_pool_t pool; + odp_pool_param_t params; odp_pool_capability_t capa; + int ret, i, num_alloc;
CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
if (capa.pkt.max_len) - len = capa.pkt.max_len; + max_len = capa.pkt.max_len; else - len = capa.pkt.min_seg_len * capa.pkt.max_segs_per_pkt; + max_len = capa.pkt.min_seg_len * capa.pkt.max_segs_per_pkt; + + odp_pool_param_init(¶ms); + + params.type = ODP_POOL_PACKET; + params.pkt.seg_len = capa.pkt.min_seg_len; + params.pkt.len = max_len; + + /* Ensure that 'num' segmented packets can be allocated */ + params.pkt.num = num * capa.pkt.max_segs_per_pkt; + + pool = odp_pool_create("pool_alloc_segmented", ¶ms); + CU_ASSERT_FATAL(pool != ODP_POOL_INVALID); + + /* Less than max len allocs */ + pkt = odp_packet_alloc(pool, max_len / 2); + CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID); + CU_ASSERT(odp_packet_len(pkt) == max_len / 2); + + odp_packet_free(pkt); + + num_alloc = 0; + for (i = 0; i < num; i++) { + ret = odp_packet_alloc_multi(pool, max_len / 2, + &pkts[num_alloc], num - num_alloc); + CU_ASSERT_FATAL(ret >= 0); + num_alloc += ret; + if (num_alloc >= num) + break; + } + + CU_ASSERT(num_alloc == num); + + for (i = 0; i < num_alloc; i++) + CU_ASSERT(odp_packet_len(pkts[i]) == max_len / 2);
- pkt = odp_packet_alloc(packet_pool, len); + odp_packet_free_multi(pkts, num_alloc); + + /* Max len allocs */ + pkt = odp_packet_alloc(pool, max_len); CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID); - CU_ASSERT(odp_packet_len(pkt) == len); + CU_ASSERT(odp_packet_len(pkt) == max_len); + if (segmentation_supported) CU_ASSERT(odp_packet_is_segmented(pkt) == 1); + odp_packet_free(pkt); + + num_alloc = 0; + for (i = 0; i < num; i++) { + ret = odp_packet_alloc_multi(pool, max_len, + &pkts[num_alloc], num - num_alloc); + CU_ASSERT_FATAL(ret >= 0); + num_alloc += ret; + if (num_alloc >= num) + break; + } + + CU_ASSERT(num_alloc == num); + + for (i = 0; i < num_alloc; i++) + CU_ASSERT(odp_packet_len(pkts[i]) == max_len); + + odp_packet_free_multi(pkts, num_alloc); + + CU_ASSERT(odp_pool_destroy(pool) == 0); }
void packet_test_event_conversion(void)
commit ce3f91503e397b70e8470b4b61c074756d541ef2 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:37 2016 +0200
linux-gen: packet: added support for segmented packets
Added support for multi-segment packets. The first segment is the packet descriptor, which contains all metadata and pointers to the other segments.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
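With the descriptor-plus-segments layout described above, applications iterate over a packet's segments through the existing segment API. A minimal sketch (hypothetical helper) that sums the per-segment data lengths, which should equal odp_packet_len():

#include <odp_api.h>

/* Walk all segments of a packet and add up their data lengths. */
static uint32_t sum_seg_lengths(odp_packet_t pkt)
{
	uint32_t total = 0;
	odp_packet_seg_t seg = odp_packet_first_seg(pkt);

	while (seg != ODP_PACKET_SEG_INVALID) {
		total += odp_packet_seg_data_len(pkt, seg);
		seg = odp_packet_next_seg(pkt, seg);
	}

	return total;
}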
diff --git a/platform/linux-generic/include/odp/api/plat/packet_types.h b/platform/linux-generic/include/odp/api/plat/packet_types.h index b5345ed..864494d 100644 --- a/platform/linux-generic/include/odp/api/plat/packet_types.h +++ b/platform/linux-generic/include/odp/api/plat/packet_types.h @@ -32,9 +32,11 @@ typedef ODP_HANDLE_T(odp_packet_t);
#define ODP_PACKET_OFFSET_INVALID (0x0fffffff)
-typedef ODP_HANDLE_T(odp_packet_seg_t); +/* A packet segment handle stores a small index. Strong type handles are + * pointers, which would be wasteful in this case. */ +typedef uint8_t odp_packet_seg_t;
-#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0xffffffff) +#define ODP_PACKET_SEG_INVALID ((odp_packet_seg_t)-1)
/** odp_packet_color_t assigns names to the various pkt "colors" */ typedef enum { diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h index f8688f6..cf817d9 100644 --- a/platform/linux-generic/include/odp_buffer_inlines.h +++ b/platform/linux-generic/include/odp_buffer_inlines.h @@ -23,22 +23,11 @@ odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf); void _odp_buffer_event_type_set(odp_buffer_t buf, int ev); int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf);
-void *buffer_map(odp_buffer_hdr_t *buf, uint32_t offset, uint32_t *seglen, - uint32_t limit); - static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr) { return hdr->handle.handle; }
-static inline uint32_t pool_id_from_buf(odp_buffer_t buf) -{ - odp_buffer_bits_t handle; - - handle.handle = buf; - return handle.pool_id; -} - #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h index 0ca13f8..4e75908 100644 --- a/platform/linux-generic/include/odp_buffer_internal.h +++ b/platform/linux-generic/include/odp_buffer_internal.h @@ -33,10 +33,6 @@ extern "C" { #include <odp_schedule_if.h> #include <stddef.h>
-ODP_STATIC_ASSERT(ODP_CONFIG_PACKET_SEG_LEN_MIN >= 256, - "ODP Segment size must be a minimum of 256 bytes"); - - typedef union odp_buffer_bits_t { odp_buffer_t handle;
@@ -65,6 +61,20 @@ struct odp_buffer_hdr_t { int burst_first; struct odp_buffer_hdr_t *burst[BUFFER_BURST_SIZE];
+ struct { + void *hdr; + uint8_t *data; + uint32_t len; + } seg[CONFIG_PACKET_MAX_SEGS]; + + /* max data size */ + uint32_t size; + + /* Initial buffer data pointer and length */ + void *base_data; + uint32_t base_len; + uint8_t *buf_end; + union { uint32_t all; struct { @@ -75,7 +85,6 @@ struct odp_buffer_hdr_t {
int8_t type; /* buffer type */ odp_event_type_t event_type; /* for reuse as event */ - uint32_t size; /* max data size */ odp_pool_t pool_hdl; /* buffer pool handle */ union { uint64_t buf_u64; /* user u64 */ @@ -86,8 +95,6 @@ struct odp_buffer_hdr_t { uint32_t uarea_size; /* size of user area */ uint32_t segcount; /* segment count */ uint32_t segsize; /* segment size */ - /* block addrs */ - void *addr[ODP_CONFIG_PACKET_MAX_SEGS]; uint64_t order; /* sequence for ordered queues */ queue_entry_t *origin_qe; /* ordered queue origin */ union { @@ -105,8 +112,6 @@ struct odp_buffer_hdr_t { };
/* Forward declarations */ -int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount); -void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount); int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount); void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount);
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index e24d5ab..9a4e6eb 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -54,7 +54,7 @@ extern "C" { * The default value (66) allows a 1500-byte packet to be received into a single * segment with Ethernet offset alignment and room for some header expansion. */ -#define ODP_CONFIG_PACKET_HEADROOM 66 +#define CONFIG_PACKET_HEADROOM 66
/* * Default packet tailroom @@ -65,21 +65,26 @@ extern "C" { * without restriction. Note that most implementations will automatically * consider any unused portion of the last segment of a packet as tailroom */ -#define ODP_CONFIG_PACKET_TAILROOM 0 +#define CONFIG_PACKET_TAILROOM 0
/* * Maximum number of segments per packet */ -#define ODP_CONFIG_PACKET_MAX_SEGS 1 +#define CONFIG_PACKET_MAX_SEGS 1
/* - * Maximum packet segment length - * - * This defines the maximum packet segment buffer length in bytes. The user - * defined segment length (seg_len in odp_pool_param_t) must not be larger than - * this. + * Maximum packet segment size including head- and tailrooms */ -#define ODP_CONFIG_PACKET_SEG_LEN_MAX (64 * 1024) +#define CONFIG_PACKET_SEG_SIZE (64 * 1024) + +/* Maximum data length in a segment + * + * The user defined segment length (seg_len in odp_pool_param_t) must not + * be larger than this. +*/ +#define CONFIG_PACKET_MAX_SEG_LEN (CONFIG_PACKET_SEG_SIZE - \ + CONFIG_PACKET_HEADROOM - \ + CONFIG_PACKET_TAILROOM)
/* * Minimum packet segment length @@ -88,21 +93,7 @@ extern "C" { * defined segment length (seg_len in odp_pool_param_t) will be rounded up into * this value. */ -#define ODP_CONFIG_PACKET_SEG_LEN_MIN ODP_CONFIG_PACKET_SEG_LEN_MAX - -/* - * Maximum packet buffer length - * - * This defines the maximum number of bytes that can be stored into a packet - * (maximum return value of odp_packet_buf_len(void)). Attempts to allocate - * (including default head- and tailrooms) or extend packets to sizes larger - * than this limit will fail. - * - * @internal In odp-linux implementation: - * - The value MUST be an integral number of segments - * - The value SHOULD be large enough to accommodate jumbo packets (9K) - */ -#define ODP_CONFIG_PACKET_BUF_LEN_MAX ODP_CONFIG_PACKET_SEG_LEN_MAX +#define CONFIG_PACKET_SEG_LEN_MIN CONFIG_PACKET_MAX_SEG_LEN
/* Maximum number of shared memory blocks. * diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h index 0cdd5ca..d09231e 100644 --- a/platform/linux-generic/include/odp_packet_internal.h +++ b/platform/linux-generic/include/odp_packet_internal.h @@ -27,8 +27,6 @@ extern "C" { #include <odp/api/crypto.h> #include <odp_crypto_internal.h>
-#define PACKET_JUMBO_LEN (9 * 1024) - /** Minimum segment length expected by packet_parse_common() */ #define PACKET_PARSE_SEG_LEN 96
@@ -218,85 +216,13 @@ static inline void copy_packet_cls_metadata(odp_packet_hdr_t *src_hdr, dst_hdr->op_result = src_hdr->op_result; }
-static inline void *packet_map(odp_packet_hdr_t *pkt_hdr, - uint32_t offset, uint32_t *seglen) -{ - if (offset > pkt_hdr->frame_len) - return NULL; - - return buffer_map(&pkt_hdr->buf_hdr, - pkt_hdr->headroom + offset, seglen, - pkt_hdr->headroom + pkt_hdr->frame_len); -} - -static inline void push_head(odp_packet_hdr_t *pkt_hdr, size_t len) -{ - pkt_hdr->headroom -= len; - pkt_hdr->frame_len += len; -} - -static inline void pull_head(odp_packet_hdr_t *pkt_hdr, size_t len) -{ - pkt_hdr->headroom += len; - pkt_hdr->frame_len -= len; -} - -static inline int push_head_seg(odp_packet_hdr_t *pkt_hdr, size_t len) -{ - uint32_t extrasegs = - (len - pkt_hdr->headroom + pkt_hdr->buf_hdr.segsize - 1) / - pkt_hdr->buf_hdr.segsize; - - if (pkt_hdr->buf_hdr.segcount + extrasegs > - ODP_CONFIG_PACKET_MAX_SEGS || - seg_alloc_head(&pkt_hdr->buf_hdr, extrasegs)) - return -1; - - pkt_hdr->headroom += extrasegs * pkt_hdr->buf_hdr.segsize; - return 0; -} - -static inline void pull_head_seg(odp_packet_hdr_t *pkt_hdr) -{ - uint32_t extrasegs = (pkt_hdr->headroom - 1) / pkt_hdr->buf_hdr.segsize; - - seg_free_head(&pkt_hdr->buf_hdr, extrasegs); - pkt_hdr->headroom -= extrasegs * pkt_hdr->buf_hdr.segsize; -} - -static inline void push_tail(odp_packet_hdr_t *pkt_hdr, size_t len) -{ - pkt_hdr->tailroom -= len; - pkt_hdr->frame_len += len; -} - -static inline int push_tail_seg(odp_packet_hdr_t *pkt_hdr, size_t len) +static inline void pull_tail(odp_packet_hdr_t *pkt_hdr, uint32_t len) { - uint32_t extrasegs = - (len - pkt_hdr->tailroom + pkt_hdr->buf_hdr.segsize - 1) / - pkt_hdr->buf_hdr.segsize; + int last = pkt_hdr->buf_hdr.segcount - 1;
- if (pkt_hdr->buf_hdr.segcount + extrasegs > - ODP_CONFIG_PACKET_MAX_SEGS || - seg_alloc_tail(&pkt_hdr->buf_hdr, extrasegs)) - return -1; - - pkt_hdr->tailroom += extrasegs * pkt_hdr->buf_hdr.segsize; - return 0; -} - -static inline void pull_tail_seg(odp_packet_hdr_t *pkt_hdr) -{ - uint32_t extrasegs = pkt_hdr->tailroom / pkt_hdr->buf_hdr.segsize; - - seg_free_tail(&pkt_hdr->buf_hdr, extrasegs); - pkt_hdr->tailroom -= extrasegs * pkt_hdr->buf_hdr.segsize; -} - -static inline void pull_tail(odp_packet_hdr_t *pkt_hdr, size_t len) -{ pkt_hdr->tailroom += len; pkt_hdr->frame_len -= len; + pkt_hdr->buf_hdr.seg[last].len -= len; }
static inline uint32_t packet_len(odp_packet_hdr_t *pkt_hdr) diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index f7e951a..5d7b817 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -113,9 +113,6 @@ int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[], odp_buffer_hdr_t *buf_hdr[], int num); void buffer_free_multi(const odp_buffer_t buf[], int num_free);
-uint32_t pool_headroom(odp_pool_t pool); -uint32_t pool_tailroom(odp_pool_t pool); - #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c index eed15c0..b791039 100644 --- a/platform/linux-generic/odp_buffer.c +++ b/platform/linux-generic/odp_buffer.c @@ -28,7 +28,7 @@ void *odp_buffer_addr(odp_buffer_t buf) { odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
- return hdr->addr[0]; + return hdr->seg[0].data; }
uint32_t odp_buffer_size(odp_buffer_t buf) @@ -56,11 +56,11 @@ int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) " pool %" PRIu64 "\n", odp_pool_to_u64(hdr->pool_hdl)); len += snprintf(&str[len], n-len, - " addr %p\n", hdr->addr); + " addr %p\n", hdr->seg[0].data); len += snprintf(&str[len], n-len, - " size %" PRIu32 "\n", hdr->size); + " size %" PRIu32 "\n", hdr->size); len += snprintf(&str[len], n-len, - " type %i\n", hdr->type); + " type %i\n", hdr->type);
return len; } diff --git a/platform/linux-generic/odp_crypto.c b/platform/linux-generic/odp_crypto.c index 3ebabb7..7e686ff 100644 --- a/platform/linux-generic/odp_crypto.c +++ b/platform/linux-generic/odp_crypto.c @@ -754,9 +754,13 @@ odp_crypto_operation(odp_crypto_op_params_t *params, ODP_POOL_INVALID != session->output_pool) params->out_pkt = odp_packet_alloc(session->output_pool, odp_packet_len(params->pkt)); + + if (odp_unlikely(ODP_PACKET_INVALID == params->out_pkt)) { + ODP_DBG("Alloc failed.\n"); + return -1; + } + if (params->pkt != params->out_pkt) { - if (odp_unlikely(ODP_PACKET_INVALID == params->out_pkt)) - ODP_ABORT(); (void)odp_packet_copy_from_pkt(params->out_pkt, 0, params->pkt, diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index 2eee775..a5c6ff4 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -20,12 +20,155 @@ #include <stdio.h> #include <inttypes.h>
-/* - * - * Alloc and free - * ******************************************************** - * - */ +static inline odp_packet_t packet_handle(odp_packet_hdr_t *pkt_hdr) +{ + return (odp_packet_t)pkt_hdr->buf_hdr.handle.handle; +} + +static inline odp_buffer_t buffer_handle(odp_packet_hdr_t *pkt_hdr) +{ + return pkt_hdr->buf_hdr.handle.handle; +} + +static inline uint32_t packet_seg_len(odp_packet_hdr_t *pkt_hdr, + uint32_t seg_idx) +{ + return pkt_hdr->buf_hdr.seg[seg_idx].len; +} + +static inline void *packet_seg_data(odp_packet_hdr_t *pkt_hdr, uint32_t seg_idx) +{ + return pkt_hdr->buf_hdr.seg[seg_idx].data; +} + +static inline int packet_last_seg(odp_packet_hdr_t *pkt_hdr) +{ + if (CONFIG_PACKET_MAX_SEGS == 1) + return 0; + else + return pkt_hdr->buf_hdr.segcount - 1; +} + +static inline uint32_t packet_first_seg_len(odp_packet_hdr_t *pkt_hdr) +{ + return packet_seg_len(pkt_hdr, 0); +} + +static inline uint32_t packet_last_seg_len(odp_packet_hdr_t *pkt_hdr) +{ + int last = packet_last_seg(pkt_hdr); + + return packet_seg_len(pkt_hdr, last); +} + +static inline void *packet_data(odp_packet_hdr_t *pkt_hdr) +{ + return pkt_hdr->buf_hdr.seg[0].data; +} + +static inline void *packet_tail(odp_packet_hdr_t *pkt_hdr) +{ + int last = packet_last_seg(pkt_hdr); + uint32_t seg_len = pkt_hdr->buf_hdr.seg[last].len; + + return pkt_hdr->buf_hdr.seg[last].data + seg_len; +} + +static inline void push_head(odp_packet_hdr_t *pkt_hdr, uint32_t len) +{ + pkt_hdr->headroom -= len; + pkt_hdr->frame_len += len; + pkt_hdr->buf_hdr.seg[0].data -= len; + pkt_hdr->buf_hdr.seg[0].len += len; +} + +static inline void pull_head(odp_packet_hdr_t *pkt_hdr, uint32_t len) +{ + pkt_hdr->headroom += len; + pkt_hdr->frame_len -= len; + pkt_hdr->buf_hdr.seg[0].data += len; + pkt_hdr->buf_hdr.seg[0].len -= len; +} + +static inline void push_tail(odp_packet_hdr_t *pkt_hdr, uint32_t len) +{ + int last = packet_last_seg(pkt_hdr); + + pkt_hdr->tailroom -= len; + pkt_hdr->frame_len += len; + pkt_hdr->buf_hdr.seg[last].len += len; +} + +/* Copy all metadata for segmentation modification. Segment data and lengths + * are not copied. 
*/ +static inline void packet_seg_copy_md(odp_packet_hdr_t *dst, + odp_packet_hdr_t *src) +{ + dst->p = src->p; + + /* lengths are not copied: + * .frame_len + * .headroom + * .tailroom + */ + + dst->input = src->input; + dst->dst_queue = src->dst_queue; + dst->flow_hash = src->flow_hash; + dst->timestamp = src->timestamp; + dst->op_result = src->op_result; + + /* buffer header side packet metadata */ + dst->buf_hdr.buf_u64 = src->buf_hdr.buf_u64; + dst->buf_hdr.uarea_addr = src->buf_hdr.uarea_addr; + dst->buf_hdr.uarea_size = src->buf_hdr.uarea_size; + + /* segmentation data is not copied: + * buf_hdr.seg[] + * buf_hdr.segcount + */ +} + +static inline void *packet_map(odp_packet_hdr_t *pkt_hdr, + uint32_t offset, uint32_t *seg_len, int *seg_idx) +{ + void *addr; + uint32_t len; + int seg = 0; + int seg_count = pkt_hdr->buf_hdr.segcount; + + if (odp_unlikely(offset >= pkt_hdr->frame_len)) + return NULL; + + if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || seg_count == 1)) { + addr = pkt_hdr->buf_hdr.seg[0].data + offset; + len = pkt_hdr->buf_hdr.seg[0].len - offset; + } else { + int i; + uint32_t seg_start = 0, seg_end = 0; + + for (i = 0; i < seg_count; i++) { + seg_end += pkt_hdr->buf_hdr.seg[i].len; + + if (odp_likely(offset < seg_end)) + break; + + seg_start = seg_end; + } + + addr = pkt_hdr->buf_hdr.seg[i].data + (offset - seg_start); + len = pkt_hdr->buf_hdr.seg[i].len - (offset - seg_start); + seg = i; + } + + if (seg_len) + *seg_len = len; + + if (seg_idx) + *seg_idx = seg; + + return addr; +}
static inline void packet_parse_disable(odp_packet_hdr_t *pkt_hdr) { @@ -48,11 +191,23 @@ void packet_parse_reset(odp_packet_hdr_t *pkt_hdr) /** * Initialize packet */ -static void packet_init(pool_t *pool, odp_packet_hdr_t *pkt_hdr, - size_t size, int parse) +static inline void packet_init(odp_packet_hdr_t *pkt_hdr, uint32_t len, + int parse) { - pkt_hdr->p.parsed_layers = LAYER_NONE; + uint32_t seg_len; + int num = pkt_hdr->buf_hdr.segcount; + + if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || num == 1)) { + seg_len = len; + pkt_hdr->buf_hdr.seg[0].len = len; + } else { + seg_len = len - ((num - 1) * CONFIG_PACKET_MAX_SEG_LEN); + + /* Last segment data length */ + pkt_hdr->buf_hdr.seg[num - 1].len = seg_len; + }
+ pkt_hdr->p.parsed_layers = LAYER_NONE; pkt_hdr->p.input_flags.all = 0; pkt_hdr->p.output_flags.all = 0; pkt_hdr->p.error_flags.all = 0; @@ -70,42 +225,260 @@ static void packet_init(pool_t *pool, odp_packet_hdr_t *pkt_hdr, * Packet tailroom is rounded up to fill the last * segment occupied by the allocated length. */ - pkt_hdr->frame_len = size; - pkt_hdr->headroom = pool->headroom; - pkt_hdr->tailroom = pool->data_size - size + pool->tailroom; + pkt_hdr->frame_len = len; + pkt_hdr->headroom = CONFIG_PACKET_HEADROOM; + pkt_hdr->tailroom = CONFIG_PACKET_MAX_SEG_LEN - seg_len + + CONFIG_PACKET_TAILROOM;
pkt_hdr->input = ODP_PKTIO_INVALID; }
-int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, - odp_packet_t pkt[], int max_num) +static inline odp_packet_hdr_t *init_segments(odp_buffer_t buf[], int num) { - pool_t *pool = pool_entry_from_hdl(pool_hdl); - int num, i; - odp_packet_hdr_t *pkt_hdrs[max_num]; + odp_packet_hdr_t *pkt_hdr; + int i; + + /* First buffer is the packet descriptor */ + pkt_hdr = odp_packet_hdr((odp_packet_t)buf[0]); + + pkt_hdr->buf_hdr.seg[0].data = pkt_hdr->buf_hdr.base_data; + pkt_hdr->buf_hdr.seg[0].len = pkt_hdr->buf_hdr.base_len; + + /* Link segments */ + if (odp_unlikely(CONFIG_PACKET_MAX_SEGS != 1)) { + pkt_hdr->buf_hdr.segcount = num; + + if (odp_unlikely(num > 1)) { + for (i = 1; i < num; i++) { + odp_packet_hdr_t *hdr; + odp_buffer_hdr_t *b_hdr; + + hdr = odp_packet_hdr((odp_packet_t)buf[i]); + b_hdr = &hdr->buf_hdr; + + pkt_hdr->buf_hdr.seg[i].hdr = hdr; + pkt_hdr->buf_hdr.seg[i].data = b_hdr->base_data; + pkt_hdr->buf_hdr.seg[i].len = b_hdr->base_len; + } + } + } + + return pkt_hdr; +} + +/* Calculate the number of segments */ +static inline int num_segments(uint32_t len) +{ + uint32_t max_seg_len; + int num;
- num = buffer_alloc_multi(pool, (odp_buffer_t *)pkt, - (odp_buffer_hdr_t **)pkt_hdrs, max_num); + if (CONFIG_PACKET_MAX_SEGS == 1) + return 1; + + num = 1; + max_seg_len = CONFIG_PACKET_MAX_SEG_LEN; + + if (odp_unlikely(len > max_seg_len)) { + num = len / max_seg_len; + + if (odp_likely((num * max_seg_len) != len)) + num += 1; + } + + return num; +} + +static inline void copy_all_segs(odp_packet_hdr_t *to, odp_packet_hdr_t *from) +{ + int i; + int n = to->buf_hdr.segcount; + int num = from->buf_hdr.segcount;
for (i = 0; i < num; i++) { - odp_packet_hdr_t *pkt_hdr = pkt_hdrs[i]; + to->buf_hdr.seg[n + i].hdr = from->buf_hdr.seg[i].hdr; + to->buf_hdr.seg[n + i].data = from->buf_hdr.seg[i].data; + to->buf_hdr.seg[n + i].len = from->buf_hdr.seg[i].len; + } + + to->buf_hdr.segcount = n + num; +}
- packet_init(pool, pkt_hdr, len, 1 /* do parse */); +static inline void copy_num_segs(odp_packet_hdr_t *to, odp_packet_hdr_t *from, + int num) +{ + int i;
- if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize) - pull_tail_seg(pkt_hdr); + for (i = 0; i < num; i++) { + to->buf_hdr.seg[i].hdr = from->buf_hdr.seg[num + i].hdr; + to->buf_hdr.seg[i].data = from->buf_hdr.seg[num + i].data; + to->buf_hdr.seg[i].len = from->buf_hdr.seg[num + i].len; }
+ to->buf_hdr.segcount = num; +} + +static inline odp_packet_hdr_t *add_segments(odp_packet_hdr_t *pkt_hdr, + uint32_t len, int head) +{ + pool_t *pool = pool_entry_from_hdl(pkt_hdr->buf_hdr.pool_hdl); + odp_packet_hdr_t *new_hdr; + int num, ret; + uint32_t seg_len, offset; + + num = num_segments(len); + + if ((pkt_hdr->buf_hdr.segcount + num) > CONFIG_PACKET_MAX_SEGS) + return NULL; + + { + odp_buffer_t buf[num]; + + ret = buffer_alloc_multi(pool, buf, NULL, num); + if (odp_unlikely(ret != num)) { + if (ret > 0) + buffer_free_multi(buf, ret); + + return NULL; + } + + new_hdr = init_segments(buf, num); + } + + seg_len = len - ((num - 1) * pool->max_seg_len); + offset = pool->max_seg_len - seg_len; + + if (head) { + /* add into the head*/ + copy_all_segs(new_hdr, pkt_hdr); + + /* adjust first segment length */ + new_hdr->buf_hdr.seg[0].data += offset; + new_hdr->buf_hdr.seg[0].len = seg_len; + + packet_seg_copy_md(new_hdr, pkt_hdr); + new_hdr->frame_len = pkt_hdr->frame_len + len; + new_hdr->headroom = pool->headroom + offset; + new_hdr->tailroom = pkt_hdr->tailroom; + + pkt_hdr = new_hdr; + } else { + int last; + + /* add into the tail */ + copy_all_segs(pkt_hdr, new_hdr); + + /* adjust last segment length */ + last = packet_last_seg(pkt_hdr); + pkt_hdr->buf_hdr.seg[last].len = seg_len; + + pkt_hdr->frame_len += len; + pkt_hdr->tailroom = pool->tailroom + offset; + } + + return pkt_hdr; +} + +static inline odp_packet_hdr_t *free_segments(odp_packet_hdr_t *pkt_hdr, + int num, uint32_t free_len, + uint32_t pull_len, int head) +{ + int i; + odp_packet_hdr_t *new_hdr; + odp_buffer_t buf[num]; + int n = pkt_hdr->buf_hdr.segcount - num; + + if (head) { + for (i = 0; i < num; i++) + buf[i] = buffer_handle(pkt_hdr->buf_hdr.seg[i].hdr); + + /* First remaining segment is the new packet descriptor */ + new_hdr = pkt_hdr->buf_hdr.seg[num].hdr; + copy_num_segs(new_hdr, pkt_hdr, n); + packet_seg_copy_md(new_hdr, pkt_hdr); + + /* Tailroom not changed */ + new_hdr->tailroom = pkt_hdr->tailroom; + /* No headroom in non-first segments */ + new_hdr->headroom = 0; + new_hdr->frame_len = pkt_hdr->frame_len - free_len; + + pull_head(new_hdr, pull_len); + + pkt_hdr = new_hdr; + } else { + for (i = 0; i < num; i++) + buf[i] = buffer_handle(pkt_hdr->buf_hdr.seg[n + i].hdr); + + /* Head segment remains, no need to copy or update majority + * of the metadata. 
*/ + pkt_hdr->buf_hdr.segcount = n; + pkt_hdr->frame_len -= free_len; + pkt_hdr->tailroom = pkt_hdr->buf_hdr.buf_end - + (uint8_t *)packet_tail(pkt_hdr); + + pull_tail(pkt_hdr, pull_len); + } + + buffer_free_multi(buf, num); + + return pkt_hdr; +} + +static inline int packet_alloc(pool_t *pool, uint32_t len, int max_pkt, + int num_seg, odp_packet_t *pkt, int parse) +{ + int num_buf, i; + int num = max_pkt; + int max_buf = max_pkt * num_seg; + odp_buffer_t buf[max_buf]; + + num_buf = buffer_alloc_multi(pool, buf, NULL, max_buf); + + /* Failed to allocate all segments */ + if (odp_unlikely(num_buf != max_buf)) { + int num_free; + + num = num_buf / num_seg; + num_free = num_buf - (num * num_seg); + + if (num_free > 0) + buffer_free_multi(&buf[num_buf - num_free], num_free); + + if (num == 0) + return 0; + } + + for (i = 0; i < num; i++) { + odp_packet_hdr_t *pkt_hdr; + + /* First buffer is the packet descriptor */ + pkt[i] = (odp_packet_t)buf[i * num_seg]; + pkt_hdr = init_segments(&buf[i * num_seg], num_seg); + + packet_init(pkt_hdr, len, parse); + } + + return num; +} + +int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, + odp_packet_t pkt[], int max_num) +{ + pool_t *pool = pool_entry_from_hdl(pool_hdl); + int num, num_seg; + + num_seg = num_segments(len); + num = packet_alloc(pool, len, max_num, num_seg, pkt, 1); + return num; }
odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) { pool_t *pool = pool_entry_from_hdl(pool_hdl); - size_t pkt_size = len ? len : pool->data_size; odp_packet_t pkt; - odp_packet_hdr_t *pkt_hdr; - int ret; + int num, num_seg; + int zero_len = 0;
if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; @@ -115,28 +488,32 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) if (odp_unlikely(len > pool->max_len)) return ODP_PACKET_INVALID;
- ret = buffer_alloc_multi(pool, (odp_buffer_t *)&pkt, NULL, 1); - if (ret != 1) + if (odp_unlikely(len == 0)) { + len = pool->data_size; + zero_len = 1; + } + + num_seg = num_segments(len); + num = packet_alloc(pool, len, 1, num_seg, &pkt, 0); + + if (odp_unlikely(num == 0)) return ODP_PACKET_INVALID;
- pkt_hdr = odp_packet_hdr(pkt); - packet_init(pool, pkt_hdr, pkt_size, 0 /* do not parse */); - if (len == 0) - pull_tail(pkt_hdr, pkt_size); + if (odp_unlikely(zero_len)) { + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize) - pull_tail_seg(pkt_hdr); + pull_tail(pkt_hdr, len); + }
return pkt; }
int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, - odp_packet_t pkt[], int num) + odp_packet_t pkt[], int max_num) { pool_t *pool = pool_entry_from_hdl(pool_hdl); - size_t pkt_size = len ? len : pool->data_size; - int count, i; - odp_packet_hdr_t *pkt_hdrs[num]; + int num, num_seg; + int zero_len = 0;
if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; @@ -146,31 +523,75 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, if (odp_unlikely(len > pool->max_len)) return -1;
- count = buffer_alloc_multi(pool, (odp_buffer_t *)pkt, - (odp_buffer_hdr_t **)pkt_hdrs, num); + if (odp_unlikely(len == 0)) { + len = pool->data_size; + zero_len = 1; + } + + num_seg = num_segments(len); + num = packet_alloc(pool, len, max_num, num_seg, pkt, 0);
- for (i = 0; i < count; ++i) { - odp_packet_hdr_t *pkt_hdr = pkt_hdrs[i]; + if (odp_unlikely(zero_len)) { + int i;
- packet_init(pool, pkt_hdr, pkt_size, 0 /* do not parse */); - if (len == 0) - pull_tail(pkt_hdr, pkt_size); + for (i = 0; i < num; i++) { + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt[i]);
- if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize) - pull_tail_seg(pkt_hdr); + pull_tail(pkt_hdr, len); + } }
- return count; + return num; }
void odp_packet_free(odp_packet_t pkt) { - buffer_free_multi((odp_buffer_t *)&pkt, 1); + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); + int num_seg = pkt_hdr->buf_hdr.segcount; + + if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || num_seg == 1)) { + buffer_free_multi((odp_buffer_t *)&pkt, 1); + } else { + odp_buffer_t buf[num_seg]; + int i; + + buf[0] = (odp_buffer_t)pkt; + + for (i = 1; i < num_seg; i++) + buf[i] = buffer_handle(pkt_hdr->buf_hdr.seg[i].hdr); + + buffer_free_multi(buf, num_seg); + } }
void odp_packet_free_multi(const odp_packet_t pkt[], int num) { - buffer_free_multi((const odp_buffer_t * const)pkt, num); + if (CONFIG_PACKET_MAX_SEGS == 1) { + buffer_free_multi((const odp_buffer_t * const)pkt, num); + } else { + odp_buffer_t buf[num * CONFIG_PACKET_MAX_SEGS]; + int i, j; + int bufs = 0; + + for (i = 0; i < num; i++) { + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt[i]); + int num_seg = pkt_hdr->buf_hdr.segcount; + odp_buffer_hdr_t *buf_hdr = &pkt_hdr->buf_hdr; + + buf[bufs] = (odp_buffer_t)pkt[i]; + bufs++; + + if (odp_likely(num_seg == 1)) + continue; + + for (j = 1; j < num_seg; j++) { + buf[bufs] = buffer_handle(buf_hdr->seg[j].hdr); + bufs++; + } + } + + buffer_free_multi(buf, bufs); + } }
int odp_packet_reset(odp_packet_t pkt, uint32_t len) @@ -181,7 +602,7 @@ int odp_packet_reset(odp_packet_t pkt, uint32_t len) if (len > pool->headroom + pool->data_size + pool->tailroom) return -1;
- packet_init(pool, pkt_hdr, len, 0); + packet_init(pkt_hdr, len, 0);
return 0; } @@ -217,7 +638,7 @@ void *odp_packet_head(odp_packet_t pkt) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- return buffer_map(&pkt_hdr->buf_hdr, 0, NULL, 0); + return pkt_hdr->buf_hdr.seg[0].data - pkt_hdr->headroom; }
uint32_t odp_packet_buf_len(odp_packet_t pkt) @@ -231,17 +652,14 @@ void *odp_packet_data(odp_packet_t pkt) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- return packet_map(pkt_hdr, 0, NULL); + return packet_data(pkt_hdr); }
uint32_t odp_packet_seg_len(odp_packet_t pkt) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); - uint32_t seglen;
- /* Call returns length of 1st data segment */ - packet_map(pkt_hdr, 0, &seglen); - return seglen; + return packet_first_seg_len(pkt_hdr); }
uint32_t odp_packet_len(odp_packet_t pkt) @@ -263,7 +681,7 @@ void *odp_packet_tail(odp_packet_t pkt) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- return packet_map(pkt_hdr, pkt_hdr->frame_len, NULL); + return packet_tail(pkt_hdr); }
void *odp_packet_push_head(odp_packet_t pkt, uint32_t len) @@ -274,21 +692,38 @@ void *odp_packet_push_head(odp_packet_t pkt, uint32_t len) return NULL;
push_head(pkt_hdr, len); - return packet_map(pkt_hdr, 0, NULL); + return packet_data(pkt_hdr); }
int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len, void **data_ptr, uint32_t *seg_len) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt); + odp_packet_hdr_t *new_hdr; + uint32_t headroom = pkt_hdr->headroom;
- if (len > pkt_hdr->headroom && push_head_seg(pkt_hdr, len)) - return -1; + if (len > headroom) { + push_head(pkt_hdr, headroom); + new_hdr = add_segments(pkt_hdr, len - headroom, 1);
- push_head(pkt_hdr, len); + if (new_hdr == NULL) { + /* segment alloc failed, rollback changes */ + pull_head(pkt_hdr, headroom); + return -1; + } + + *pkt = packet_handle(new_hdr); + pkt_hdr = new_hdr; + } else { + push_head(pkt_hdr, len); + }
if (data_ptr) - *data_ptr = packet_map(pkt_hdr, 0, seg_len); + *data_ptr = packet_data(pkt_hdr); + + if (seg_len) + *seg_len = packet_first_seg_len(pkt_hdr); + return 0; }
@@ -300,51 +735,82 @@ void *odp_packet_pull_head(odp_packet_t pkt, uint32_t len) return NULL;
pull_head(pkt_hdr, len); - return packet_map(pkt_hdr, 0, NULL); + return packet_data(pkt_hdr); }
int odp_packet_trunc_head(odp_packet_t *pkt, uint32_t len, - void **data_ptr, uint32_t *seg_len) + void **data_ptr, uint32_t *seg_len_out) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt); + uint32_t seg_len = packet_first_seg_len(pkt_hdr);
if (len > pkt_hdr->frame_len) return -1;
- pull_head(pkt_hdr, len); - if (pkt_hdr->headroom >= pkt_hdr->buf_hdr.segsize) - pull_head_seg(pkt_hdr); + if (len < seg_len) { + pull_head(pkt_hdr, len); + } else if (CONFIG_PACKET_MAX_SEGS != 1) { + int num = 0; + uint32_t pull_len = 0; + + while (seg_len <= len) { + pull_len = len - seg_len; + num++; + seg_len += packet_seg_len(pkt_hdr, num); + } + + pkt_hdr = free_segments(pkt_hdr, num, len - pull_len, + pull_len, 1); + *pkt = packet_handle(pkt_hdr); + }
if (data_ptr) - *data_ptr = packet_map(pkt_hdr, 0, seg_len); + *data_ptr = packet_data(pkt_hdr); + + if (seg_len_out) + *seg_len_out = packet_first_seg_len(pkt_hdr); + return 0; }
void *odp_packet_push_tail(odp_packet_t pkt, uint32_t len) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); - uint32_t origin = pkt_hdr->frame_len; + void *old_tail;
if (len > pkt_hdr->tailroom) return NULL;
+ old_tail = packet_tail(pkt_hdr); push_tail(pkt_hdr, len); - return packet_map(pkt_hdr, origin, NULL); + + return old_tail; }
int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len, void **data_ptr, uint32_t *seg_len) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt); - uint32_t origin = pkt_hdr->frame_len; + void *ret; + uint32_t tailroom = pkt_hdr->tailroom; + uint32_t tail_off = pkt_hdr->frame_len;
- if (len > pkt_hdr->tailroom && push_tail_seg(pkt_hdr, len)) - return -1; + if (len > tailroom) { + push_tail(pkt_hdr, tailroom); + ret = add_segments(pkt_hdr, len - tailroom, 0);
- push_tail(pkt_hdr, len); + if (ret == NULL) { + /* segment alloc failed, rollback changes */ + pull_tail(pkt_hdr, tailroom); + return -1; + } + } else { + push_tail(pkt_hdr, len); + }
if (data_ptr) - *data_ptr = packet_map(pkt_hdr, origin, seg_len); + *data_ptr = packet_map(pkt_hdr, tail_off, seg_len, NULL); + return 0; }
@@ -352,27 +818,45 @@ void *odp_packet_pull_tail(odp_packet_t pkt, uint32_t len) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- if (len > pkt_hdr->frame_len) + if (len > packet_last_seg_len(pkt_hdr)) return NULL;
pull_tail(pkt_hdr, len); - return packet_map(pkt_hdr, pkt_hdr->frame_len, NULL); + + return packet_tail(pkt_hdr); }
int odp_packet_trunc_tail(odp_packet_t *pkt, uint32_t len, void **tail_ptr, uint32_t *tailroom) { + int last; + uint32_t seg_len; odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt);
if (len > pkt_hdr->frame_len) return -1;
- pull_tail(pkt_hdr, len); - if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize) - pull_tail_seg(pkt_hdr); + last = packet_last_seg(pkt_hdr); + seg_len = packet_seg_len(pkt_hdr, last); + + if (len < seg_len) { + pull_tail(pkt_hdr, len); + } else if (CONFIG_PACKET_MAX_SEGS != 1) { + int num = 0; + uint32_t pull_len = 0; + + while (seg_len <= len) { + pull_len = len - seg_len; + num++; + seg_len += packet_seg_len(pkt_hdr, last - num); + } + + free_segments(pkt_hdr, num, len - pull_len, pull_len, 0); + }
if (tail_ptr) - *tail_ptr = packet_map(pkt_hdr, pkt_hdr->frame_len, NULL); + *tail_ptr = packet_tail(pkt_hdr); + if (tailroom) *tailroom = pkt_hdr->tailroom; return 0; @@ -381,11 +865,12 @@ int odp_packet_trunc_tail(odp_packet_t *pkt, uint32_t len, void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len, odp_packet_seg_t *seg) { + int seg_idx; odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); - void *addr = packet_map(pkt_hdr, offset, len); + void *addr = packet_map(pkt_hdr, offset, len, &seg_idx);
if (addr != NULL && seg != NULL) - *seg = (odp_packet_seg_t)pkt; + *seg = seg_idx;
return addr; } @@ -445,7 +930,7 @@ void *odp_packet_l2_ptr(odp_packet_t pkt, uint32_t *len)
if (!packet_hdr_has_l2(pkt_hdr)) return NULL; - return packet_map(pkt_hdr, pkt_hdr->p.l2_offset, len); + return packet_map(pkt_hdr, pkt_hdr->p.l2_offset, len, NULL); }
uint32_t odp_packet_l2_offset(odp_packet_t pkt) @@ -475,7 +960,7 @@ void *odp_packet_l3_ptr(odp_packet_t pkt, uint32_t *len)
if (pkt_hdr->p.parsed_layers < LAYER_L3) packet_parse_layer(pkt_hdr, LAYER_L3); - return packet_map(pkt_hdr, pkt_hdr->p.l3_offset, len); + return packet_map(pkt_hdr, pkt_hdr->p.l3_offset, len, NULL); }
uint32_t odp_packet_l3_offset(odp_packet_t pkt) @@ -506,7 +991,7 @@ void *odp_packet_l4_ptr(odp_packet_t pkt, uint32_t *len)
if (pkt_hdr->p.parsed_layers < LAYER_L4) packet_parse_layer(pkt_hdr, LAYER_L4); - return packet_map(pkt_hdr, pkt_hdr->p.l4_offset, len); + return packet_map(pkt_hdr, pkt_hdr->p.l4_offset, len, NULL); }
uint32_t odp_packet_l4_offset(odp_packet_t pkt) @@ -568,29 +1053,33 @@ int odp_packet_is_segmented(odp_packet_t pkt)
int odp_packet_num_segs(odp_packet_t pkt) { - return odp_packet_hdr(pkt)->buf_hdr.segcount; + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); + + return pkt_hdr->buf_hdr.segcount; }
odp_packet_seg_t odp_packet_first_seg(odp_packet_t pkt) { - return (odp_packet_seg_t)pkt; + (void)pkt; + + return 0; }
odp_packet_seg_t odp_packet_last_seg(odp_packet_t pkt) { - (void)pkt; + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- /* Only one segment */ - return (odp_packet_seg_t)pkt; + return packet_last_seg(pkt_hdr); }
odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt, odp_packet_seg_t seg) { - (void)pkt; - (void)seg; + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- /* Only one segment */ - return ODP_PACKET_SEG_INVALID; + if (odp_unlikely(seg >= (odp_packet_seg_t)packet_last_seg(pkt_hdr))) + return ODP_PACKET_SEG_INVALID; + + return seg + 1; }
/* @@ -602,18 +1091,22 @@ odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt, odp_packet_seg_t seg)
void *odp_packet_seg_data(odp_packet_t pkt, odp_packet_seg_t seg) { - (void)seg; + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- /* Only one segment */ - return odp_packet_data(pkt); + if (odp_unlikely(seg >= pkt_hdr->buf_hdr.segcount)) + return NULL; + + return packet_seg_data(pkt_hdr, seg); }
uint32_t odp_packet_seg_data_len(odp_packet_t pkt, odp_packet_seg_t seg) { - (void)seg; + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- /* Only one segment */ - return odp_packet_seg_len(pkt); + if (odp_unlikely(seg >= pkt_hdr->buf_hdr.segcount)) + return 0; + + return packet_seg_len(pkt_hdr, seg); }
/* @@ -688,7 +1181,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len, uint32_t shift; uint32_t seglen = 0; /* GCC */ odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt); - void *addr = packet_map(pkt_hdr, offset, &seglen); + void *addr = packet_map(pkt_hdr, offset, &seglen, NULL); uint64_t uaddr = (uint64_t)(uintptr_t)addr; uint64_t misalign;
@@ -733,6 +1226,7 @@ int odp_packet_concat(odp_packet_t *dst, odp_packet_t src) src, 0, src_len); if (src != *dst) odp_packet_free(src); + return 0; }
@@ -808,7 +1302,7 @@ int odp_packet_copy_to_mem(odp_packet_t pkt, uint32_t offset, return -1;
while (len > 0) { - mapaddr = packet_map(pkt_hdr, offset, &seglen); + mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL); cpylen = len > seglen ? seglen : len; memcpy(dstaddr, mapaddr, cpylen); offset += cpylen; @@ -832,7 +1326,7 @@ int odp_packet_copy_from_mem(odp_packet_t pkt, uint32_t offset, return -1;
while (len > 0) { - mapaddr = packet_map(pkt_hdr, offset, &seglen); + mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL); cpylen = len > seglen ? seglen : len; memcpy(mapaddr, srcaddr, cpylen); offset += cpylen; @@ -878,8 +1372,8 @@ int odp_packet_copy_from_pkt(odp_packet_t dst, uint32_t dst_offset, }
while (len > 0) { - dst_map = packet_map(dst_hdr, dst_offset, &dst_seglen); - src_map = packet_map(src_hdr, src_offset, &src_seglen); + dst_map = packet_map(dst_hdr, dst_offset, &dst_seglen, NULL); + src_map = packet_map(src_hdr, src_offset, &src_seglen, NULL);
minseg = dst_seglen > src_seglen ? src_seglen : dst_seglen; cpylen = len > minseg ? minseg : len; @@ -1364,8 +1858,8 @@ parse_exit: */ int packet_parse_layer(odp_packet_hdr_t *pkt_hdr, layer_t layer) { - uint32_t seg_len; - void *base = packet_map(pkt_hdr, 0, &seg_len); + uint32_t seg_len = packet_first_seg_len(pkt_hdr); + void *base = packet_data(pkt_hdr);
return packet_parse_common(&pkt_hdr->p, base, pkt_hdr->frame_len, seg_len, layer); diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 364df97..7c462e5 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -32,6 +32,9 @@ ODP_STATIC_ASSERT(CONFIG_POOL_CACHE_SIZE > (2 * CACHE_BURST), "cache_burst_size_too_large_compared_to_cache_size");
+ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_LEN_MIN >= 256, + "ODP Segment size must be a minimum of 256 bytes"); + /* Thread local variables */ typedef struct pool_local_t { pool_cache_t *cache[ODP_CONFIG_POOLS]; @@ -46,6 +49,14 @@ static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx) return _odp_cast_scalar(odp_pool_t, pool_idx); }
+static inline uint32_t pool_id_from_buf(odp_buffer_t buf) +{ + odp_buffer_bits_t handle; + + handle.handle = buf; + return handle.pool_id; +} + int odp_pool_init_global(void) { uint32_t i; @@ -198,7 +209,7 @@ static void init_buffers(pool_t *pool) ring_t *ring; uint32_t mask; int type; - uint32_t size; + uint32_t seg_size;
ring = &pool->ring.hdr; mask = pool->ring_mask; @@ -223,12 +234,12 @@ static void init_buffers(pool_t *pool) while (((uintptr_t)&data[offset]) % pool->align != 0) offset++;
- memset(buf_hdr, 0, sizeof(odp_buffer_hdr_t)); + memset(buf_hdr, 0, (uintptr_t)data - (uintptr_t)buf_hdr);
- size = pool->headroom + pool->data_size + pool->tailroom; + seg_size = pool->headroom + pool->data_size + pool->tailroom;
/* Initialize buffer metadata */ - buf_hdr->size = size; + buf_hdr->size = seg_size; buf_hdr->type = type; buf_hdr->event_type = type; buf_hdr->pool_hdl = pool->pool_hdl; @@ -236,10 +247,18 @@ static void init_buffers(pool_t *pool) /* Show user requested size through API */ buf_hdr->uarea_size = pool->params.pkt.uarea_size; buf_hdr->segcount = 1; - buf_hdr->segsize = size; + buf_hdr->segsize = seg_size;
/* Pointer to data start (of the first segment) */ - buf_hdr->addr[0] = &data[offset]; + buf_hdr->seg[0].hdr = buf_hdr; + buf_hdr->seg[0].data = &data[offset]; + buf_hdr->seg[0].len = pool->data_size; + + /* Store base values for fast init */ + buf_hdr->base_data = buf_hdr->seg[0].data; + buf_hdr->base_len = buf_hdr->seg[0].len; + buf_hdr->buf_end = &data[offset + pool->data_size + + pool->tailroom];
buf_hdl = form_buffer_handle(pool->pool_idx, i); buf_hdr->handle.handle = buf_hdl; @@ -296,25 +315,13 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params, break;
case ODP_POOL_PACKET: - headroom = ODP_CONFIG_PACKET_HEADROOM; - tailroom = ODP_CONFIG_PACKET_TAILROOM; - num = params->pkt.num; - uarea_size = params->pkt.uarea_size; - - data_size = ODP_CONFIG_PACKET_SEG_LEN_MAX; - - if (data_size < ODP_CONFIG_PACKET_SEG_LEN_MIN) - data_size = ODP_CONFIG_PACKET_SEG_LEN_MIN; - - if (data_size > ODP_CONFIG_PACKET_SEG_LEN_MAX) { - ODP_ERR("Too large seg len requirement"); - return ODP_POOL_INVALID; - } - - max_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MAX - - ODP_CONFIG_PACKET_HEADROOM - - ODP_CONFIG_PACKET_TAILROOM; - max_len = ODP_CONFIG_PACKET_MAX_SEGS * max_seg_len; + headroom = CONFIG_PACKET_HEADROOM; + tailroom = CONFIG_PACKET_TAILROOM; + num = params->pkt.num; + uarea_size = params->pkt.uarea_size; + data_size = CONFIG_PACKET_MAX_SEG_LEN; + max_seg_len = CONFIG_PACKET_MAX_SEG_LEN; + max_len = CONFIG_PACKET_MAX_SEGS * max_seg_len; break;
case ODP_POOL_TIMEOUT: @@ -470,31 +477,6 @@ void _odp_buffer_event_type_set(odp_buffer_t buf, int ev) buf_hdl_to_hdr(buf)->event_type = ev; }
-void *buffer_map(odp_buffer_hdr_t *buf, - uint32_t offset, - uint32_t *seglen, - uint32_t limit) -{ - int seg_index; - int seg_offset; - - if (odp_likely(offset < buf->segsize)) { - seg_index = 0; - seg_offset = offset; - } else { - ODP_ERR("\nSEGMENTS NOT SUPPORTED\n"); - return NULL; - } - - if (seglen != NULL) { - uint32_t buf_left = limit - offset; - *seglen = seg_offset + buf_left <= buf->segsize ? - buf_left : buf->segsize - seg_offset; - } - - return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]); -} - odp_pool_t odp_pool_lookup(const char *name) { uint32_t i; @@ -727,9 +709,7 @@ void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
int odp_pool_capability(odp_pool_capability_t *capa) { - uint32_t max_len = ODP_CONFIG_PACKET_SEG_LEN_MAX - - ODP_CONFIG_PACKET_HEADROOM - - ODP_CONFIG_PACKET_TAILROOM; + uint32_t max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
memset(capa, 0, sizeof(odp_pool_capability_t));
@@ -743,13 +723,13 @@ int odp_pool_capability(odp_pool_capability_t *capa)
/* Packet pools */ capa->pkt.max_pools = ODP_CONFIG_POOLS; - capa->pkt.max_len = ODP_CONFIG_PACKET_MAX_SEGS * max_len; + capa->pkt.max_len = CONFIG_PACKET_MAX_SEGS * max_seg_len; capa->pkt.max_num = CONFIG_POOL_MAX_NUM; - capa->pkt.min_headroom = ODP_CONFIG_PACKET_HEADROOM; - capa->pkt.min_tailroom = ODP_CONFIG_PACKET_TAILROOM; - capa->pkt.max_segs_per_pkt = ODP_CONFIG_PACKET_MAX_SEGS; - capa->pkt.min_seg_len = max_len; - capa->pkt.max_seg_len = max_len; + capa->pkt.min_headroom = CONFIG_PACKET_HEADROOM; + capa->pkt.min_tailroom = CONFIG_PACKET_TAILROOM; + capa->pkt.max_segs_per_pkt = CONFIG_PACKET_MAX_SEGS; + capa->pkt.min_seg_len = max_seg_len; + capa->pkt.max_seg_len = max_seg_len; capa->pkt.max_uarea_size = 0;
/* Timeout pools */ @@ -765,7 +745,7 @@ void odp_pool_print(odp_pool_t pool_hdl)
pool = pool_entry_from_hdl(pool_hdl);
- printf("Pool info\n"); + printf("\nPool info\n"); printf("---------\n"); printf(" pool %" PRIu64 "\n", odp_pool_to_u64(pool->pool_hdl)); @@ -812,19 +792,6 @@ uint64_t odp_pool_to_u64(odp_pool_t hdl) return _odp_pri(hdl); }
-int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount) -{ - (void)buf_hdr; - (void)segcount; - return 0; -} - -void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount) -{ - (void)buf_hdr; - (void)segcount; -} - int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount) { (void)buf_hdr; @@ -855,13 +822,3 @@ int odp_buffer_is_valid(odp_buffer_t buf)
return 1; } - -uint32_t pool_headroom(odp_pool_t pool) -{ - return pool_entry_from_hdl(pool)->headroom; -} - -uint32_t pool_tailroom(odp_pool_t pool) -{ - return pool_entry_from_hdl(pool)->tailroom; -} diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c index cf67741..8eb8145 100644 --- a/platform/linux-generic/pktio/netmap.c +++ b/platform/linux-generic/pktio/netmap.c @@ -345,9 +345,7 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry, pkt_nm->pool = pool;
/* max frame len taking into account the l2-offset */ - pkt_nm->max_frame_len = ODP_CONFIG_PACKET_BUF_LEN_MAX - - pool_headroom(pool) - - pool_tailroom(pool); + pkt_nm->max_frame_len = CONFIG_PACKET_MAX_SEG_LEN;
/* allow interface to be opened with or without the 'netmap:' prefix */ prefix = "netmap:"; diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c index ab25aab..9fe4a7e 100644 --- a/platform/linux-generic/pktio/socket.c +++ b/platform/linux-generic/pktio/socket.c @@ -46,7 +46,8 @@ #include <protocols/eth.h> #include <protocols/ip.h>
-#define MAX_SEGS ODP_CONFIG_PACKET_MAX_SEGS +#define MAX_SEGS CONFIG_PACKET_MAX_SEGS +#define PACKET_JUMBO_LEN (9 * 1024)
static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
commit 7daaf31cb033e044f5b241bc00df7d708cbee53f Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:36 2016 +0200
test: validation: packet: fix bugs in tailroom and concat tests
The tailroom test did not call odp_packet_extend_tail() since it pushed the tail by too few bytes. Corrected the test to extend the tail by 100 bytes beyond the available tailroom.
The concat test passed the same packet as both the src and dst packet. There's no valid use case for concatenating a packet onto itself (it would form a loop). Corrected the test to concatenate two copies of the same packet.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
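For illustration, a minimal sketch (not part of the patch) of the corrected concat pattern, assuming the <odp_api.h> umbrella header; it uses only API calls that appear in the diff below, and the function name is illustrative:

#include <odp_api.h>

/* Sketch: concatenate two independent copies of test_packet instead of
 * concatenating the packet onto itself. */
static int concat_two_copies(odp_packet_t test_packet)
{
	odp_pool_t pool = odp_packet_pool(test_packet);
	odp_packet_t pkt  = odp_packet_copy(test_packet, pool);
	odp_packet_t pkt2 = odp_packet_copy(test_packet, pool);
	uint32_t len = odp_packet_len(test_packet);
	int ret;

	if (pkt == ODP_PACKET_INVALID || pkt2 == ODP_PACKET_INVALID)
		goto fail;

	/* On success pkt owns pkt2's data, so only pkt is freed below. */
	if (odp_packet_concat(&pkt, pkt2))
		goto fail;

	ret = (odp_packet_len(pkt) == 2 * len) ? 0 : -1;
	odp_packet_free(pkt);
	return ret;

fail:
	if (pkt != ODP_PACKET_INVALID)
		odp_packet_free(pkt);
	if (pkt2 != ODP_PACKET_INVALID)
		odp_packet_free(pkt2);
	return -1;
}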
diff --git a/test/common_plat/validation/api/packet/packet.c b/test/common_plat/validation/api/packet/packet.c index 454c73f..8b31872 100644 --- a/test/common_plat/validation/api/packet/packet.c +++ b/test/common_plat/validation/api/packet/packet.c @@ -642,9 +642,10 @@ void packet_test_tailroom(void) _verify_tailroom_shift(&pkt, 0);
if (segmentation_supported) { - _verify_tailroom_shift(&pkt, pull_val); + push_val = room + 100; + _verify_tailroom_shift(&pkt, push_val); _verify_tailroom_shift(&pkt, 0); - _verify_tailroom_shift(&pkt, -pull_val); + _verify_tailroom_shift(&pkt, -push_val); }
odp_packet_free(pkt); @@ -1157,12 +1158,18 @@ void packet_test_concatsplit(void) odp_packet_t pkt, pkt2; uint32_t pkt_len; odp_packet_t splits[4]; + odp_pool_t pool;
- pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet)); + pool = odp_packet_pool(test_packet); + pkt = odp_packet_copy(test_packet, pool); + pkt2 = odp_packet_copy(test_packet, pool); pkt_len = odp_packet_len(test_packet); CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID); + CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID); + CU_ASSERT(pkt_len == odp_packet_len(pkt)); + CU_ASSERT(pkt_len == odp_packet_len(pkt2));
- CU_ASSERT(odp_packet_concat(&pkt, pkt) == 0); + CU_ASSERT(odp_packet_concat(&pkt, pkt2) == 0); CU_ASSERT(odp_packet_len(pkt) == pkt_len * 2); _packet_compare_offset(pkt, 0, pkt, pkt_len, pkt_len);
commit dab3168796ed7809e717d40c12dabd9b24778168 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:35 2016 +0200
test: correctly initialize pool parameters
Use odp_pool_param_init() to initialize pool parameters. Also, the pktio test must use the pool capability to determine the maximum packet segment length.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
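For illustration, a minimal sketch (not part of the patch) of the recommended initialization pattern, assuming <odp_api.h>; the pool name and sizing policy are illustrative:

#include <odp_api.h>

/* Sketch: start from odp_pool_param_init() defaults and clamp the
 * requested packet length against the implementation capability. */
static odp_pool_t create_pkt_pool(uint32_t want_len, uint32_t num)
{
	odp_pool_capability_t capa;
	odp_pool_param_t params;

	if (odp_pool_capability(&capa))
		return ODP_POOL_INVALID;

	if (capa.pkt.max_len && want_len > capa.pkt.max_len)
		want_len = capa.pkt.max_len;

	odp_pool_param_init(&params);   /* defaults, not memset() */
	params.type    = ODP_POOL_PACKET;
	params.pkt.len = want_len;
	params.pkt.num = num;

	return odp_pool_create("example_pkt_pool", &params);
}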
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c index 48d7f5f..23dbd55 100644 --- a/example/generator/odp_generator.c +++ b/example/generator/odp_generator.c @@ -732,7 +732,7 @@ int main(int argc, char *argv[]) odp_timer_pool_start();
/* Create timeout pool */ - memset(¶ms, 0, sizeof(params)); + odp_pool_param_init(¶ms); params.tmo.num = tparams.num_timers; /* One timeout per timer */ params.type = ODP_POOL_TIMEOUT;
diff --git a/test/common_plat/validation/api/crypto/crypto.c b/test/common_plat/validation/api/crypto/crypto.c index 8946cde..9c9a00d 100644 --- a/test/common_plat/validation/api/crypto/crypto.c +++ b/test/common_plat/validation/api/crypto/crypto.c @@ -43,7 +43,7 @@ int crypto_init(odp_instance_t *inst) return -1; }
- memset(¶ms, 0, sizeof(params)); + odp_pool_param_init(¶ms); params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE; params.pkt.len = SHM_PKT_POOL_BUF_SIZE; params.pkt.num = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE; diff --git a/test/common_plat/validation/api/pktio/pktio.c b/test/common_plat/validation/api/pktio/pktio.c index a6a18c3..befaa7e 100644 --- a/test/common_plat/validation/api/pktio/pktio.c +++ b/test/common_plat/validation/api/pktio/pktio.c @@ -310,7 +310,7 @@ static int default_pool_create(void) if (default_pkt_pool != ODP_POOL_INVALID) return -1;
- memset(¶ms, 0, sizeof(params)); + odp_pool_param_init(¶ms); set_pool_len(¶ms); params.pkt.num = PKT_BUF_NUM; params.type = ODP_POOL_PACKET; @@ -1669,10 +1669,11 @@ int pktio_check_send_failure(void)
odp_pktio_close(pktio_tx);
- if (mtu <= pool_capa.pkt.max_len - 32) - return ODP_TEST_ACTIVE; + /* Failure test supports only single segment */ + if (pool_capa.pkt.max_seg_len < mtu + 32) + return ODP_TEST_INACTIVE;
- return ODP_TEST_INACTIVE; + return ODP_TEST_ACTIVE; }
void pktio_test_send_failure(void) @@ -1687,6 +1688,7 @@ void pktio_test_send_failure(void) int long_pkt_idx = TX_BATCH_LEN / 2; pktio_info_t info_rx; odp_pktout_queue_t pktout; + odp_pool_capability_t pool_capa;
pktio_tx = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT); @@ -1705,9 +1707,16 @@ void pktio_test_send_failure(void)
_pktio_wait_linkup(pktio_tx);
+ CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0); + + if (pool_capa.pkt.max_seg_len < mtu + 32) { + CU_FAIL("Max packet seg length is too small."); + return; + } + /* configure the pool so that we can generate test packets larger * than the interface MTU */ - memset(&pool_params, 0, sizeof(pool_params)); + odp_pool_param_init(&pool_params); pool_params.pkt.len = mtu + 32; pool_params.pkt.seg_len = pool_params.pkt.len; pool_params.pkt.num = TX_BATCH_LEN + 1; @@ -1996,7 +2005,7 @@ static int create_pool(const char *iface, int num) char pool_name[ODP_POOL_NAME_LEN]; odp_pool_param_t params;
- memset(¶ms, 0, sizeof(params)); + odp_pool_param_init(¶ms); set_pool_len(¶ms); params.pkt.num = PKT_BUF_NUM; params.type = ODP_POOL_PACKET;
commit c09d396bd393a6cbc09f668389b6c9e172a53f7f Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:34 2016 +0200
test: performance: crypto: use capability to select max packet
Applications must use the pool capability to check maximum values for parameters. Used the maximum segment length since the application appears to support only single-segment packets.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
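A minimal sketch (not part of the patch) of the capability check described above, assuming <odp_api.h> and an ascending payloads[] array as in the test:

#include <odp_api.h>

/* Sketch: count how many predefined payload sizes fit into a single
 * segment, since the application supports only single-segment packets. */
static unsigned count_usable_payloads(const unsigned payloads[], unsigned n)
{
	odp_pool_capability_t capa;
	unsigned i;

	if (odp_pool_capability(&capa))
		return 0;

	for (i = 0; i < n; i++)
		if (payloads[i] > capa.pkt.max_seg_len)
			break;

	return i;
}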
diff --git a/test/common_plat/performance/odp_crypto.c b/test/common_plat/performance/odp_crypto.c index 49a9f4b..39df78b 100644 --- a/test/common_plat/performance/odp_crypto.c +++ b/test/common_plat/performance/odp_crypto.c @@ -23,15 +23,10 @@ fprintf(stderr, "%s:%d:%s(): Error: " fmt, __FILE__, \ __LINE__, __func__, ##__VA_ARGS__)
-/** @def SHM_PKT_POOL_SIZE - * @brief Size of the shared memory block +/** @def POOL_NUM_PKT + * Number of packets in the pool */ -#define SHM_PKT_POOL_SIZE (512 * 2048 * 2) - -/** @def SHM_PKT_POOL_BUF_SIZE - * @brief Buffer size of the packet pool buffer - */ -#define SHM_PKT_POOL_BUF_SIZE (1024 * 32) +#define POOL_NUM_PKT 64
static uint8_t test_iv[8] = "01234567";
@@ -165,9 +160,7 @@ static void parse_args(int argc, char *argv[], crypto_args_t *cargs); static void usage(char *progname);
/** - * Set of predefined payloads. Make sure that maximum payload - * size is not bigger than SHM_PKT_POOL_BUF_SIZE. May relax when - * implementation start support segmented buffers/packets. + * Set of predefined payloads. */ static unsigned int payloads[] = { 16, @@ -178,6 +171,9 @@ static unsigned int payloads[] = { 16384 };
+/** Number of payloads used in the test */ +static unsigned num_payloads; + /** * Set of known algorithms to test */ @@ -680,12 +676,10 @@ run_measure_one_config(crypto_args_t *cargs, config, &result); } } else { - unsigned int i; + unsigned i;
print_result_header(); - for (i = 0; - i < (sizeof(payloads) / sizeof(unsigned int)); - i++) { + for (i = 0; i < num_payloads; i++) { rc = run_measure_one(cargs, config, &session, payloads[i], &result); if (rc) @@ -728,6 +722,9 @@ int main(int argc, char *argv[]) int num_workers = 1; odph_odpthread_t thr[num_workers]; odp_instance_t instance; + odp_pool_capability_t capa; + uint32_t max_seg_len; + unsigned i;
memset(&cargs, 0, sizeof(cargs));
@@ -743,11 +740,25 @@ int main(int argc, char *argv[]) /* Init this thread */ odp_init_local(instance, ODP_THREAD_WORKER);
+ if (odp_pool_capability(&capa)) { + app_err("Pool capability request failed.\n"); + exit(EXIT_FAILURE); + } + + max_seg_len = capa.pkt.max_seg_len; + + for (i = 0; i < sizeof(payloads) / sizeof(unsigned int); i++) { + if (payloads[i] > max_seg_len) + break; + } + + num_payloads = i; + /* Create packet pool */ odp_pool_param_init(¶ms); - params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE; - params.pkt.len = SHM_PKT_POOL_BUF_SIZE; - params.pkt.num = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE; + params.pkt.seg_len = max_seg_len; + params.pkt.len = max_seg_len; + params.pkt.num = POOL_NUM_PKT; params.type = ODP_POOL_PACKET; pool = odp_pool_create("packet_pool", ¶ms);
commit 8f0af9b08006290e35b117a02212b6881987adef Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:33 2016 +0200
test: validation: buf: test alignment
Added checks for correct alignment. Also updated tests to call odp_pool_param_init() for parameter initialization.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
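A minimal sketch (not part of the patch) of the added alignment check, assuming <odp_api.h>; the helper name is illustrative:

#include <odp_api.h>

/* Sketch: allocate one buffer and verify type, size and start address
 * alignment, as the updated validation test does. */
static int check_buffer(odp_pool_t pool, uint32_t size, uintptr_t align)
{
	odp_buffer_t buf = odp_buffer_alloc(pool);
	int ok;

	if (buf == ODP_BUFFER_INVALID)
		return -1;

	ok = (odp_event_type(odp_buffer_to_event(buf)) == ODP_EVENT_BUFFER) &&
	     (odp_buffer_size(buf) >= size) &&
	     (((uintptr_t)odp_buffer_addr(buf) % align) == 0);

	odp_buffer_free(buf);
	return ok ? 0 : -1;
}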
diff --git a/test/common_plat/validation/api/buffer/buffer.c b/test/common_plat/validation/api/buffer/buffer.c index d26d5e8..7c723d4 100644 --- a/test/common_plat/validation/api/buffer/buffer.c +++ b/test/common_plat/validation/api/buffer/buffer.c @@ -8,20 +8,21 @@ #include "odp_cunit_common.h" #include "buffer.h"
+#define BUF_ALIGN ODP_CACHE_LINE_SIZE +#define BUF_SIZE 1500 + static odp_pool_t raw_pool; static odp_buffer_t raw_buffer = ODP_BUFFER_INVALID; -static const size_t raw_buffer_size = 1500;
int buffer_suite_init(void) { - odp_pool_param_t params = { - .buf = { - .size = raw_buffer_size, - .align = ODP_CACHE_LINE_SIZE, - .num = 100, - }, - .type = ODP_POOL_BUFFER, - }; + odp_pool_param_t params; + + odp_pool_param_init(¶ms); + params.type = ODP_POOL_BUFFER; + params.buf.size = BUF_SIZE; + params.buf.align = BUF_ALIGN; + params.buf.num = 100;
raw_pool = odp_pool_create("raw_pool", ¶ms); if (raw_pool == ODP_POOL_INVALID) @@ -44,25 +45,25 @@ void buffer_test_pool_alloc(void) { odp_pool_t pool; const int num = 3; - const size_t size = 1500; odp_buffer_t buffer[num]; odp_event_t ev; int index; - char wrong_type = 0, wrong_size = 0; - odp_pool_param_t params = { - .buf = { - .size = size, - .align = ODP_CACHE_LINE_SIZE, - .num = num, - }, - .type = ODP_POOL_BUFFER, - }; + char wrong_type = 0, wrong_size = 0, wrong_align = 0; + odp_pool_param_t params; + + odp_pool_param_init(¶ms); + params.type = ODP_POOL_BUFFER; + params.buf.size = BUF_SIZE; + params.buf.align = BUF_ALIGN; + params.buf.num = num;
pool = odp_pool_create("buffer_pool_alloc", ¶ms); odp_pool_print(pool);
/* Try to allocate num items from the pool */ for (index = 0; index < num; index++) { + uintptr_t addr; + buffer[index] = odp_buffer_alloc(pool);
if (buffer[index] == ODP_BUFFER_INVALID) @@ -71,9 +72,15 @@ void buffer_test_pool_alloc(void) ev = odp_buffer_to_event(buffer[index]); if (odp_event_type(ev) != ODP_EVENT_BUFFER) wrong_type = 1; - if (odp_buffer_size(buffer[index]) < size) + if (odp_buffer_size(buffer[index]) < BUF_SIZE) wrong_size = 1; - if (wrong_type || wrong_size) + + addr = (uintptr_t)odp_buffer_addr(buffer[index]); + + if ((addr % BUF_ALIGN) != 0) + wrong_align = 1; + + if (wrong_type || wrong_size || wrong_align) odp_buffer_print(buffer[index]); }
@@ -85,6 +92,7 @@ void buffer_test_pool_alloc(void) /* Check that the pool had correct buffers */ CU_ASSERT(wrong_type == 0); CU_ASSERT(wrong_size == 0); + CU_ASSERT(wrong_align == 0);
for (; index >= 0; index--) odp_buffer_free(buffer[index]); @@ -112,19 +120,17 @@ void buffer_test_pool_alloc_multi(void) { odp_pool_t pool; const int num = 3; - const size_t size = 1500; odp_buffer_t buffer[num + 1]; odp_event_t ev; int index; - char wrong_type = 0, wrong_size = 0; - odp_pool_param_t params = { - .buf = { - .size = size, - .align = ODP_CACHE_LINE_SIZE, - .num = num, - }, - .type = ODP_POOL_BUFFER, - }; + char wrong_type = 0, wrong_size = 0, wrong_align = 0; + odp_pool_param_t params; + + odp_pool_param_init(¶ms); + params.type = ODP_POOL_BUFFER; + params.buf.size = BUF_SIZE; + params.buf.align = BUF_ALIGN; + params.buf.num = num;
pool = odp_pool_create("buffer_pool_alloc_multi", ¶ms); odp_pool_print(pool); @@ -133,15 +139,23 @@ void buffer_test_pool_alloc_multi(void) CU_ASSERT_FATAL(buffer_alloc_multi(pool, buffer, num + 1) == num);
for (index = 0; index < num; index++) { + uintptr_t addr; + if (buffer[index] == ODP_BUFFER_INVALID) break;
ev = odp_buffer_to_event(buffer[index]); if (odp_event_type(ev) != ODP_EVENT_BUFFER) wrong_type = 1; - if (odp_buffer_size(buffer[index]) < size) + if (odp_buffer_size(buffer[index]) < BUF_SIZE) wrong_size = 1; - if (wrong_type || wrong_size) + + addr = (uintptr_t)odp_buffer_addr(buffer[index]); + + if ((addr % BUF_ALIGN) != 0) + wrong_align = 1; + + if (wrong_type || wrong_size || wrong_align) odp_buffer_print(buffer[index]); }
@@ -151,6 +165,7 @@ void buffer_test_pool_alloc_multi(void) /* Check that the pool had correct buffers */ CU_ASSERT(wrong_type == 0); CU_ASSERT(wrong_size == 0); + CU_ASSERT(wrong_align == 0);
odp_buffer_free_multi(buffer, num);
@@ -161,14 +176,13 @@ void buffer_test_pool_free(void) { odp_pool_t pool; odp_buffer_t buffer; - odp_pool_param_t params = { - .buf = { - .size = 64, - .align = ODP_CACHE_LINE_SIZE, - .num = 1, - }, - .type = ODP_POOL_BUFFER, - }; + odp_pool_param_t params; + + odp_pool_param_init(¶ms); + params.type = ODP_POOL_BUFFER; + params.buf.size = 64; + params.buf.align = BUF_ALIGN; + params.buf.num = 1;
pool = odp_pool_create("buffer_pool_free", ¶ms);
@@ -194,14 +208,13 @@ void buffer_test_pool_free_multi(void) odp_pool_t pool[2]; odp_buffer_t buffer[4]; odp_buffer_t buf_inval[2]; - odp_pool_param_t params = { - .buf = { - .size = 64, - .align = ODP_CACHE_LINE_SIZE, - .num = 2, - }, - .type = ODP_POOL_BUFFER, - }; + odp_pool_param_t params; + + odp_pool_param_init(¶ms); + params.type = ODP_POOL_BUFFER; + params.buf.size = 64; + params.buf.align = BUF_ALIGN; + params.buf.num = 2;
pool[0] = odp_pool_create("buffer_pool_free_multi_0", ¶ms); pool[1] = odp_pool_create("buffer_pool_free_multi_1", ¶ms); @@ -235,7 +248,7 @@ void buffer_test_management_basic(void) CU_ASSERT(odp_buffer_is_valid(raw_buffer) == 1); CU_ASSERT(odp_buffer_pool(raw_buffer) != ODP_POOL_INVALID); CU_ASSERT(odp_event_type(ev) == ODP_EVENT_BUFFER); - CU_ASSERT(odp_buffer_size(raw_buffer) >= raw_buffer_size); + CU_ASSERT(odp_buffer_size(raw_buffer) >= BUF_SIZE); CU_ASSERT(odp_buffer_addr(raw_buffer) != NULL); odp_buffer_print(raw_buffer); CU_ASSERT(odp_buffer_to_u64(raw_buffer) !=
commit 6301ce94a988b2dee431ebbd791cad49cf6772a2 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:32 2016 +0200
linux-gen: pool: ptr instead of hdl in buffer_alloc_multi
Improve performance by changing the first parameter of buffer_alloc_multi() to a pool pointer (instead of a handle), avoiding a double lookup of the pool pointer. The pointer is already available in the packet alloc calls.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
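The change can be illustrated with a hypothetical sketch (types and names are stand-ins, not the actual internals): the public wrapper resolves the handle once and the internal worker takes the resolved pointer:

#include <stdint.h>

/* Hypothetical types for illustration only. */
typedef uintptr_t pool_handle_t;
typedef struct { int pool_idx; } pool_sketch_t;

static pool_sketch_t pool_table[16];   /* hypothetical pool table */

static inline pool_sketch_t *entry_from_handle(pool_handle_t hdl)
{
	return &pool_table[(uint32_t)hdl];   /* assumes hdl < 16 */
}

/* Internal worker takes the already-resolved pointer. */
static int alloc_multi(pool_sketch_t *pool, void *buf[], int num)
{
	(void)pool; (void)buf;
	return num;
}

/* Public API wrapper: single handle-to-pointer conversion. */
int pool_alloc_multi(pool_handle_t hdl, void *buf[], int num)
{
	return alloc_multi(entry_from_handle(hdl), buf, num);
}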
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h index 64ba221..0ca13f8 100644 --- a/platform/linux-generic/include/odp_buffer_internal.h +++ b/platform/linux-generic/include/odp_buffer_internal.h @@ -105,10 +105,6 @@ struct odp_buffer_hdr_t { };
/* Forward declarations */ -int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], - odp_buffer_hdr_t *buf_hdr[], int num); -void buffer_free_multi(const odp_buffer_t buf[], int num_free); - int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount); void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount); int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount); diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index f7c315c..f7e951a 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -109,6 +109,10 @@ static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf) return buf_hdr; }
+int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[], + odp_buffer_hdr_t *buf_hdr[], int num); +void buffer_free_multi(const odp_buffer_t buf[], int num_free); + uint32_t pool_headroom(odp_pool_t pool); uint32_t pool_tailroom(odp_pool_t pool);
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index c44f687..2eee775 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -84,7 +84,7 @@ int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, int num, i; odp_packet_hdr_t *pkt_hdrs[max_num];
- num = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, + num = buffer_alloc_multi(pool, (odp_buffer_t *)pkt, (odp_buffer_hdr_t **)pkt_hdrs, max_num);
for (i = 0; i < num; i++) { @@ -115,7 +115,7 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) if (odp_unlikely(len > pool->max_len)) return ODP_PACKET_INVALID;
- ret = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)&pkt, NULL, 1); + ret = buffer_alloc_multi(pool, (odp_buffer_t *)&pkt, NULL, 1); if (ret != 1) return ODP_PACKET_INVALID;
@@ -146,7 +146,7 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, if (odp_unlikely(len > pool->max_len)) return -1;
- count = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, + count = buffer_alloc_multi(pool, (odp_buffer_t *)pkt, (odp_buffer_hdr_t **)pkt_hdrs, num);
for (i = 0; i < count; ++i) { diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index faea2fc..364df97 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -528,19 +528,17 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info) return 0; }
-int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], +int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[], odp_buffer_hdr_t *buf_hdr[], int max_num) { - pool_t *pool; ring_t *ring; uint32_t mask, i; pool_cache_t *cache; uint32_t cache_num, num_ch, num_deq, burst;
- pool = pool_entry_from_hdl(pool_hdl); ring = &pool->ring.hdr; mask = pool->ring_mask; - cache = local.cache[_odp_typeval(pool_hdl)]; + cache = local.cache[pool->pool_idx];
cache_num = cache->num; num_ch = max_num; @@ -696,9 +694,11 @@ void buffer_free_multi(const odp_buffer_t buf[], int num_total) odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl) { odp_buffer_t buf; + pool_t *pool; int ret;
- ret = buffer_alloc_multi(pool_hdl, &buf, NULL, 1); + pool = pool_entry_from_hdl(pool_hdl); + ret = buffer_alloc_multi(pool, &buf, NULL, 1);
if (odp_likely(ret == 1)) return buf; @@ -708,7 +708,11 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num) { - return buffer_alloc_multi(pool_hdl, buf, NULL, num); + pool_t *pool; + + pool = pool_entry_from_hdl(pool_hdl); + + return buffer_alloc_multi(pool, buf, NULL, num); }
void odp_buffer_free(odp_buffer_t buf)
commit 0e67e186489081513b1fa141461ff44fb74ac7f1 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:31 2016 +0200
linux-gen: pool: clean up pool inline functions
Removed odp_pool_to_entry(), which was a duplicate of pool_entry_from_hdl(). Renamed odp_buf_to_hdr() to buf_hdl_to_hdr(), which more accurately describes the internal function. Inlined pool_entry(), pool_entry_from_hdl() and buf_hdl_to_hdr(), which are used often and also outside of pool.c. Renamed odp_buffer_pool_headroom() and _tailroom() to simply pool_headroom() and _tailroom(), since those are internal functions (not API, as the previous names hint). Also moved them into pool.c, since inlining is not needed for functions that are called only in the (netmap) init phase.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
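A hypothetical sketch of the handle-to-header address computation that the inlined buf_hdl_to_hdr() performs (minimal stand-in types, not the real structures):

#include <stdint.h>

typedef struct {
	uint32_t block_size;   /* bytes reserved per buffer         */
	uint8_t *base_addr;    /* start of the pool's buffer memory */
} mini_pool_t;

typedef struct {
	uint32_t pool_id;      /* selects the pool           */
	uint32_t index;        /* buffer index within a pool */
} mini_handle_t;

static mini_pool_t pools[8];   /* hypothetical pool table */

/* The header lives at a fixed stride (block_size) from the pool base. */
static inline void *handle_to_hdr(mini_handle_t hdl)
{
	mini_pool_t *pool = &pools[hdl.pool_id];
	uint32_t block_offset = hdl.index * pool->block_size;

	return (void *)&pool->base_addr[block_offset];
}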
diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h index 2f5eb88..f8688f6 100644 --- a/platform/linux-generic/include/odp_buffer_inlines.h +++ b/platform/linux-generic/include/odp_buffer_inlines.h @@ -31,8 +31,6 @@ static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr) return hdr->handle.handle; }
-odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf); - static inline uint32_t pool_id_from_buf(odp_buffer_t buf) { odp_buffer_bits_t handle; diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h index 2cad71f..0cdd5ca 100644 --- a/platform/linux-generic/include/odp_packet_internal.h +++ b/platform/linux-generic/include/odp_packet_internal.h @@ -199,7 +199,7 @@ typedef struct { */ static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt) { - return (odp_packet_hdr_t *)odp_buf_to_hdr((odp_buffer_t)pkt); + return (odp_packet_hdr_t *)buf_hdl_to_hdr((odp_buffer_t)pkt); }
static inline void copy_packet_parser_metadata(odp_packet_hdr_t *src_hdr, diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index 278c553..f7c315c 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -73,23 +73,45 @@ typedef struct pool_t {
} pool_t;
-pool_t *pool_entry(uint32_t pool_idx); +typedef struct pool_table_t { + pool_t pool[ODP_CONFIG_POOLS]; + odp_shm_t shm; +} pool_table_t;
-static inline pool_t *odp_pool_to_entry(odp_pool_t pool_hdl) +extern pool_table_t *pool_tbl; + +static inline pool_t *pool_entry(uint32_t pool_idx) { - return pool_entry(_odp_typeval(pool_hdl)); + return &pool_tbl->pool[pool_idx]; }
-static inline uint32_t odp_buffer_pool_headroom(odp_pool_t pool) +static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl) { - return odp_pool_to_entry(pool)->headroom; + return &pool_tbl->pool[_odp_typeval(pool_hdl)]; }
-static inline uint32_t odp_buffer_pool_tailroom(odp_pool_t pool) +static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf) { - return odp_pool_to_entry(pool)->tailroom; + odp_buffer_bits_t handle; + uint32_t pool_id, index, block_offset; + pool_t *pool; + odp_buffer_hdr_t *buf_hdr; + + handle.handle = buf; + pool_id = handle.pool_id; + index = handle.index; + pool = pool_entry(pool_id); + block_offset = index * pool->block_size; + + /* clang requires cast to uintptr_t */ + buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset]; + + return buf_hdr; }
+uint32_t pool_headroom(odp_pool_t pool); +uint32_t pool_tailroom(odp_pool_t pool); + #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c index 0ddaf95..eed15c0 100644 --- a/platform/linux-generic/odp_buffer.c +++ b/platform/linux-generic/odp_buffer.c @@ -26,14 +26,14 @@ odp_event_t odp_buffer_to_event(odp_buffer_t buf)
void *odp_buffer_addr(odp_buffer_t buf) { - odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf); + odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
return hdr->addr[0]; }
uint32_t odp_buffer_size(odp_buffer_t buf) { - odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf); + odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
return hdr->size; } @@ -48,7 +48,7 @@ int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) return len; }
- hdr = odp_buf_to_hdr(buf); + hdr = buf_hdl_to_hdr(buf);
len += snprintf(&str[len], n-len, "Buffer\n"); diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index 6565a5d..c44f687 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -80,7 +80,7 @@ static void packet_init(pool_t *pool, odp_packet_hdr_t *pkt_hdr, int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, odp_packet_t pkt[], int max_num) { - pool_t *pool = odp_pool_to_entry(pool_hdl); + pool_t *pool = pool_entry_from_hdl(pool_hdl); int num, i; odp_packet_hdr_t *pkt_hdrs[max_num];
@@ -101,7 +101,7 @@ int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) { - pool_t *pool = odp_pool_to_entry(pool_hdl); + pool_t *pool = pool_entry_from_hdl(pool_hdl); size_t pkt_size = len ? len : pool->data_size; odp_packet_t pkt; odp_packet_hdr_t *pkt_hdr; @@ -133,7 +133,7 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, odp_packet_t pkt[], int num) { - pool_t *pool = odp_pool_to_entry(pool_hdl); + pool_t *pool = pool_entry_from_hdl(pool_hdl); size_t pkt_size = len ? len : pool->data_size; int count, i; odp_packet_hdr_t *pkt_hdrs[num]; @@ -176,7 +176,7 @@ void odp_packet_free_multi(const odp_packet_t pkt[], int num) int odp_packet_reset(odp_packet_t pkt, uint32_t len) { odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt); - pool_t *pool = odp_pool_to_entry(pkt_hdr->buf_hdr.pool_hdl); + pool_t *pool = pool_entry_from_hdl(pkt_hdr->buf_hdr.pool_hdl);
if (len > pool->headroom + pool->data_size + pool->tailroom) return -1; diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c index 0b9939b..50798cf 100644 --- a/platform/linux-generic/odp_packet_io.c +++ b/platform/linux-generic/odp_packet_io.c @@ -563,7 +563,7 @@ static inline int pktin_recv_buf(odp_pktin_queue_t queue, pkt = packets[i]; pkt_hdr = odp_packet_hdr(pkt); buf = _odp_packet_to_buffer(pkt); - buf_hdr = odp_buf_to_hdr(buf); + buf_hdr = buf_hdl_to_hdr(buf);
if (pkt_hdr->p.input_flags.dst_queue) { queue_entry_t *dst_queue; diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 7dc0938..faea2fc 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -32,18 +32,13 @@ ODP_STATIC_ASSERT(CONFIG_POOL_CACHE_SIZE > (2 * CACHE_BURST), "cache_burst_size_too_large_compared_to_cache_size");
-typedef struct pool_table_t { - pool_t pool[ODP_CONFIG_POOLS]; - odp_shm_t shm; -} pool_table_t; - /* Thread local variables */ typedef struct pool_local_t { pool_cache_t *cache[ODP_CONFIG_POOLS]; int thr_id; } pool_local_t;
-static pool_table_t *pool_tbl; +pool_table_t *pool_tbl; static __thread pool_local_t local;
static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx) @@ -51,16 +46,6 @@ static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx) return _odp_cast_scalar(odp_pool_t, pool_idx); }
-pool_t *pool_entry(uint32_t pool_idx) -{ - return &pool_tbl->pool[pool_idx]; -} - -static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl) -{ - return &pool_tbl->pool[_odp_typeval(pool_hdl)]; -} - int odp_pool_init_global(void) { uint32_t i; @@ -475,33 +460,14 @@ int odp_pool_destroy(odp_pool_t pool_hdl) return 0; }
-odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) -{ - odp_buffer_bits_t handle; - uint32_t pool_id, index, block_offset; - pool_t *pool; - odp_buffer_hdr_t *buf_hdr; - - handle.handle = buf; - pool_id = handle.pool_id; - index = handle.index; - pool = pool_entry(pool_id); - block_offset = index * pool->block_size; - - /* clang requires cast to uintptr_t */ - buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset]; - - return buf_hdr; -} - odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf) { - return odp_buf_to_hdr(buf)->event_type; + return buf_hdl_to_hdr(buf)->event_type; }
void _odp_buffer_event_type_set(odp_buffer_t buf, int ev) { - odp_buf_to_hdr(buf)->event_type = ev; + buf_hdl_to_hdr(buf)->event_type = ev; }
void *buffer_map(odp_buffer_hdr_t *buf, @@ -614,7 +580,7 @@ int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], buf[idx] = (odp_buffer_t)(uintptr_t)data[i];
if (buf_hdr) { - buf_hdr[idx] = odp_buf_to_hdr(buf[idx]); + buf_hdr[idx] = buf_hdl_to_hdr(buf[idx]); /* Prefetch newly allocated and soon to be used * buffer headers. */ odp_prefetch(buf_hdr[idx]); @@ -633,7 +599,7 @@ int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[],
if (buf_hdr) { for (i = 0; i < num_ch; i++) - buf_hdr[i] = odp_buf_to_hdr(buf[i]); + buf_hdr[i] = buf_hdl_to_hdr(buf[i]); }
return num_ch + num_deq; @@ -885,3 +851,13 @@ int odp_buffer_is_valid(odp_buffer_t buf)
return 1; } + +uint32_t pool_headroom(odp_pool_t pool) +{ + return pool_entry_from_hdl(pool)->headroom; +} + +uint32_t pool_tailroom(odp_pool_t pool) +{ + return pool_entry_from_hdl(pool)->tailroom; +} diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c index 6bf1629..43e212a 100644 --- a/platform/linux-generic/odp_queue.c +++ b/platform/linux-generic/odp_queue.c @@ -483,7 +483,7 @@ int odp_queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num) queue = queue_to_qentry(handle);
for (i = 0; i < num; i++) - buf_hdr[i] = odp_buf_to_hdr(odp_buffer_from_event(ev[i])); + buf_hdr[i] = buf_hdl_to_hdr(odp_buffer_from_event(ev[i]));
return num == 0 ? 0 : queue->s.enqueue_multi(queue, buf_hdr, num, SUSTAIN_ORDER); @@ -495,7 +495,7 @@ int odp_queue_enq(odp_queue_t handle, odp_event_t ev) queue_entry_t *queue;
queue = queue_to_qentry(handle); - buf_hdr = odp_buf_to_hdr(odp_buffer_from_event(ev)); + buf_hdr = buf_hdl_to_hdr(odp_buffer_from_event(ev));
/* No chains via this entry */ buf_hdr->link = NULL; diff --git a/platform/linux-generic/odp_schedule_ordered.c b/platform/linux-generic/odp_schedule_ordered.c index 8412183..5574faf 100644 --- a/platform/linux-generic/odp_schedule_ordered.c +++ b/platform/linux-generic/odp_schedule_ordered.c @@ -749,7 +749,7 @@ int release_order(void *origin_qe_ptr, uint64_t order, return -1; }
- placeholder_buf_hdr = odp_buf_to_hdr(placeholder_buf); + placeholder_buf_hdr = buf_hdl_to_hdr(placeholder_buf);
/* Copy info to placeholder and add it to the reorder queue */ placeholder_buf_hdr->origin_qe = origin_qe; @@ -805,7 +805,7 @@ void cache_order_info(uint32_t queue_index) uint32_t i; queue_entry_t *qe = get_qentry(queue_index); odp_event_t ev = sched_local.ev_stash[0]; - odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(odp_buffer_from_event(ev)); + odp_buffer_hdr_t *buf_hdr = buf_hdl_to_hdr(odp_buffer_from_event(ev));
sched_local.origin_qe = qe; sched_local.order = buf_hdr->order; diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 90ff1fe..53fec08 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -76,7 +76,7 @@ static _odp_atomic_flag_t locks[NUM_LOCKS]; /* Multiple locks per cache line! */
static odp_timeout_hdr_t *timeout_hdr_from_buf(odp_buffer_t buf) { - return (odp_timeout_hdr_t *)(void *)odp_buf_to_hdr(buf); + return (odp_timeout_hdr_t *)(void *)buf_hdl_to_hdr(buf); }
static odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo) diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c index 21d7542..28dd404 100644 --- a/platform/linux-generic/pktio/loop.c +++ b/platform/linux-generic/pktio/loop.c @@ -162,7 +162,7 @@ static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED, len = QUEUE_MULTI_MAX;
for (i = 0; i < len; ++i) { - hdr_tbl[i] = odp_buf_to_hdr(_odp_packet_to_buffer(pkt_tbl[i])); + hdr_tbl[i] = buf_hdl_to_hdr(_odp_packet_to_buffer(pkt_tbl[i])); bytes += odp_packet_len(pkt_tbl[i]); }
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c index c1cdf72..cf67741 100644 --- a/platform/linux-generic/pktio/netmap.c +++ b/platform/linux-generic/pktio/netmap.c @@ -346,8 +346,8 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
/* max frame len taking into account the l2-offset */ pkt_nm->max_frame_len = ODP_CONFIG_PACKET_BUF_LEN_MAX - - odp_buffer_pool_headroom(pool) - - odp_buffer_pool_tailroom(pool); + pool_headroom(pool) - + pool_tailroom(pool);
/* allow interface to be opened with or without the 'netmap:' prefix */ prefix = "netmap:"; diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c index bf4402a..666aae6 100644 --- a/platform/linux-generic/pktio/socket_mmap.c +++ b/platform/linux-generic/pktio/socket_mmap.c @@ -351,7 +351,7 @@ static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout) if (pool_hdl == ODP_POOL_INVALID) ODP_ABORT("Invalid pool handle\n");
- pool = odp_pool_to_entry(pool_hdl); + pool = pool_entry_from_hdl(pool_hdl);
/* Frame has to capture full packet which can fit to the pool block.*/ ring->req.tp_frame_size = (pool->data_size +
commit 48681b60b4ccc6fc39b6fd02baec82eefe300830 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:30 2016 +0200
linux-gen: pool: optimize buffer alloc
Round up global pool allocations to a burst size. Cache any extra buffers for future use. Prefetch buffer headers which were just allocated from the global pool and will be returned to the caller.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
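The allocation flow can be sketched as follows (hypothetical types and a stand-in global dequeue, not the actual implementation): serve the request from the thread-local cache first, round any global-pool dequeue up to a burst, and park the surplus in the cache:

#include <stdint.h>

#define BURST     32
#define CACHE_MAX 256

typedef struct {
	uint32_t num;
	uint32_t buf[CACHE_MAX];
} cache_sketch_t;

/* Hypothetical stand-in for the global ring dequeue. */
static uint32_t global_deq(uint32_t data[], uint32_t num)
{
	static uint32_t next;
	uint32_t i;

	for (i = 0; i < num; i++)
		data[i] = next++;
	return num;
}

static uint32_t alloc_burst(cache_sketch_t *cache, uint32_t out[],
			    uint32_t num)
{
	uint32_t from_cache = num < cache->num ? num : cache->num;
	uint32_t need = num - from_cache;
	uint32_t i, got, give, req;

	/* Take what the cache already holds (from the top of the stack). */
	for (i = 0; i < from_cache; i++)
		out[i] = cache->buf[cache->num - from_cache + i];
	cache->num -= from_cache;

	if (!need)
		return num;

	/* Round the global request up to a burst. */
	req = need > BURST ? need : BURST;
	{
		uint32_t tmp[req];

		got = global_deq(tmp, req);
		give = got < need ? got : need;

		for (i = 0; i < give; i++)
			out[from_cache + i] = tmp[i];

		/* Cache is empty at this point; park the surplus. */
		for (i = give; i < got; i++)
			cache->buf[cache->num++] = tmp[i];
	}

	return from_cache + give;
}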
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h index abe8591..64ba221 100644 --- a/platform/linux-generic/include/odp_buffer_internal.h +++ b/platform/linux-generic/include/odp_buffer_internal.h @@ -105,7 +105,8 @@ struct odp_buffer_hdr_t { };
/* Forward declarations */ -int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num); +int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], + odp_buffer_hdr_t *buf_hdr[], int num); void buffer_free_multi(const odp_buffer_t buf[], int num_free);
int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount); diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index 6df1c5b..6565a5d 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -80,14 +80,16 @@ static void packet_init(pool_t *pool, odp_packet_hdr_t *pkt_hdr, int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, odp_packet_t pkt[], int max_num) { - odp_packet_hdr_t *pkt_hdr; pool_t *pool = odp_pool_to_entry(pool_hdl); int num, i; + odp_packet_hdr_t *pkt_hdrs[max_num];
- num = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, max_num); + num = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, + (odp_buffer_hdr_t **)pkt_hdrs, max_num);
for (i = 0; i < num; i++) { - pkt_hdr = odp_packet_hdr(pkt[i]); + odp_packet_hdr_t *pkt_hdr = pkt_hdrs[i]; + packet_init(pool, pkt_hdr, len, 1 /* do parse */);
if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize) @@ -113,7 +115,7 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) if (odp_unlikely(len > pool->max_len)) return ODP_PACKET_INVALID;
- ret = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)&pkt, 1); + ret = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)&pkt, NULL, 1); if (ret != 1) return ODP_PACKET_INVALID;
@@ -134,6 +136,7 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, pool_t *pool = odp_pool_to_entry(pool_hdl); size_t pkt_size = len ? len : pool->data_size; int count, i; + odp_packet_hdr_t *pkt_hdrs[num];
if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; @@ -143,10 +146,11 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, if (odp_unlikely(len > pool->max_len)) return -1;
- count = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, num); + count = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, + (odp_buffer_hdr_t **)pkt_hdrs, num);
for (i = 0; i < count; ++i) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt[i]); + odp_packet_hdr_t *pkt_hdr = pkt_hdrs[i];
packet_init(pool, pkt_hdr, pkt_size, 0 /* do not parse */); if (len == 0) diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index a2e5d54..7dc0938 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -562,14 +562,14 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info) return 0; }
-int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int max_num) +int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], + odp_buffer_hdr_t *buf_hdr[], int max_num) { pool_t *pool; ring_t *ring; - uint32_t mask; - int i; + uint32_t mask, i; pool_cache_t *cache; - uint32_t cache_num; + uint32_t cache_num, num_ch, num_deq, burst;
pool = pool_entry_from_hdl(pool_hdl); ring = &pool->ring.hdr; @@ -577,28 +577,66 @@ int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int max_num) cache = local.cache[_odp_typeval(pool_hdl)];
cache_num = cache->num; + num_ch = max_num; + num_deq = 0; + burst = CACHE_BURST;
- if (odp_likely((int)cache_num >= max_num)) { - for (i = 0; i < max_num; i++) - buf[i] = cache->buf[cache_num - max_num + i]; + if (odp_unlikely(cache_num < (uint32_t)max_num)) { + /* Cache does not have enough buffers */ + num_ch = cache_num; + num_deq = max_num - cache_num;
- cache->num = cache_num - max_num; - return max_num; + if (odp_unlikely(num_deq > CACHE_BURST)) + burst = num_deq; }
- { + /* Get buffers from the cache */ + for (i = 0; i < num_ch; i++) + buf[i] = cache->buf[cache_num - num_ch + i]; + + /* If needed, get more from the global pool */ + if (odp_unlikely(num_deq)) { /* Temporary copy needed since odp_buffer_t is uintptr_t * and not uint32_t. */ - int num; - uint32_t data[max_num]; + uint32_t data[burst];
- num = ring_deq_multi(ring, mask, data, max_num); + burst = ring_deq_multi(ring, mask, data, burst); + cache_num = burst - num_deq;
- for (i = 0; i < num; i++) - buf[i] = (odp_buffer_t)(uintptr_t)data[i]; + if (odp_unlikely(burst < num_deq)) { + num_deq = burst; + cache_num = 0; + } + + for (i = 0; i < num_deq; i++) { + uint32_t idx = num_ch + i; + + buf[idx] = (odp_buffer_t)(uintptr_t)data[i]; + + if (buf_hdr) { + buf_hdr[idx] = odp_buf_to_hdr(buf[idx]); + /* Prefetch newly allocated and soon to be used + * buffer headers. */ + odp_prefetch(buf_hdr[idx]); + } + } + + /* Cache extra buffers. Cache is currently empty. */ + for (i = 0; i < cache_num; i++) + cache->buf[i] = (odp_buffer_t) + (uintptr_t)data[num_deq + i]; + + cache->num = cache_num; + } else { + cache->num = cache_num - num_ch; + } + + if (buf_hdr) { + for (i = 0; i < num_ch; i++) + buf_hdr[i] = odp_buf_to_hdr(buf[i]); }
- return i; + return num_ch + num_deq; }
static inline void buffer_free_to_pool(uint32_t pool_id, @@ -694,7 +732,7 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl) odp_buffer_t buf; int ret;
- ret = buffer_alloc_multi(pool_hdl, &buf, 1); + ret = buffer_alloc_multi(pool_hdl, &buf, NULL, 1);
if (odp_likely(ret == 1)) return buf; @@ -704,7 +742,7 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num) { - return buffer_alloc_multi(pool_hdl, buf, num); + return buffer_alloc_multi(pool_hdl, buf, NULL, num); }
void odp_buffer_free(odp_buffer_t buf)
commit c8cf1d87783d4b4c628f219803b78731b8d4ade4 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:29 2016 +0200
linux-gen: pool: use ring multi enq and deq operations
Use multi enq and deq operations to optimize global pool access performance. Temporary uint32_t arrays are needed since handles are pointer-sized variables.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
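A small hypothetical sketch of why the temporary arrays are needed: the ring stores 32-bit values while buffer handles are pointer-sized, so bursts are converted on the way in and out (types are illustrative):

#include <stdint.h>

typedef uintptr_t buf_handle_t;   /* stand-in for odp_buffer_t */

static void handles_to_ring_words(const buf_handle_t buf[], uint32_t data[],
				  uint32_t num)
{
	uint32_t i;

	for (i = 0; i < num; i++)
		data[i] = (uint32_t)buf[i];
}

static void ring_words_to_handles(const uint32_t data[], buf_handle_t buf[],
				  uint32_t num)
{
	uint32_t i;

	for (i = 0; i < num; i++)
		buf[i] = (buf_handle_t)data[i];
}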
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 1286753..a2e5d54 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -586,15 +586,16 @@ int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int max_num) return max_num; }
- for (i = 0; i < max_num; i++) { - uint32_t data; + { + /* Temporary copy needed since odp_buffer_t is uintptr_t + * and not uint32_t. */ + int num; + uint32_t data[max_num];
- data = ring_deq(ring, mask); + num = ring_deq_multi(ring, mask, data, max_num);
- if (data == RING_EMPTY) - break; - - buf[i] = (odp_buffer_t)(uintptr_t)data; + for (i = 0; i < num; i++) + buf[i] = (odp_buffer_t)(uintptr_t)data[i]; }
return i; @@ -629,17 +630,24 @@ static inline void buffer_free_to_pool(uint32_t pool_id, cache_num = cache->num;
if (odp_unlikely((int)(CONFIG_POOL_CACHE_SIZE - cache_num) < num)) { + uint32_t index; int burst = CACHE_BURST;
if (odp_unlikely(num > CACHE_BURST)) burst = num;
- for (i = 0; i < burst; i++) { - uint32_t data, index; + { + /* Temporary copy needed since odp_buffer_t is + * uintptr_t and not uint32_t. */ + uint32_t data[burst]; + + index = cache_num - burst; + + for (i = 0; i < burst; i++) + data[i] = (uint32_t) + (uintptr_t)cache->buf[index + i];
- index = cache_num - burst + i; - data = (uint32_t)(uintptr_t)cache->buf[index]; - ring_enq(ring, mask, data); + ring_enq_multi(ring, mask, data, burst); }
cache_num -= burst;
commit 0948333e6bee32cb3de7e872ebb852bbed06e094 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:28 2016 +0200
linux-gen: ring: added multi enq and deq
Added multi-data versions of ring enqueue and dequeue operations.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h index 6a6291a..55fedeb 100644 --- a/platform/linux-generic/include/odp_ring_internal.h +++ b/platform/linux-generic/include/odp_ring_internal.h @@ -80,6 +80,45 @@ static inline uint32_t ring_deq(ring_t *ring, uint32_t mask) return data; }
+/* Dequeue multiple data from the ring head. Num is smaller than ring size. */ +static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask, + uint32_t data[], uint32_t num) +{ + uint32_t head, tail, new_head, i; + + head = odp_atomic_load_u32(&ring->r_head); + + /* Move reader head. This thread owns data at the new head. */ + do { + tail = odp_atomic_load_u32(&ring->w_tail); + + /* Ring is empty */ + if (head == tail) + return 0; + + /* Try to take all available */ + if ((tail - head) < num) + num = tail - head; + + new_head = head + num; + + } while (odp_unlikely(odp_atomic_cas_acq_u32(&ring->r_head, &head, + new_head) == 0)); + + /* Read queue index */ + for (i = 0; i < num; i++) + data[i] = ring->data[(head + 1 + i) & mask]; + + /* Wait until other readers have updated the tail */ + while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) != head)) + odp_cpu_pause(); + + /* Now update the reader tail */ + odp_atomic_store_rel_u32(&ring->r_tail, new_head); + + return num; +} + /* Enqueue data into the ring tail */ static inline void ring_enq(ring_t *ring, uint32_t mask, uint32_t data) { @@ -104,6 +143,32 @@ static inline void ring_enq(ring_t *ring, uint32_t mask, uint32_t data) odp_atomic_store_rel_u32(&ring->w_tail, new_head); }
+/* Enqueue multiple data into the ring tail. Num is smaller than ring size. */ +static inline void ring_enq_multi(ring_t *ring, uint32_t mask, uint32_t data[], + uint32_t num) +{ + uint32_t old_head, new_head, i; + + /* Reserve a slot in the ring for writing */ + old_head = odp_atomic_fetch_add_u32(&ring->w_head, num); + new_head = old_head + 1; + + /* Ring is full. Wait for the last reader to finish. */ + while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) == new_head)) + odp_cpu_pause(); + + /* Write data */ + for (i = 0; i < num; i++) + ring->data[(new_head + i) & mask] = data[i]; + + /* Wait until other writers have updated the tail */ + while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head)) + odp_cpu_pause(); + + /* Now update the writer tail */ + odp_atomic_store_rel_u32(&ring->w_tail, old_head + num); +} + #ifdef __cplusplus } #endif
commit 1220a970be53403d86cbdf0be97bad7d54cdc335 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:27 2016 +0200
linux-gen: pool: reimplement pool with ring
Used the ring data structure to implement the pool. Also, the buffer structure was simplified to enable a future driver interface. Every buffer includes a packet header, so each buffer can be used as a packet head or segment. Segmentation was disabled and the segment size was fixed to a large value (64 kB) to limit the number of modifications in the commit.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
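The buffer layout idea can be sketched with hypothetical minimal types (field names are illustrative, not the actual structures): every pool element carries the packet header up front, so any buffer can act as a packet head or segment:

#include <stdint.h>

typedef struct {
	int8_t   type;    /* buffer / packet / timeout    */
	uint32_t size;    /* max data size of the element */
	void    *data;    /* start of the data area       */
} buf_hdr_sketch_t;

typedef struct {
	buf_hdr_sketch_t buf_hdr;   /* common header always first    */
	uint32_t frame_len;         /* packet metadata, packets only */
	uint32_t headroom;
	uint32_t tailroom;
} pkt_hdr_sketch_t;

/* A packet element can be handled as a plain buffer by looking only at
 * its leading buf_hdr part. */
static inline buf_hdr_sketch_t *pkt_to_buf(pkt_hdr_sketch_t *pkt)
{
	return &pkt->buf_hdr;
}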
diff --git a/platform/linux-generic/include/odp/api/plat/pool_types.h b/platform/linux-generic/include/odp/api/plat/pool_types.h index 1ca8f02..4e39de5 100644 --- a/platform/linux-generic/include/odp/api/plat/pool_types.h +++ b/platform/linux-generic/include/odp/api/plat/pool_types.h @@ -39,12 +39,6 @@ typedef enum odp_pool_type_t { ODP_POOL_TIMEOUT = ODP_EVENT_TIMEOUT, } odp_pool_type_t;
-/** Get printable format of odp_pool_t */ -static inline uint64_t odp_pool_to_u64(odp_pool_t hdl) -{ - return _odp_pri(hdl); -} - /** * @} */ diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h index 2b1ab42..2f5eb88 100644 --- a/platform/linux-generic/include/odp_buffer_inlines.h +++ b/platform/linux-generic/include/odp_buffer_inlines.h @@ -18,43 +18,20 @@ extern "C" { #endif
#include <odp_buffer_internal.h> -#include <odp_pool_internal.h>
-static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t *hdr) -{ - odp_buffer_bits_t handle; - uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl); - struct pool_entry_s *pool = get_pool_entry(pool_id); +odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf); +void _odp_buffer_event_type_set(odp_buffer_t buf, int ev); +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf);
- handle.handle = 0; - handle.pool_id = pool_id; - handle.index = ((uint8_t *)hdr - pool->pool_mdata_addr) / - ODP_CACHE_LINE_SIZE; - handle.seg = 0; - - return handle.handle; -} +void *buffer_map(odp_buffer_hdr_t *buf, uint32_t offset, uint32_t *seglen, + uint32_t limit);
static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr) { return hdr->handle.handle; }
-static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) -{ - odp_buffer_bits_t handle; - uint32_t pool_id; - uint32_t index; - struct pool_entry_s *pool; - - handle.handle = buf; - pool_id = handle.pool_id; - index = handle.index; - pool = get_pool_entry(pool_id); - - return (odp_buffer_hdr_t *)(void *) - (pool->pool_mdata_addr + (index * ODP_CACHE_LINE_SIZE)); -} +odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf);
static inline uint32_t pool_id_from_buf(odp_buffer_t buf) { @@ -64,131 +41,6 @@ static inline uint32_t pool_id_from_buf(odp_buffer_t buf) return handle.pool_id; }
-static inline odp_buffer_hdr_t *validate_buf(odp_buffer_t buf) -{ - odp_buffer_bits_t handle; - odp_buffer_hdr_t *buf_hdr; - handle.handle = buf; - - /* For buffer handles, segment index must be 0 and pool id in range */ - if (handle.seg != 0 || handle.pool_id >= ODP_CONFIG_POOLS) - return NULL; - - pool_entry_t *pool = - odp_pool_to_entry(_odp_cast_scalar(odp_pool_t, - handle.pool_id)); - - /* If pool not created, handle is invalid */ - if (pool->s.pool_shm == ODP_SHM_INVALID) - return NULL; - - uint32_t buf_stride = pool->s.buf_stride / ODP_CACHE_LINE_SIZE; - - /* A valid buffer index must be on stride, and must be in range */ - if ((handle.index % buf_stride != 0) || - ((uint32_t)(handle.index / buf_stride) >= pool->s.params.buf.num)) - return NULL; - - buf_hdr = (odp_buffer_hdr_t *)(void *) - (pool->s.pool_mdata_addr + - (handle.index * ODP_CACHE_LINE_SIZE)); - - /* Handle is valid, so buffer is valid if it is allocated */ - return buf_hdr->allocator == ODP_FREEBUF ? NULL : buf_hdr; -} - -int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf); - -static inline void *buffer_map(odp_buffer_hdr_t *buf, - uint32_t offset, - uint32_t *seglen, - uint32_t limit) -{ - int seg_index; - int seg_offset; - - if (odp_likely(offset < buf->segsize)) { - seg_index = 0; - seg_offset = offset; - } else { - seg_index = offset / buf->segsize; - seg_offset = offset % buf->segsize; - } - if (seglen != NULL) { - uint32_t buf_left = limit - offset; - *seglen = seg_offset + buf_left <= buf->segsize ? - buf_left : buf->segsize - seg_offset; - } - - return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]); -} - -static inline odp_buffer_seg_t segment_next(odp_buffer_hdr_t *buf, - odp_buffer_seg_t seg) -{ - odp_buffer_bits_t seghandle; - seghandle.handle = (odp_buffer_t)seg; - - if (seg == ODP_SEGMENT_INVALID || - seghandle.prefix != buf->handle.prefix || - seghandle.seg >= buf->segcount - 1) - return ODP_SEGMENT_INVALID; - else { - seghandle.seg++; - return (odp_buffer_seg_t)seghandle.handle; - } -} - -static inline void *segment_map(odp_buffer_hdr_t *buf, - odp_buffer_seg_t seg, - uint32_t *seglen, - uint32_t limit, - uint32_t hr) -{ - uint32_t seg_offset, buf_left; - odp_buffer_bits_t seghandle; - uint8_t *seg_addr; - seghandle.handle = (odp_buffer_t)seg; - - if (seghandle.prefix != buf->handle.prefix || - seghandle.seg >= buf->segcount) - return NULL; - - seg_addr = (uint8_t *)buf->addr[seghandle.seg]; - seg_offset = seghandle.seg * buf->segsize; - limit += hr; - - /* Can't map this segment if it's nothing but headroom or tailroom */ - if (hr >= seg_offset + buf->segsize || seg_offset > limit) - return NULL; - - /* Adjust address & offset if this segment contains any headroom */ - if (hr > seg_offset) { - seg_addr += hr % buf->segsize; - seg_offset += hr % buf->segsize; - } - - /* Set seglen if caller is asking for it */ - if (seglen != NULL) { - buf_left = limit - seg_offset; - *seglen = buf_left < buf->segsize ? buf_left : - (seg_offset >= buf->segsize ? 
buf->segsize : - buf->segsize - seg_offset); - } - - return (void *)seg_addr; -} - -static inline odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf) -{ - return odp_buf_to_hdr(buf)->event_type; -} - -static inline void _odp_buffer_event_type_set(odp_buffer_t buf, int ev) -{ - odp_buf_to_hdr(buf)->event_type = ev; -} - #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h index 1c09cd3..abe8591 100644 --- a/platform/linux-generic/include/odp_buffer_internal.h +++ b/platform/linux-generic/include/odp_buffer_internal.h @@ -33,72 +33,19 @@ extern "C" { #include <odp_schedule_if.h> #include <stddef.h>
-#define ODP_BITSIZE(x) \ - ((x) <= 2 ? 1 : \ - ((x) <= 4 ? 2 : \ - ((x) <= 8 ? 3 : \ - ((x) <= 16 ? 4 : \ - ((x) <= 32 ? 5 : \ - ((x) <= 64 ? 6 : \ - ((x) <= 128 ? 7 : \ - ((x) <= 256 ? 8 : \ - ((x) <= 512 ? 9 : \ - ((x) <= 1024 ? 10 : \ - ((x) <= 2048 ? 11 : \ - ((x) <= 4096 ? 12 : \ - ((x) <= 8196 ? 13 : \ - ((x) <= 16384 ? 14 : \ - ((x) <= 32768 ? 15 : \ - ((x) <= 65536 ? 16 : \ - (0/0))))))))))))))))) - ODP_STATIC_ASSERT(ODP_CONFIG_PACKET_SEG_LEN_MIN >= 256, "ODP Segment size must be a minimum of 256 bytes");
-ODP_STATIC_ASSERT((ODP_CONFIG_PACKET_BUF_LEN_MAX % - ODP_CONFIG_PACKET_SEG_LEN_MIN) == 0, - "Packet max size must be a multiple of segment size"); - -#define ODP_BUFFER_MAX_SEG \ - (ODP_CONFIG_PACKET_BUF_LEN_MAX / ODP_CONFIG_PACKET_SEG_LEN_MIN) - -/* We can optimize storage of small raw buffers within metadata area */ -#define ODP_MAX_INLINE_BUF ((sizeof(void *)) * (ODP_BUFFER_MAX_SEG - 1)) - -#define ODP_BUFFER_POOL_BITS ODP_BITSIZE(ODP_CONFIG_POOLS) -#define ODP_BUFFER_SEG_BITS ODP_BITSIZE(ODP_BUFFER_MAX_SEG) -#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS - ODP_BUFFER_SEG_BITS) -#define ODP_BUFFER_PREFIX_BITS (ODP_BUFFER_POOL_BITS + ODP_BUFFER_INDEX_BITS) -#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) -#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) - -#define ODP_BUFFER_MAX_INDEX (ODP_BUFFER_MAX_BUFFERS - 2) -#define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1)
typedef union odp_buffer_bits_t { - odp_buffer_t handle; + odp_buffer_t handle; + union { - uint32_t u32; - struct { -#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN - uint32_t pool_id:ODP_BUFFER_POOL_BITS; - uint32_t index:ODP_BUFFER_INDEX_BITS; - uint32_t seg:ODP_BUFFER_SEG_BITS; -#else - uint32_t seg:ODP_BUFFER_SEG_BITS; - uint32_t index:ODP_BUFFER_INDEX_BITS; - uint32_t pool_id:ODP_BUFFER_POOL_BITS; -#endif - }; + uint32_t u32;
struct { -#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN - uint32_t prefix:ODP_BUFFER_PREFIX_BITS; - uint32_t pfxseg:ODP_BUFFER_SEG_BITS; -#else - uint32_t pfxseg:ODP_BUFFER_SEG_BITS; - uint32_t prefix:ODP_BUFFER_PREFIX_BITS; -#endif + uint32_t pool_id: 8; + uint32_t index: 24; }; }; } odp_buffer_bits_t; @@ -125,7 +72,7 @@ struct odp_buffer_hdr_t { uint32_t sustain:1; /* Sustain order */ }; } flags; - int16_t allocator; /* allocating thread id */ + int8_t type; /* buffer type */ odp_event_type_t event_type; /* for reuse as event */ uint32_t size; /* max data size */ @@ -139,7 +86,8 @@ struct odp_buffer_hdr_t { uint32_t uarea_size; /* size of user area */ uint32_t segcount; /* segment count */ uint32_t segsize; /* segment size */ - void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */ + /* block addrs */ + void *addr[ODP_CONFIG_PACKET_MAX_SEGS]; uint64_t order; /* sequence for ordered queues */ queue_entry_t *origin_qe; /* ordered queue origin */ union { @@ -149,39 +97,17 @@ struct odp_buffer_hdr_t { #ifdef _ODP_PKTIO_IPC /* ipc mapped process can not walk over pointers, * offset has to be used */ - uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG]; + uint64_t ipc_addr_offset[ODP_CONFIG_PACKET_MAX_SEGS]; #endif -}; - -/** @internal Compile time assert that the - * allocator field can handle any allocator id*/ -ODP_STATIC_ASSERT(INT16_MAX >= ODP_THREAD_COUNT_MAX, - "ODP_BUFFER_HDR_T__ALLOCATOR__SIZE_ERROR"); - -typedef struct odp_buffer_hdr_stride { - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_t))]; -} odp_buffer_hdr_stride;
-typedef struct odp_buf_blk_t { - struct odp_buf_blk_t *next; - struct odp_buf_blk_t *prev; -} odp_buf_blk_t; - -/* Raw buffer header */ -typedef struct { - odp_buffer_hdr_t buf_hdr; /* common buffer header */ -} odp_raw_buffer_hdr_t; - -/* Free buffer marker */ -#define ODP_FREEBUF -1 + /* Data or next header */ + uint8_t data[0]; +};
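The union above replaces the old pool/index/segment bit fields with a fixed 8-bit pool id plus a 24-bit buffer index. For illustration, a minimal stand-alone sketch of that layout (the ex_ names and the plain uint32_t handle are simplifications, not part of the patch):

#include <stdint.h>
#include <stdio.h>

typedef union {
        uint32_t u32;                 /* whole 32-bit handle */
        struct {
                uint32_t pool_id:8;   /* index into the pool table */
                uint32_t index:24;    /* buffer slot inside that pool */
        };
} ex_buffer_bits_t;

int main(void)
{
        ex_buffer_bits_t bits;

        bits.u32 = 0;
        bits.pool_id = 3;             /* e.g. the fourth pool */
        bits.index = 12345;           /* e.g. buffer number 12345 */

        printf("handle 0x%08x -> pool %u, index %u\n",
               bits.u32, bits.pool_id, bits.index);
        return 0;
}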
/* Forward declarations */ -odp_buffer_t buffer_alloc(odp_pool_t pool, size_t size); -int buffer_alloc_multi(odp_pool_t pool_hdl, size_t size, - odp_buffer_t buf[], int num); -void buffer_free(uint32_t pool_id, const odp_buffer_t buf); -void buffer_free_multi(uint32_t pool_id, - const odp_buffer_t buf[], int num_free); +int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num); +void buffer_free_multi(const odp_buffer_t buf[], int num_free); + int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount); void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount); int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount); diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h index dc2190d..8505c67 100644 --- a/platform/linux-generic/include/odp_classification_datamodel.h +++ b/platform/linux-generic/include/odp_classification_datamodel.h @@ -77,7 +77,7 @@ Class Of Service */ struct cos_s { queue_entry_t *queue; /* Associated Queue */ - pool_entry_t *pool; /* Associated Buffer pool */ + odp_pool_t pool; /* Associated Buffer pool */ union pmr_u *pmr[ODP_PMR_PER_COS_MAX]; /* Chained PMR */ union cos_u *linked_cos[ODP_PMR_PER_COS_MAX]; /* Chained CoS with PMR*/ uint32_t valid; /* validity Flag */ diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index e1bab20..e24d5ab 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -32,7 +32,7 @@ extern "C" { * This defines the minimum supported buffer alignment. Requests for values * below this will be rounded up to this value. */ -#define ODP_CONFIG_BUFFER_ALIGN_MIN 16 +#define ODP_CONFIG_BUFFER_ALIGN_MIN 64
/* * Maximum buffer alignment @@ -70,16 +70,7 @@ extern "C" { /* * Maximum number of segments per packet */ -#define ODP_CONFIG_PACKET_MAX_SEGS 6 - -/* - * Minimum packet segment length - * - * This defines the minimum packet segment buffer length in bytes. The user - * defined segment length (seg_len in odp_pool_param_t) will be rounded up into - * this value. - */ -#define ODP_CONFIG_PACKET_SEG_LEN_MIN 1598 +#define ODP_CONFIG_PACKET_MAX_SEGS 1
/* * Maximum packet segment length @@ -91,6 +82,15 @@ extern "C" { #define ODP_CONFIG_PACKET_SEG_LEN_MAX (64 * 1024)
/* + * Minimum packet segment length + * + * This defines the minimum packet segment buffer length in bytes. The user + * defined segment length (seg_len in odp_pool_param_t) will be rounded up into + * this value. + */ +#define ODP_CONFIG_PACKET_SEG_LEN_MIN ODP_CONFIG_PACKET_SEG_LEN_MAX + +/* * Maximum packet buffer length * * This defines the maximum number of bytes that can be stored into a packet @@ -102,7 +102,7 @@ extern "C" { * - The value MUST be an integral number of segments * - The value SHOULD be large enough to accommodate jumbo packets (9K) */ -#define ODP_CONFIG_PACKET_BUF_LEN_MAX (ODP_CONFIG_PACKET_SEG_LEN_MIN * 6) +#define ODP_CONFIG_PACKET_BUF_LEN_MAX ODP_CONFIG_PACKET_SEG_LEN_MAX
/* Maximum number of shared memory blocks. * @@ -133,6 +133,16 @@ extern "C" { */ #define CONFIG_BURST_SIZE 16
+/* + * Maximum number of events in a pool + */ +#define CONFIG_POOL_MAX_NUM (1 * 1024 * 1024) + +/* + * Maximum number of events in a thread local pool cache + */ +#define CONFIG_POOL_CACHE_SIZE 256 + #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h index b23ad9c..2cad71f 100644 --- a/platform/linux-generic/include/odp_packet_internal.h +++ b/platform/linux-generic/include/odp_packet_internal.h @@ -189,11 +189,10 @@ typedef struct { odp_time_t timestamp; /**< Timestamp value */
odp_crypto_generic_op_result_t op_result; /**< Result for crypto */ -} odp_packet_hdr_t;
-typedef struct odp_packet_hdr_stride { - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_t))]; -} odp_packet_hdr_stride; + /* Packet data storage */ + uint8_t data[0]; +} odp_packet_hdr_t;
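Both odp_buffer_hdr_t and odp_packet_hdr_t now end in a zero-length data[0] member, so metadata and payload share one contiguous block per buffer. A rough illustration using the equivalent standard C flexible-array form (the ex_ names are invented; the real block size math is in odp_pool.c further down):

#include <stdint.h>
#include <stdlib.h>

struct ex_hdr {
        uint32_t frame_len;           /* ...metadata fields... */
        uint32_t headroom;
        uint8_t  data[];              /* payload starts right after the header */
};

/* One allocation covers header + headroom + data area + tailroom */
static struct ex_hdr *ex_block_alloc(size_t room)
{
        return calloc(1, sizeof(struct ex_hdr) + room);
}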
/** * Return the packet header @@ -248,7 +247,8 @@ static inline int push_head_seg(odp_packet_hdr_t *pkt_hdr, size_t len) (len - pkt_hdr->headroom + pkt_hdr->buf_hdr.segsize - 1) / pkt_hdr->buf_hdr.segsize;
- if (pkt_hdr->buf_hdr.segcount + extrasegs > ODP_BUFFER_MAX_SEG || + if (pkt_hdr->buf_hdr.segcount + extrasegs > + ODP_CONFIG_PACKET_MAX_SEGS || seg_alloc_head(&pkt_hdr->buf_hdr, extrasegs)) return -1;
@@ -276,7 +276,8 @@ static inline int push_tail_seg(odp_packet_hdr_t *pkt_hdr, size_t len) (len - pkt_hdr->tailroom + pkt_hdr->buf_hdr.segsize - 1) / pkt_hdr->buf_hdr.segsize;
- if (pkt_hdr->buf_hdr.segcount + extrasegs > ODP_BUFFER_MAX_SEG || + if (pkt_hdr->buf_hdr.segcount + extrasegs > + ODP_CONFIG_PACKET_MAX_SEGS || seg_alloc_tail(&pkt_hdr->buf_hdr, extrasegs)) return -1;
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index ca59ade..278c553 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -18,240 +18,78 @@ extern "C" { #endif
-#include <odp/api/std_types.h> -#include <odp/api/align.h> -#include <odp_align_internal.h> -#include <odp/api/pool.h> -#include <odp_buffer_internal.h> -#include <odp/api/hints.h> -#include <odp_config_internal.h> -#include <odp/api/debug.h> #include <odp/api/shared_memory.h> -#include <odp/api/atomic.h> -#include <odp/api/thread.h> -#include <string.h> - -/** - * Buffer initialization routine prototype - * - * @note Routines of this type MAY be passed as part of the - * _odp_buffer_pool_init_t structure to be called whenever a - * buffer is allocated to initialize the user metadata - * associated with that buffer. - */ -typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg); +#include <odp/api/ticketlock.h>
-/** - * Buffer pool initialization parameters - * Used to communicate buffer pool initialization options. Internal for now. - */ -typedef struct _odp_buffer_pool_init_t { - size_t udata_size; /**< Size of user metadata for each buffer */ - _odp_buf_init_t *buf_init; /**< Buffer initialization routine to use */ - void *buf_init_arg; /**< Argument to be passed to buf_init() */ -} _odp_buffer_pool_init_t; /**< Type of buffer initialization struct */ - -#define POOL_MAX_LOCAL_CHUNKS 4 -#define POOL_CHUNK_SIZE (4 * CONFIG_BURST_SIZE) -#define POOL_MAX_LOCAL_BUFS (POOL_MAX_LOCAL_CHUNKS * POOL_CHUNK_SIZE) - -struct local_cache_s { - uint64_t bufallocs; /* Local buffer alloc count */ - uint64_t buffrees; /* Local buffer free count */ - - uint32_t num_buf; - odp_buffer_hdr_t *buf[POOL_MAX_LOCAL_BUFS]; -}; +#include <odp_buffer_internal.h> +#include <odp_config_internal.h> +#include <odp_ring_internal.h>
-/* Local cache for buffer alloc/free acceleration */ -typedef struct local_cache_t { - union { - struct local_cache_s s; +typedef struct pool_cache_t { + uint32_t num;
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP( - sizeof(struct local_cache_s))]; - }; -} local_cache_t; + odp_buffer_t buf[CONFIG_POOL_CACHE_SIZE];
-#include <odp/api/plat/ticketlock_inlines.h> -#define POOL_LOCK(a) _odp_ticketlock_lock(a) -#define POOL_UNLOCK(a) _odp_ticketlock_unlock(a) -#define POOL_LOCK_INIT(a) odp_ticketlock_init(a) +} pool_cache_t ODP_ALIGNED_CACHE;
-/** - * ODP Pool stats - Maintain some useful stats regarding pool utilization - */ +/* Buffer header ring */ typedef struct { - odp_atomic_u64_t bufallocs; /**< Count of successful buf allocs */ - odp_atomic_u64_t buffrees; /**< Count of successful buf frees */ - odp_atomic_u64_t blkallocs; /**< Count of successful blk allocs */ - odp_atomic_u64_t blkfrees; /**< Count of successful blk frees */ - odp_atomic_u64_t bufempty; /**< Count of unsuccessful buf allocs */ - odp_atomic_u64_t blkempty; /**< Count of unsuccessful blk allocs */ - odp_atomic_u64_t buf_high_wm_count; /**< Count of high buf wm conditions */ - odp_atomic_u64_t buf_low_wm_count; /**< Count of low buf wm conditions */ - odp_atomic_u64_t blk_high_wm_count; /**< Count of high blk wm conditions */ - odp_atomic_u64_t blk_low_wm_count; /**< Count of low blk wm conditions */ -} _odp_pool_stats_t; - -struct pool_entry_s { - odp_ticketlock_t lock ODP_ALIGNED_CACHE; - odp_ticketlock_t buf_lock; - odp_ticketlock_t blk_lock; - - char name[ODP_POOL_NAME_LEN]; - odp_pool_param_t params; - uint32_t udata_size; - odp_pool_t pool_hdl; - uint32_t pool_id; - odp_shm_t pool_shm; - union { - uint32_t all; - struct { - uint32_t has_name:1; - uint32_t user_supplied_shm:1; - uint32_t unsegmented:1; - uint32_t zeroized:1; - uint32_t predefined:1; - }; - } flags; - uint32_t quiesced; - uint32_t buf_low_wm_assert; - uint32_t blk_low_wm_assert; - uint8_t *pool_base_addr; - uint8_t *pool_mdata_addr; - size_t pool_size; - uint32_t buf_align; - uint32_t buf_stride; - odp_buffer_hdr_t *buf_freelist; - void *blk_freelist; - odp_atomic_u32_t bufcount; - odp_atomic_u32_t blkcount; - _odp_pool_stats_t poolstats; - uint32_t buf_num; - uint32_t seg_size; - uint32_t blk_size; - uint32_t buf_high_wm; - uint32_t buf_low_wm; - uint32_t blk_high_wm; - uint32_t blk_low_wm; - uint32_t headroom; - uint32_t tailroom; - - local_cache_t local_cache[ODP_THREAD_COUNT_MAX] ODP_ALIGNED_CACHE; -}; - -typedef union pool_entry_u { - struct pool_entry_s s; - - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))]; -} pool_entry_t; - -extern void *pool_entry_ptr[]; - -#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1) -#define buffer_is_secure(buf) (buf->flags.zeroized) -#define pool_is_secure(pool) (pool->flags.zeroized) -#else -#define buffer_is_secure(buf) 0 -#define pool_is_secure(pool) 0 -#endif - -static inline void *get_blk(struct pool_entry_s *pool) -{ - void *myhead; - uint64_t blkcount; - - POOL_LOCK(&pool->blk_lock); - - myhead = pool->blk_freelist; - - if (odp_unlikely(myhead == NULL)) { - POOL_UNLOCK(&pool->blk_lock); - odp_atomic_inc_u64(&pool->poolstats.blkempty); - } else { - pool->blk_freelist = ((odp_buf_blk_t *)myhead)->next; - POOL_UNLOCK(&pool->blk_lock); - blkcount = odp_atomic_fetch_sub_u32(&pool->blkcount, 1) - 1; - - /* Check for low watermark condition */ - if (blkcount == pool->blk_low_wm && !pool->blk_low_wm_assert) { - pool->blk_low_wm_assert = 1; - odp_atomic_inc_u64(&pool->poolstats.blk_low_wm_count); - } - - odp_atomic_inc_u64(&pool->poolstats.blkallocs); - } - - return myhead; -} - -static inline void ret_blk(struct pool_entry_s *pool, void *block) + /* Ring header */ + ring_t hdr; + + /* Ring data: buffer handles */ + uint32_t buf[CONFIG_POOL_MAX_NUM]; + +} pool_ring_t ODP_ALIGNED_CACHE; + +typedef struct pool_t { + odp_ticketlock_t lock ODP_ALIGNED_CACHE; + + char name[ODP_POOL_NAME_LEN]; + odp_pool_param_t params; + odp_pool_t pool_hdl; + uint32_t pool_idx; + uint32_t ring_mask; + odp_shm_t shm; + odp_shm_t 
uarea_shm; + int reserved; + uint32_t num; + uint32_t align; + uint32_t headroom; + uint32_t tailroom; + uint32_t data_size; + uint32_t max_len; + uint32_t max_seg_len; + uint32_t uarea_size; + uint32_t block_size; + uint32_t shm_size; + uint32_t uarea_shm_size; + uint8_t *base_addr; + uint8_t *uarea_base_addr; + + pool_cache_t local_cache[ODP_THREAD_COUNT_MAX]; + + pool_ring_t ring; + +} pool_t; + +pool_t *pool_entry(uint32_t pool_idx); + +static inline pool_t *odp_pool_to_entry(odp_pool_t pool_hdl) { - uint64_t blkcount; - - POOL_LOCK(&pool->blk_lock); - - ((odp_buf_blk_t *)block)->next = pool->blk_freelist; - pool->blk_freelist = block; - - POOL_UNLOCK(&pool->blk_lock); - - blkcount = odp_atomic_fetch_add_u32(&pool->blkcount, 1); - - /* Check if low watermark condition should be deasserted */ - if (blkcount == pool->blk_high_wm && pool->blk_low_wm_assert) { - pool->blk_low_wm_assert = 0; - odp_atomic_inc_u64(&pool->poolstats.blk_high_wm_count); - } - - odp_atomic_inc_u64(&pool->poolstats.blkfrees); -} - -static inline odp_pool_t pool_index_to_handle(uint32_t pool_id) -{ - return _odp_cast_scalar(odp_pool_t, pool_id); -} - -static inline uint32_t pool_handle_to_index(odp_pool_t pool_hdl) -{ - return _odp_typeval(pool_hdl); -} - -static inline void *get_pool_entry(uint32_t pool_id) -{ - return pool_entry_ptr[pool_id]; -} - -static inline pool_entry_t *odp_pool_to_entry(odp_pool_t pool) -{ - return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool)); -} - -static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf) -{ - return odp_pool_to_entry(buf->pool_hdl); -} - -static inline uint32_t odp_buffer_pool_segment_size(odp_pool_t pool) -{ - return odp_pool_to_entry(pool)->s.seg_size; + return pool_entry(_odp_typeval(pool_hdl)); }
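A quick sizing note on the structures above: the ring data array holds CONFIG_POOL_MAX_NUM (1 * 1024 * 1024) uint32_t handles, i.e. 1,048,576 x 4 bytes = 4 MiB per pool_ring_t, while each of the ODP_THREAD_COUNT_MAX per-thread caches holds at most CONFIG_POOL_CACHE_SIZE (256) handles.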
static inline uint32_t odp_buffer_pool_headroom(odp_pool_t pool) { - return odp_pool_to_entry(pool)->s.headroom; + return odp_pool_to_entry(pool)->headroom; }
static inline uint32_t odp_buffer_pool_tailroom(odp_pool_t pool) { - return odp_pool_to_entry(pool)->s.tailroom; + return odp_pool_to_entry(pool)->tailroom; }
-odp_pool_t _pool_create(const char *name, - odp_pool_param_t *params, - uint32_t shmflags); - #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h index b1cd73f..91b12c5 100644 --- a/platform/linux-generic/include/odp_timer_internal.h +++ b/platform/linux-generic/include/odp_timer_internal.h @@ -35,8 +35,4 @@ typedef struct { odp_timer_t timer; } odp_timeout_hdr_t;
-typedef struct odp_timeout_hdr_stride { - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_timeout_hdr_t))]; -} odp_timeout_hdr_stride; - #endif diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c index ce2fdba..0ddaf95 100644 --- a/platform/linux-generic/odp_buffer.c +++ b/platform/linux-generic/odp_buffer.c @@ -31,7 +31,6 @@ void *odp_buffer_addr(odp_buffer_t buf) return hdr->addr[0]; }
- uint32_t odp_buffer_size(odp_buffer_t buf) { odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf); @@ -39,12 +38,6 @@ uint32_t odp_buffer_size(odp_buffer_t buf) return hdr->size; }
-int odp_buffer_is_valid(odp_buffer_t buf) -{ - return validate_buf(buf) != NULL; -} - - int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) { odp_buffer_hdr_t *hdr; @@ -72,7 +65,6 @@ int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) return len; }
- void odp_buffer_print(odp_buffer_t buf) { int max_len = 512; diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c index de72cfb..50a7e54 100644 --- a/platform/linux-generic/odp_classification.c +++ b/platform/linux-generic/odp_classification.c @@ -16,7 +16,6 @@ #include <odp_classification_datamodel.h> #include <odp_classification_inlines.h> #include <odp_classification_internal.h> -#include <odp_pool_internal.h> #include <odp/api/shared_memory.h> #include <protocols/eth.h> #include <protocols/ip.h> @@ -159,7 +158,6 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param) { int i, j; queue_entry_t *queue; - pool_entry_t *pool; odp_cls_drop_t drop_policy;
/* Packets are dropped if Queue or Pool is invalid*/ @@ -168,11 +166,6 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param) else queue = queue_to_qentry(param->queue);
- if (param->pool == ODP_POOL_INVALID) - pool = NULL; - else - pool = odp_pool_to_entry(param->pool); - drop_policy = param->drop_policy;
for (i = 0; i < ODP_COS_MAX_ENTRY; i++) { @@ -191,7 +184,7 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param) cos_tbl->cos_entry[i].s.linked_cos[j] = NULL; } cos_tbl->cos_entry[i].s.queue = queue; - cos_tbl->cos_entry[i].s.pool = pool; + cos_tbl->cos_entry[i].s.pool = param->pool; cos_tbl->cos_entry[i].s.flow_set = 0; cos_tbl->cos_entry[i].s.headroom = 0; cos_tbl->cos_entry[i].s.valid = 1; @@ -555,7 +548,7 @@ odp_pmr_t odp_cls_pmr_create(const odp_pmr_param_t *terms, int num_terms, return id; }
-int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool_id) +int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool) { cos_t *cos;
@@ -565,10 +558,7 @@ int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool_id) return -1; }
- if (pool_id == ODP_POOL_INVALID) - cos->s.pool = NULL; - else - cos->s.pool = odp_pool_to_entry(pool_id); + cos->s.pool = pool;
return 0; } @@ -583,10 +573,7 @@ odp_pool_t odp_cls_cos_pool(odp_cos_t cos_id) return ODP_POOL_INVALID; }
- if (!cos->s.pool) - return ODP_POOL_INVALID; - - return cos->s.pool->s.pool_hdl; + return cos->s.pool; }
int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr) @@ -832,10 +819,10 @@ int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base, if (cos == NULL) return -EINVAL;
- if (cos->s.queue == NULL || cos->s.pool == NULL) + if (cos->s.queue == NULL || cos->s.pool == ODP_POOL_INVALID) return -EFAULT;
- *pool = cos->s.pool->s.pool_hdl; + *pool = cos->s.pool; pkt_hdr->p.input_flags.dst_queue = 1; pkt_hdr->dst_queue = cos->s.queue->s.handle;
diff --git a/platform/linux-generic/odp_crypto.c b/platform/linux-generic/odp_crypto.c index 9e09d42..3ebabb7 100644 --- a/platform/linux-generic/odp_crypto.c +++ b/platform/linux-generic/odp_crypto.c @@ -40,7 +40,9 @@ static odp_crypto_global_t *global; static odp_crypto_generic_op_result_t *get_op_result_from_event(odp_event_t ev) { - return &(odp_packet_hdr(odp_packet_from_event(ev))->op_result); + odp_packet_hdr_t *hdr = odp_packet_hdr(odp_packet_from_event(ev)); + + return &hdr->op_result; }
static diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index c2b26fd..6df1c5b 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -48,7 +48,7 @@ void packet_parse_reset(odp_packet_hdr_t *pkt_hdr) /** * Initialize packet */ -static void packet_init(pool_entry_t *pool, odp_packet_hdr_t *pkt_hdr, +static void packet_init(pool_t *pool, odp_packet_hdr_t *pkt_hdr, size_t size, int parse) { pkt_hdr->p.parsed_layers = LAYER_NONE; @@ -71,10 +71,8 @@ static void packet_init(pool_entry_t *pool, odp_packet_hdr_t *pkt_hdr, * segment occupied by the allocated length. */ pkt_hdr->frame_len = size; - pkt_hdr->headroom = pool->s.headroom; - pkt_hdr->tailroom = - (pool->s.seg_size * pkt_hdr->buf_hdr.segcount) - - (pool->s.headroom + size); + pkt_hdr->headroom = pool->headroom; + pkt_hdr->tailroom = pool->data_size - size + pool->tailroom;
pkt_hdr->input = ODP_PKTIO_INVALID; } @@ -83,10 +81,10 @@ int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, odp_packet_t pkt[], int max_num) { odp_packet_hdr_t *pkt_hdr; - pool_entry_t *pool = odp_pool_to_entry(pool_hdl); + pool_t *pool = odp_pool_to_entry(pool_hdl); int num, i;
- num = buffer_alloc_multi(pool_hdl, len, (odp_buffer_t *)pkt, max_num); + num = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, max_num);
for (i = 0; i < num; i++) { pkt_hdr = odp_packet_hdr(pkt[i]); @@ -101,18 +99,22 @@ int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) { - pool_entry_t *pool = odp_pool_to_entry(pool_hdl); - size_t pkt_size = len ? len : pool->s.params.buf.size; + pool_t *pool = odp_pool_to_entry(pool_hdl); + size_t pkt_size = len ? len : pool->data_size; odp_packet_t pkt; odp_packet_hdr_t *pkt_hdr; + int ret;
- if (pool->s.params.type != ODP_POOL_PACKET) { + if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; return ODP_PACKET_INVALID; }
- pkt = (odp_packet_t)buffer_alloc(pool_hdl, pkt_size); - if (pkt == ODP_PACKET_INVALID) + if (odp_unlikely(len > pool->max_len)) + return ODP_PACKET_INVALID; + + ret = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)&pkt, 1); + if (ret != 1) return ODP_PACKET_INVALID;
pkt_hdr = odp_packet_hdr(pkt); @@ -129,17 +131,19 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, odp_packet_t pkt[], int num) { - pool_entry_t *pool = odp_pool_to_entry(pool_hdl); - size_t pkt_size = len ? len : pool->s.params.buf.size; + pool_t *pool = odp_pool_to_entry(pool_hdl); + size_t pkt_size = len ? len : pool->data_size; int count, i;
- if (pool->s.params.type != ODP_POOL_PACKET) { + if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; return -1; }
- count = buffer_alloc_multi(pool_hdl, pkt_size, - (odp_buffer_t *)pkt, num); + if (odp_unlikely(len > pool->max_len)) + return -1; + + count = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, num);
for (i = 0; i < count; ++i) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt[i]); @@ -157,25 +161,20 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
void odp_packet_free(odp_packet_t pkt) { - uint32_t pool_id = pool_id_from_buf((odp_buffer_t)pkt); - - buffer_free(pool_id, (odp_buffer_t)pkt); + buffer_free_multi((odp_buffer_t *)&pkt, 1); }
void odp_packet_free_multi(const odp_packet_t pkt[], int num) { - uint32_t pool_id = pool_id_from_buf((odp_buffer_t)pkt[0]); - - buffer_free_multi(pool_id, (const odp_buffer_t * const)pkt, num); + buffer_free_multi((const odp_buffer_t * const)pkt, num); }
int odp_packet_reset(odp_packet_t pkt, uint32_t len) { odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt); - pool_entry_t *pool = odp_buf_to_pool(&pkt_hdr->buf_hdr); - uint32_t totsize = pool->s.headroom + len + pool->s.tailroom; + pool_t *pool = odp_pool_to_entry(pkt_hdr->buf_hdr.pool_hdl);
- if (totsize > pkt_hdr->buf_hdr.size) + if (len > pool->headroom + pool->data_size + pool->tailroom) return -1;
packet_init(pool, pkt_hdr, len, 0); @@ -381,14 +380,8 @@ void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len, odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); void *addr = packet_map(pkt_hdr, offset, len);
- if (addr != NULL && seg != NULL) { - odp_buffer_bits_t seghandle; - - seghandle.handle = (odp_buffer_t)pkt; - seghandle.seg = (pkt_hdr->headroom + offset) / - pkt_hdr->buf_hdr.segsize; - *seg = (odp_packet_seg_t)seghandle.handle; - } + if (addr != NULL && seg != NULL) + *seg = (odp_packet_seg_t)pkt;
return addr; } @@ -581,20 +574,19 @@ odp_packet_seg_t odp_packet_first_seg(odp_packet_t pkt)
odp_packet_seg_t odp_packet_last_seg(odp_packet_t pkt) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); - odp_buffer_bits_t seghandle; + (void)pkt;
- seghandle.handle = (odp_buffer_t)pkt; - seghandle.seg = pkt_hdr->buf_hdr.segcount - 1; - return (odp_packet_seg_t)seghandle.handle; + /* Only one segment */ + return (odp_packet_seg_t)pkt; }
odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt, odp_packet_seg_t seg) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); + (void)pkt; + (void)seg;
- return (odp_packet_seg_t)segment_next(&pkt_hdr->buf_hdr, - (odp_buffer_seg_t)seg); + /* Only one segment */ + return ODP_PACKET_SEG_INVALID; }
/* @@ -606,21 +598,18 @@ odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt, odp_packet_seg_t seg)
void *odp_packet_seg_data(odp_packet_t pkt, odp_packet_seg_t seg) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); + (void)seg;
- return segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg, NULL, - pkt_hdr->frame_len, pkt_hdr->headroom); + /* Only one segment */ + return odp_packet_data(pkt); }
uint32_t odp_packet_seg_data_len(odp_packet_t pkt, odp_packet_seg_t seg) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); - uint32_t seglen = 0; + (void)seg;
- segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg, &seglen, - pkt_hdr->frame_len, pkt_hdr->headroom); - - return seglen; + /* Only one segment */ + return odp_packet_seg_len(pkt); }
/* @@ -960,9 +949,13 @@ void odp_packet_print(odp_packet_t pkt)
int odp_packet_is_valid(odp_packet_t pkt) { - odp_buffer_hdr_t *buf = validate_buf((odp_buffer_t)pkt); + if (odp_buffer_is_valid((odp_buffer_t)pkt) == 0) + return 0; + + if (odp_event_type(odp_packet_to_event(pkt)) != ODP_EVENT_PACKET) + return 0;
- return (buf != NULL && buf->type == ODP_EVENT_PACKET); + return 1; }
/* diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 415c9fa..1286753 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -4,77 +4,71 @@ * SPDX-License-Identifier: BSD-3-Clause */
-#include <odp/api/std_types.h> #include <odp/api/pool.h> -#include <odp_buffer_internal.h> -#include <odp_pool_internal.h> -#include <odp_buffer_inlines.h> -#include <odp_packet_internal.h> -#include <odp_timer_internal.h> -#include <odp_align_internal.h> #include <odp/api/shared_memory.h> #include <odp/api/align.h> +#include <odp/api/ticketlock.h> + +#include <odp_pool_internal.h> #include <odp_internal.h> +#include <odp_buffer_inlines.h> +#include <odp_packet_internal.h> #include <odp_config_internal.h> -#include <odp/api/hints.h> -#include <odp/api/thread.h> #include <odp_debug_internal.h> +#include <odp_ring_internal.h>
#include <string.h> -#include <stdlib.h> +#include <stdio.h> #include <inttypes.h>
-#if ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS -#error ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS -#endif - - -typedef union buffer_type_any_u { - odp_buffer_hdr_t buf; - odp_packet_hdr_t pkt; - odp_timeout_hdr_t tmo; -} odp_anybuf_t; +#include <odp/api/plat/ticketlock_inlines.h> +#define LOCK(a) _odp_ticketlock_lock(a) +#define UNLOCK(a) _odp_ticketlock_unlock(a) +#define LOCK_INIT(a) odp_ticketlock_init(a)
-/* Any buffer type header */ -typedef struct { - union buffer_type_any_u any_hdr; /* any buffer type */ -} odp_any_buffer_hdr_t; - -typedef struct odp_any_hdr_stride { - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))]; -} odp_any_hdr_stride; +#define CACHE_BURST 32 +#define RING_SIZE_MIN (2 * CACHE_BURST)
+ODP_STATIC_ASSERT(CONFIG_POOL_CACHE_SIZE > (2 * CACHE_BURST), + "cache_burst_size_too_large_compared_to_cache_size");
typedef struct pool_table_t { - pool_entry_t pool[ODP_CONFIG_POOLS]; + pool_t pool[ODP_CONFIG_POOLS]; + odp_shm_t shm; } pool_table_t;
- -/* The pool table */ -static pool_table_t *pool_tbl; -static const char SHM_DEFAULT_NAME[] = "odp_buffer_pools"; - -/* Pool entry pointers (for inlining) */ -void *pool_entry_ptr[ODP_CONFIG_POOLS]; - /* Thread local variables */ typedef struct pool_local_t { - local_cache_t *cache[ODP_CONFIG_POOLS]; + pool_cache_t *cache[ODP_CONFIG_POOLS]; int thr_id; } pool_local_t;
+static pool_table_t *pool_tbl; static __thread pool_local_t local;
-static void flush_cache(local_cache_t *buf_cache, struct pool_entry_s *pool); +static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx) +{ + return _odp_cast_scalar(odp_pool_t, pool_idx); +} + +pool_t *pool_entry(uint32_t pool_idx) +{ + return &pool_tbl->pool[pool_idx]; +} + +static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl) +{ + return &pool_tbl->pool[_odp_typeval(pool_hdl)]; +}
int odp_pool_init_global(void) { uint32_t i; odp_shm_t shm;
- shm = odp_shm_reserve(SHM_DEFAULT_NAME, + shm = odp_shm_reserve("_odp_pool_table", sizeof(pool_table_t), - sizeof(pool_entry_t), 0); + ODP_CACHE_LINE_SIZE, 0);
pool_tbl = odp_shm_addr(shm);
@@ -82,1079 +76,766 @@ int odp_pool_init_global(void) return -1;
memset(pool_tbl, 0, sizeof(pool_table_t)); + pool_tbl->shm = shm;
for (i = 0; i < ODP_CONFIG_POOLS; i++) { - /* init locks */ - pool_entry_t *pool = &pool_tbl->pool[i]; - POOL_LOCK_INIT(&pool->s.lock); - POOL_LOCK_INIT(&pool->s.buf_lock); - POOL_LOCK_INIT(&pool->s.blk_lock); - pool->s.pool_hdl = pool_index_to_handle(i); - pool->s.pool_id = i; - pool_entry_ptr[i] = pool; - odp_atomic_init_u32(&pool->s.bufcount, 0); - odp_atomic_init_u32(&pool->s.blkcount, 0); - - /* Initialize pool statistics counters */ - odp_atomic_init_u64(&pool->s.poolstats.bufallocs, 0); - odp_atomic_init_u64(&pool->s.poolstats.buffrees, 0); - odp_atomic_init_u64(&pool->s.poolstats.blkallocs, 0); - odp_atomic_init_u64(&pool->s.poolstats.blkfrees, 0); - odp_atomic_init_u64(&pool->s.poolstats.bufempty, 0); - odp_atomic_init_u64(&pool->s.poolstats.blkempty, 0); - odp_atomic_init_u64(&pool->s.poolstats.buf_high_wm_count, 0); - odp_atomic_init_u64(&pool->s.poolstats.buf_low_wm_count, 0); - odp_atomic_init_u64(&pool->s.poolstats.blk_high_wm_count, 0); - odp_atomic_init_u64(&pool->s.poolstats.blk_low_wm_count, 0); + pool_t *pool = pool_entry(i); + + LOCK_INIT(&pool->lock); + pool->pool_hdl = pool_index_to_handle(i); + pool->pool_idx = i; }
ODP_DBG("\nPool init global\n"); - ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s)); - ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t)); ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t)); + ODP_DBG(" odp_packet_hdr_t size %zu\n", sizeof(odp_packet_hdr_t)); ODP_DBG("\n"); return 0; }
-int odp_pool_init_local(void) -{ - pool_entry_t *pool; - int i; - int thr_id = odp_thread_id(); - - memset(&local, 0, sizeof(pool_local_t)); - - for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool = get_pool_entry(i); - local.cache[i] = &pool->s.local_cache[thr_id]; - local.cache[i]->s.num_buf = 0; - } - - local.thr_id = thr_id; - return 0; -} - int odp_pool_term_global(void) { int i; - pool_entry_t *pool; + pool_t *pool; int ret = 0; int rc = 0;
for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool = get_pool_entry(i); + pool = pool_entry(i);
- POOL_LOCK(&pool->s.lock); - if (pool->s.pool_shm != ODP_SHM_INVALID) { - ODP_ERR("Not destroyed pool: %s\n", pool->s.name); + LOCK(&pool->lock); + if (pool->reserved) { + ODP_ERR("Not destroyed pool: %s\n", pool->name); rc = -1; } - POOL_UNLOCK(&pool->s.lock); + UNLOCK(&pool->lock); }
- ret = odp_shm_free(odp_shm_lookup(SHM_DEFAULT_NAME)); + ret = odp_shm_free(pool_tbl->shm); if (ret < 0) { - ODP_ERR("shm free failed for %s", SHM_DEFAULT_NAME); + ODP_ERR("shm free failed"); rc = -1; }
return rc; }
-int odp_pool_term_local(void) +int odp_pool_init_local(void) { + pool_t *pool; int i; + int thr_id = odp_thread_id();
- for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool_entry_t *pool = get_pool_entry(i); + memset(&local, 0, sizeof(pool_local_t));
- flush_cache(local.cache[i], &pool->s); + for (i = 0; i < ODP_CONFIG_POOLS; i++) { + pool = pool_entry(i); + local.cache[i] = &pool->local_cache[thr_id]; + local.cache[i]->num = 0; }
+ local.thr_id = thr_id; return 0; }
-int odp_pool_capability(odp_pool_capability_t *capa) +static void flush_cache(pool_cache_t *cache, pool_t *pool) { - memset(capa, 0, sizeof(odp_pool_capability_t)); + ring_t *ring; + uint32_t mask; + uint32_t cache_num, i, data;
- capa->max_pools = ODP_CONFIG_POOLS; + ring = &pool->ring.hdr; + mask = pool->ring_mask; + cache_num = cache->num;
- /* Buffer pools */ - capa->buf.max_pools = ODP_CONFIG_POOLS; - capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX; - capa->buf.max_size = 0; - capa->buf.max_num = 0; + for (i = 0; i < cache_num; i++) { + data = (uint32_t)(uintptr_t)cache->buf[i]; + ring_enq(ring, mask, data); + }
- /* Packet pools */ - capa->pkt.max_pools = ODP_CONFIG_POOLS; - capa->pkt.max_len = ODP_CONFIG_PACKET_MAX_SEGS * - ODP_CONFIG_PACKET_SEG_LEN_MIN; - capa->pkt.max_num = 0; - capa->pkt.min_headroom = ODP_CONFIG_PACKET_HEADROOM; - capa->pkt.min_tailroom = ODP_CONFIG_PACKET_TAILROOM; - capa->pkt.max_segs_per_pkt = ODP_CONFIG_PACKET_MAX_SEGS; - capa->pkt.min_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MIN; - capa->pkt.max_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MAX; - capa->pkt.max_uarea_size = 0; + cache->num = 0; +}
- /* Timeout pools */ - capa->tmo.max_pools = ODP_CONFIG_POOLS; - capa->tmo.max_num = 0; +int odp_pool_term_local(void) +{ + int i; + + for (i = 0; i < ODP_CONFIG_POOLS; i++) { + pool_t *pool = pool_entry(i); + + flush_cache(local.cache[i], pool); + }
return 0; }
-static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) +static pool_t *reserve_pool(void) { - odp_buffer_hdr_t *myhead; - - POOL_LOCK(&pool->buf_lock); - - myhead = pool->buf_freelist; + int i; + pool_t *pool;
- if (odp_unlikely(myhead == NULL)) { - POOL_UNLOCK(&pool->buf_lock); - odp_atomic_inc_u64(&pool->poolstats.bufempty); - } else { - pool->buf_freelist = myhead->next; - POOL_UNLOCK(&pool->buf_lock); + for (i = 0; i < ODP_CONFIG_POOLS; i++) { + pool = pool_entry(i);
- odp_atomic_fetch_sub_u32(&pool->bufcount, 1); - odp_atomic_inc_u64(&pool->poolstats.bufallocs); + LOCK(&pool->lock); + if (pool->reserved == 0) { + pool->reserved = 1; + UNLOCK(&pool->lock); + return pool; + } + UNLOCK(&pool->lock); }
- return (void *)myhead; + return NULL; }
-static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf) +static odp_buffer_t form_buffer_handle(uint32_t pool_idx, uint32_t buffer_idx) { - if (!buf->flags.hdrdata && buf->type != ODP_EVENT_BUFFER) { - while (buf->segcount > 0) { - if (buffer_is_secure(buf) || pool_is_secure(pool)) - memset(buf->addr[buf->segcount - 1], - 0, buf->segsize); - ret_blk(pool, buf->addr[--buf->segcount]); - } - buf->size = 0; - } + odp_buffer_bits_t bits;
- buf->allocator = ODP_FREEBUF; /* Mark buffer free */ - POOL_LOCK(&pool->buf_lock); - buf->next = pool->buf_freelist; - pool->buf_freelist = buf; - POOL_UNLOCK(&pool->buf_lock); + bits.handle = 0; + bits.pool_id = pool_idx; + bits.index = buffer_idx;
- odp_atomic_fetch_add_u32(&pool->bufcount, 1); - odp_atomic_inc_u64(&pool->poolstats.buffrees); + return bits.handle; }
-/* - * Pool creation - */ -odp_pool_t _pool_create(const char *name, - odp_pool_param_t *params, - uint32_t shmflags) +static void init_buffers(pool_t *pool) { - odp_pool_t pool_hdl = ODP_POOL_INVALID; - pool_entry_t *pool; - uint32_t i, headroom = 0, tailroom = 0; - odp_shm_t shm; + uint32_t i; + odp_buffer_hdr_t *buf_hdr; + odp_packet_hdr_t *pkt_hdr; + odp_buffer_t buf_hdl; + void *addr; + void *uarea = NULL; + uint8_t *data; + uint32_t offset; + ring_t *ring; + uint32_t mask; + int type; + uint32_t size; + + ring = &pool->ring.hdr; + mask = pool->ring_mask; + type = pool->params.type; + + for (i = 0; i < pool->num; i++) { + addr = &pool->base_addr[i * pool->block_size]; + buf_hdr = addr; + pkt_hdr = addr; + + if (pool->uarea_size) + uarea = &pool->uarea_base_addr[i * pool->uarea_size]; + + data = buf_hdr->data; + + if (type == ODP_POOL_PACKET) + data = pkt_hdr->data; + + offset = pool->headroom; + + /* move to correct align */ + while (((uintptr_t)&data[offset]) % pool->align != 0) + offset++; + + memset(buf_hdr, 0, sizeof(odp_buffer_hdr_t)); + + size = pool->headroom + pool->data_size + pool->tailroom; + + /* Initialize buffer metadata */ + buf_hdr->size = size; + buf_hdr->type = type; + buf_hdr->event_type = type; + buf_hdr->pool_hdl = pool->pool_hdl; + buf_hdr->uarea_addr = uarea; + /* Show user requested size through API */ + buf_hdr->uarea_size = pool->params.pkt.uarea_size; + buf_hdr->segcount = 1; + buf_hdr->segsize = size; + + /* Pointer to data start (of the first segment) */ + buf_hdr->addr[0] = &data[offset]; + + buf_hdl = form_buffer_handle(pool->pool_idx, i); + buf_hdr->handle.handle = buf_hdl; + + /* Store buffer into the global pool */ + ring_enq(ring, mask, (uint32_t)(uintptr_t)buf_hdl); + } +}
- if (params == NULL) +static odp_pool_t pool_create(const char *name, odp_pool_param_t *params, + uint32_t shmflags) +{ + pool_t *pool; + uint32_t uarea_size, headroom, tailroom; + odp_shm_t shm; + uint32_t data_size, align, num, hdr_size, block_size; + uint32_t max_len, max_seg_len; + uint32_t ring_size; + int name_len; + const char *postfix = "_uarea"; + char uarea_name[ODP_POOL_NAME_LEN + sizeof(postfix)]; + + if (params == NULL) { + ODP_ERR("No params"); return ODP_POOL_INVALID; - - /* Default size and align for timeouts */ - if (params->type == ODP_POOL_TIMEOUT) { - params->buf.size = 0; /* tmo.__res1 */ - params->buf.align = 0; /* tmo.__res2 */ }
- /* Default initialization parameters */ - uint32_t p_udata_size = 0; - uint32_t udata_stride = 0; + align = 0;
- /* Restriction for v1.0: All non-packet buffers are unsegmented */ - int unseg = 1; + if (params->type == ODP_POOL_BUFFER) + align = params->buf.align;
- uint32_t blk_size, buf_stride, buf_num, blk_num, seg_len = 0; - uint32_t buf_align = - params->type == ODP_POOL_BUFFER ? params->buf.align : 0; + if (align < ODP_CONFIG_BUFFER_ALIGN_MIN) + align = ODP_CONFIG_BUFFER_ALIGN_MIN;
/* Validate requested buffer alignment */ - if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX || - buf_align != ODP_ALIGN_ROUNDDOWN_POWER_2(buf_align, buf_align)) + if (align > ODP_CONFIG_BUFFER_ALIGN_MAX || + align != ODP_ALIGN_ROUNDDOWN_POWER_2(align, align)) { + ODP_ERR("Bad align requirement"); return ODP_POOL_INVALID; + }
- /* Set correct alignment based on input request */ - if (buf_align == 0) - buf_align = ODP_CACHE_LINE_SIZE; - else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN) - buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN; + headroom = 0; + tailroom = 0; + data_size = 0; + max_len = 0; + max_seg_len = 0; + uarea_size = 0;
- /* Calculate space needed for buffer blocks and metadata */ switch (params->type) { case ODP_POOL_BUFFER: - buf_num = params->buf.num; - blk_size = params->buf.size; - - /* Optimize small raw buffers */ - if (blk_size > ODP_MAX_INLINE_BUF || params->buf.align != 0) - blk_size = ODP_ALIGN_ROUNDUP(blk_size, buf_align); - - buf_stride = sizeof(odp_buffer_hdr_stride); + num = params->buf.num; + data_size = params->buf.size; break;
case ODP_POOL_PACKET: - unseg = 0; /* Packets are always segmented */ - headroom = ODP_CONFIG_PACKET_HEADROOM; - tailroom = ODP_CONFIG_PACKET_TAILROOM; - buf_num = params->pkt.num; - - seg_len = params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MIN ? - ODP_CONFIG_PACKET_SEG_LEN_MIN : - (params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MAX ? - params->pkt.seg_len : ODP_CONFIG_PACKET_SEG_LEN_MAX); - - seg_len = ODP_ALIGN_ROUNDUP( - headroom + seg_len + tailroom, - ODP_CONFIG_BUFFER_ALIGN_MIN); - - blk_size = params->pkt.len <= seg_len ? seg_len : - ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len); - - /* Reject create if pkt.len needs too many segments */ - if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) { - ODP_ERR("ODP_BUFFER_MAX_SEG exceed %d(%d)\n", - blk_size / seg_len, ODP_BUFFER_MAX_SEG); + headroom = ODP_CONFIG_PACKET_HEADROOM; + tailroom = ODP_CONFIG_PACKET_TAILROOM; + num = params->pkt.num; + uarea_size = params->pkt.uarea_size; + + data_size = ODP_CONFIG_PACKET_SEG_LEN_MAX; + + if (data_size < ODP_CONFIG_PACKET_SEG_LEN_MIN) + data_size = ODP_CONFIG_PACKET_SEG_LEN_MIN; + + if (data_size > ODP_CONFIG_PACKET_SEG_LEN_MAX) { + ODP_ERR("Too large seg len requirement"); return ODP_POOL_INVALID; }
- p_udata_size = params->pkt.uarea_size; - udata_stride = ODP_ALIGN_ROUNDUP(p_udata_size, - sizeof(uint64_t)); - - buf_stride = sizeof(odp_packet_hdr_stride); + max_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MAX - + ODP_CONFIG_PACKET_HEADROOM - + ODP_CONFIG_PACKET_TAILROOM; + max_len = ODP_CONFIG_PACKET_MAX_SEGS * max_seg_len; break;
case ODP_POOL_TIMEOUT: - blk_size = 0; - buf_num = params->tmo.num; - buf_stride = sizeof(odp_timeout_hdr_stride); + num = params->tmo.num; break;
default: + ODP_ERR("Bad pool type"); return ODP_POOL_INVALID; }
- /* Validate requested number of buffers against addressable limits */ - if (buf_num > - (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) { - ODP_ERR("buf_num %d > then expected %d\n", - buf_num, ODP_BUFFER_MAX_BUFFERS / - (buf_stride / ODP_CACHE_LINE_SIZE)); + if (uarea_size) + uarea_size = ODP_CACHE_LINE_SIZE_ROUNDUP(uarea_size); + + pool = reserve_pool(); + + if (pool == NULL) { + ODP_ERR("No more free pools"); return ODP_POOL_INVALID; }
- /* Find an unused buffer pool slot and iniitalize it as requested */ - for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool = get_pool_entry(i); + if (name == NULL) { + pool->name[0] = 0; + } else { + strncpy(pool->name, name, + ODP_POOL_NAME_LEN - 1); + pool->name[ODP_POOL_NAME_LEN - 1] = 0; + }
- POOL_LOCK(&pool->s.lock); - if (pool->s.pool_shm != ODP_SHM_INVALID) { - POOL_UNLOCK(&pool->s.lock); - continue; - } + name_len = strlen(pool->name); + memcpy(uarea_name, pool->name, name_len); + strcpy(&uarea_name[name_len], postfix);
- /* found free pool */ - size_t block_size, pad_size, mdata_size, udata_size; + pool->params = *params;
- pool->s.flags.all = 0; + hdr_size = sizeof(odp_packet_hdr_t); + hdr_size = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size);
- if (name == NULL) { - pool->s.name[0] = 0; - } else { - strncpy(pool->s.name, name, - ODP_POOL_NAME_LEN - 1); - pool->s.name[ODP_POOL_NAME_LEN - 1] = 0; - pool->s.flags.has_name = 1; - } + block_size = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size + align + headroom + + data_size + tailroom);
- pool->s.params = *params; - pool->s.buf_align = buf_align; + if (num <= RING_SIZE_MIN) + ring_size = RING_SIZE_MIN; + else + ring_size = ODP_ROUNDUP_POWER_2(num);
- /* Optimize for short buffers: Data stored in buffer hdr */ - if (blk_size <= ODP_MAX_INLINE_BUF) { - block_size = 0; - pool->s.buf_align = blk_size == 0 ? 0 : sizeof(void *); - } else { - block_size = buf_num * blk_size; - pool->s.buf_align = buf_align; - } + pool->ring_mask = ring_size - 1; + pool->num = num; + pool->align = align; + pool->headroom = headroom; + pool->data_size = data_size; + pool->max_len = max_len; + pool->max_seg_len = max_seg_len; + pool->tailroom = tailroom; + pool->block_size = block_size; + pool->uarea_size = uarea_size; + pool->shm_size = num * block_size; + pool->uarea_shm_size = num * uarea_size;
- pad_size = ODP_CACHE_LINE_SIZE_ROUNDUP(block_size) - block_size; - mdata_size = buf_num * buf_stride; - udata_size = buf_num * udata_stride; + shm = odp_shm_reserve(pool->name, pool->shm_size, + ODP_PAGE_SIZE, shmflags);
- pool->s.buf_num = buf_num; - pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(block_size + - pad_size + - mdata_size + - udata_size); + pool->shm = shm;
- shm = odp_shm_reserve(pool->s.name, - pool->s.pool_size, - ODP_PAGE_SIZE, shmflags); - if (shm == ODP_SHM_INVALID) { - POOL_UNLOCK(&pool->s.lock); - return ODP_POOL_INVALID; - } - pool->s.pool_base_addr = odp_shm_addr(shm); - pool->s.pool_shm = shm; - - /* Now safe to unlock since pool entry has been allocated */ - POOL_UNLOCK(&pool->s.lock); - - pool->s.flags.unsegmented = unseg; - pool->s.seg_size = unseg ? blk_size : seg_len; - pool->s.blk_size = blk_size; - - uint8_t *block_base_addr = pool->s.pool_base_addr; - uint8_t *mdata_base_addr = - block_base_addr + block_size + pad_size; - uint8_t *udata_base_addr = mdata_base_addr + mdata_size; - - /* Pool mdata addr is used for indexing buffer metadata */ - pool->s.pool_mdata_addr = mdata_base_addr; - pool->s.udata_size = p_udata_size; - - pool->s.buf_stride = buf_stride; - pool->s.buf_freelist = NULL; - pool->s.blk_freelist = NULL; - - /* Initialization will increment these to their target vals */ - odp_atomic_store_u32(&pool->s.bufcount, 0); - odp_atomic_store_u32(&pool->s.blkcount, 0); - - uint8_t *buf = udata_base_addr - buf_stride; - uint8_t *udat = udata_stride == 0 ? NULL : - udata_base_addr + udata_size - udata_stride; - - /* Init buffer common header and add to pool buffer freelist */ - do { - odp_buffer_hdr_t *tmp = - (odp_buffer_hdr_t *)(void *)buf; - - /* Iniitalize buffer metadata */ - tmp->allocator = ODP_FREEBUF; - tmp->flags.all = 0; - tmp->size = 0; - tmp->type = params->type; - tmp->event_type = params->type; - tmp->pool_hdl = pool->s.pool_hdl; - tmp->uarea_addr = (void *)udat; - tmp->uarea_size = p_udata_size; - tmp->segcount = 0; - tmp->segsize = pool->s.seg_size; - tmp->handle.handle = odp_buffer_encode_handle(tmp); - - /* Set 1st seg addr for zero-len buffers */ - tmp->addr[0] = NULL; - - /* Special case for short buffer data */ - if (blk_size <= ODP_MAX_INLINE_BUF) { - tmp->flags.hdrdata = 1; - if (blk_size > 0) { - tmp->segcount = 1; - tmp->addr[0] = &tmp->addr[1]; - tmp->size = blk_size; - } - } - - /* Push buffer onto pool's freelist */ - ret_buf(&pool->s, tmp); - buf -= buf_stride; - udat -= udata_stride; - } while (buf >= mdata_base_addr); - - /* Form block freelist for pool */ - uint8_t *blk = - block_base_addr + block_size - pool->s.seg_size; - - if (blk_size > ODP_MAX_INLINE_BUF) - do { - ret_blk(&pool->s, blk); - blk -= pool->s.seg_size; - } while (blk >= block_base_addr); - - blk_num = odp_atomic_load_u32(&pool->s.blkcount); - - /* Initialize pool statistics counters */ - odp_atomic_store_u64(&pool->s.poolstats.bufallocs, 0); - odp_atomic_store_u64(&pool->s.poolstats.buffrees, 0); - odp_atomic_store_u64(&pool->s.poolstats.blkallocs, 0); - odp_atomic_store_u64(&pool->s.poolstats.blkfrees, 0); - odp_atomic_store_u64(&pool->s.poolstats.bufempty, 0); - odp_atomic_store_u64(&pool->s.poolstats.blkempty, 0); - odp_atomic_store_u64(&pool->s.poolstats.buf_high_wm_count, 0); - odp_atomic_store_u64(&pool->s.poolstats.buf_low_wm_count, 0); - odp_atomic_store_u64(&pool->s.poolstats.blk_high_wm_count, 0); - odp_atomic_store_u64(&pool->s.poolstats.blk_low_wm_count, 0); - - /* Reset other pool globals to initial state */ - pool->s.buf_low_wm_assert = 0; - pool->s.blk_low_wm_assert = 0; - pool->s.quiesced = 0; - pool->s.headroom = headroom; - pool->s.tailroom = tailroom; - - /* Watermarks are hard-coded for now to control caching */ - pool->s.buf_high_wm = buf_num / 2; - pool->s.buf_low_wm = buf_num / 4; - pool->s.blk_high_wm = blk_num / 2; - pool->s.blk_low_wm = blk_num / 4; - - pool_hdl = pool->s.pool_hdl; - break; + if 
(shm == ODP_SHM_INVALID) { + ODP_ERR("Shm reserve failed"); + goto error; }
- return pool_hdl; -} + pool->base_addr = odp_shm_addr(pool->shm);
-odp_pool_t odp_pool_create(const char *name, - odp_pool_param_t *params) -{ -#ifdef _ODP_PKTIO_IPC - if (params && (params->type == ODP_POOL_PACKET)) - return _pool_create(name, params, ODP_SHM_PROC); -#endif - return _pool_create(name, params, 0); - -} - -odp_pool_t odp_pool_lookup(const char *name) -{ - uint32_t i; - pool_entry_t *pool; + pool->uarea_shm = ODP_SHM_INVALID; + if (uarea_size) { + shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, + ODP_PAGE_SIZE, shmflags);
- for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool = get_pool_entry(i); + pool->uarea_shm = shm;
- POOL_LOCK(&pool->s.lock); - if (strcmp(name, pool->s.name) == 0) { - /* found it */ - POOL_UNLOCK(&pool->s.lock); - return pool->s.pool_hdl; + if (shm == ODP_SHM_INVALID) { + ODP_ERR("Shm reserve failed (uarea)"); + goto error; } - POOL_UNLOCK(&pool->s.lock); + + pool->uarea_base_addr = odp_shm_addr(pool->uarea_shm); }
- return ODP_POOL_INVALID; -} + ring_init(&pool->ring.hdr); + init_buffers(pool);
-int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info) -{ - uint32_t pool_id = pool_handle_to_index(pool_hdl); - pool_entry_t *pool = get_pool_entry(pool_id); + return pool->pool_hdl;
- if (pool == NULL || info == NULL) - return -1; +error: + if (pool->shm != ODP_SHM_INVALID) + odp_shm_free(pool->shm);
- info->name = pool->s.name; - info->params = pool->s.params; + if (pool->uarea_shm != ODP_SHM_INVALID) + odp_shm_free(pool->uarea_shm);
- return 0; + LOCK(&pool->lock); + pool->reserved = 0; + UNLOCK(&pool->lock); + return ODP_POOL_INVALID; }
-static inline void get_local_cache_bufs(local_cache_t *buf_cache, uint32_t idx, - odp_buffer_hdr_t *buf_hdr[], - uint32_t num) -{ - uint32_t i;
- for (i = 0; i < num; i++) { - buf_hdr[i] = buf_cache->s.buf[idx + i]; - odp_prefetch(buf_hdr[i]); - odp_prefetch_store(buf_hdr[i]); - } -} - -static void flush_cache(local_cache_t *buf_cache, struct pool_entry_s *pool) +odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params) { - uint32_t flush_count = 0; - uint32_t num; - - while ((num = buf_cache->s.num_buf)) { - odp_buffer_hdr_t *buf; - - buf = buf_cache->s.buf[num - 1]; - ret_buf(pool, buf); - flush_count++; - buf_cache->s.num_buf--; - } - - odp_atomic_add_u64(&pool->poolstats.bufallocs, buf_cache->s.bufallocs); - odp_atomic_add_u64(&pool->poolstats.buffrees, - buf_cache->s.buffrees - flush_count); - - buf_cache->s.bufallocs = 0; - buf_cache->s.buffrees = 0; +#ifdef _ODP_PKTIO_IPC + if (params && (params->type == ODP_POOL_PACKET)) + return pool_create(name, params, ODP_SHM_PROC); +#endif + return pool_create(name, params, 0); }
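At the application level nothing changes; a usage sketch of the reworked pool path (the pool name and parameter values are arbitrary examples, odp_api.h is assumed as the umbrella header):

#include <odp_api.h>

static int ex_pool_roundtrip(void)
{
        odp_pool_param_t params;
        odp_pool_t pool;
        odp_packet_t pkt;

        odp_pool_param_init(&params);
        params.type           = ODP_POOL_PACKET;
        params.pkt.num        = 1024;   /* events in the pool */
        params.pkt.len        = 1500;   /* typical packet length */
        params.pkt.uarea_size = 0;

        pool = odp_pool_create("ex_pkt_pool", &params);
        if (pool == ODP_POOL_INVALID)
                return -1;

        pkt = odp_packet_alloc(pool, 1500);
        if (pkt != ODP_PACKET_INVALID)
                odp_packet_free(pkt);

        return odp_pool_destroy(pool);
}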
int odp_pool_destroy(odp_pool_t pool_hdl) { - uint32_t pool_id = pool_handle_to_index(pool_hdl); - pool_entry_t *pool = get_pool_entry(pool_id); + pool_t *pool = pool_entry_from_hdl(pool_hdl); int i;
if (pool == NULL) return -1;
- POOL_LOCK(&pool->s.lock); + LOCK(&pool->lock);
- /* Call fails if pool is not allocated or predefined*/ - if (pool->s.pool_shm == ODP_SHM_INVALID || - pool->s.flags.predefined) { - POOL_UNLOCK(&pool->s.lock); - ODP_ERR("invalid shm for pool %s\n", pool->s.name); + if (pool->reserved == 0) { + UNLOCK(&pool->lock); + ODP_ERR("Pool not created\n"); return -1; }
/* Make sure local caches are empty */ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) - flush_cache(&pool->s.local_cache[i], &pool->s); - - /* Call fails if pool has allocated buffers */ - if (odp_atomic_load_u32(&pool->s.bufcount) < pool->s.buf_num) { - POOL_UNLOCK(&pool->s.lock); - ODP_DBG("error: pool has allocated buffers %d/%d\n", - odp_atomic_load_u32(&pool->s.bufcount), - pool->s.buf_num); - return -1; - } + flush_cache(&pool->local_cache[i], pool);
- odp_shm_free(pool->s.pool_shm); - pool->s.pool_shm = ODP_SHM_INVALID; - POOL_UNLOCK(&pool->s.lock); + odp_shm_free(pool->shm); + + if (pool->uarea_shm != ODP_SHM_INVALID) + odp_shm_free(pool->uarea_shm); + + pool->reserved = 0; + UNLOCK(&pool->lock);
return 0; }
-int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount) +odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) { - uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl); - pool_entry_t *pool = get_pool_entry(pool_id); - void *newsegs[segcount]; - int i; + odp_buffer_bits_t handle; + uint32_t pool_id, index, block_offset; + pool_t *pool; + odp_buffer_hdr_t *buf_hdr;
- for (i = 0; i < segcount; i++) { - newsegs[i] = get_blk(&pool->s); - if (newsegs[i] == NULL) { - while (--i >= 0) - ret_blk(&pool->s, newsegs[i]); - return -1; - } - } + handle.handle = buf; + pool_id = handle.pool_id; + index = handle.index; + pool = pool_entry(pool_id); + block_offset = index * pool->block_size;
- for (i = buf_hdr->segcount - 1; i >= 0; i--) - buf_hdr->addr[i + segcount] = buf_hdr->addr[i]; + /* clang requires cast to uintptr_t */ + buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
- for (i = 0; i < segcount; i++) - buf_hdr->addr[i] = newsegs[i]; + return buf_hdr; +}
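The rewritten odp_buf_to_hdr() no longer derives addresses from a separate metadata stride; the handle index is simply scaled by the pool's block size. In simplified form (the ex_ name is invented; base_addr and block_size correspond to the pool_t fields of the same name):

#include <stddef.h>
#include <stdint.h>

static inline void *ex_buf_hdr_addr(uint8_t *base_addr,
                                    uint32_t block_size,
                                    uint32_t index)
{
        /* header sits at the start of the index'th fixed-size block */
        return base_addr + (size_t)index * block_size;
}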
- buf_hdr->segcount += segcount; - buf_hdr->size = buf_hdr->segcount * pool->s.seg_size; - return 0; +odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf) +{ + return odp_buf_to_hdr(buf)->event_type; }
-void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount) +void _odp_buffer_event_type_set(odp_buffer_t buf, int ev) { - uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl); - pool_entry_t *pool = get_pool_entry(pool_id); - int s_cnt = buf_hdr->segcount; - int i; + odp_buf_to_hdr(buf)->event_type = ev; +}
- for (i = 0; i < segcount; i++) - ret_blk(&pool->s, buf_hdr->addr[i]); +void *buffer_map(odp_buffer_hdr_t *buf, + uint32_t offset, + uint32_t *seglen, + uint32_t limit) +{ + int seg_index; + int seg_offset;
- for (i = 0; i < s_cnt - segcount; i++) - buf_hdr->addr[i] = buf_hdr->addr[i + segcount]; + if (odp_likely(offset < buf->segsize)) { + seg_index = 0; + seg_offset = offset; + } else { + ODP_ERR("\nSEGMENTS NOT SUPPORTED\n"); + return NULL; + }
- buf_hdr->segcount -= segcount; - buf_hdr->size = buf_hdr->segcount * pool->s.seg_size; + if (seglen != NULL) { + uint32_t buf_left = limit - offset; + *seglen = seg_offset + buf_left <= buf->segsize ? + buf_left : buf->segsize - seg_offset; + } + + return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]); }
-int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount) +odp_pool_t odp_pool_lookup(const char *name) { - uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl); - pool_entry_t *pool = get_pool_entry(pool_id); - uint32_t s_cnt = buf_hdr->segcount; - int i; + uint32_t i; + pool_t *pool;
- for (i = 0; i < segcount; i++) { - buf_hdr->addr[s_cnt + i] = get_blk(&pool->s); - if (buf_hdr->addr[s_cnt + i] == NULL) { - while (--i >= 0) - ret_blk(&pool->s, buf_hdr->addr[s_cnt + i]); - return -1; + for (i = 0; i < ODP_CONFIG_POOLS; i++) { + pool = pool_entry(i); + + LOCK(&pool->lock); + if (strcmp(name, pool->name) == 0) { + /* found it */ + UNLOCK(&pool->lock); + return pool->pool_hdl; } + UNLOCK(&pool->lock); }
- buf_hdr->segcount += segcount; - buf_hdr->size = buf_hdr->segcount * pool->s.seg_size; - return 0; + return ODP_POOL_INVALID; }
-void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount) +int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info) { - uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl); - pool_entry_t *pool = get_pool_entry(pool_id); - int s_cnt = buf_hdr->segcount; - int i; + pool_t *pool = pool_entry_from_hdl(pool_hdl);
- for (i = s_cnt - 1; i >= s_cnt - segcount; i--) - ret_blk(&pool->s, buf_hdr->addr[i]); + if (pool == NULL || info == NULL) + return -1;
- buf_hdr->segcount -= segcount; - buf_hdr->size = buf_hdr->segcount * pool->s.seg_size; + info->name = pool->name; + info->params = pool->params; + + return 0; }
-static inline int get_local_bufs(local_cache_t *buf_cache, - odp_buffer_hdr_t *buf_hdr[], uint32_t max_num) +int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int max_num) { - uint32_t num_buf = buf_cache->s.num_buf; - uint32_t num = num_buf; + pool_t *pool; + ring_t *ring; + uint32_t mask; + int i; + pool_cache_t *cache; + uint32_t cache_num;
- if (odp_unlikely(num_buf == 0)) - return 0; + pool = pool_entry_from_hdl(pool_hdl); + ring = &pool->ring.hdr; + mask = pool->ring_mask; + cache = local.cache[_odp_typeval(pool_hdl)];
- if (odp_likely(max_num < num)) - num = max_num; + cache_num = cache->num;
- get_local_cache_bufs(buf_cache, num_buf - num, buf_hdr, num); - buf_cache->s.num_buf -= num; - buf_cache->s.bufallocs += num; + if (odp_likely((int)cache_num >= max_num)) { + for (i = 0; i < max_num; i++) + buf[i] = cache->buf[cache_num - max_num + i];
- return num; -} + cache->num = cache_num - max_num; + return max_num; + }
-static inline void ret_local_buf(local_cache_t *buf_cache, uint32_t idx, - odp_buffer_hdr_t *buf) -{ - buf_cache->s.buf[idx] = buf; - buf_cache->s.num_buf++; - buf_cache->s.buffrees++; -} + for (i = 0; i < max_num; i++) { + uint32_t data;
-static inline void ret_local_bufs(local_cache_t *buf_cache, uint32_t idx, - odp_buffer_hdr_t *buf[], int num_buf) -{ - int i; + data = ring_deq(ring, mask); + + if (data == RING_EMPTY) + break;
- for (i = 0; i < num_buf; i++) - buf_cache->s.buf[idx + i] = buf[i]; + buf[i] = (odp_buffer_t)(uintptr_t)data; + }
- buf_cache->s.num_buf += num_buf; - buf_cache->s.buffrees += num_buf; + return i; }
-int buffer_alloc_multi(odp_pool_t pool_hdl, size_t size, - odp_buffer_t buf[], int max_num) +static inline void buffer_free_to_pool(uint32_t pool_id, + const odp_buffer_t buf[], int num) { - uint32_t pool_id = pool_handle_to_index(pool_hdl); - pool_entry_t *pool = get_pool_entry(pool_id); - uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom; - odp_buffer_hdr_t *buf_tbl[max_num]; - odp_buffer_hdr_t *buf_hdr; - int num, i; - intmax_t needed; - void *blk; - - /* Reject oversized allocation requests */ - if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) || - (!pool->s.flags.unsegmented && - totsize > pool->s.seg_size * ODP_BUFFER_MAX_SEG)) - return 0; + pool_t *pool; + int i; + ring_t *ring; + uint32_t mask; + pool_cache_t *cache; + uint32_t cache_num; + + cache = local.cache[pool_id]; + pool = pool_entry(pool_id); + ring = &pool->ring.hdr; + mask = pool->ring_mask; + + /* Special case of a very large free. Move directly to + * the global pool. */ + if (odp_unlikely(num > CONFIG_POOL_CACHE_SIZE)) { + for (i = 0; i < num; i++) + ring_enq(ring, mask, (uint32_t)(uintptr_t)buf[i]);
- /* Try to satisfy request from the local cache */ - num = get_local_bufs(local.cache[pool_id], buf_tbl, max_num); - - /* If cache is empty, satisfy request from the pool */ - if (odp_unlikely(num < max_num)) { - for (; num < max_num; num++) { - buf_hdr = get_buf(&pool->s); - - if (odp_unlikely(buf_hdr == NULL)) - goto pool_empty; - - /* Get blocks for this buffer, if pool uses - * application data */ - if (buf_hdr->size < totsize) { - uint32_t segcount; - - needed = totsize - buf_hdr->size; - do { - blk = get_blk(&pool->s); - if (odp_unlikely(blk == NULL)) { - ret_buf(&pool->s, buf_hdr); - goto pool_empty; - } - - segcount = buf_hdr->segcount++; - buf_hdr->addr[segcount] = blk; - needed -= pool->s.seg_size; - } while (needed > 0); - buf_hdr->size = buf_hdr->segcount * - pool->s.seg_size; - } - - buf_tbl[num] = buf_hdr; - } + return; }
-pool_empty: - for (i = 0; i < num; i++) { - buf_hdr = buf_tbl[i]; - - /* Mark buffer as allocated */ - buf_hdr->allocator = local.thr_id; - - /* By default, buffers are not associated with - * an ordered queue */ - buf_hdr->origin_qe = NULL; + /* Make room into local cache if needed. Do at least burst size + * transfer. */ + cache_num = cache->num;
- buf[i] = odp_hdr_to_buf(buf_hdr); + if (odp_unlikely((int)(CONFIG_POOL_CACHE_SIZE - cache_num) < num)) { + int burst = CACHE_BURST;
- /* Add more segments if buffer from local cache is too small */ - if (odp_unlikely(buf_hdr->size < totsize)) { - needed = totsize - buf_hdr->size; - do { - blk = get_blk(&pool->s); - if (odp_unlikely(blk == NULL)) { - int j; + if (odp_unlikely(num > CACHE_BURST)) + burst = num;
- ret_buf(&pool->s, buf_hdr); - buf_hdr = NULL; - local.cache[pool_id]->s.buffrees--; + for (i = 0; i < burst; i++) { + uint32_t data, index;
- /* move remaining bufs up one step - * and update loop counters */ - num--; - for (j = i; j < num; j++) - buf_tbl[j] = buf_tbl[j + 1]; - - i--; - break; - } - needed -= pool->s.seg_size; - buf_hdr->addr[buf_hdr->segcount++] = blk; - buf_hdr->size = buf_hdr->segcount * - pool->s.seg_size; - } while (needed > 0); + index = cache_num - burst + i; + data = (uint32_t)(uintptr_t)cache->buf[index]; + ring_enq(ring, mask, data); } + + cache_num -= burst; }
- return num; + for (i = 0; i < num; i++) + cache->buf[cache_num + i] = buf[i]; + + cache->num = cache_num + num; }
-odp_buffer_t buffer_alloc(odp_pool_t pool_hdl, size_t size) +void buffer_free_multi(const odp_buffer_t buf[], int num_total) { - uint32_t pool_id = pool_handle_to_index(pool_hdl); - pool_entry_t *pool = get_pool_entry(pool_id); - uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom; - odp_buffer_hdr_t *buf_hdr; - intmax_t needed; - void *blk; + uint32_t pool_id; + int num; + int i; + int first = 0;
- /* Reject oversized allocation requests */ - if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) || - (!pool->s.flags.unsegmented && - totsize > pool->s.seg_size * ODP_BUFFER_MAX_SEG)) - return 0; + while (1) { + num = 1; + i = 1; + pool_id = pool_id_from_buf(buf[first]);
- /* Try to satisfy request from the local cache. If cache is empty, - * satisfy request from the pool */ - if (odp_unlikely(!get_local_bufs(local.cache[pool_id], &buf_hdr, 1))) { - buf_hdr = get_buf(&pool->s); - - if (odp_unlikely(buf_hdr == NULL)) - return ODP_BUFFER_INVALID; - - /* Get blocks for this buffer, if pool uses application data */ - if (buf_hdr->size < totsize) { - needed = totsize - buf_hdr->size; - do { - blk = get_blk(&pool->s); - if (odp_unlikely(blk == NULL)) { - ret_buf(&pool->s, buf_hdr); - return ODP_BUFFER_INVALID; - } - buf_hdr->addr[buf_hdr->segcount++] = blk; - needed -= pool->s.seg_size; - } while (needed > 0); - buf_hdr->size = buf_hdr->segcount * pool->s.seg_size; + /* 'num' buffers are from the same pool */ + if (num_total > 1) { + for (i = first; i < num_total; i++) + if (pool_id != pool_id_from_buf(buf[i])) + break; + + num = i - first; } - } - /* Mark buffer as allocated */ - buf_hdr->allocator = local.thr_id; - - /* By default, buffers are not associated with - * an ordered queue */ - buf_hdr->origin_qe = NULL; - - /* Add more segments if buffer from local cache is too small */ - if (odp_unlikely(buf_hdr->size < totsize)) { - needed = totsize - buf_hdr->size; - do { - blk = get_blk(&pool->s); - if (odp_unlikely(blk == NULL)) { - ret_buf(&pool->s, buf_hdr); - buf_hdr = NULL; - local.cache[pool_id]->s.buffrees--; - return ODP_BUFFER_INVALID; - } - buf_hdr->addr[buf_hdr->segcount++] = blk; - needed -= pool->s.seg_size; - } while (needed > 0); - buf_hdr->size = buf_hdr->segcount * pool->s.seg_size; - }
- return odp_hdr_to_buf(buf_hdr); + buffer_free_to_pool(pool_id, &buf[first], num); + + if (i == num_total) + return; + + first = i; + } }
odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl) { - return buffer_alloc(pool_hdl, - odp_pool_to_entry(pool_hdl)->s.params.buf.size); + odp_buffer_t buf; + int ret; + + ret = buffer_alloc_multi(pool_hdl, &buf, 1); + + if (odp_likely(ret == 1)) + return buf; + + return ODP_BUFFER_INVALID; }
int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num) { - size_t buf_size = odp_pool_to_entry(pool_hdl)->s.params.buf.size; - - return buffer_alloc_multi(pool_hdl, buf_size, buf, num); + return buffer_alloc_multi(pool_hdl, buf, num); }
-static void multi_pool_free(odp_buffer_hdr_t *buf_hdr[], int num_buf) +void odp_buffer_free(odp_buffer_t buf) { - uint32_t pool_id, num; - local_cache_t *buf_cache; - pool_entry_t *pool; - int i, j, idx; - - for (i = 0; i < num_buf; i++) { - pool_id = pool_handle_to_index(buf_hdr[i]->pool_hdl); - buf_cache = local.cache[pool_id]; - num = buf_cache->s.num_buf; - - if (num < POOL_MAX_LOCAL_BUFS) { - ret_local_buf(buf_cache, num, buf_hdr[i]); - continue; - } - - idx = POOL_MAX_LOCAL_BUFS - POOL_CHUNK_SIZE; - pool = get_pool_entry(pool_id); - - /* local cache full, return a chunk */ - for (j = 0; j < POOL_CHUNK_SIZE; j++) { - odp_buffer_hdr_t *tmp; - - tmp = buf_cache->s.buf[idx + i]; - ret_buf(&pool->s, tmp); - } - - num = POOL_MAX_LOCAL_BUFS - POOL_CHUNK_SIZE; - buf_cache->s.num_buf = num; - ret_local_buf(buf_cache, num, buf_hdr[i]); - } + buffer_free_multi(&buf, 1); }
-void buffer_free_multi(uint32_t pool_id, - const odp_buffer_t buf[], int num_free) +void odp_buffer_free_multi(const odp_buffer_t buf[], int num) { - local_cache_t *buf_cache = local.cache[pool_id]; - uint32_t num; - int i, idx; - pool_entry_t *pool; - odp_buffer_hdr_t *buf_hdr[num_free]; - int multi_pool = 0; - - for (i = 0; i < num_free; i++) { - uint32_t id; - - buf_hdr[i] = odp_buf_to_hdr(buf[i]); - ODP_ASSERT(buf_hdr[i]->allocator != ODP_FREEBUF); - buf_hdr[i]->allocator = ODP_FREEBUF; - id = pool_handle_to_index(buf_hdr[i]->pool_hdl); - multi_pool |= (pool_id != id); - } - - if (odp_unlikely(multi_pool)) { - multi_pool_free(buf_hdr, num_free); - return; - } + buffer_free_multi(buf, num); +}
- num = buf_cache->s.num_buf; +int odp_pool_capability(odp_pool_capability_t *capa) +{ + uint32_t max_len = ODP_CONFIG_PACKET_SEG_LEN_MAX - + ODP_CONFIG_PACKET_HEADROOM - + ODP_CONFIG_PACKET_TAILROOM;
- if (odp_likely((num + num_free) < POOL_MAX_LOCAL_BUFS)) { - ret_local_bufs(buf_cache, num, buf_hdr, num_free); - return; - } + memset(capa, 0, sizeof(odp_pool_capability_t));
- pool = get_pool_entry(pool_id); + capa->max_pools = ODP_CONFIG_POOLS;
- /* Return at least one chunk into the global pool */ - if (odp_unlikely(num_free > POOL_CHUNK_SIZE)) { - for (i = 0; i < num_free; i++) - ret_buf(&pool->s, buf_hdr[i]); + /* Buffer pools */ + capa->buf.max_pools = ODP_CONFIG_POOLS; + capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX; + capa->buf.max_size = 0; + capa->buf.max_num = CONFIG_POOL_MAX_NUM;
- return; - } + /* Packet pools */ + capa->pkt.max_pools = ODP_CONFIG_POOLS; + capa->pkt.max_len = ODP_CONFIG_PACKET_MAX_SEGS * max_len; + capa->pkt.max_num = CONFIG_POOL_MAX_NUM; + capa->pkt.min_headroom = ODP_CONFIG_PACKET_HEADROOM; + capa->pkt.min_tailroom = ODP_CONFIG_PACKET_TAILROOM; + capa->pkt.max_segs_per_pkt = ODP_CONFIG_PACKET_MAX_SEGS; + capa->pkt.min_seg_len = max_len; + capa->pkt.max_seg_len = max_len; + capa->pkt.max_uarea_size = 0;
- idx = num - POOL_CHUNK_SIZE; - for (i = 0; i < POOL_CHUNK_SIZE; i++) - ret_buf(&pool->s, buf_cache->s.buf[idx + i]); + /* Timeout pools */ + capa->tmo.max_pools = ODP_CONFIG_POOLS; + capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
- num -= POOL_CHUNK_SIZE; - buf_cache->s.num_buf = num; - ret_local_bufs(buf_cache, num, buf_hdr, num_free); + return 0; }
-void buffer_free(uint32_t pool_id, const odp_buffer_t buf) +void odp_pool_print(odp_pool_t pool_hdl) { - local_cache_t *buf_cache = local.cache[pool_id]; - uint32_t num; - int i; - pool_entry_t *pool; - odp_buffer_hdr_t *buf_hdr; + pool_t *pool;
- buf_hdr = odp_buf_to_hdr(buf); - ODP_ASSERT(buf_hdr->allocator != ODP_FREEBUF); - buf_hdr->allocator = ODP_FREEBUF; - - num = buf_cache->s.num_buf; - - if (odp_likely((num + 1) < POOL_MAX_LOCAL_BUFS)) { - ret_local_bufs(buf_cache, num, &buf_hdr, 1); - return; - } + pool = pool_entry_from_hdl(pool_hdl);
- pool = get_pool_entry(pool_id); - - num -= POOL_CHUNK_SIZE; - for (i = 0; i < POOL_CHUNK_SIZE; i++) - ret_buf(&pool->s, buf_cache->s.buf[num + i]); - - buf_cache->s.num_buf = num; - ret_local_bufs(buf_cache, num, &buf_hdr, 1); + printf("Pool info\n"); + printf("---------\n"); + printf(" pool %" PRIu64 "\n", + odp_pool_to_u64(pool->pool_hdl)); + printf(" name %s\n", pool->name); + printf(" pool type %s\n", + pool->params.type == ODP_POOL_BUFFER ? "buffer" : + (pool->params.type == ODP_POOL_PACKET ? "packet" : + (pool->params.type == ODP_POOL_TIMEOUT ? "timeout" : + "unknown"))); + printf(" pool shm %" PRIu64 "\n", + odp_shm_to_u64(pool->shm)); + printf(" user area shm %" PRIu64 "\n", + odp_shm_to_u64(pool->uarea_shm)); + printf(" num %u\n", pool->num); + printf(" align %u\n", pool->align); + printf(" headroom %u\n", pool->headroom); + printf(" data size %u\n", pool->data_size); + printf(" max data len %u\n", pool->max_len); + printf(" max seg len %u\n", pool->max_seg_len); + printf(" tailroom %u\n", pool->tailroom); + printf(" block size %u\n", pool->block_size); + printf(" uarea size %u\n", pool->uarea_size); + printf(" shm size %u\n", pool->shm_size); + printf(" base addr %p\n", pool->base_addr); + printf(" uarea shm size %u\n", pool->uarea_shm_size); + printf(" uarea base addr %p\n", pool->uarea_base_addr); + printf("\n"); }
-void odp_buffer_free(odp_buffer_t buf) +odp_pool_t odp_buffer_pool(odp_buffer_t buf) { uint32_t pool_id = pool_id_from_buf(buf);
- buffer_free(pool_id, buf); + return pool_index_to_handle(pool_id); }
-void odp_buffer_free_multi(const odp_buffer_t buf[], int num) +void odp_pool_param_init(odp_pool_param_t *params) { - uint32_t pool_id = pool_id_from_buf(buf[0]); + memset(params, 0, sizeof(odp_pool_param_t)); +}
- buffer_free_multi(pool_id, buf, num); +uint64_t odp_pool_to_u64(odp_pool_t hdl) +{ + return _odp_pri(hdl); }
-void odp_pool_print(odp_pool_t pool_hdl) +int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount) { - pool_entry_t *pool; - uint32_t pool_id; + (void)buf_hdr; + (void)segcount; + return 0; +}
- pool_id = pool_handle_to_index(pool_hdl); - pool = get_pool_entry(pool_id); - - uint32_t bufcount = odp_atomic_load_u32(&pool->s.bufcount); - uint32_t blkcount = odp_atomic_load_u32(&pool->s.blkcount); - uint64_t bufallocs = odp_atomic_load_u64(&pool->s.poolstats.bufallocs); - uint64_t buffrees = odp_atomic_load_u64(&pool->s.poolstats.buffrees); - uint64_t blkallocs = odp_atomic_load_u64(&pool->s.poolstats.blkallocs); - uint64_t blkfrees = odp_atomic_load_u64(&pool->s.poolstats.blkfrees); - uint64_t bufempty = odp_atomic_load_u64(&pool->s.poolstats.bufempty); - uint64_t blkempty = odp_atomic_load_u64(&pool->s.poolstats.blkempty); - uint64_t bufhiwmct = - odp_atomic_load_u64(&pool->s.poolstats.buf_high_wm_count); - uint64_t buflowmct = - odp_atomic_load_u64(&pool->s.poolstats.buf_low_wm_count); - uint64_t blkhiwmct = - odp_atomic_load_u64(&pool->s.poolstats.blk_high_wm_count); - uint64_t blklowmct = - odp_atomic_load_u64(&pool->s.poolstats.blk_low_wm_count); - - ODP_DBG("Pool info\n"); - ODP_DBG("---------\n"); - ODP_DBG(" pool %" PRIu64 "\n", - odp_pool_to_u64(pool->s.pool_hdl)); - ODP_DBG(" name %s\n", - pool->s.flags.has_name ? pool->s.name : "Unnamed Pool"); - ODP_DBG(" pool type %s\n", - pool->s.params.type == ODP_POOL_BUFFER ? "buffer" : - (pool->s.params.type == ODP_POOL_PACKET ? "packet" : - (pool->s.params.type == ODP_POOL_TIMEOUT ? "timeout" : - "unknown"))); - ODP_DBG(" pool storage ODP managed shm handle %" PRIu64 "\n", - odp_shm_to_u64(pool->s.pool_shm)); - ODP_DBG(" pool status %s\n", - pool->s.quiesced ? "quiesced" : "active"); - ODP_DBG(" pool opts %s, %s\n", - pool->s.flags.unsegmented ? "unsegmented" : "segmented", - pool->s.flags.predefined ? "predefined" : "created"); - ODP_DBG(" pool base %p\n", pool->s.pool_base_addr); - ODP_DBG(" pool size %zu (%zu pages)\n", - pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE); - ODP_DBG(" pool mdata base %p\n", pool->s.pool_mdata_addr); - ODP_DBG(" udata size %zu\n", pool->s.udata_size); - ODP_DBG(" headroom %u\n", pool->s.headroom); - ODP_DBG(" tailroom %u\n", pool->s.tailroom); - if (pool->s.params.type == ODP_POOL_BUFFER) { - ODP_DBG(" buf size %zu\n", pool->s.params.buf.size); - ODP_DBG(" buf align %u requested, %u used\n", - pool->s.params.buf.align, pool->s.buf_align); - } else if (pool->s.params.type == ODP_POOL_PACKET) { - ODP_DBG(" seg length %u requested, %u used\n", - pool->s.params.pkt.seg_len, pool->s.seg_size); - ODP_DBG(" pkt length %u requested, %u used\n", - pool->s.params.pkt.len, pool->s.blk_size); - } - ODP_DBG(" num bufs %u\n", pool->s.buf_num); - ODP_DBG(" bufs available %u %s\n", bufcount, - pool->s.buf_low_wm_assert ? " **buf low wm asserted**" : ""); - ODP_DBG(" bufs in use %u\n", pool->s.buf_num - bufcount); - ODP_DBG(" buf allocs %lu\n", bufallocs); - ODP_DBG(" buf frees %lu\n", buffrees); - ODP_DBG(" buf empty %lu\n", bufempty); - ODP_DBG(" blk size %zu\n", - pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0); - ODP_DBG(" blks available %u %s\n", blkcount, - pool->s.blk_low_wm_assert ? 
" **blk low wm asserted**" : ""); - ODP_DBG(" blk allocs %lu\n", blkallocs); - ODP_DBG(" blk frees %lu\n", blkfrees); - ODP_DBG(" blk empty %lu\n", blkempty); - ODP_DBG(" buf high wm value %lu\n", pool->s.buf_high_wm); - ODP_DBG(" buf high wm count %lu\n", bufhiwmct); - ODP_DBG(" buf low wm value %lu\n", pool->s.buf_low_wm); - ODP_DBG(" buf low wm count %lu\n", buflowmct); - ODP_DBG(" blk high wm value %lu\n", pool->s.blk_high_wm); - ODP_DBG(" blk high wm count %lu\n", blkhiwmct); - ODP_DBG(" blk low wm value %lu\n", pool->s.blk_low_wm); - ODP_DBG(" blk low wm count %lu\n", blklowmct); +void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount) +{ + (void)buf_hdr; + (void)segcount; }
-odp_pool_t odp_buffer_pool(odp_buffer_t buf) +int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount) { - uint32_t pool_id = pool_id_from_buf(buf); + (void)buf_hdr; + (void)segcount; + return 0; +}
- return pool_index_to_handle(pool_id); +void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount) +{ + (void)buf_hdr; + (void)segcount; }
-void odp_pool_param_init(odp_pool_param_t *params) +int odp_buffer_is_valid(odp_buffer_t buf) { - memset(params, 0, sizeof(odp_pool_param_t)); + odp_buffer_bits_t handle; + pool_t *pool; + + handle.handle = buf; + + if (handle.pool_id >= ODP_CONFIG_POOLS) + return 0; + + pool = pool_entry(handle.pool_id); + + if (pool->reserved == 0) + return 0; + + return 1; } diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 86fb4c1..90ff1fe 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -29,6 +29,7 @@ #include <unistd.h> #include <sys/syscall.h> #include <inttypes.h> +#include <string.h>
#include <odp/api/align.h> #include <odp_align_internal.h> diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c index e01b0a5..ab25aab 100644 --- a/platform/linux-generic/pktio/socket.c +++ b/platform/linux-generic/pktio/socket.c @@ -46,6 +46,8 @@ #include <protocols/eth.h> #include <protocols/ip.h>
+#define MAX_SEGS ODP_CONFIG_PACKET_MAX_SEGS + static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
static int sock_stats_reset(pktio_entry_t *pktio_entry); @@ -583,20 +585,18 @@ static int sock_mmsg_open(odp_pktio_t id ODP_UNUSED, }
static uint32_t _rx_pkt_to_iovec(odp_packet_t pkt, - struct iovec iovecs[ODP_BUFFER_MAX_SEG]) + struct iovec iovecs[MAX_SEGS]) { odp_packet_seg_t seg = odp_packet_first_seg(pkt); uint32_t seg_count = odp_packet_num_segs(pkt); uint32_t seg_id = 0; uint32_t iov_count = 0; - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); uint8_t *ptr; uint32_t seglen;
for (seg_id = 0; seg_id < seg_count; ++seg_id) { - ptr = segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg, - &seglen, pkt_hdr->frame_len, - pkt_hdr->headroom); + ptr = odp_packet_seg_data(pkt, seg); + seglen = odp_packet_seg_data_len(pkt, seg);
if (ptr) { iovecs[iov_count].iov_base = ptr; @@ -692,7 +692,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED, } } else { struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_RX] - [ODP_BUFFER_MAX_SEG]; + [MAX_SEGS];
for (i = 0; i < (int)len; i++) { int num; @@ -754,7 +754,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED, }
static uint32_t _tx_pkt_to_iovec(odp_packet_t pkt, - struct iovec iovecs[ODP_BUFFER_MAX_SEG]) + struct iovec iovecs[MAX_SEGS]) { uint32_t pkt_len = odp_packet_len(pkt); uint32_t offset = odp_packet_l2_offset(pkt); @@ -780,7 +780,7 @@ static int sock_mmsg_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED, { pkt_sock_t *pkt_sock = &pktio_entry->s.pkt_sock; struct mmsghdr msgvec[ODP_PACKET_SOCKET_MAX_BURST_TX]; - struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_TX][ODP_BUFFER_MAX_SEG]; + struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_TX][MAX_SEGS]; int ret; int sockfd; int n, i; diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c index 9655668..bf4402a 100644 --- a/platform/linux-generic/pktio/socket_mmap.c +++ b/platform/linux-generic/pktio/socket_mmap.c @@ -346,17 +346,15 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring, static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout) { int pz = getpagesize(); - uint32_t pool_id; - pool_entry_t *pool_entry; + pool_t *pool;
if (pool_hdl == ODP_POOL_INVALID) ODP_ABORT("Invalid pool handle\n");
- pool_id = pool_handle_to_index(pool_hdl); - pool_entry = get_pool_entry(pool_id); + pool = odp_pool_to_entry(pool_hdl);
/* Frame has to capture full packet which can fit to the pool block.*/ - ring->req.tp_frame_size = (pool_entry->s.blk_size + + ring->req.tp_frame_size = (pool->data_size + TPACKET_HDRLEN + TPACKET_ALIGNMENT + + (pz - 1)) & (-pz);
@@ -364,7 +362,7 @@ static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout) * and align size to page boundary. */ ring->req.tp_block_size = (ring->req.tp_frame_size * - pool_entry->s.buf_num + (pz - 1)) & (-pz); + pool->num + (pz - 1)) & (-pz);
if (!fanout) { /* Single socket is in use. Use 1 block with buf_num frames. */ diff --git a/test/common_plat/performance/odp_pktio_perf.c b/test/common_plat/performance/odp_pktio_perf.c index 483f067..0e41f26 100644 --- a/test/common_plat/performance/odp_pktio_perf.c +++ b/test/common_plat/performance/odp_pktio_perf.c @@ -36,7 +36,7 @@
#define TEST_SKIP 77
-#define PKT_BUF_NUM 8192 +#define PKT_BUF_NUM (32 * 1024) #define MAX_NUM_IFACES 2 #define TEST_HDR_MAGIC 0x92749451 #define MAX_WORKERS 32 diff --git a/test/common_plat/performance/odp_scheduling.c b/test/common_plat/performance/odp_scheduling.c index 9407636..e2a49d3 100644 --- a/test/common_plat/performance/odp_scheduling.c +++ b/test/common_plat/performance/odp_scheduling.c @@ -28,7 +28,7 @@ /* GNU lib C */ #include <getopt.h>
-#define MSG_POOL_SIZE (4 * 1024 * 1024) /**< Message pool size */ +#define NUM_MSG (512 * 1024) /**< Number of msg in pool */ #define MAX_ALLOCS 32 /**< Alloc burst size */ #define QUEUES_PER_PRIO 64 /**< Queue per priority */ #define NUM_PRIOS 2 /**< Number of tested priorities */ @@ -868,7 +868,7 @@ int main(int argc, char *argv[]) odp_pool_param_init(¶ms); params.buf.size = sizeof(test_message_t); params.buf.align = 0; - params.buf.num = MSG_POOL_SIZE / sizeof(test_message_t); + params.buf.num = NUM_MSG; params.type = ODP_POOL_BUFFER;
pool = odp_pool_create("msg_pool", ¶ms); @@ -880,8 +880,6 @@ int main(int argc, char *argv[])
globals->pool = pool;
- /* odp_pool_print(pool); */ - /* * Create a queue for plain queue test */ @@ -940,6 +938,8 @@ int main(int argc, char *argv[])
odp_shm_print_all();
+ odp_pool_print(pool); + /* Barrier to sync test case execution */ odp_barrier_init(&globals->barrier, num_workers);
diff --git a/test/common_plat/validation/api/packet/packet.c b/test/common_plat/validation/api/packet/packet.c index a4426e2..454c73f 100644 --- a/test/common_plat/validation/api/packet/packet.c +++ b/test/common_plat/validation/api/packet/packet.c @@ -44,7 +44,12 @@ int packet_suite_init(void) if (odp_pool_capability(&capa) < 0) return -1;
- packet_len = capa.pkt.min_seg_len - PACKET_TAILROOM_RESERVE; + /* Pick a typical packet size and decrement it to the single segment + * limit if needed (min_seg_len maybe equal to max_len + * on some systems). */ + packet_len = 512; + while (packet_len > (capa.pkt.min_seg_len - PACKET_TAILROOM_RESERVE)) + packet_len--;
if (capa.pkt.max_len) { segmented_packet_len = capa.pkt.max_len; @@ -115,6 +120,7 @@ int packet_suite_init(void) udat_size = odp_packet_user_area_size(test_packet); if (!udat || udat_size != sizeof(struct udata_struct)) return -1; + odp_pool_print(packet_pool); memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
commit b2ce189680ee17d6019b199c0905cb3f608a71a5 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:26 2016 +0200
linux-gen: align: added round up power of two
Added a macro to round up a value to the next power of two, if it's not already a power of two. Also removed duplicated code from the same file.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_align_internal.h b/platform/linux-generic/include/odp_align_internal.h index 9ccde53..d9cd30b 100644 --- a/platform/linux-generic/include/odp_align_internal.h +++ b/platform/linux-generic/include/odp_align_internal.h @@ -29,24 +29,18 @@ extern "C" {
/** * @internal - * Round up pointer 'x' to alignment 'align' - */ -#define ODP_ALIGN_ROUNDUP_PTR(x, align)\ - ((void *)ODP_ALIGN_ROUNDUP((uintptr_t)(x), (uintptr_t)(align))) - -/** - * @internal - * Round up pointer 'x' to cache line size alignment + * Round up 'x' to alignment 'align' */ -#define ODP_CACHE_LINE_SIZE_ROUNDUP_PTR(x)\ - ((void *)ODP_CACHE_LINE_SIZE_ROUNDUP((uintptr_t)(x))) +#define ODP_ALIGN_ROUNDUP(x, align)\ + ((align) * (((x) + (align) - 1) / (align)))
/** * @internal - * Round up 'x' to alignment 'align' + * When 'x' is not already a power of two, round it up to the next + * power of two value. Zero is not supported as an input value. */ -#define ODP_ALIGN_ROUNDUP(x, align)\ - ((align) * (((x) + align - 1) / (align))) +#define ODP_ROUNDUP_POWER_2(x)\ + (1 << (((int)(8 * sizeof(x))) - __builtin_clz((x) - 1)))
/** * @internal @@ -82,20 +76,6 @@ extern "C" {
/** * @internal - * Round down pointer 'x' to 'align' alignment, which is a power of two - */ -#define ODP_ALIGN_ROUNDDOWN_PTR_POWER_2(x, align)\ -((void *)ODP_ALIGN_ROUNDDOWN_POWER_2((uintptr_t)(x), (uintptr_t)(align))) - -/** - * @internal - * Round down pointer 'x' to cache line size alignment - */ -#define ODP_CACHE_LINE_SIZE_ROUNDDOWN_PTR(x)\ - ((void *)ODP_CACHE_LINE_SIZE_ROUNDDOWN((uintptr_t)(x))) - -/** - * @internal * Round down 'x' to 'align' alignment, which is a power of two */ #define ODP_ALIGN_ROUNDDOWN_POWER_2(x, align)\
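As an aside (not part of the patch), the new ODP_ROUNDUP_POWER_2 computation can be checked with a small standalone program. The function name below is invented for the example; it assumes a GCC/Clang compiler for __builtin_clz, a 32-bit unsigned int, and an input of at least 2 (the macro itself documents that zero is unsupported).

#include <stdio.h>
#include <stdint.h>

/* Same computation as ODP_ROUNDUP_POWER_2: the next power of two has its
 * single set bit one position above the highest set bit of (x - 1) */
static uint32_t roundup_power_2(uint32_t x)
{
        return 1u << (32 - __builtin_clz(x - 1));
}

int main(void)
{
        printf("%u -> %u\n", 5u, roundup_power_2(5));     /* 8 */
        printf("%u -> %u\n", 64u, roundup_power_2(64));   /* 64: already a power of two */
        printf("%u -> %u\n", 100u, roundup_power_2(100)); /* 128 */
        return 0;
}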
commit c3ab55dc80882b8a1309fdc198abb1ac8f02437d Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:25 2016 +0200
linux-gen: ring: created common ring implementation
Moved the scheduler ring code into a new header file so that it can also be used in other parts of the implementation.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am index 19dc0ba..b60eacb 100644 --- a/platform/linux-generic/Makefile.am +++ b/platform/linux-generic/Makefile.am @@ -151,6 +151,7 @@ noinst_HEADERS = \ ${srcdir}/include/odp_pool_internal.h \ ${srcdir}/include/odp_posix_extensions.h \ ${srcdir}/include/odp_queue_internal.h \ + ${srcdir}/include/odp_ring_internal.h \ ${srcdir}/include/odp_schedule_if.h \ ${srcdir}/include/odp_schedule_internal.h \ ${srcdir}/include/odp_schedule_ordered_internal.h \ diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h new file mode 100644 index 0000000..6a6291a --- /dev/null +++ b/platform/linux-generic/include/odp_ring_internal.h @@ -0,0 +1,111 @@ +/* Copyright (c) 2016, Linaro Limited + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef ODP_RING_INTERNAL_H_ +#define ODP_RING_INTERNAL_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <odp/api/atomic.h> +#include <odp/api/hints.h> +#include <odp_align_internal.h> + +/* Ring empty, not a valid data value. */ +#define RING_EMPTY ((uint32_t)-1) + +/* Ring of uint32_t data + * + * Ring stores head and tail counters. Ring indexes are formed from these + * counters with a mask (mask = ring_size - 1), which requires that ring size + * must be a power of two. Also ring size must be larger than the maximum + * number of data items that will be stored on it (there's no check against + * overwriting). */ +typedef struct { + /* Writer head and tail */ + odp_atomic_u32_t w_head; + odp_atomic_u32_t w_tail; + uint8_t pad[ODP_CACHE_LINE_SIZE - (2 * sizeof(odp_atomic_u32_t))]; + + /* Reader head and tail */ + odp_atomic_u32_t r_head; + odp_atomic_u32_t r_tail; + + uint32_t data[0]; +} ring_t ODP_ALIGNED_CACHE; + +/* Initialize ring */ +static inline void ring_init(ring_t *ring) +{ + odp_atomic_init_u32(&ring->w_head, 0); + odp_atomic_init_u32(&ring->w_tail, 0); + odp_atomic_init_u32(&ring->r_head, 0); + odp_atomic_init_u32(&ring->r_tail, 0); +} + +/* Dequeue data from the ring head */ +static inline uint32_t ring_deq(ring_t *ring, uint32_t mask) +{ + uint32_t head, tail, new_head; + uint32_t data; + + head = odp_atomic_load_u32(&ring->r_head); + + /* Move reader head. This thread owns data at the new head. */ + do { + tail = odp_atomic_load_u32(&ring->w_tail); + + if (head == tail) + return RING_EMPTY; + + new_head = head + 1; + + } while (odp_unlikely(odp_atomic_cas_acq_u32(&ring->r_head, &head, + new_head) == 0)); + + /* Read queue index */ + data = ring->data[new_head & mask]; + + /* Wait until other readers have updated the tail */ + while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) != head)) + odp_cpu_pause(); + + /* Now update the reader tail */ + odp_atomic_store_rel_u32(&ring->r_tail, new_head); + + return data; +} + +/* Enqueue data into the ring tail */ +static inline void ring_enq(ring_t *ring, uint32_t mask, uint32_t data) +{ + uint32_t old_head, new_head; + + /* Reserve a slot in the ring for writing */ + old_head = odp_atomic_fetch_inc_u32(&ring->w_head); + new_head = old_head + 1; + + /* Ring is full. Wait for the last reader to finish. 
*/ + while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) == new_head)) + odp_cpu_pause(); + + /* Write data */ + ring->data[new_head & mask] = data; + + /* Wait until other writers have updated the tail */ + while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head)) + odp_cpu_pause(); + + /* Now update the writer tail */ + odp_atomic_store_rel_u32(&ring->w_tail, new_head); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c index 86b1cec..dfc9555 100644 --- a/platform/linux-generic/odp_schedule.c +++ b/platform/linux-generic/odp_schedule.c @@ -17,12 +17,12 @@ #include <odp/api/hints.h> #include <odp/api/cpu.h> #include <odp/api/thrmask.h> -#include <odp/api/atomic.h> #include <odp_config_internal.h> #include <odp_align_internal.h> #include <odp_schedule_internal.h> #include <odp_schedule_ordered_internal.h> #include <odp/api/sync.h> +#include <odp_ring_internal.h>
/* Number of priority levels */ #define NUM_PRIO 8 @@ -82,9 +82,6 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) && /* Priority queue empty, not a valid queue index. */ #define PRIO_QUEUE_EMPTY ((uint32_t)-1)
-/* Ring empty, not a valid index. */ -#define RING_EMPTY ((uint32_t)-1) - /* For best performance, the number of queues should be a power of two. */ ODP_STATIC_ASSERT(ODP_VAL_IS_POWER_2(ODP_CONFIG_QUEUES), "Number_of_queues_is_not_power_of_two"); @@ -111,28 +108,10 @@ ODP_STATIC_ASSERT((8 * sizeof(pri_mask_t)) >= QUEUES_PER_PRIO, /* Start of named groups in group mask arrays */ #define SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)
-/* Scheduler ring - * - * Ring stores head and tail counters. Ring indexes are formed from these - * counters with a mask (mask = ring_size - 1), which requires that ring size - * must be a power of two. */ -typedef struct { - /* Writer head and tail */ - odp_atomic_u32_t w_head; - odp_atomic_u32_t w_tail; - uint8_t pad[ODP_CACHE_LINE_SIZE - (2 * sizeof(odp_atomic_u32_t))]; - - /* Reader head and tail */ - odp_atomic_u32_t r_head; - odp_atomic_u32_t r_tail; - - uint32_t data[0]; -} sched_ring_t ODP_ALIGNED_CACHE; - /* Priority queue */ typedef struct { /* Ring header */ - sched_ring_t ring; + ring_t ring;
/* Ring data: queue indexes */ uint32_t queue_index[PRIO_QUEUE_RING_SIZE]; @@ -142,7 +121,7 @@ typedef struct { /* Packet IO queue */ typedef struct { /* Ring header */ - sched_ring_t ring; + ring_t ring;
/* Ring data: pktio poll command indexes */ uint32_t cmd_index[PKTIO_RING_SIZE]; @@ -205,71 +184,6 @@ __thread sched_local_t sched_local; /* Function prototypes */ static inline void schedule_release_context(void);
-static void ring_init(sched_ring_t *ring) -{ - odp_atomic_init_u32(&ring->w_head, 0); - odp_atomic_init_u32(&ring->w_tail, 0); - odp_atomic_init_u32(&ring->r_head, 0); - odp_atomic_init_u32(&ring->r_tail, 0); -} - -/* Dequeue data from the ring head */ -static inline uint32_t ring_deq(sched_ring_t *ring, uint32_t mask) -{ - uint32_t head, tail, new_head; - uint32_t data; - - head = odp_atomic_load_u32(&ring->r_head); - - /* Move reader head. This thread owns data at the new head. */ - do { - tail = odp_atomic_load_u32(&ring->w_tail); - - if (head == tail) - return RING_EMPTY; - - new_head = head + 1; - - } while (odp_unlikely(odp_atomic_cas_acq_u32(&ring->r_head, &head, - new_head) == 0)); - - /* Read queue index */ - data = ring->data[new_head & mask]; - - /* Wait until other readers have updated the tail */ - while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) != head)) - odp_cpu_pause(); - - /* Now update the reader tail */ - odp_atomic_store_rel_u32(&ring->r_tail, new_head); - - return data; -} - -/* Enqueue data into the ring tail */ -static inline void ring_enq(sched_ring_t *ring, uint32_t mask, uint32_t data) -{ - uint32_t old_head, new_head; - - /* Reserve a slot in the ring for writing */ - old_head = odp_atomic_fetch_inc_u32(&ring->w_head); - new_head = old_head + 1; - - /* Ring is full. Wait for the last reader to finish. */ - while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) == new_head)) - odp_cpu_pause(); - - /* Write data */ - ring->data[new_head & mask] = data; - - /* Wait until other writers have updated the tail */ - while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head)) - odp_cpu_pause(); - - /* Now update the writer tail */ - odp_atomic_store_rel_u32(&ring->w_tail, new_head); -} - static void sched_local_init(void) { memset(&sched_local, 0, sizeof(sched_local_t)); @@ -347,7 +261,7 @@ static int schedule_term_global(void)
for (i = 0; i < NUM_PRIO; i++) { for (j = 0; j < QUEUES_PER_PRIO; j++) { - sched_ring_t *ring = &sched->prio_q[i][j].ring; + ring_t *ring = &sched->prio_q[i][j].ring; uint32_t qi;
while ((qi = ring_deq(ring, PRIO_QUEUE_MASK)) != @@ -541,7 +455,7 @@ static void schedule_release_atomic(void) if (qi != PRIO_QUEUE_EMPTY && sched_local.num == 0) { int prio = sched->queue[qi].prio; int queue_per_prio = sched->queue[qi].queue_per_prio; - sched_ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring; + ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring;
/* Release current atomic queue */ ring_enq(ring, PRIO_QUEUE_MASK, qi); @@ -636,7 +550,7 @@ static int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[], int grp; int ordered; odp_queue_t handle; - sched_ring_t *ring; + ring_t *ring;
if (id >= QUEUES_PER_PRIO) id = 0; @@ -747,7 +661,7 @@ static int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
for (i = 0; i < PKTIO_CMD_QUEUES; i++, id = ((id + 1) & PKTIO_CMD_QUEUE_MASK)) { - sched_ring_t *ring; + ring_t *ring; uint32_t cmd_index; pktio_cmd_t *cmd;
@@ -1051,7 +965,7 @@ static int schedule_sched_queue(uint32_t queue_index) { int prio = sched->queue[queue_index].prio; int queue_per_prio = sched->queue[queue_index].queue_per_prio; - sched_ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring; + ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring;
sched_local.ignore_ordered_context = 1;
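A rough usage sketch of the relocated ring API follows (illustrative only; it assumes odp_ring_internal.h is visible, as it would be inside the implementation, and the wrapper struct and values are invented for the example, mirroring how the scheduler embeds ring_t ahead of its data array):

#include <odp_ring_internal.h>

#define EXAMPLE_RING_SIZE 8                       /* must be a power of two */
#define EXAMPLE_RING_MASK (EXAMPLE_RING_SIZE - 1)

/* ring_t ends with a zero-length data[] member, so storage is provided by
 * the array placed directly after the header, as in the scheduler */
static struct {
        ring_t   hdr;
        uint32_t data[EXAMPLE_RING_SIZE];
} example_ring;

static void ring_example(void)
{
        uint32_t val;

        ring_init(&example_ring.hdr);

        ring_enq(&example_ring.hdr, EXAMPLE_RING_MASK, 42);
        ring_enq(&example_ring.hdr, EXAMPLE_RING_MASK, 43);

        val = ring_deq(&example_ring.hdr, EXAMPLE_RING_MASK); /* 42 */
        val = ring_deq(&example_ring.hdr, EXAMPLE_RING_MASK); /* 43 */
        val = ring_deq(&example_ring.hdr, EXAMPLE_RING_MASK); /* RING_EMPTY */
        (void)val;
}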
commit 1ecf0ad51eac59ffb71352573aa99146da2c0649 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:24 2016 +0200
linux-gen: pktio: do not free zero packets
In some error cases, the netmap and dpdk pktios were calling odp_packet_free_multi with zero packets. Moved the existing error check so that the free call is skipped when there are no packets.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c index 11f3509..0eb025a 100644 --- a/platform/linux-generic/pktio/dpdk.c +++ b/platform/linux-generic/pktio/dpdk.c @@ -956,10 +956,12 @@ static int dpdk_send(pktio_entry_t *pktio_entry, int index, rte_pktmbuf_free(tx_mbufs[i]); }
- odp_packet_free_multi(pkt_table, tx_pkts); - - if (odp_unlikely(tx_pkts == 0 && __odp_errno != 0)) - return -1; + if (odp_unlikely(tx_pkts == 0)) { + if (__odp_errno != 0) + return -1; + } else { + odp_packet_free_multi(pkt_table, tx_pkts); + }
return tx_pkts; } diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c index 412beec..c1cdf72 100644 --- a/platform/linux-generic/pktio/netmap.c +++ b/platform/linux-generic/pktio/netmap.c @@ -830,10 +830,12 @@ static int netmap_send(pktio_entry_t *pktio_entry, int index, if (!pkt_nm->lockless_tx) odp_ticketlock_unlock(&pkt_nm->tx_desc_ring[index].s.lock);
- odp_packet_free_multi(pkt_table, nb_tx); - - if (odp_unlikely(nb_tx == 0 && __odp_errno != 0)) - return -1; + if (odp_unlikely(nb_tx == 0)) { + if (__odp_errno != 0) + return -1; + } else { + odp_packet_free_multi(pkt_table, nb_tx); + }
return nb_tx; }
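The guard adopted in both drivers boils down to the following pattern; this is a simplified, hypothetical helper rather than the literal driver code, and it assumes it sits in a pktio driver file where odp_unlikely() and the internal __odp_errno are already visible.

static int send_result(odp_packet_t pkt_table[], int sent)
{
        if (odp_unlikely(sent == 0)) {
                if (__odp_errno != 0)
                        return -1; /* error: caller keeps ownership of all packets */

                return 0;          /* nothing sent, nothing to free */
        }

        /* Free only the packets that were actually transmitted; never call
         * odp_packet_free_multi() with a zero count */
        odp_packet_free_multi(pkt_table, sent);

        return sent;
}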
commit 64c06865788ad5c58af3c3d42d857a7ceb9f6ab9 Author: Petri Savolainen petri.savolainen@nokia.com Date: Mon Nov 21 16:53:23 2016 +0200
linux-gen: ipc: disable build of ipc pktio
The IPC pktio implementation depends heavily on pool internals. Its build is disabled due to the pool re-implementation. IPC should be re-implemented with a cleaner internal interface towards the pool and shm.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Reviewed-and-tested-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/pktio/ipc.c b/platform/linux-generic/pktio/ipc.c index c1f28db..0e99c6e 100644 --- a/platform/linux-generic/pktio/ipc.c +++ b/platform/linux-generic/pktio/ipc.c @@ -3,7 +3,7 @@ * * SPDX-License-Identifier: BSD-3-Clause */ - +#ifdef _ODP_PKTIO_IPC #include <odp_packet_io_ipc_internal.h> #include <odp_debug_internal.h> #include <odp_packet_io_internal.h> @@ -795,3 +795,4 @@ const pktio_if_ops_t ipc_pktio_ops = { .pktin_ts_from_ns = NULL, .config = NULL }; +#endif
-----------------------------------------------------------------------
Summary of changes:
 example/generator/odp_generator.c | 2 +-
 include/odp/api/spec/packet.h | 9 +-
 include/odp/api/spec/pool.h | 6 +
 platform/linux-generic/Makefile.am | 1 +
 .../include/odp/api/plat/packet_types.h | 6 +-
 .../include/odp/api/plat/pool_types.h | 6 -
 .../linux-generic/include/odp_align_internal.h | 34 +-
 .../linux-generic/include/odp_buffer_inlines.h | 167 +--
 .../linux-generic/include/odp_buffer_internal.h | 120 +-
 .../include/odp_classification_datamodel.h | 2 +-
 .../linux-generic/include/odp_config_internal.h | 55 +-
 .../linux-generic/include/odp_packet_internal.h | 87 +-
 platform/linux-generic/include/odp_pool_internal.h | 289 +---
 platform/linux-generic/include/odp_ring_internal.h | 176 +++
 .../linux-generic/include/odp_timer_internal.h | 4 -
 platform/linux-generic/odp_buffer.c | 22 +-
 platform/linux-generic/odp_classification.c | 25 +-
 platform/linux-generic/odp_crypto.c | 12 +-
 platform/linux-generic/odp_packet.c | 717 ++++++++--
 platform/linux-generic/odp_packet_io.c | 2 +-
 platform/linux-generic/odp_pool.c | 1487 ++++++++------------
 platform/linux-generic/odp_queue.c | 4 +-
 platform/linux-generic/odp_schedule.c | 102 +-
 platform/linux-generic/odp_schedule_ordered.c | 4 +-
 platform/linux-generic/odp_timer.c | 3 +-
 platform/linux-generic/pktio/dpdk.c | 10 +-
 platform/linux-generic/pktio/ipc.c | 3 +-
 platform/linux-generic/pktio/loop.c | 2 +-
 platform/linux-generic/pktio/netmap.c | 14 +-
 platform/linux-generic/pktio/socket.c | 42 +-
 platform/linux-generic/pktio/socket_mmap.c | 10 +-
 test/common_plat/performance/odp_crypto.c | 47 +-
 test/common_plat/performance/odp_pktio_perf.c | 2 +-
 test/common_plat/performance/odp_scheduling.c | 8 +-
 test/common_plat/validation/api/buffer/buffer.c | 113 +-
 test/common_plat/validation/api/crypto/crypto.c | 26 +-
 test/common_plat/validation/api/packet/packet.c | 96 +-
 test/common_plat/validation/api/pktio/pktio.c | 47 +-
 38 files changed, 1859 insertions(+), 1903 deletions(-)
 create mode 100644 platform/linux-generic/include/odp_ring_internal.h
hooks/post-receive