14 #include <sys/queue.h>    19 #include <rte_config.h>    23 #include <rte_compat.h>    30 #define RTE_LPM_NAMESIZE                32    33 #define RTE_LPM_MAX_DEPTH               32    36 #define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)    39 #define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256    42 #define RTE_LPM_MAX_TBL8_NUM_GROUPS         (1 << 24)    45 #define RTE_LPM_TBL8_NUM_GROUPS         256    48 #define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \    49                     RTE_LPM_TBL8_GROUP_NUM_ENTRIES)    52 #if defined(RTE_LIBRTE_LPM_DEBUG)    53 #define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \    54     if (cond) return (retval);                \    57 #define RTE_LPM_RETURN_IF_TRUE(cond, retval)    61 #define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000    64 #define RTE_LPM_LOOKUP_SUCCESS          0x01000000    66 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN    69 struct rte_lpm_tbl_entry_v20 {
    89     uint8_t valid_group :1;
    94 struct rte_lpm_tbl_entry {
   100     uint32_t next_hop    :24;
   110     uint32_t valid_group :1;
   116 struct rte_lpm_tbl_entry_v20 {
   118     uint8_t valid_group :1;
   127 struct rte_lpm_tbl_entry {
   129     uint32_t valid_group :1;
   131     uint32_t next_hop    :24;
   145 struct rte_lpm_rule_v20 {
   150 struct rte_lpm_rule {
   156 struct rte_lpm_rule_info {
   169     struct rte_lpm_tbl_entry_v20 tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
   171     struct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
   173     struct rte_lpm_rule_v20 rules_tbl[]
   181     uint32_t number_tbl8s; 
   185     struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
   187     struct rte_lpm_tbl_entry *tbl8; 
   188     struct rte_lpm_rule *rules_tbl; 
   /**
    * Create an LPM object (legacy v2.0 ABI variant).
    *
    * @param name       Unique name identifying the LPM table.
    * @param socket_id  NUMA socket ID to allocate memory on.
    * @param max_rules  Maximum number of rules the table may hold.
    * @param flags      Currently unused flags argument.
    *
    * NOTE(review): the return-type line is not visible in this chunk —
    * presumably returns a pointer to the new object or NULL on error;
    * confirm against the full header.
    */
   214 rte_lpm_create_v20(
const char *name, 
int socket_id, 
int max_rules, 
int flags);
   /**
    * Create an LPM object (v16.04 ABI variant).
    *
    * NOTE(review): the parameter list is truncated in this chunk — per the
    * rte_lpm_create() signature summarized later in this file it takes a
    * const struct rte_lpm_config * as the final argument; confirm.
    */
   216 rte_lpm_create_v1604(
const char *name, 
int socket_id,
   /** Find an existing LPM object by name (v2.0 ABI variant). */
   232 rte_lpm_find_existing_v20(
const char *name);
   /** Find an existing LPM object by name (v16.04 ABI variant). */
   234 rte_lpm_find_existing_v1604(
const char *name);
   /** Free all memory associated with an LPM object (v2.0 ABI variant). */
   247 rte_lpm_free_v20(
struct rte_lpm_v20 *lpm);
   /** Free all memory associated with an LPM object (v16.04 ABI variant). */
   249 rte_lpm_free_v1604(
struct rte_lpm *lpm);
   /**
    * Add a rule (IPv4 prefix of given depth -> next_hop) to the LPM table.
    * Per the signature summary later in this file, returns int (0 on
    * success, negative on error).
    */
   266 rte_lpm_add(
struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
   /* ABI-versioned variants of rte_lpm_add(); trailing next-hop parameter
    * lines are not visible in this chunk. */
   268 rte_lpm_add_v20(
struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
   271 rte_lpm_add_v1604(
struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
   /* Check whether a rule for (ip, depth) exists; versioned variants.
    * Trailing *next_hop out-parameter lines are not visible in this chunk. */
   293 rte_lpm_is_rule_present_v20(
struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
   296 rte_lpm_is_rule_present_v1604(
struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
   /* Delete the rule matching (ip, depth); versioned variants. */
   314 rte_lpm_delete_v20(
struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth);
   316 rte_lpm_delete_v1604(
struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
   /* Remove every rule from the table; versioned variants. */
   327 rte_lpm_delete_all_v20(
struct rte_lpm_v20 *lpm);
   329 rte_lpm_delete_all_v1604(
struct rte_lpm *lpm);
   346     unsigned tbl24_index = (ip >> 8);
   348     const uint32_t *ptbl;
   351     RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
   354     ptbl = (
const uint32_t *)(&lpm->tbl24[tbl24_index]);
   358     if (
unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
   359             RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
   361         unsigned tbl8_index = (uint8_t)ip +
   362                 (((uint32_t)tbl_entry & 0x00FFFFFF) *
   363                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
   365         ptbl = (
const uint32_t *)&lpm->tbl8[tbl8_index];
   369     *next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
   393 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \   394         rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)   397 rte_lpm_lookup_bulk_func(
const struct rte_lpm *lpm, 
const uint32_t *ips,
   398         uint32_t *next_hops, 
const unsigned n)
   401     unsigned tbl24_indexes[n];
   402     const uint32_t *ptbl;
   405     RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
   406             (next_hops == NULL)), -EINVAL);
   408     for (i = 0; i < n; i++) {
   409         tbl24_indexes[i] = ips[i] >> 8;
   412     for (i = 0; i < n; i++) {
   414         ptbl = (
const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
   415         next_hops[i] = *ptbl;
   418         if (
unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
   419                 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
   421             unsigned tbl8_index = (uint8_t)ips[i] +
   422                     (((uint32_t)next_hops[i] & 0x00FFFFFF) *
   423                      RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
   425             ptbl = (
const uint32_t *)&lpm->tbl8[tbl8_index];
   426             next_hops[i] = *ptbl;
   433 #define  RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)   458 #if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)   459 #include "rte_lpm_neon.h"   460 #elif defined(RTE_ARCH_PPC_64)   461 #include "rte_lpm_altivec.h"   463 #include "rte_lpm_sse.h" 
void rte_lpm_free(struct rte_lpm *lpm)
 
struct rte_lpm * rte_lpm_create(const char *name, int socket_id, const struct rte_lpm_config *config)
 
void rte_lpm_delete_all(struct rte_lpm *lpm)
 
#define RTE_LPM_MAX_DEPTH
 
static int rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
 
static void rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], uint32_t defv)
 
int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 
#define __rte_cache_aligned
 
#define RTE_LPM_LOOKUP_SUCCESS
 
int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop)
 
int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop)
 
struct rte_lpm * rte_lpm_find_existing(const char *name)