DPDK 19.08.0-rc0
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_compat.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>

#ifdef __cplusplus
extern "C" {
#endif
/*
 * Packet Offload Features Flags. They also carry packet type information.
 * These bits are a critical resource, shared between RX and TX, so be
 * cautious with any change.
 *
 * - RX flags start at bit position zero, and new flags get added to the
 *   left of previously defined flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags
 *   get added to the right of the previously defined flags, i.e. they
 *   should count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */

#define PKT_RX_VLAN (1ULL << 0)

#define PKT_RX_RSS_HASH (1ULL << 1)
#define PKT_RX_FDIR (1ULL << 2)
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)

#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)

#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))

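Because the IP and L4 checksum states are two-bit fields, they must be compared against the corresponding *_MASK value rather than tested one bit at a time. Below is a minimal sketch of how an application might act on the L4 state of a received mbuf; the drop-on-BAD policy and the example_ helper name are illustrative assumptions, not part of the API.

/* Classify the L4 checksum state of a received mbuf: 0 = pass up the
 * stack (verified, or left for software verification), -1 = drop. */
static int
example_check_rx_l4_csum(const struct rte_mbuf *m)
{
        switch (m->ol_flags & PKT_RX_L4_CKSUM_MASK) {
        case PKT_RX_L4_CKSUM_GOOD:
                return 0;               /* verified by hardware */
        case PKT_RX_L4_CKSUM_UNKNOWN:
        case PKT_RX_L4_CKSUM_NONE:
                return 0;               /* verify in software if needed */
        case PKT_RX_L4_CKSUM_BAD:
        default:
                return -1;              /* checksum is wrong, drop */
        }
}
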
#define PKT_RX_IEEE1588_PTP (1ULL << 9)
#define PKT_RX_IEEE1588_TMST (1ULL << 10)
#define PKT_RX_FDIR_ID (1ULL << 13)
#define PKT_RX_FDIR_FLX (1ULL << 14)
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)

#define PKT_RX_LRO (1ULL << 16)

#define PKT_RX_TIMESTAMP (1ULL << 17)

#define PKT_RX_SEC_OFFLOAD (1ULL << 18)

#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

#define PKT_RX_QINQ (1ULL << 20)

#define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))

#define PKT_RX_OUTER_L4_CKSUM_UNKNOWN 0
#define PKT_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
#define PKT_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
#define PKT_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))

/* add new RX flags here */

/* add new TX flags here */

#define PKT_TX_METADATA (1ULL << 40)

#define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41)

#define PKT_TX_UDP_SEG (1ULL << 42)

#define PKT_TX_SEC_OFFLOAD (1ULL << 43)

#define PKT_TX_MACSEC (1ULL << 44)

#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)

#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)

#define PKT_TX_TUNNEL_IP (0xDULL << 45)

#define PKT_TX_TUNNEL_UDP (0xEULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)

#define PKT_TX_QINQ (1ULL << 49)
/* this old name is deprecated */
#define PKT_TX_QINQ_PKT PKT_TX_QINQ

#define PKT_TX_TCP_SEG (1ULL << 50)

#define PKT_TX_IEEE1588_TMST (1ULL << 51)
#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
#define PKT_TX_TCP_CKSUM (1ULL << 52)
#define PKT_TX_SCTP_CKSUM (2ULL << 52)
#define PKT_TX_UDP_CKSUM (3ULL << 52)
#define PKT_TX_L4_MASK (3ULL << 52)
#define PKT_TX_IP_CKSUM (1ULL << 54)

#define PKT_TX_IPV4 (1ULL << 55)

#define PKT_TX_IPV6 (1ULL << 56)

#define PKT_TX_VLAN (1ULL << 57)
/* this old name is deprecated */
#define PKT_TX_VLAN_PKT PKT_TX_VLAN

#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)

#define PKT_TX_OUTER_IPV4 (1ULL << 59)

#define PKT_TX_OUTER_IPV6 (1ULL << 60)

#define PKT_TX_OFFLOAD_MASK (    \
        PKT_TX_OUTER_IPV6 |      \
        PKT_TX_OUTER_IPV4 |      \
        PKT_TX_OUTER_IP_CKSUM |  \
        PKT_TX_VLAN_PKT |        \
        PKT_TX_IPV6 |            \
        PKT_TX_IPV4 |            \
        PKT_TX_IP_CKSUM |        \
        PKT_TX_L4_MASK |         \
        PKT_TX_IEEE1588_TMST |   \
        PKT_TX_TCP_SEG |         \
        PKT_TX_QINQ_PKT |        \
        PKT_TX_TUNNEL_MASK |     \
        PKT_TX_MACSEC |          \
        PKT_TX_SEC_OFFLOAD |     \
        PKT_TX_UDP_SEG |         \
        PKT_TX_OUTER_UDP_CKSUM | \
        PKT_TX_METADATA)

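To use these flags on transmit, an application sets the PKT_TX_* bits it needs and fills in the mbuf length fields those bits imply (l2_len, l3_len, and so on, defined further down in struct rte_mbuf). A minimal sketch for hardware IPv4 and TCP checksum offload follows; the 14/20 header lengths assume an untagged Ethernet frame and an IPv4 header without options. Note that with PKT_TX_TCP_CKSUM the TCP checksum field must first be seeded with the pseudo-header checksum, e.g. via rte_ipv4_phdr_cksum().

/* Ask the NIC to compute the IPv4 header and TCP checksums. The PMD
 * uses l2_len/l3_len to locate the headers inside the frame. */
static void
example_request_ipv4_tcp_csum(struct rte_mbuf *m)
{
        m->l2_len = 14;         /* Ethernet header, no VLAN tag */
        m->l3_len = 20;         /* IPv4 header without options */
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}
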
#define EXT_ATTACHED_MBUF (1ULL << 61)

#define IND_ATTACHED_MBUF (1ULL << 62)

#define RTE_MBUF_PRIV_ALIGN 8

const char *rte_get_rx_ol_flag_name(uint64_t mask);

int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

const char *rte_get_tx_ol_flag_name(uint64_t mask);

int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
        (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* define a set of marker types that can be used to refer to set points in the
 * mbuf */
__extension__
typedef void *MARKER[0];
__extension__
typedef uint8_t MARKER8[0];
__extension__
typedef uint64_t MARKER64[0];

/**
 * Structure to hold the hierarchical scheduler fields carried in
 * mbuf->hash.sched.
 */
struct rte_mbuf_sched {
        uint32_t queue_id;      /**< Queue ID. */
        uint8_t traffic_class;
        /**< Traffic class ID. Traffic class 0 is the highest priority. */
        uint8_t color;          /**< Color. @see enum rte_color. */
        uint16_t reserved;      /**< Reserved. */
};

/**
 * enum for the tx_offload bit-field lengths and offsets.
 */
enum {
        RTE_MBUF_L2_LEN_BITS = 7,
        RTE_MBUF_L3_LEN_BITS = 9,
        RTE_MBUF_L4_LEN_BITS = 8,
        RTE_MBUF_TSO_SEGSZ_BITS = 16,
        RTE_MBUF_OUTL3_LEN_BITS = 9,
        RTE_MBUF_OUTL2_LEN_BITS = 7,
        RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
                RTE_MBUF_L2_LEN_BITS -
                RTE_MBUF_L3_LEN_BITS -
                RTE_MBUF_L4_LEN_BITS -
                RTE_MBUF_TSO_SEGSZ_BITS -
                RTE_MBUF_OUTL3_LEN_BITS -
                RTE_MBUF_OUTL2_LEN_BITS,
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        RTE_MBUF_L2_LEN_OFS =
                sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
        RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
        RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
        RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
        RTE_MBUF_OUTL3_LEN_OFS =
                RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
        RTE_MBUF_OUTL2_LEN_OFS =
                RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
        RTE_MBUF_TXOFLD_UNUSED_OFS =
                RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
#else
        RTE_MBUF_L2_LEN_OFS = 0,
        RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
        RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
        RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
        RTE_MBUF_OUTL3_LEN_OFS =
                RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
        RTE_MBUF_OUTL2_LEN_OFS =
                RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
        RTE_MBUF_TXOFLD_UNUSED_OFS =
                RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
#endif
};

/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
        MARKER cacheline0;

        void *buf_addr;           /**< Virtual address of segment buffer. */
        /**
         * Physical address of segment buffer.
         * Force alignment to 8-bytes, so as to ensure we have the exact
         * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
         * working on vector drivers easier.
         */
        RTE_STD_C11
        union {
                rte_iova_t buf_iova;
                rte_iova_t buf_physaddr; /**< deprecated */
        } __rte_aligned(sizeof(rte_iova_t));

        /* next 8 bytes are initialised on RX descriptor rearm */
        MARKER64 rearm_data;
        uint16_t data_off;

        /**
         * Reference counter. Its size should at least equal to the size
         * of port field (16 bits), to support zero-copy broadcast.
         * It should only be accessed using the following functions:
         * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
         * rte_mbuf_refcnt_set(). Whether these functions are atomic or
         * non-atomic is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC
         * config option.
         */
        RTE_STD_C11
        union {
                rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
                uint16_t refcnt;              /**< Non-atomically accessed refcnt */
        };
        uint16_t nb_segs;         /**< Number of segments. */

        /** Input port (16 bits to support more than 256 virtual ports).
         * The event eth Tx adapter uses this field to specify the output port.
         */
        uint16_t port;

        uint64_t ol_flags;        /**< Offload features. */

        /* remaining bytes are set on RX when pulling packet from descriptor */
        MARKER rx_descriptor_fields1;

        /*
         * The packet type, which is the combination of outer/inner L2, L3, L4
         * and tunnel types. The packet_type is about data really present in the
         * mbuf. Example: if vlan stripping is enabled, a received vlan packet
         * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
         * vlan is stripped from the data.
         */
        RTE_STD_C11
        union {
                uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
                struct {
                        uint32_t l2_type:4;  /**< (Outer) L2 type. */
                        uint32_t l3_type:4;  /**< (Outer) L3 type. */
                        uint32_t l4_type:4;  /**< (Outer) L4 type. */
                        uint32_t tun_type:4; /**< Tunnel type. */
                        RTE_STD_C11
                        union {
                                /** ESP next protocol type, valid if
                                 * RTE_PTYPE_TUNNEL_ESP tunnel type is set
                                 * on both Tx and Rx.
                                 */
                                uint8_t inner_esp_next_proto;
                                __extension__
                                struct {
                                        uint8_t inner_l2_type:4;
                                        /**< Inner L2 type. */
                                        uint8_t inner_l3_type:4;
                                        /**< Inner L3 type. */
                                };
                        };
                        uint32_t inner_l4_type:4; /**< Inner L4 type. */
                };
        };

        uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
        uint16_t data_len;        /**< Amount of data in segment buffer. */
        /** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */
        uint16_t vlan_tci;

        RTE_STD_C11
        union {
                union {
                        uint32_t rss;     /**< RSS hash result if RSS enabled */
                        struct {
                                union {
                                        struct {
                                                uint16_t hash;
                                                uint16_t id;
                                        };
                                        uint32_t lo;
                                        /**< Second 4 flexible bytes */
                                };
                                uint32_t hi;
                                /**< First 4 flexible bytes or FD ID, dependent
                                 * on PKT_RX_FDIR_* flag in ol_flags.
                                 */
                        } fdir;   /**< Filter identifier if FDIR enabled */
                        struct rte_mbuf_sched sched;
                        /**< Hierarchical scheduler : 8 bytes */
                        struct {
                                uint32_t reserved1;
                                uint16_t reserved2;
                                uint16_t txq;
                                /**< The event eth Tx adapter uses this field
                                 * to store Tx queue id.
                                 * @see rte_event_eth_tx_adapter_txq_set()
                                 */
                        } txadapter; /**< Eventdev ethdev Tx adapter */
                        /** User defined tags. See rte_distributor_process() */
                        uint32_t usr;
                } hash;           /**< hash information */
                struct {
                        /**
                         * Application specific metadata value
                         * for egress flow rule match.
                         * Valid if PKT_TX_METADATA is set.
                         * Located same offset as hash.rss.
                         */
                        uint32_t tx_metadata;
                        uint32_t reserved;
                };
        };

        /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */
        uint16_t vlan_tci_outer;

        uint16_t buf_len;         /**< Length of segment buffer. */

        /** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
         * are not normalized but are always the same for a given port.
         */
        uint64_t timestamp;

        /* second cache line - fields only used in slow path or on TX */
        MARKER cacheline1 __rte_cache_min_aligned;

        RTE_STD_C11
        union {
                void *userdata;   /**< Can be used for external metadata */
                uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
        };

        struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
        struct rte_mbuf *next;    /**< Next segment of scattered packet. */

        /* fields to support TX offloads */
        RTE_STD_C11
        union {
                uint64_t tx_offload;       /**< combined for easy fetch */
                __extension__
                struct {
                        uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
                        /**< L2 (MAC) Header Length for non-tunneling pkt.
                         * Outer L2+L3+L4 length for tunneling pkt.
                         */
                        uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
                        /**< L3 (IP) Header Length. */
                        uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
                        /**< L4 (TCP/UDP) Header Length. */
                        uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
                        /**< TCP TSO segment size */

                        /* fields for TX offloading of tunnels */
                        uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS;
                        /**< Outer L3 (IP) Hdr Length. */
                        uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS;
                        /**< Outer L2 (MAC) Hdr Length. */

                        /* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
                };
        };

        /** Size of the application private data. In case of an indirect
         * mbuf, it stores the direct mbuf private data size.
         */
        uint16_t priv_size;

        /** Timesync flags for use with IEEE1588. */
        uint16_t timesync;

        /** Sequence number. See also rte_reorder_insert(). */
        uint32_t seqn;

        /** Shared data for external buffer attached to mbuf. See
         * rte_pktmbuf_attach_extbuf().
         */
        struct rte_mbuf_ext_shared_info *shinfo;

} __rte_cache_aligned;

/**
 * Function typedef of callback to free externally attached buffer.
 */
typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);

/**
 * Shared data at the end of an external buffer.
 */
struct rte_mbuf_ext_shared_info {
        rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */
        void *fcb_opaque;                        /**< Free callback argument */
        rte_atomic16_t refcnt_atomic;        /**< Atomically accessed refcnt */
};

/**< Maximum number of nb_segs allowed. */
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX

/** Prefetch the first cache line of the mbuf (fields used on the RX path). */
static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
        rte_prefetch0(&m->cacheline0);
}

/** Prefetch the second cache line of the mbuf (fields used on the slow path
 * or on TX). Does nothing if the architecture cache line is larger than
 * 64 bytes, as the whole mbuf is then covered by the first prefetch.
 */
static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
        rte_prefetch0(&m->cacheline1);
#else
        RTE_SET_USED(m);
#endif
}

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

/**
 * Return the IO address of the beginning of the mbuf data.
 */
static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
        return mb->buf_iova + mb->data_off;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
        return rte_mbuf_data_iova(mb);
}

/**
 * Return the default IO address of the beginning of the mbuf data
 * (buf_iova plus the default headroom).
 */
static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
        return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
        return rte_mbuf_data_iova_default(mb);
}

/**
 * Return the mbuf owning the data buffer address of an indirect mbuf.
 */
static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
        return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

/**
 * Return address of buffer embedded in the given mbuf. The return value
 * shall be the same as mb->buf_addr if the mbuf is already initialized
 * and direct.
 */
static inline char * __rte_experimental
rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
{
        return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
}

/**
 * Return the default address of the beginning of the mbuf data.
 */
static inline char * __rte_experimental
rte_mbuf_data_addr_default(struct rte_mbuf *mb)
{
        return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
}

/**
 * Return address of buffer embedded in the given mbuf.
 *
 * @note Accessing the mempool pointer of a mbuf is expensive because it
 * is stored in the second cache line. If the mempool is known, prefer
 * rte_mbuf_buf_addr().
 */
static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
#ifdef ALLOW_EXPERIMENTAL_API
        return rte_mbuf_buf_addr(md, md->pool);
#else
        char *buffer_addr;
        buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
        return buffer_addr;
#endif
}

/**
 * Return the starting address of the private data area embedded in
 * the given mbuf.
 */
static inline void * __rte_experimental
rte_mbuf_to_priv(struct rte_mbuf *m)
{
        return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
}

/**
 * Returns TRUE if the given mbuf is cloned by mbuf indirection, FALSE
 * otherwise.
 */
#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)

/**
 * Returns TRUE if the given mbuf has an external buffer, FALSE otherwise.
 */
#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)

/**
 * Returns TRUE if the given mbuf is direct, FALSE otherwise.
 */
#define RTE_MBUF_DIRECT(mb) \
        (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))

/**
 * Private data in case of pktmbuf pool, appended after the mempool
 * structure (in private data).
 */
struct rte_pktmbuf_pool_private {
        uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
        uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

/**
 * Reads the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
        return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

/**
 * Sets an mbuf's refcnt to a defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
        rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

/**
 * Adds given value to an mbuf's refcnt and returns its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        /*
         * The atomic_add is an expensive operation, so we don't want to
         * call it in the case where we know we are the unique holder of
         * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
         * operation has to be used because concurrent accesses on the
         * reference counter can occur.
         */
        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
                ++value;
                rte_mbuf_refcnt_set(m, (uint16_t)value);
                return (uint16_t)value;
        }

        return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        m->refcnt = (uint16_t)(m->refcnt + value);
        return m->refcnt;
}

/**
 * Adds given value to an mbuf's refcnt and returns its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        return __rte_mbuf_refcnt_update(m, value);
}

/**
 * Reads the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
        return m->refcnt;
}

/**
 * Sets an mbuf's refcnt to the defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
        m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

/**
 * Reads the refcnt of an external buffer.
 */
static inline uint16_t
rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
{
        return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
}

/**
 * Set refcnt of an external buffer.
 */
static inline void
rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
        uint16_t new_value)
{
        rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
}

/**
 * Add given value to refcnt of an external buffer and return its new
 * value.
 */
static inline uint16_t
rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
        int16_t value)
{
        if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
                ++value;
                rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
                return (uint16_t)value;
        }

        return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
}

/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do {       \
        if ((m) != NULL)                        \
                rte_prefetch0(m);               \
} while (0)

/**
 * Sanity checks on an mbuf; panics if a bad mbuf is detected.
 */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

/**
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice.
 *
 * Sanity checks on an mbuf; returns an error code instead of panicking.
 */
__rte_experimental
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
                   const char **reason);

#define MBUF_RAW_ALLOC_CHECK(m) do {                    \
        RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);       \
        RTE_ASSERT((m)->next == NULL);                  \
        RTE_ASSERT((m)->nb_segs == 1);                  \
        __rte_mbuf_sanity_check(m, 0);                  \
} while (0)

/**
 * Allocate an uninitialized mbuf from mempool *mp*.
 */
static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
        struct rte_mbuf *m;

        if (rte_mempool_get(mp, (void **)&m) < 0)
                return NULL;
        MBUF_RAW_ALLOC_CHECK(m);
        return m;
}

/**
 * Put an mbuf back into its original mempool. The caller must ensure the
 * mbuf is direct and properly reinitialized (refcnt=1, next=NULL,
 * nb_segs=1), as done by rte_pktmbuf_prefree_seg().
 */
static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
        RTE_ASSERT(RTE_MBUF_DIRECT(m));
        RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
        RTE_ASSERT(m->next == NULL);
        RTE_ASSERT(m->nb_segs == 1);
        __rte_mbuf_sanity_check(m, 0);
        rte_mempool_put(m->pool, m);
}

/**
 * The packet mbuf constructor, given as a callback to
 * rte_mempool_obj_iter() or rte_mempool_create().
 */
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
                      void *m, unsigned i);

/**
 * Callback for initializing a pktmbuf pool's private data.
 */
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

/**
 * Create a mbuf pool.
 */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
        unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
        int socket_id);

/**
 * Create a mbuf pool with a given mempool ops name.
 */
struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
        unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
        int socket_id, const char *ops_name);

/**
 * Get the data room size of mbufs stored in a pktmbuf_pool.
 */
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
        struct rte_pktmbuf_pool_private *mbp_priv;

        mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
        return mbp_priv->mbuf_data_room_size;
}

/**
 * Get the application private size of mbufs stored in a pktmbuf_pool.
 */
static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
        struct rte_pktmbuf_pool_private *mbp_priv;

        mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
        return mbp_priv->mbuf_priv_size;
}

/**
 * Reset the data_off field of a packet mbuf to its default value.
 */
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
        m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
                                        (uint16_t)m->buf_len);
}

/** The invalid port value, used when the mbuf is not attached to a port. */
#define MBUF_INVALID_PORT UINT16_MAX

/**
 * Reset the fields of a packet mbuf to their default values.
 */
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
        m->next = NULL;
        m->pkt_len = 0;
        m->tx_offload = 0;
        m->vlan_tci = 0;
        m->vlan_tci_outer = 0;
        m->nb_segs = 1;
        m->port = MBUF_INVALID_PORT;

        m->ol_flags = 0;
        m->packet_type = 0;
        rte_pktmbuf_reset_headroom(m);

        m->data_len = 0;
        __rte_mbuf_sanity_check(m, 1);
}

/**
 * Allocate a new mbuf from a mempool and reset its fields.
 */
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
        struct rte_mbuf *m;
        if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
                rte_pktmbuf_reset(m);
        return m;
}

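A minimal allocation sketch, assuming the EAL is already initialized; the pool name, element count, cache size, and socket choice below are illustrative values, not recommendations.

#include <rte_lcore.h>   /* rte_socket_id() */

/* Create a pool of 8191 mbufs with the default buffer size, then take
 * one reset mbuf from it. Error handling is reduced to NULL checks. */
static struct rte_mbuf *
example_alloc_one_mbuf(void)
{
        struct rte_mempool *mp;

        mp = rte_pktmbuf_pool_create("example_pool", 8191,
                        256 /* per-lcore cache */, 0 /* priv size */,
                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (mp == NULL)
                return NULL;

        return rte_pktmbuf_alloc(mp);
}
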
/**
 * Allocate a bulk of mbufs, initialize refcnt and reset the fields to
 * default values.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
        struct rte_mbuf **mbufs, unsigned count)
{
        unsigned idx = 0;
        int rc;

        rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
        if (unlikely(rc))
                return rc;

        /* To understand duff's device on loop unwinding optimization, see
         * https://en.wikipedia.org/wiki/Duff's_device.
         * Here a while() loop is used rather than do {} while() to avoid an
         * extra check if count is zero.
         */
        switch (count % 4) {
        case 0:
                while (idx != count) {
                        MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
                        rte_pktmbuf_reset(mbufs[idx]);
                        idx++;
                        /* fall-through */
        case 3:
                        MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
                        rte_pktmbuf_reset(mbufs[idx]);
                        idx++;
                        /* fall-through */
        case 2:
                        MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
                        rte_pktmbuf_reset(mbufs[idx]);
                        idx++;
                        /* fall-through */
        case 1:
                        MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
                        rte_pktmbuf_reset(mbufs[idx]);
                        idx++;
                        /* fall-through */
                }
        }
        return 0;
}

/**
 * Initialize shared data at the end of an external buffer before attaching
 * it to a mbuf. The free callback and its argument are saved in the shared
 * data, and the buffer length is reduced accordingly.
 */
static inline struct rte_mbuf_ext_shared_info *
rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
        rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
{
        struct rte_mbuf_ext_shared_info *shinfo;
        void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
        void *addr;

        addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
                                   sizeof(uintptr_t));
        if (addr <= buf_addr)
                return NULL;

        shinfo = (struct rte_mbuf_ext_shared_info *)addr;
        shinfo->free_cb = free_cb;
        shinfo->fcb_opaque = fcb_opaque;
        rte_mbuf_ext_refcnt_set(shinfo, 1);

        *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
        return shinfo;
}

/**
 * Attach an external buffer to a mbuf.
 */
static inline void
rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
        rte_iova_t buf_iova, uint16_t buf_len,
        struct rte_mbuf_ext_shared_info *shinfo)
{
        /* mbuf should not be read-only */
        RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
        RTE_ASSERT(shinfo->free_cb != NULL);

        m->buf_addr = buf_addr;
        m->buf_iova = buf_iova;
        m->buf_len = buf_len;

        m->data_len = 0;
        m->data_off = 0;

        m->ol_flags |= EXT_ATTACHED_MBUF;
        m->shinfo = shinfo;
}

/**
 * Detach the external buffer attached to a mbuf, same as
 * rte_pktmbuf_detach().
 */
#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)

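Putting the two calls together: carve the shared-info area out of the tail of a user buffer, then attach what remains to an mbuf. A sketch assuming the buffer comes from rte_malloc() (so its IO address can be obtained with rte_mem_virt2iova() and the free callback can simply rte_free() it):

#include <rte_malloc.h>

/* Free callback: invoked when the last mbuf referencing the external
 * buffer is freed. 'opaque' is whatever was passed to the init helper. */
static void
example_ext_buf_free_cb(void *addr, void *opaque)
{
        RTE_SET_USED(opaque);
        rte_free(addr);
}

/* Attach 'buf' (length 'len', from rte_malloc()) to 'm' as external storage. */
static int
example_attach_user_buffer(struct rte_mbuf *m, void *buf, uint16_t len)
{
        struct rte_mbuf_ext_shared_info *shinfo;

        shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &len,
                        example_ext_buf_free_cb, NULL);
        if (shinfo == NULL)
                return -1;      /* buffer too small for the shared data */

        rte_pktmbuf_attach_extbuf(m, buf, rte_mem_virt2iova(buf), len, shinfo);
        return 0;
}
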
/**
 * Attach a packet mbuf to another packet mbuf. After attachment, the mbuf
 * we attached is called 'indirect', and the mbuf we attached to is called
 * 'direct'.
 */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
        RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
            rte_mbuf_refcnt_read(mi) == 1);

        if (RTE_MBUF_HAS_EXTBUF(m)) {
                rte_mbuf_ext_refcnt_update(m->shinfo, 1);
                mi->ol_flags = m->ol_flags;
                mi->shinfo = m->shinfo;
        } else {
                /* if m is not direct, get the mbuf that embeds the data */
                rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
                mi->priv_size = m->priv_size;
                mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
        }

        mi->buf_iova = m->buf_iova;
        mi->buf_addr = m->buf_addr;
        mi->buf_len = m->buf_len;

        mi->data_off = m->data_off;
        mi->data_len = m->data_len;
        mi->port = m->port;
        mi->vlan_tci = m->vlan_tci;
        mi->vlan_tci_outer = m->vlan_tci_outer;
        mi->tx_offload = m->tx_offload;
        mi->hash = m->hash;

        mi->next = NULL;
        mi->pkt_len = mi->data_len;
        mi->nb_segs = 1;
        mi->packet_type = m->packet_type;
        mi->timestamp = m->timestamp;

        __rte_mbuf_sanity_check(mi, 1);
        __rte_mbuf_sanity_check(m, 0);
}

/**
 * @internal used by rte_pktmbuf_detach().
 *
 * Decrement the reference counter of the external buffer. When the
 * reference counter becomes 0, the buffer is freed by the pre-registered
 * external callback.
 */
static inline void
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
{
        RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
        RTE_ASSERT(m->shinfo != NULL);

        if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
                m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
}

/**
 * @internal used by rte_pktmbuf_detach().
 *
 * Decrement the direct mbuf's reference counter. When the reference
 * counter becomes 0, the direct mbuf is freed.
 */
static inline void
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
{
        struct rte_mbuf *md;

        RTE_ASSERT(RTE_MBUF_CLONED(m));

        md = rte_mbuf_from_indirect(m);

        if (rte_mbuf_refcnt_update(md, -1) == 0) {
                md->next = NULL;
                md->nb_segs = 1;
                rte_mbuf_refcnt_set(md, 1);
                rte_mbuf_raw_free(md);
        }
}

/**
 * Detach a packet mbuf from an external buffer or a direct buffer:
 *  - decrement refcnt and free the external/direct buffer if refcnt
 *    becomes zero;
 *  - restore the original mbuf address and length values;
 *  - reset pktmbuf data and data_len to their default values.
 */
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len;
        uint16_t priv_size;

        if (RTE_MBUF_HAS_EXTBUF(m))
                __rte_pktmbuf_free_extbuf(m);
        else
                __rte_pktmbuf_free_direct(m);

        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
        buf_len = rte_pktmbuf_data_room_size(mp);

        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        rte_pktmbuf_reset_headroom(m);
        m->data_len = 0;
        m->ol_flags = 0;
}

/**
 * Decrease reference counter and unlink a mbuf segment
 *
 * This function does the same as a free, except that it does not return
 * the segment to its pool. It decreases the reference counter, and if it
 * reaches 0, it is detached from its parent for an indirect mbuf.
 */
static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 0);

        if (likely(rte_mbuf_refcnt_read(m) == 1)) {

                if (!RTE_MBUF_DIRECT(m))
                        rte_pktmbuf_detach(m);

                if (m->next != NULL) {
                        m->next = NULL;
                        m->nb_segs = 1;
                }

                return m;

        } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

                if (!RTE_MBUF_DIRECT(m))
                        rte_pktmbuf_detach(m);

                if (m->next != NULL) {
                        m->next = NULL;
                        m->nb_segs = 1;
                }
                rte_mbuf_refcnt_set(m, 1);

                return m;
        }
        return NULL;
}

/**
 * Free a segment of a packet mbuf into its original mempool.
 */
static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
        m = rte_pktmbuf_prefree_seg(m);
        if (likely(m != NULL))
                rte_mbuf_raw_free(m);
}

/**
 * Free a packet mbuf back into its original mempool.
 *
 * Free an mbuf, and all its segments in case of chained buffers. Each
 * segment is added back into its original mempool.
 */
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
        struct rte_mbuf *m_next;

        if (m != NULL)
                __rte_mbuf_sanity_check(m, 1);

        while (m != NULL) {
                m_next = m->next;
                rte_pktmbuf_free_seg(m);
                m = m_next;
        }
}

/**
 * Creates a "clone" of the given packet mbuf.
 *
 * Walks through all segments of the given packet mbuf, and for each of
 * them:
 *  - creates a new packet mbuf from the given pool;
 *  - attaches the newly created mbuf to the segment.
 * Then updates pkt_len and nb_segs of the "clone" packet mbuf to match
 * the values of the original packet mbuf.
 */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
        struct rte_mempool *mp)
{
        struct rte_mbuf *mc, *mi, **prev;
        uint32_t pktlen;
        uint16_t nseg;

        if (unlikely((mc = rte_pktmbuf_alloc(mp)) == NULL))
                return NULL;

        mi = mc;
        prev = &mi->next;
        pktlen = md->pkt_len;
        nseg = 0;

        do {
                nseg++;
                rte_pktmbuf_attach(mi, md);
                *prev = mi;
                prev = &mi->next;
        } while ((md = md->next) != NULL &&
            (mi = rte_pktmbuf_alloc(mp)) != NULL);

        *prev = NULL;
        mc->nb_segs = nseg;
        mc->pkt_len = pktlen;

        /* Allocation of new indirect segment failed */
        if (unlikely(mi == NULL)) {
                rte_pktmbuf_free(mc);
                return NULL;
        }

        __rte_mbuf_sanity_check(mc, 1);
        return mc;
}

/**
 * Adds given value to the refcnt of all packet mbuf segments.
 */
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
        __rte_mbuf_sanity_check(m, 1);

        do {
                rte_mbuf_refcnt_update(m, v);
        } while ((m = m->next) != NULL);
}

/**
 * Get the headroom in a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 0);
        return m->data_off;
}

/**
 * Get the tailroom of a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 0);
        return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
                          m->data_len);
}

/**
 * Get the last segment of the packet.
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 1);
        while (m->next != NULL)
                m = m->next;
        return m;
}

/**
 * A macro that points to an offset into the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this macro, the
 * user must ensure that the first segment is large enough to accommodate
 * its data.
 */
#define rte_pktmbuf_mtod_offset(m, t, o) \
        ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/**
 * A macro that points to the start of the data in the mbuf.
 */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

/**
 * A macro that returns the IO address of an offset into the mbuf data.
 */
#define rte_pktmbuf_iova_offset(m, o) \
        (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
        rte_pktmbuf_iova_offset(m, o)

/**
 * A macro that returns the IO address of the start of the mbuf data.
 */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

/**
 * A macro that returns the length of the packet (sum of data_len of all
 * segments).
 */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/**
 * A macro that returns the length of the segment.
 */
#define rte_pktmbuf_data_len(m) ((m)->data_len)

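rte_pktmbuf_mtod() is the usual way to view the frame data as protocol headers. A sketch that checks the destination MAC of a received frame; it assumes the Ethernet header lies entirely in the first segment:

#include <rte_ether.h>

/* Return 1 if the frame is addressed to 'mac', 0 otherwise. */
static int
example_dst_mac_matches(const struct rte_mbuf *m,
                        const struct rte_ether_addr *mac)
{
        const struct rte_ether_hdr *eth;

        if (rte_pktmbuf_data_len(m) < sizeof(*eth))
                return 0;       /* runt frame */

        eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
        return rte_is_same_ether_addr(&eth->d_addr, mac);
}
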
/**
 * Prepend len bytes to an mbuf data area.
 *
 * Returns a pointer to the new data start address. If there is not enough
 * headroom in the first segment, the function returns NULL, without
 * modifying the mbuf.
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
        uint16_t len)
{
        __rte_mbuf_sanity_check(m, 1);

        if (unlikely(len > rte_pktmbuf_headroom(m)))
                return NULL;

        /* NB: elaborating the subtraction like this instead of using
         * -= allows us to ensure the result type is uint16_t
         * avoiding compiler warnings on gcc 8.1 at least */
        m->data_off = (uint16_t)(m->data_off - len);
        m->data_len = (uint16_t)(m->data_len + len);
        m->pkt_len = (m->pkt_len + len);

        return (char *)m->buf_addr + m->data_off;
}

/**
 * Append len bytes to an mbuf.
 *
 * Append len bytes to an mbuf and return a pointer to the start address
 * of the added data. If there is not enough tailroom in the last segment,
 * the function returns NULL, without modifying the mbuf.
 */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
        void *tail;
        struct rte_mbuf *m_last;

        __rte_mbuf_sanity_check(m, 1);

        m_last = rte_pktmbuf_lastseg(m);
        if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
                return NULL;

        tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
        m_last->data_len = (uint16_t)(m_last->data_len + len);
        m->pkt_len = (m->pkt_len + len);
        return (char *)tail;
}

/**
 * Remove len bytes at the beginning of an mbuf.
 *
 * Returns a pointer to the start address of the new data area. If the
 * length is greater than the length of the first segment, the function
 * fails and returns NULL, without modifying the mbuf.
 */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
        __rte_mbuf_sanity_check(m, 1);

        if (unlikely(len > m->data_len))
                return NULL;

        /* NB: elaborating the addition like this instead of using
         * += allows us to ensure the result type is uint16_t
         * avoiding compiler warnings on gcc 8.1 at least */
        m->data_len = (uint16_t)(m->data_len - len);
        m->data_off = (uint16_t)(m->data_off + len);
        m->pkt_len = (m->pkt_len - len);
        return (char *)m->buf_addr + m->data_off;
}

/**
 * Remove len bytes of data at the end of the mbuf.
 *
 * If the length is greater than the length of the last segment, the
 * function fails and returns -1 without modifying the mbuf.
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
        struct rte_mbuf *m_last;

        __rte_mbuf_sanity_check(m, 1);

        m_last = rte_pktmbuf_lastseg(m);
        if (unlikely(len > m_last->data_len))
                return -1;

        m_last->data_len = (uint16_t)(m_last->data_len - len);
        m->pkt_len = (m->pkt_len - len);
        return 0;
}

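rte_pktmbuf_prepend()/rte_pktmbuf_adj() and rte_pktmbuf_append()/rte_pktmbuf_trim() are the standard way to push and strip headers and trailers in place. A sketch that removes and re-adds an Ethernet header, assuming it sits entirely in the first segment:

#include <rte_ether.h>

/* Strip the Ethernet header, exposing the L3 packet; NULL if the first
 * segment is shorter than the header. */
static char *
example_strip_eth_header(struct rte_mbuf *m)
{
        return rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));
}

/* Reclaim room for an Ethernet header in front of the current data;
 * the caller fills in the returned header. NULL if headroom ran out. */
static struct rte_ether_hdr *
example_push_eth_header(struct rte_mbuf *m)
{
        return (struct rte_ether_hdr *)
                rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
}
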
/**
 * Test if mbuf data is contiguous (i.e. with only one segment).
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 1);
        return !!(m->nb_segs == 1);
}

/**
 * @internal used by rte_pktmbuf_read().
 */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
        uint32_t len, void *buf);

/**
 * Read len data bytes in a mbuf at specified offset.
 *
 * If the data is contiguous, return a pointer into the mbuf data, else
 * copy the data into the buffer provided by the user and return its
 * pointer.
 */
static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
        uint32_t off, uint32_t len, void *buf)
{
        if (likely(off + len <= rte_pktmbuf_data_len(m)))
                return rte_pktmbuf_mtod_offset(m, char *, off);
        else
                return __rte_pktmbuf_read(m, off, len, buf);
}

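Because a header may straddle segment boundaries in a chained packet, rte_pktmbuf_read() with a caller-provided scratch buffer is the safe access pattern; when the requested bytes are contiguous it costs nothing and returns a pointer into the mbuf itself. A sketch reading the IPv4 header at a caller-supplied offset:

#include <rte_ip.h>

/* Fetch the IPv4 header even if the packet is segmented. 'off' is the
 * offset of the IP header (14 for plain Ethernet). Returns NULL if the
 * packet is too short. */
static const struct rte_ipv4_hdr *
example_read_ipv4_hdr(const struct rte_mbuf *m, uint32_t off,
                      struct rte_ipv4_hdr *scratch)
{
        if (off + sizeof(*scratch) > rte_pktmbuf_pkt_len(m))
                return NULL;
        return rte_pktmbuf_read(m, off, sizeof(*scratch), scratch);
}
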
/**
 * Chain an mbuf to another, thereby creating a segmented packet.
 *
 * Note: the implementation does a linear walk over the segments to find
 * the tail entry. When there are many segments, it is better to chain
 * the entries manually.
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
        struct rte_mbuf *cur_tail;

        /* Check for number-of-segments-overflow */
        if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
                return -EOVERFLOW;

        /* Chain 'tail' onto the old tail */
        cur_tail = rte_pktmbuf_lastseg(head);
        cur_tail->next = tail;

        /* accumulate number of segments and total length.
         * NB: elaborating the addition like this instead of using
         * += allows us to ensure the result type is uint16_t
         * avoiding compiler warnings on gcc 8.1 at least */
        head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
        head->pkt_len += tail->pkt_len;

        /* pkt_len is only set in the head */
        tail->pkt_len = tail->data_len;

        return 0;
}

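A common use of chaining is to build one logical packet from separately allocated header and payload mbufs; afterwards only the head is handed to TX, and freeing the head frees every segment. A short sketch:

/* Glue a payload-only mbuf behind a header mbuf. On failure both mbufs
 * are released, so the caller owns nothing afterwards. */
static struct rte_mbuf *
example_make_two_segment_pkt(struct rte_mbuf *hdr, struct rte_mbuf *payload)
{
        if (rte_pktmbuf_chain(hdr, payload) != 0) {
                rte_pktmbuf_free(hdr);
                rte_pktmbuf_free(payload);
                return NULL;    /* would exceed RTE_MBUF_MAX_NB_SEGS */
        }
        return hdr;
}
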
/*
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice.
 *
 * For given input values generate raw tx_offload value.
 * Note that it is the caller's responsibility to make sure that input
 * parameters don't exceed the maximum bit-field values.
 * @param il2
 *   l2_len value.
 * @param il3
 *   l3_len value.
 * @param il4
 *   l4_len value.
 * @param tso
 *   tso_segsz value.
 * @param ol3
 *   outer_l3_len value.
 * @param ol2
 *   outer_l2_len value.
 * @param unused
 *   unused value.
 * @return
 *   raw tx_offload value.
 */
static __rte_always_inline uint64_t
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
        uint64_t ol3, uint64_t ol2, uint64_t unused)
{
        return il2 << RTE_MBUF_L2_LEN_OFS |
                il3 << RTE_MBUF_L3_LEN_OFS |
                il4 << RTE_MBUF_L4_LEN_OFS |
                tso << RTE_MBUF_TSO_SEGSZ_OFS |
                ol3 << RTE_MBUF_OUTL3_LEN_OFS |
                ol2 << RTE_MBUF_OUTL2_LEN_OFS |
                unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
}

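Writing all six length fields through one 64-bit store is cheaper than six separate bit-field updates. A sketch that prepares a TSO-enabled Ethernet/IPv4/TCP mbuf this way; the 14/20/20 header lengths and the 1448-byte MSS are illustrative assumptions:

/* Configure TSO with a single store to tx_offload. TSO on IPv4 also
 * requires PKT_TX_IP_CKSUM, per the flag requirements above. */
static void
example_setup_tso(struct rte_mbuf *m)
{
        m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 1448, 0, 0, 0);
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
                       PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
}
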
/**
 * Validate general requirements for Tx offload in mbuf.
 *
 * This function checks the correctness and completeness of the Tx offload
 * settings.
 */
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
        uint64_t ol_flags = m->ol_flags;

        /* Does packet set any of available offloads? */
        if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
                return 0;

        /* IP checksum can be counted only for IPv4 packet */
        if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
                return -EINVAL;

        /* IP type not set when required */
        if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
                if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
                        return -EINVAL;

        /* Check requirements for TSO packet */
        if (ol_flags & PKT_TX_TCP_SEG)
                if ((m->tso_segsz == 0) ||
                    ((ol_flags & PKT_TX_IPV4) &&
                     !(ol_flags & PKT_TX_IP_CKSUM)))
                        return -EINVAL;

        /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
        if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
            !(ol_flags & PKT_TX_OUTER_IPV4))
                return -EINVAL;

        return 0;
}

/**
 * Linearize data in mbuf.
 *
 * This function moves the mbuf data into the first segment if there is
 * enough tailroom. The subsequent segments are unchained and freed.
 */
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
        size_t seg_len, copy_len;
        struct rte_mbuf *m;
        struct rte_mbuf *m_next;
        char *buffer;

        if (rte_pktmbuf_is_contiguous(mbuf))
                return 0;

        /* Extend first segment to the total packet length */
        copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

        if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
                return -1;

        buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
        mbuf->data_len = (uint16_t)(mbuf->pkt_len);

        /* Append data from next segments to the first one */
        m = mbuf->next;
        while (m != NULL) {
                m_next = m->next;

                seg_len = rte_pktmbuf_data_len(m);
                rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
                buffer += seg_len;

                rte_pktmbuf_free_seg(m);
                m = m_next;
        }

        mbuf->next = NULL;
        mbuf->nb_segs = 1;

        return 0;
}

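Linearizing is useful before handing a packet to code that cannot walk segment chains, such as a routine that expects one flat buffer. It fails when the first segment's tailroom cannot absorb the remaining data, leaving the packet unchanged. A sketch:

/* Ensure 'm' is one contiguous segment and return a pointer to its data;
 * NULL if there is not enough tailroom, in which case the caller can
 * fall back to a segment-aware path or drop the packet. */
static char *
example_flatten(struct rte_mbuf *m)
{
        if (rte_pktmbuf_linearize(m) != 0)
                return NULL;
        return rte_pktmbuf_mtod(m, char *);
}
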
/**
 * Dump an mbuf structure to a file.
 */
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

/**
 * Get the value of mbuf sched queue_id field.
 */
static inline uint32_t
rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
{
        return m->hash.sched.queue_id;
}

/**
 * Get the value of mbuf sched traffic_class field.
 */
static inline uint8_t
rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
{
        return m->hash.sched.traffic_class;
}

/**
 * Get the value of mbuf sched color field.
 */
static inline uint8_t
rte_mbuf_sched_color_get(const struct rte_mbuf *m)
{
        return m->hash.sched.color;
}

/**
 * Get the values of mbuf sched queue_id, traffic_class and color.
 */
static inline void
rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
        uint8_t *traffic_class,
        uint8_t *color)
{
        struct rte_mbuf_sched sched = m->hash.sched;

        *queue_id = sched.queue_id;
        *traffic_class = sched.traffic_class;
        *color = sched.color;
}

/**
 * Set the mbuf sched queue_id to the defined value.
 */
static inline void
rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
{
        m->hash.sched.queue_id = queue_id;
}

/**
 * Set the mbuf sched traffic_class id to the defined value.
 */
static inline void
rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
{
        m->hash.sched.traffic_class = traffic_class;
}

/**
 * Set the mbuf sched color id to the defined value.
 */
static inline void
rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
{
        m->hash.sched.color = color;
}

/**
 * Set the mbuf sched queue_id, traffic_class and color.
 */
static inline void
rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
        uint8_t traffic_class,
        uint8_t color)
{
        m->hash.sched = (struct rte_mbuf_sched){
                .queue_id = queue_id,
                .traffic_class = traffic_class,
                .color = color,
                .reserved = 0,
        };
}

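The combined setter writes the whole 8-byte hash.sched field at once, which is preferable on the fast path to three separate stores. A sketch tagging a packet for a hierarchical scheduler; the queue, class, and color values are placeholders:

/* Tag a packet for the traffic manager: queue 3, traffic class 1,
 * color 0 (green), in a single structure assignment. */
static void
example_classify_for_tm(struct rte_mbuf *m)
{
        rte_mbuf_sched_set(m, 3 /* queue_id */, 1 /* traffic_class */,
                           0 /* color */);
}
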
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */