DPDK  19.08.0-rc0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
148 /* Use this macro to check if LRO API is supported */
149 #define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 #include <rte_compat.h>
152 #include <rte_log.h>
153 #include <rte_interrupts.h>
154 #include <rte_dev.h>
155 #include <rte_devargs.h>
156 #include <rte_errno.h>
157 #include <rte_common.h>
158 #include <rte_config.h>
159 
160 #include "rte_ether.h"
161 #include "rte_dev_info.h"
162 
163 extern int rte_eth_dev_logtype;
164 
165 #define RTE_ETHDEV_LOG(level, ...) \
166  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
167 
168 struct rte_mbuf;
169 
186 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
187 
202 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
203 
216 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
217 
231 /* Iterate over every ethdev port matching the devargs string: the loop
231  * initializes `iter` then advances it, using RTE_MAX_ETHPORTS as the
231  * end-of-iteration sentinel returned by rte_eth_iterator_next().
231  * NOTE(review): on early exit from the loop, rte_eth_iterator_cleanup()
231  * presumably must be called by the user -- confirm against its docs. */
231 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
232  for (rte_eth_iterator_init(iter, devargs), \
233  id = rte_eth_iterator_next(iter); \
234  id != RTE_MAX_ETHPORTS; \
235  id = rte_eth_iterator_next(iter))
236 
244  uint64_t ipackets;
245  uint64_t opackets;
246  uint64_t ibytes;
247  uint64_t obytes;
248  uint64_t imissed;
252  uint64_t ierrors;
253  uint64_t oerrors;
254  uint64_t rx_nombuf;
255  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
257  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
259  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
261  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
263  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
265 };
266 
270 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
271 #define ETH_LINK_SPEED_FIXED (1 << 0)
272 #define ETH_LINK_SPEED_10M_HD (1 << 1)
273 #define ETH_LINK_SPEED_10M (1 << 2)
274 #define ETH_LINK_SPEED_100M_HD (1 << 3)
275 #define ETH_LINK_SPEED_100M (1 << 4)
276 #define ETH_LINK_SPEED_1G (1 << 5)
277 #define ETH_LINK_SPEED_2_5G (1 << 6)
278 #define ETH_LINK_SPEED_5G (1 << 7)
279 #define ETH_LINK_SPEED_10G (1 << 8)
280 #define ETH_LINK_SPEED_20G (1 << 9)
281 #define ETH_LINK_SPEED_25G (1 << 10)
282 #define ETH_LINK_SPEED_40G (1 << 11)
283 #define ETH_LINK_SPEED_50G (1 << 12)
284 #define ETH_LINK_SPEED_56G (1 << 13)
285 #define ETH_LINK_SPEED_100G (1 << 14)
290 #define ETH_SPEED_NUM_NONE 0
291 #define ETH_SPEED_NUM_10M 10
292 #define ETH_SPEED_NUM_100M 100
293 #define ETH_SPEED_NUM_1G 1000
294 #define ETH_SPEED_NUM_2_5G 2500
295 #define ETH_SPEED_NUM_5G 5000
296 #define ETH_SPEED_NUM_10G 10000
297 #define ETH_SPEED_NUM_20G 20000
298 #define ETH_SPEED_NUM_25G 25000
299 #define ETH_SPEED_NUM_40G 40000
300 #define ETH_SPEED_NUM_50G 50000
301 #define ETH_SPEED_NUM_56G 56000
302 #define ETH_SPEED_NUM_100G 100000
307 __extension__
307 /* Link status snapshot as reported by rte_eth_link_get()/_nowait(). */
308 struct rte_eth_link {
309  uint32_t link_speed; /* link speed in Mbps, one of ETH_SPEED_NUM_* */
310  uint16_t link_duplex : 1; /* ETH_LINK_HALF_DUPLEX / ETH_LINK_FULL_DUPLEX */
311  uint16_t link_autoneg : 1; /* ETH_LINK_FIXED / ETH_LINK_AUTONEG */
312  uint16_t link_status : 1; /* ETH_LINK_DOWN / ETH_LINK_UP */
313 } __attribute__((aligned(8))); /* NOTE(review): 8-byte alignment presumably
313  so the whole struct fits one 64-bit word for atomic update -- confirm */
315 /* Utility constants */
316 #define ETH_LINK_HALF_DUPLEX 0
317 #define ETH_LINK_FULL_DUPLEX 1
318 #define ETH_LINK_DOWN 0
319 #define ETH_LINK_UP 1
320 #define ETH_LINK_FIXED 0
321 #define ETH_LINK_AUTONEG 1
327 /* Per-queue ring threshold parameters handed to the PMD at queue setup.
327  * NOTE(review): names suggest prefetch / host / write-back threshold
327  * registers of the HW descriptor ring -- confirm per driver. */
327 struct rte_eth_thresh {
328  uint8_t pthresh;
329  uint8_t hthresh;
330  uint8_t wthresh;
331 };
332 
336 #define ETH_MQ_RX_RSS_FLAG 0x1
337 #define ETH_MQ_RX_DCB_FLAG 0x2
338 #define ETH_MQ_RX_VMDQ_FLAG 0x4
339 
347 
351  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
353  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
354 
356  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
358  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
360  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
363  ETH_MQ_RX_VMDQ_FLAG,
364 };
365 
369 #define ETH_RSS ETH_MQ_RX_RSS
370 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
371 #define ETH_DCB_RX ETH_MQ_RX_DCB
372 
382 };
383 
387 #define ETH_DCB_NONE ETH_MQ_TX_NONE
388 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
389 #define ETH_DCB_TX ETH_MQ_TX_DCB
390 
397  uint32_t max_rx_pkt_len;
398  uint16_t split_hdr_size;
404  uint64_t offloads;
405 };
406 
412  ETH_VLAN_TYPE_UNKNOWN = 0,
415  ETH_VLAN_TYPE_MAX,
416 };
417 
423  uint64_t ids[64];
424 };
425 
444  uint8_t *rss_key;
445  uint8_t rss_key_len;
446  uint64_t rss_hf;
447 };
448 
449 /*
450  * A packet can be identified by hardware as different flow types. Different
451  * NIC hardware may support different flow types.
452  * Basically, the NIC hardware identifies the flow type as deep protocol as
453  * possible, and exclusively. For example, if a packet is identified as
454  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
455  * though it is an actual IPV4 packet.
456  * Note that the flow types are used to define RSS offload types.
457  */
458 #define RTE_ETH_FLOW_UNKNOWN 0
459 #define RTE_ETH_FLOW_RAW 1
460 #define RTE_ETH_FLOW_IPV4 2
461 #define RTE_ETH_FLOW_FRAG_IPV4 3
462 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
463 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
464 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
465 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
466 #define RTE_ETH_FLOW_IPV6 8
467 #define RTE_ETH_FLOW_FRAG_IPV6 9
468 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
469 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
470 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
471 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
472 #define RTE_ETH_FLOW_L2_PAYLOAD 14
473 #define RTE_ETH_FLOW_IPV6_EX 15
474 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
475 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
476 #define RTE_ETH_FLOW_PORT 18
477 
478 #define RTE_ETH_FLOW_VXLAN 19
479 #define RTE_ETH_FLOW_GENEVE 20
480 #define RTE_ETH_FLOW_NVGRE 21
481 #define RTE_ETH_FLOW_VXLAN_GPE 22
482 #define RTE_ETH_FLOW_MAX 23
483 
484 /*
485  * The RSS offload types are defined based on flow types.
486  * Different NIC hardware may support different RSS offload
487  * types. The supported flow types or RSS offload types can be queried by
488  * rte_eth_dev_info_get().
489  */
490 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
491 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
492 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
493 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
494 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
495 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
496 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
497 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
498 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
499 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
500 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
501 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
502 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
503 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
504 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
505 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
506 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
507 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
508 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
509 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
510 
511 #define ETH_RSS_IP ( \
512  ETH_RSS_IPV4 | \
513  ETH_RSS_FRAG_IPV4 | \
514  ETH_RSS_NONFRAG_IPV4_OTHER | \
515  ETH_RSS_IPV6 | \
516  ETH_RSS_FRAG_IPV6 | \
517  ETH_RSS_NONFRAG_IPV6_OTHER | \
518  ETH_RSS_IPV6_EX)
519 
520 #define ETH_RSS_UDP ( \
521  ETH_RSS_NONFRAG_IPV4_UDP | \
522  ETH_RSS_NONFRAG_IPV6_UDP | \
523  ETH_RSS_IPV6_UDP_EX)
524 
525 #define ETH_RSS_TCP ( \
526  ETH_RSS_NONFRAG_IPV4_TCP | \
527  ETH_RSS_NONFRAG_IPV6_TCP | \
528  ETH_RSS_IPV6_TCP_EX)
529 
530 #define ETH_RSS_SCTP ( \
531  ETH_RSS_NONFRAG_IPV4_SCTP | \
532  ETH_RSS_NONFRAG_IPV6_SCTP)
533 
534 #define ETH_RSS_TUNNEL ( \
535  ETH_RSS_VXLAN | \
536  ETH_RSS_GENEVE | \
537  ETH_RSS_NVGRE)
538 
540 #define ETH_RSS_PROTO_MASK ( \
541  ETH_RSS_IPV4 | \
542  ETH_RSS_FRAG_IPV4 | \
543  ETH_RSS_NONFRAG_IPV4_TCP | \
544  ETH_RSS_NONFRAG_IPV4_UDP | \
545  ETH_RSS_NONFRAG_IPV4_SCTP | \
546  ETH_RSS_NONFRAG_IPV4_OTHER | \
547  ETH_RSS_IPV6 | \
548  ETH_RSS_FRAG_IPV6 | \
549  ETH_RSS_NONFRAG_IPV6_TCP | \
550  ETH_RSS_NONFRAG_IPV6_UDP | \
551  ETH_RSS_NONFRAG_IPV6_SCTP | \
552  ETH_RSS_NONFRAG_IPV6_OTHER | \
553  ETH_RSS_L2_PAYLOAD | \
554  ETH_RSS_IPV6_EX | \
555  ETH_RSS_IPV6_TCP_EX | \
556  ETH_RSS_IPV6_UDP_EX | \
557  ETH_RSS_PORT | \
558  ETH_RSS_VXLAN | \
559  ETH_RSS_GENEVE | \
560  ETH_RSS_NVGRE)
561 
562 /*
563  * Definitions used for redirection table entry size.
564  * Some RSS RETA sizes may not be supported by some drivers, check the
565  * documentation or the description of relevant functions for more details.
566  */
567 #define ETH_RSS_RETA_SIZE_64 64
568 #define ETH_RSS_RETA_SIZE_128 128
569 #define ETH_RSS_RETA_SIZE_256 256
570 #define ETH_RSS_RETA_SIZE_512 512
571 #define RTE_RETA_GROUP_SIZE 64
572 
573 /* Definitions used for VMDQ and DCB functionality */
574 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
575 #define ETH_DCB_NUM_USER_PRIORITIES 8
576 #define ETH_VMDQ_DCB_NUM_QUEUES 128
577 #define ETH_DCB_NUM_QUEUES 128
579 /* DCB capability defines */
580 #define ETH_DCB_PG_SUPPORT 0x00000001
581 #define ETH_DCB_PFC_SUPPORT 0x00000002
583 /* Definitions used for VLAN Offload functionality */
584 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
585 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
586 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
588 /* Definitions used for mask VLAN setting */
589 #define ETH_VLAN_STRIP_MASK 0x0001
590 #define ETH_VLAN_FILTER_MASK 0x0002
591 #define ETH_VLAN_EXTEND_MASK 0x0004
592 #define ETH_VLAN_ID_MAX 0x0FFF
594 /* Definitions used for receive MAC address */
595 #define ETH_NUM_RECEIVE_MAC_ADDR 128
597 /* Definitions used for unicast hash */
598 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
600 /* Definitions used for VMDQ pool rx mode setting */
601 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
602 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
603 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
604 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
605 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
608 #define ETH_MIRROR_MAX_VLANS 64
609 
610 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
611 #define ETH_MIRROR_UPLINK_PORT 0x02
612 #define ETH_MIRROR_DOWNLINK_PORT 0x04
613 #define ETH_MIRROR_VLAN 0x08
614 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
619 /* VLAN set selected for mirroring (see ETH_MIRROR_VLAN rule type). */
619 struct rte_eth_vlan_mirror {
620  uint64_t vlan_mask; /* bitmask: bit i selects entry vlan_id[i]; 64 bits
620  matches ETH_MIRROR_MAX_VLANS (64) entries below */
622  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /* VLAN IDs; valid range
622  presumably 0..ETH_VLAN_ID_MAX -- confirm */
623 };
624 
629  uint8_t rule_type;
630  uint8_t dst_pool;
631  uint64_t pool_mask;
634 };
635 
643  uint64_t mask;
645  uint16_t reta[RTE_RETA_GROUP_SIZE];
647 };
648 
654  ETH_4_TCS = 4,
656 };
657 
667 };
668 
669 /* This structure may be extended in future. */
669 /* DCB receive configuration (rx_adv_conf.dcb_rx_conf in rte_eth_conf). */
670 struct rte_eth_dcb_rx_conf {
671  enum rte_eth_nb_tcs nb_tcs; /* number of Rx traffic classes */
673  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /* NOTE(review): presumably
673  maps each 802.1p user priority (0-7) to a TC index -- confirm */
674 };
675 
676 /* Combined VMDq+DCB transmit configuration (tx_adv_conf union member). */
676 struct rte_eth_vmdq_dcb_tx_conf {
677  enum rte_eth_nb_pools nb_queue_pools; /* number of VMDq Tx pools */
679  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /* NOTE(review): presumably
679  maps each user priority (0-7) to a TC index -- confirm */
680 };
681 
682 /* DCB transmit configuration (tx_adv_conf union member). */
682 struct rte_eth_dcb_tx_conf {
683  enum rte_eth_nb_tcs nb_tcs; /* number of Tx traffic classes */
685  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /* NOTE(review): presumably
685  maps each user priority (0-7) to a TC index -- confirm */
686 };
687 
688 /* VMDq-only transmit configuration (tx_adv_conf union member). */
688 struct rte_eth_vmdq_tx_conf {
689  enum rte_eth_nb_pools nb_queue_pools; /* number of VMDq Tx pools */
690 };
691 
706  uint8_t default_pool;
707  uint8_t nb_pool_maps;
708  struct {
709  uint16_t vlan_id;
710  uint64_t pools;
714 };
715 
737  uint8_t default_pool;
739  uint8_t nb_pool_maps;
740  uint32_t rx_mode;
741  struct {
742  uint16_t vlan_id;
743  uint64_t pools;
745 };
746 
757  uint64_t offloads;
758 
759  /* For i40e specifically */
760  uint16_t pvid;
761  __extension__
762  uint8_t hw_vlan_reject_tagged : 1,
768 };
769 
775  uint16_t rx_free_thresh;
776  uint8_t rx_drop_en;
783  uint64_t offloads;
784 };
785 
791  uint16_t tx_rs_thresh;
792  uint16_t tx_free_thresh;
801  uint64_t offloads;
802 };
803 
808  uint16_t nb_max;
809  uint16_t nb_min;
810  uint16_t nb_align;
820  uint16_t nb_seg_max;
821 
833  uint16_t nb_mtu_seg_max;
834 };
835 
844 };
845 
852  uint32_t high_water;
853  uint32_t low_water;
854  uint16_t pause_time;
855  uint16_t send_xon;
858  uint8_t autoneg;
859 };
860 
868  uint8_t priority;
869 };
870 
875  RTE_TUNNEL_TYPE_NONE = 0,
876  RTE_TUNNEL_TYPE_VXLAN,
877  RTE_TUNNEL_TYPE_GENEVE,
878  RTE_TUNNEL_TYPE_TEREDO,
879  RTE_TUNNEL_TYPE_NVGRE,
880  RTE_TUNNEL_TYPE_IP_IN_GRE,
881  RTE_L2_TUNNEL_TYPE_E_TAG,
882  RTE_TUNNEL_TYPE_VXLAN_GPE,
883  RTE_TUNNEL_TYPE_MAX,
884 };
885 
886 /* Deprecated API file for rte_eth_dev_filter_* functions */
887 #include "rte_eth_ctrl.h"
888 
897 };
898 
906 };
907 
919  uint8_t drop_queue;
920  struct rte_eth_fdir_masks mask;
923 };
924 
933  uint16_t udp_port;
934  uint8_t prot_type;
935 };
936 
942  uint32_t lsc:1;
944  uint32_t rxq:1;
946  uint32_t rmv:1;
947 };
948 
954 struct rte_eth_conf {
955  uint32_t link_speeds;
964  uint32_t lpbk_mode;
969  struct {
973  struct rte_eth_dcb_rx_conf dcb_rx_conf;
977  } rx_adv_conf;
978  union {
979  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
981  struct rte_eth_dcb_tx_conf dcb_tx_conf;
983  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
985  } tx_adv_conf;
991 };
992 
996 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
997 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
998 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
999 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
1000 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
1001 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
1002 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
1003 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
1004 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
1005 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
1006 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
1007 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
1008 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
1009 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
1010 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
1011 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
1012 #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
1013 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
1014 
1015 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
1016  DEV_RX_OFFLOAD_UDP_CKSUM | \
1017  DEV_RX_OFFLOAD_TCP_CKSUM)
1018 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
1019  DEV_RX_OFFLOAD_VLAN_FILTER | \
1020  DEV_RX_OFFLOAD_VLAN_EXTEND)
1021 
1022 /*
1023  * If new Rx offload capabilities are defined, they also must be
1024  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1025  */
1026 
1030 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
1031 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
1032 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
1033 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
1034 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
1035 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
1036 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
1037 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
1038 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
1039 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
1040 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
1041 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
1042 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
1043 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
1044 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
1045 
1048 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
1049 
1050 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
1051 
1055 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
1056 
1061 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
1062 
1067 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
1068 
1069 #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
1070 
1074 #define DEV_TX_OFFLOAD_MATCH_METADATA 0x00200000
1075 
1076 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
1077 
1078 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
1079 
1081 /*
1082  * If new Tx offload capabilities are defined, they also must be
1083  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1084  */
1085 
1086 /*
1087  * Fallback default preferred Rx/Tx port parameters.
1088  * These are used if an application requests default parameters
1089  * but the PMD does not provide preferred values.
1090  */
1091 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1092 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1093 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1094 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1095 
1102  uint16_t burst_size;
1103  uint16_t ring_size;
1104  uint16_t nb_queues;
1105 };
1106 
1111 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)
1112 
1117  const char *name;
1118  uint16_t domain_id;
1119  uint16_t port_id;
1127 };
1128 
1139  struct rte_device *device;
1140  const char *driver_name;
1141  unsigned int if_index;
1143  uint16_t min_mtu;
1144  uint16_t max_mtu;
1145  const uint32_t *dev_flags;
1146  uint32_t min_rx_bufsize;
1147  uint32_t max_rx_pktlen;
1148  uint16_t max_rx_queues;
1149  uint16_t max_tx_queues;
1150  uint32_t max_mac_addrs;
1151  uint32_t max_hash_mac_addrs;
1153  uint16_t max_vfs;
1154  uint16_t max_vmdq_pools;
1163  uint16_t reta_size;
1165  uint8_t hash_key_size;
1170  uint16_t vmdq_queue_base;
1171  uint16_t vmdq_queue_num;
1172  uint16_t vmdq_pool_base;
1175  uint32_t speed_capa;
1177  uint16_t nb_rx_queues;
1178  uint16_t nb_tx_queues;
1184  uint64_t dev_capa;
1190 };
1191 
1197  struct rte_mempool *mp;
1199  uint8_t scattered_rx;
1200  uint16_t nb_desc;
1202 
1209  uint16_t nb_desc;
1211 
1213 #define RTE_ETH_XSTATS_NAME_SIZE 64
1214 
1225  uint64_t id;
1226  uint64_t value;
1227 };
1228 
1238 };
1239 
1240 #define ETH_DCB_NUM_TCS 8
1241 #define ETH_MAX_VMDQ_POOL 64
1242 
1249  struct {
1250  uint8_t base;
1251  uint8_t nb_queue;
1252  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1254  struct {
1255  uint8_t base;
1256  uint8_t nb_queue;
1257  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1258 };
1259 
1265  uint8_t nb_tcs;
1267  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1270 };
1271 
1275 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1276 #define RTE_ETH_QUEUE_STATE_STARTED 1
1277 
1278 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1279 
1280 /* Macros to check for valid port */
1281 /* Validate `port_id`; if it is not a valid configured port, log an ERR
1281  * message and return `retval` from the *calling* function. Intended for
1281  * use at the top of ethdev API implementations. */
1281 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1282  if (!rte_eth_dev_is_valid_port(port_id)) { \
1283  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1284  return retval; \
1285  } \
1286 } while (0)
1287 
1288 /* Same as RTE_ETH_VALID_PORTID_OR_ERR_RET but for void functions:
1288  * log an ERR message and plain-return on an invalid port id. */
1288 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1289  if (!rte_eth_dev_is_valid_port(port_id)) { \
1290  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1291  return; \
1292  } \
1293 } while (0)
1294 
1300 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1301 
1302 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1303 
1304 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1305 
1306 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1307 
1330 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1331  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1332  void *user_param);
1333 
1354 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1355  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1356 
1367 };
1368 
1369 /* SR-IOV state of a device, accessed via RTE_ETH_DEV_SRIOV(dev).
1369  * NOTE(review): field meanings below are inferred from names --
1369  * confirm against the PMDs that populate this struct. */
1369 struct rte_eth_dev_sriov {
1370  uint8_t active; /* nonzero when SR-IOV is enabled */
1371  uint8_t nb_q_per_pool; /* queues per VF/VMDq pool, presumably */
1372  uint16_t def_vmdq_idx; /* default VMDq pool index for the PF, presumably */
1373  uint16_t def_pool_q_idx; /* first queue of the default pool, presumably */
1374 };
1375 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1376 
1377 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1378 
1379 #define RTE_ETH_DEV_NO_OWNER 0
1380 
1381 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1382 
1383 /* Identity of an ethdev port owner, used by the rte_eth_dev_owner_*
1383  * API (set/unset/get/delete) declared later in this header. */
1383 struct rte_eth_dev_owner {
1384  uint64_t id; /* unique owner id obtained from rte_eth_dev_owner_new() */
1385  char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /* human-readable owner name,
1385  at most 64 bytes including the terminating NUL */
1386 };
1387 
1392 #define RTE_ETH_DEV_CLOSE_REMOVE 0x0001
1393 
1394 #define RTE_ETH_DEV_INTR_LSC 0x0002
1395 
1396 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1397 
1398 #define RTE_ETH_DEV_INTR_RMV 0x0008
1399 
1400 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1401 
1402 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1403 
1415 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1416  const uint64_t owner_id);
1417 
1421 /* Iterate over all valid port ids owned by owner id `o`, starting from
1421  * port 0. The unsigned casts keep the `p < RTE_MAX_ETHPORTS` bound
1421  * comparison well-defined for any integer type the caller uses for `p`. */
1421 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1422  for (p = rte_eth_find_next_owned_by(0, o); \
1423  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1424  p = rte_eth_find_next_owned_by(p + 1, o))
1425 
1434 uint16_t rte_eth_find_next(uint16_t port_id);
1435 
1439 #define RTE_ETH_FOREACH_DEV(p) \
1440  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1441 
1456 uint16_t __rte_experimental
1457 rte_eth_find_next_of(uint16_t port_id_start,
1458  const struct rte_device *parent);
1459 
1468 /* Iterate over all ports belonging to the given parent rte_device;
1468  * rte_eth_find_next_of() returns RTE_MAX_ETHPORTS when exhausted. */
1468 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
1469  for (port_id = rte_eth_find_next_of(0, parent); \
1470  port_id < RTE_MAX_ETHPORTS; \
1471  port_id = rte_eth_find_next_of(port_id + 1, parent))
1472 
1487 uint16_t __rte_experimental
1488 rte_eth_find_next_sibling(uint16_t port_id_start,
1489  uint16_t ref_port_id);
1490 
1501 /* Iterate over all ports sharing the same underlying device as
1501  * `ref_port_id` (its "siblings"); rte_eth_find_next_sibling() returns
1501  * RTE_MAX_ETHPORTS when exhausted.
1501  * NOTE(review): presumably includes ref_port_id itself -- confirm. */
1501 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
1502  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
1503  port_id < RTE_MAX_ETHPORTS; \
1504  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1505 
1519 int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id);
1520 
1534 int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id,
1535  const struct rte_eth_dev_owner *owner);
1536 
1550 int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id,
1551  const uint64_t owner_id);
1552 
1562 void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id);
1563 
1577 int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
1578  struct rte_eth_dev_owner *owner);
1579 
1592 __rte_deprecated
1593 uint16_t rte_eth_dev_count(void);
1594 
1605 uint16_t rte_eth_dev_count_avail(void);
1606 
1615 uint16_t rte_eth_dev_count_total(void);
1616 
1628 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1629 
1638 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
1639 
1648 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
1649 
1689 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1690  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1691 
1703 int __rte_experimental
1704 rte_eth_dev_is_removed(uint16_t port_id);
1705 
1755 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1756  uint16_t nb_rx_desc, unsigned int socket_id,
1757  const struct rte_eth_rxconf *rx_conf,
1758  struct rte_mempool *mb_pool);
1759 
1808 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1809  uint16_t nb_tx_desc, unsigned int socket_id,
1810  const struct rte_eth_txconf *tx_conf);
1811 
1822 int rte_eth_dev_socket_id(uint16_t port_id);
1823 
1833 int rte_eth_dev_is_valid_port(uint16_t port_id);
1834 
1851 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
1852 
1868 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
1869 
1886 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
1887 
1903 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
1904 
1924 int rte_eth_dev_start(uint16_t port_id);
1925 
1933 void rte_eth_dev_stop(uint16_t port_id);
1934 
1947 int rte_eth_dev_set_link_up(uint16_t port_id);
1948 
1958 int rte_eth_dev_set_link_down(uint16_t port_id);
1959 
1968 void rte_eth_dev_close(uint16_t port_id);
1969 
2007 int rte_eth_dev_reset(uint16_t port_id);
2008 
2015 void rte_eth_promiscuous_enable(uint16_t port_id);
2016 
2023 void rte_eth_promiscuous_disable(uint16_t port_id);
2024 
2035 int rte_eth_promiscuous_get(uint16_t port_id);
2036 
2043 void rte_eth_allmulticast_enable(uint16_t port_id);
2044 
2051 void rte_eth_allmulticast_disable(uint16_t port_id);
2052 
2063 int rte_eth_allmulticast_get(uint16_t port_id);
2064 
2076 void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2077 
2089 void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2090 
2108 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2109 
2120 int rte_eth_stats_reset(uint16_t port_id);
2121 
2151 int rte_eth_xstats_get_names(uint16_t port_id,
2152  struct rte_eth_xstat_name *xstats_names,
2153  unsigned int size);
2154 
2184 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2185  unsigned int n);
2186 
2209 int
2210 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2211  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2212  uint64_t *ids);
2213 
2237 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2238  uint64_t *values, unsigned int size);
2239 
2258 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2259  uint64_t *id);
2260 
2267 void rte_eth_xstats_reset(uint16_t port_id);
2268 
2286 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2287  uint16_t tx_queue_id, uint8_t stat_idx);
2288 
2306 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2307  uint16_t rx_queue_id,
2308  uint8_t stat_idx);
2309 
2319 void rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
2320 
2359 void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2360 
2380 int rte_eth_dev_fw_version_get(uint16_t port_id,
2381  char *fw_version, size_t fw_size);
2382 
2421 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2422  uint32_t *ptypes, int num);
2423 
2435 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2436 
2454 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2455 
2475 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2476 
2496 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2497  int on);
2498 
2516 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2517  enum rte_vlan_type vlan_type,
2518  uint16_t tag_type);
2519 
2541 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2542 
2555 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2556 
2571 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2572 
2573 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2574  void *userdata);
2575 
2581  buffer_tx_error_fn error_callback;
2582  void *error_userdata;
2583  uint16_t size;
2584  uint16_t length;
2585  struct rte_mbuf *pkts[];
2587 };
2588 
2595 /* Number of bytes to allocate for a struct rte_eth_dev_tx_buffer able
2595  * to hold `sz` mbuf pointers: header plus the trailing pkts[] array.
2595  * Pass the result to malloc/rte_malloc before rte_eth_tx_buffer_init(). */
2595 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2596  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2597 
2608 int
2609 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2610 
2635 int
2637  buffer_tx_error_fn callback, void *userdata);
2638 
2661 void
2662 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2663  void *userdata);
2664 
2688 void
2689 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2690  void *userdata);
2691 
2717 int
2718 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
2719 
2735 };
2736 
2744  uint64_t metadata;
2758 };
2759 
2777 };
2778 
2779 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
2780  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
2800 int rte_eth_dev_callback_register(uint16_t port_id,
2801  enum rte_eth_event_type event,
2802  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2803 
2822 int rte_eth_dev_callback_unregister(uint16_t port_id,
2823  enum rte_eth_event_type event,
2824  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2825 
2847 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
2848 
2869 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
2870 
2888 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
2889 
2911 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
2912  int epfd, int op, void *data);
2913 
2931 int __rte_experimental
2932 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
2933 
2947 int rte_eth_led_on(uint16_t port_id);
2948 
2962 int rte_eth_led_off(uint16_t port_id);
2963 
2977 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
2978  struct rte_eth_fc_conf *fc_conf);
2979 
2994 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
2995  struct rte_eth_fc_conf *fc_conf);
2996 
3012 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3013  struct rte_eth_pfc_conf *pfc_conf);
3014 
3034 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3035  uint32_t pool);
3036 
3050 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3051  struct rte_ether_addr *mac_addr);
3052 
3066 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3067  struct rte_ether_addr *mac_addr);
3068 
3085 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3086  struct rte_eth_rss_reta_entry64 *reta_conf,
3087  uint16_t reta_size);
3088 
3106 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3107  struct rte_eth_rss_reta_entry64 *reta_conf,
3108  uint16_t reta_size);
3109 
3129 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3130  uint8_t on);
3131 
3150 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3151 
3174 int rte_eth_mirror_rule_set(uint16_t port_id,
3175  struct rte_eth_mirror_conf *mirror_conf,
3176  uint8_t rule_id,
3177  uint8_t on);
3178 
3193 int rte_eth_mirror_rule_reset(uint16_t port_id,
3194  uint8_t rule_id);
3195 
3212 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3213  uint16_t tx_rate);
3214 
3229 int rte_eth_dev_rss_hash_update(uint16_t port_id,
3230  struct rte_eth_rss_conf *rss_conf);
3231 
3246 int
3247 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3248  struct rte_eth_rss_conf *rss_conf);
3249 
3268 int
3269 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3270  struct rte_eth_udp_tunnel *tunnel_udp);
3271 
3291 int
3292 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3293  struct rte_eth_udp_tunnel *tunnel_udp);
3294 
3309 __rte_deprecated
3310 int rte_eth_dev_filter_supported(uint16_t port_id,
3311  enum rte_filter_type filter_type);
3312 
3332 __rte_deprecated
3333 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3334  enum rte_filter_op filter_op, void *arg);
3335 
3349 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3350  struct rte_eth_dcb_info *dcb_info);
3351 
3352 struct rte_eth_rxtx_callback;
3353 
3378 const struct rte_eth_rxtx_callback *
3379 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3380  rte_rx_callback_fn fn, void *user_param);
3381 
3407 const struct rte_eth_rxtx_callback *
3408 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3409  rte_rx_callback_fn fn, void *user_param);
3410 
3435 const struct rte_eth_rxtx_callback *
3436 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3437  rte_tx_callback_fn fn, void *user_param);
3438 
3469 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3470  const struct rte_eth_rxtx_callback *user_cb);
3471 
3502 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3503  const struct rte_eth_rxtx_callback *user_cb);
3504 
3522 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3523  struct rte_eth_rxq_info *qinfo);
3524 
3542 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3543  struct rte_eth_txq_info *qinfo);
3544 
3562 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
3563 
3576 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
3577 
3593 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3594 
3610 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3611 
3629 int __rte_experimental
3630 rte_eth_dev_get_module_info(uint16_t port_id,
3631  struct rte_eth_dev_module_info *modinfo);
3632 
3651 int __rte_experimental
3652 rte_eth_dev_get_module_eeprom(uint16_t port_id,
3653  struct rte_dev_eeprom_info *info);
3654 
3673 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3674  struct rte_ether_addr *mc_addr_set,
3675  uint32_t nb_mc_addr);
3676 
3689 int rte_eth_timesync_enable(uint16_t port_id);
3690 
3703 int rte_eth_timesync_disable(uint16_t port_id);
3704 
3723 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
3724  struct timespec *timestamp, uint32_t flags);
3725 
3741 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3742  struct timespec *timestamp);
3743 
3761 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
3762 
3777 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
3778 
3797 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
3798 
3843 int __rte_experimental
3844 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
3845 
3861 int
3862 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3863  struct rte_eth_l2_tunnel_conf *l2_tunnel);
3864 
3889 int
3890 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3891  struct rte_eth_l2_tunnel_conf *l2_tunnel,
3892  uint32_t mask,
3893  uint8_t en);
3894 
3910 int
3911 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
3912 
3927 int
3928 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
3929 
3946 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3947  uint16_t *nb_rx_desc,
3948  uint16_t *nb_tx_desc);
3949 
3964 int
3965 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
3966 
3976 void *
3977 rte_eth_dev_get_sec_ctx(uint16_t port_id);
3978 
3979 
3980 #include <rte_ethdev_core.h>
3981 
4064 static inline uint16_t
4065 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
4066  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
4067 {
4068  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4069  uint16_t nb_rx;
4070 
4071 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4072  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4073  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
4074 
4075  if (queue_id >= dev->data->nb_rx_queues) {
4076  RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4077  return 0;
4078  }
4079 #endif
4080  nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
4081  rx_pkts, nb_pkts);
4082 
4083 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4084  if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
4085  struct rte_eth_rxtx_callback *cb =
4086  dev->post_rx_burst_cbs[queue_id];
4087 
4088  do {
4089  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
4090  nb_pkts, cb->param);
4091  cb = cb->next;
4092  } while (cb != NULL);
4093  }
4094 #endif
4095 
4096  return nb_rx;
4097 }
4098 
4111 static inline int
4112 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
4113 {
4114  struct rte_eth_dev *dev;
4115 
4116  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4117  dev = &rte_eth_devices[port_id];
4118  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
4119  if (queue_id >= dev->data->nb_rx_queues)
4120  return -EINVAL;
4121 
4122  return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
4123 }
4124 
4140 static inline int
4141 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
4142 {
4143  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4144  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4145  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
4146  return (*dev->dev_ops->rx_descriptor_done)( \
4147  dev->data->rx_queues[queue_id], offset);
4148 }
4149 
4150 #define RTE_ETH_RX_DESC_AVAIL 0
4151 #define RTE_ETH_RX_DESC_DONE 1
4152 #define RTE_ETH_RX_DESC_UNAVAIL 2
4187 static inline int
4188 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
4189  uint16_t offset)
4190 {
4191  struct rte_eth_dev *dev;
4192  void *rxq;
4193 
4194 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4195  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4196 #endif
4197  dev = &rte_eth_devices[port_id];
4198 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4199  if (queue_id >= dev->data->nb_rx_queues)
4200  return -ENODEV;
4201 #endif
4202  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
4203  rxq = dev->data->rx_queues[queue_id];
4204 
4205  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
4206 }
4207 
4208 #define RTE_ETH_TX_DESC_FULL 0
4209 #define RTE_ETH_TX_DESC_DONE 1
4210 #define RTE_ETH_TX_DESC_UNAVAIL 2
4245 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
4246  uint16_t queue_id, uint16_t offset)
4247 {
4248  struct rte_eth_dev *dev;
4249  void *txq;
4250 
4251 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4252  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4253 #endif
4254  dev = &rte_eth_devices[port_id];
4255 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4256  if (queue_id >= dev->data->nb_tx_queues)
4257  return -ENODEV;
4258 #endif
4259  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
4260  txq = dev->data->tx_queues[queue_id];
4261 
4262  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
4263 }
4264 
4331 static inline uint16_t
4332 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
4333  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4334 {
4335  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4336 
4337 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4338  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4339  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
4340 
4341  if (queue_id >= dev->data->nb_tx_queues) {
4342  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4343  return 0;
4344  }
4345 #endif
4346 
4347 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4348  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
4349 
4350  if (unlikely(cb != NULL)) {
4351  do {
4352  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
4353  cb->param);
4354  cb = cb->next;
4355  } while (cb != NULL);
4356  }
4357 #endif
4358 
4359  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
4360 }
4361 
4415 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4416 
4417 static inline uint16_t
4418 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
4419  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4420 {
4421  struct rte_eth_dev *dev;
4422 
4423 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4424  if (!rte_eth_dev_is_valid_port(port_id)) {
4425  RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
4426  rte_errno = EINVAL;
4427  return 0;
4428  }
4429 #endif
4430 
4431  dev = &rte_eth_devices[port_id];
4432 
4433 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4434  if (queue_id >= dev->data->nb_tx_queues) {
4435  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4436  rte_errno = EINVAL;
4437  return 0;
4438  }
4439 #endif
4440 
4441  if (!dev->tx_pkt_prepare)
4442  return nb_pkts;
4443 
4444  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4445  tx_pkts, nb_pkts);
4446 }
4447 
4448 #else
4449 
4450 /*
4451  * Native NOOP operation for compilation targets which doesn't require any
4452  * preparations steps, and functional NOOP may introduce unnecessary performance
4453  * drop.
4454  *
4455  * Generally this is not a good idea to turn it on globally and didn't should
4456  * be used if behavior of tx_preparation can change.
4457  */
4458 
4459 static inline uint16_t
4460 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
4461  __rte_unused uint16_t queue_id,
4462  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4463 {
4464  return nb_pkts;
4465 }
4466 
4467 #endif
4468 
4491 static inline uint16_t
4492 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4493  struct rte_eth_dev_tx_buffer *buffer)
4494 {
4495  uint16_t sent;
4496  uint16_t to_send = buffer->length;
4497 
4498  if (to_send == 0)
4499  return 0;
4500 
4501  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4502 
4503  buffer->length = 0;
4504 
4505  /* All packets sent, or to be dealt with by callback below */
4506  if (unlikely(sent != to_send))
4507  buffer->error_callback(&buffer->pkts[sent],
4508  (uint16_t)(to_send - sent),
4509  buffer->error_userdata);
4510 
4511  return sent;
4512 }
4513 
4544 static __rte_always_inline uint16_t
4545 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4546  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4547 {
4548  buffer->pkts[buffer->length++] = tx_pkt;
4549  if (buffer->length < buffer->size)
4550  return 0;
4551 
4552  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4553 }
4554 
4555 #ifdef __cplusplus
4556 }
4557 #endif
4558 
4559 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1177
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:979
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int __rte_experimental rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
uint8_t tc_bws[ETH_DCB_NUM_TCS]
Definition: rte_ethdev.h:1267
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:574
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1180
struct rte_fdir_conf fdir_conf
Definition: rte_ethdev.h:989
int rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel)
uint32_t rmv
Definition: rte_ethdev.h:946
#define __rte_always_inline
Definition: rte_common.h:153
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:791
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1209
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1155
uint16_t reta[RTE_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:645
const uint32_t * dev_flags
Definition: rte_ethdev.h:1145
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
void rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4418
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:653
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:983
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:263
enum rte_eth_event_ipsec_subtype subtype
Definition: rte_ethdev.h:2742
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:774
uint16_t rte_eth_find_next(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:893
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4188
uint64_t imissed
Definition: rte_ethdev.h:248
static int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4141
uint32_t low_water
Definition: rte_ethdev.h:853
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:397
uint8_t rss_key_len
Definition: rte_ethdev.h:445
void rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:329
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1159
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1163
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
void * userdata
Definition: rte_mbuf.h:681
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:964
enum rte_fdir_status_mode status
Definition: rte_ethdev.h:917
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:751
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:955
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1161
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:377
rte_eth_fc_mode
Definition: rte_ethdev.h:839
int __rte_experimental rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:705
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:833
#define __rte_unused
Definition: rte_common.h:84
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:261
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:245
rte_filter_op
Definition: rte_eth_ctrl.h:46
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:970
uint8_t hash_key_size
Definition: rte_ethdev.h:1165
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:398
struct rte_mempool * mp
Definition: rte_ethdev.h:1197
uint32_t dcb_capability_en
Definition: rte_ethdev.h:988
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:259
const char * name
Definition: rte_ethdev.h:1117
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1189
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
void rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t rxq
Definition: rte_ethdev.h:944
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:790
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1173
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:712
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1171
uint8_t rx_deferred_start
Definition: rte_ethdev.h:777
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:2585
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:2779
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:962
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:255
uint32_t high_water
Definition: rte_ethdev.h:852
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:704
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1208
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
Definition: rte_ethdev.h:990
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1213
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1174
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:783
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
uint16_t send_xon
Definition: rte_ethdev.h:855
int rte_eth_stats_reset(uint16_t port_id)
struct rte_eth_vmdq_dcb_conf::@117 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1169
#define unlikely(x)
uint16_t nb_max
Definition: rte_ethdev.h:808
uint64_t ibytes
Definition: rte_ethdev.h:246
struct rte_eth_conf::@119 rx_adv_conf
uint64_t offloads
Definition: rte_ethdev.h:801
uint64_t oerrors
Definition: rte_ethdev.h:253
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:973
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:975
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
char name[RTE_ETH_XSTATS_NAME_SIZE]
Definition: rte_ethdev.h:1237
void rte_eth_promiscuous_enable(uint16_t port_id)
uint16_t max_mtu
Definition: rte_ethdev.h:1144
uint64_t offloads
Definition: rte_ethdev.h:404
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:396
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:735
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:792
uint16_t nb_desc
Definition: rte_ethdev.h:1200
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:4065
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1154
uint8_t scattered_rx
Definition: rte_ethdev.h:1199
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:981
uint64_t offloads
Definition: rte_ethdev.h:757
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1172
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1157
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:257
uint16_t min_mtu
Definition: rte_ethdev.h:1143
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1354
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:247
uint8_t enable_loop_back
Definition: rte_ethdev.h:738
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
uint16_t __rte_experimental rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
int __rte_experimental rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1198
void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
struct rte_eth_vmdq_rx_conf::@118 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
uint16_t __rte_experimental rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
struct rte_eth_dcb_tc_queue_mapping::@121 tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:575
void rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1149
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1360
uint16_t rx_free_thresh
Definition: rte_ethdev.h:775
struct rte_eth_vlan_mirror vlan
Definition: rte_ethdev.h:633
uint64_t dev_capa
Definition: rte_ethdev.h:1184
uint64_t ierrors
Definition: rte_ethdev.h:252
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:763
uint8_t priority
Definition: rte_ethdev.h:868
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1167
uint16_t rte_eth_dev_count_total(void)
int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1182
rte_vlan_type
Definition: rte_ethdev.h:411
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
uint16_t nb_seg_max
Definition: rte_ethdev.h:820
uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:1266
uint64_t ipackets
Definition: rte_ethdev.h:244
uint16_t max_vfs
Definition: rte_ethdev.h:1153
uint16_t pause_time
Definition: rte_ethdev.h:854
struct rte_eth_dcb_tc_queue_mapping tc_queue
Definition: rte_ethdev.h:1269
int __rte_experimental rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_filter_type
Definition: rte_eth_ctrl.h:28
int rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en)
uint64_t rx_nombuf
Definition: rte_ethdev.h:254
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:4545
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:763
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define __rte_cache_min_aligned
Definition: rte_memory.h:71
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:336
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1170
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:2724
rte_eth_nb_pools
Definition: rte_ethdev.h:662
void rte_eth_xstats_reset(uint16_t port_id)
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:608
uint16_t nb_align
Definition: rte_ethdev.h:810
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:344
const char * driver_name
Definition: rte_ethdev.h:1140
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:4112
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:736
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1178
struct rte_eth_fdir_flex_conf flex_conf
Definition: rte_ethdev.h:921
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int __rte_experimental rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1150
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:874
uint64_t value
Definition: rte_ethdev.h:1226
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
struct rte_eth_dcb_tc_queue_mapping::@122 tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]
enum rte_fdir_pballoc_type pballoc
Definition: rte_ethdev.h:916
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1147
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:446
int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id)
uint64_t id
Definition: rte_ethdev.h:1225
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:763
enum rte_fdir_mode mode
Definition: rte_ethdev.h:915
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1141
__rte_deprecated int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg)
void rte_eth_allmulticast_disable(uint16_t port_id)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:857
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1330
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:856
void rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:609
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:971
uint8_t * rss_key
Definition: rte_ethdev.h:444
rte_fdir_status_mode
Definition: rte_ethdev.h:902
__rte_deprecated int rte_eth_dev_filter_supported(uint16_t port_id, enum rte_filter_type filter_type)
uint8_t tx_deferred_start
Definition: rte_ethdev.h:795
uint8_t wthresh
Definition: rte_ethdev.h:330
uint16_t max_rx_queues
Definition: rte_ethdev.h:1148
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:867
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:963
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:776
int rte_eth_dev_is_valid_port(uint16_t port_id)
uint16_t nb_min
Definition: rte_ethdev.h:809
union rte_eth_conf::@120 tx_adv_conf
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:328
void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1168
uint32_t speed_capa
Definition: rte_ethdev.h:1175
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4332
uint8_t drop_queue
Definition: rte_ethdev.h:919
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_deprecated uint16_t rte_eth_dev_count(void)
uint8_t autoneg
Definition: rte_ethdev.h:858
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1146
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:942
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:4492
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:2763