DPDK  19.11.3
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
148 /* Use this macro to check if LRO API is supported */
149 #define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 #include <rte_compat.h>
152 #include <rte_log.h>
153 #include <rte_interrupts.h>
154 #include <rte_dev.h>
155 #include <rte_devargs.h>
156 #include <rte_errno.h>
157 #include <rte_common.h>
158 #include <rte_config.h>
159 #include <rte_ether.h>
160 
161 #include "rte_dev_info.h"
162 
163 extern int rte_eth_dev_logtype;
164 
165 #define RTE_ETHDEV_LOG(level, ...) \
166  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
167 
168 struct rte_mbuf;
169 
186 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
187 
202 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
203 
216 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
217 
/**
 * Iterate over Ethernet ports matching a devargs string.
 *
 * Initializes @p iter with rte_eth_iterator_init() and then walks port
 * ids with rte_eth_iterator_next() until it yields RTE_MAX_ETHPORTS
 * (the end-of-iteration sentinel).
 *
 * @param id      uint16_t loop variable receiving each matching port id.
 * @param devargs Device description string to match against.
 * @param iter    Pointer to a struct rte_dev_iterator used as loop state.
 */
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))
236 
244  uint64_t ipackets;
245  uint64_t opackets;
246  uint64_t ibytes;
247  uint64_t obytes;
248  uint64_t imissed;
252  uint64_t ierrors;
253  uint64_t oerrors;
254  uint64_t rx_nombuf;
255  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
257  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
259  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
261  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
263  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
265 };
266 
270 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
271 #define ETH_LINK_SPEED_FIXED (1 << 0)
272 #define ETH_LINK_SPEED_10M_HD (1 << 1)
273 #define ETH_LINK_SPEED_10M (1 << 2)
274 #define ETH_LINK_SPEED_100M_HD (1 << 3)
275 #define ETH_LINK_SPEED_100M (1 << 4)
276 #define ETH_LINK_SPEED_1G (1 << 5)
277 #define ETH_LINK_SPEED_2_5G (1 << 6)
278 #define ETH_LINK_SPEED_5G (1 << 7)
279 #define ETH_LINK_SPEED_10G (1 << 8)
280 #define ETH_LINK_SPEED_20G (1 << 9)
281 #define ETH_LINK_SPEED_25G (1 << 10)
282 #define ETH_LINK_SPEED_40G (1 << 11)
283 #define ETH_LINK_SPEED_50G (1 << 12)
284 #define ETH_LINK_SPEED_56G (1 << 13)
285 #define ETH_LINK_SPEED_100G (1 << 14)
286 #define ETH_LINK_SPEED_200G (1 << 15)
291 #define ETH_SPEED_NUM_NONE 0
292 #define ETH_SPEED_NUM_10M 10
293 #define ETH_SPEED_NUM_100M 100
294 #define ETH_SPEED_NUM_1G 1000
295 #define ETH_SPEED_NUM_2_5G 2500
296 #define ETH_SPEED_NUM_5G 5000
297 #define ETH_SPEED_NUM_10G 10000
298 #define ETH_SPEED_NUM_20G 20000
299 #define ETH_SPEED_NUM_25G 25000
300 #define ETH_SPEED_NUM_40G 40000
301 #define ETH_SPEED_NUM_50G 50000
302 #define ETH_SPEED_NUM_56G 56000
303 #define ETH_SPEED_NUM_100G 100000
304 #define ETH_SPEED_NUM_200G 200000
__extension__
/**
 * A structure used to retrieve link-level information of an Ethernet port.
 *
 * Aligned to 8 bytes so the whole structure can be read or written as a
 * single 64-bit quantity.
 */
struct rte_eth_link {
	uint32_t link_speed;       /**< Link speed in Mbps (ETH_SPEED_NUM_*). */
	uint16_t link_duplex : 1;  /**< ETH_LINK_HALF_DUPLEX or ETH_LINK_FULL_DUPLEX. */
	uint16_t link_autoneg : 1; /**< ETH_LINK_FIXED or ETH_LINK_AUTONEG. */
	uint16_t link_status : 1;  /**< ETH_LINK_DOWN or ETH_LINK_UP. */
} __rte_aligned(8);
317 /* Utility constants */
318 #define ETH_LINK_HALF_DUPLEX 0
319 #define ETH_LINK_FULL_DUPLEX 1
320 #define ETH_LINK_DOWN 0
321 #define ETH_LINK_UP 1
322 #define ETH_LINK_FIXED 0
323 #define ETH_LINK_AUTONEG 1
/**
 * A structure used to configure the ring threshold registers of an Rx/Tx
 * queue for an Ethernet port.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};
334 
338 #define ETH_MQ_RX_RSS_FLAG 0x1
339 #define ETH_MQ_RX_DCB_FLAG 0x2
340 #define ETH_MQ_RX_VMDQ_FLAG 0x4
341 
349 
353  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
355  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
356 
358  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
360  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
362  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
365  ETH_MQ_RX_VMDQ_FLAG,
366 };
367 
371 #define ETH_RSS ETH_MQ_RX_RSS
372 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
373 #define ETH_DCB_RX ETH_MQ_RX_DCB
374 
384 };
385 
389 #define ETH_DCB_NONE ETH_MQ_TX_NONE
390 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
391 #define ETH_DCB_TX ETH_MQ_TX_DCB
392 
399  uint32_t max_rx_pkt_len;
402  uint16_t split_hdr_size;
408  uint64_t offloads;
409 
410  uint64_t reserved_64s[2];
411  void *reserved_ptrs[2];
412 };
413 
419  ETH_VLAN_TYPE_UNKNOWN = 0,
422  ETH_VLAN_TYPE_MAX,
423 };
424 
430  uint64_t ids[64];
431 };
432 
451  uint8_t *rss_key;
452  uint8_t rss_key_len;
453  uint64_t rss_hf;
454 };
455 
456 /*
457  * A packet can be identified by hardware as different flow types. Different
458  * NIC hardware may support different flow types.
459  * Basically, the NIC hardware identifies the flow type as deep protocol as
460  * possible, and exclusively. For example, if a packet is identified as
461  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
462  * though it is an actual IPV4 packet.
463  */
464 #define RTE_ETH_FLOW_UNKNOWN 0
465 #define RTE_ETH_FLOW_RAW 1
466 #define RTE_ETH_FLOW_IPV4 2
467 #define RTE_ETH_FLOW_FRAG_IPV4 3
468 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
469 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
470 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
471 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
472 #define RTE_ETH_FLOW_IPV6 8
473 #define RTE_ETH_FLOW_FRAG_IPV6 9
474 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
475 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
476 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
477 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
478 #define RTE_ETH_FLOW_L2_PAYLOAD 14
479 #define RTE_ETH_FLOW_IPV6_EX 15
480 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
481 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
482 #define RTE_ETH_FLOW_PORT 18
483 
484 #define RTE_ETH_FLOW_VXLAN 19
485 #define RTE_ETH_FLOW_GENEVE 20
486 #define RTE_ETH_FLOW_NVGRE 21
487 #define RTE_ETH_FLOW_VXLAN_GPE 22
488 #define RTE_ETH_FLOW_GTPU 23
489 #define RTE_ETH_FLOW_MAX 24
490 
491 /*
492  * Below macros are defined for RSS offload types, they can be used to
493  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
494  */
495 #define ETH_RSS_IPV4 (1ULL << 2)
496 #define ETH_RSS_FRAG_IPV4 (1ULL << 3)
497 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << 4)
498 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << 5)
499 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << 6)
500 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
501 #define ETH_RSS_IPV6 (1ULL << 8)
502 #define ETH_RSS_FRAG_IPV6 (1ULL << 9)
503 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << 10)
504 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << 11)
505 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << 12)
506 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
507 #define ETH_RSS_L2_PAYLOAD (1ULL << 14)
508 #define ETH_RSS_IPV6_EX (1ULL << 15)
509 #define ETH_RSS_IPV6_TCP_EX (1ULL << 16)
510 #define ETH_RSS_IPV6_UDP_EX (1ULL << 17)
511 #define ETH_RSS_PORT (1ULL << 18)
512 #define ETH_RSS_VXLAN (1ULL << 19)
513 #define ETH_RSS_GENEVE (1ULL << 20)
514 #define ETH_RSS_NVGRE (1ULL << 21)
515 #define ETH_RSS_GTPU (1ULL << 23)
516 
517 /*
518  * We use the following macros to combine with above ETH_RSS_* for
519  * more specific input set selection. These bits are defined starting
520  * from the high end of the 64 bits.
521  * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
522  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
523  * the same level are used simultaneously, it is the same case as none of
524  * them are added.
525  */
526 #define ETH_RSS_L3_SRC_ONLY (1ULL << 63)
527 #define ETH_RSS_L3_DST_ONLY (1ULL << 62)
528 #define ETH_RSS_L4_SRC_ONLY (1ULL << 61)
529 #define ETH_RSS_L4_DST_ONLY (1ULL << 60)
530 
541 static inline uint64_t
542 rte_eth_rss_hf_refine(uint64_t rss_hf)
543 {
544  if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
545  rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
546 
547  if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
548  rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
549 
550  return rss_hf;
551 }
552 
553 #define ETH_RSS_IP ( \
554  ETH_RSS_IPV4 | \
555  ETH_RSS_FRAG_IPV4 | \
556  ETH_RSS_NONFRAG_IPV4_OTHER | \
557  ETH_RSS_IPV6 | \
558  ETH_RSS_FRAG_IPV6 | \
559  ETH_RSS_NONFRAG_IPV6_OTHER | \
560  ETH_RSS_IPV6_EX)
561 
562 #define ETH_RSS_UDP ( \
563  ETH_RSS_NONFRAG_IPV4_UDP | \
564  ETH_RSS_NONFRAG_IPV6_UDP | \
565  ETH_RSS_IPV6_UDP_EX)
566 
567 #define ETH_RSS_TCP ( \
568  ETH_RSS_NONFRAG_IPV4_TCP | \
569  ETH_RSS_NONFRAG_IPV6_TCP | \
570  ETH_RSS_IPV6_TCP_EX)
571 
572 #define ETH_RSS_SCTP ( \
573  ETH_RSS_NONFRAG_IPV4_SCTP | \
574  ETH_RSS_NONFRAG_IPV6_SCTP)
575 
576 #define ETH_RSS_TUNNEL ( \
577  ETH_RSS_VXLAN | \
578  ETH_RSS_GENEVE | \
579  ETH_RSS_NVGRE)
580 
582 #define ETH_RSS_PROTO_MASK ( \
583  ETH_RSS_IPV4 | \
584  ETH_RSS_FRAG_IPV4 | \
585  ETH_RSS_NONFRAG_IPV4_TCP | \
586  ETH_RSS_NONFRAG_IPV4_UDP | \
587  ETH_RSS_NONFRAG_IPV4_SCTP | \
588  ETH_RSS_NONFRAG_IPV4_OTHER | \
589  ETH_RSS_IPV6 | \
590  ETH_RSS_FRAG_IPV6 | \
591  ETH_RSS_NONFRAG_IPV6_TCP | \
592  ETH_RSS_NONFRAG_IPV6_UDP | \
593  ETH_RSS_NONFRAG_IPV6_SCTP | \
594  ETH_RSS_NONFRAG_IPV6_OTHER | \
595  ETH_RSS_L2_PAYLOAD | \
596  ETH_RSS_IPV6_EX | \
597  ETH_RSS_IPV6_TCP_EX | \
598  ETH_RSS_IPV6_UDP_EX | \
599  ETH_RSS_PORT | \
600  ETH_RSS_VXLAN | \
601  ETH_RSS_GENEVE | \
602  ETH_RSS_NVGRE)
603 
604 /*
605  * Definitions used for redirection table entry size.
606  * Some RSS RETA sizes may not be supported by some drivers, check the
607  * documentation or the description of relevant functions for more details.
608  */
609 #define ETH_RSS_RETA_SIZE_64 64
610 #define ETH_RSS_RETA_SIZE_128 128
611 #define ETH_RSS_RETA_SIZE_256 256
612 #define ETH_RSS_RETA_SIZE_512 512
613 #define RTE_RETA_GROUP_SIZE 64
614 
615 /* Definitions used for VMDQ and DCB functionality */
616 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
617 #define ETH_DCB_NUM_USER_PRIORITIES 8
618 #define ETH_VMDQ_DCB_NUM_QUEUES 128
619 #define ETH_DCB_NUM_QUEUES 128
621 /* DCB capability defines */
622 #define ETH_DCB_PG_SUPPORT 0x00000001
623 #define ETH_DCB_PFC_SUPPORT 0x00000002
625 /* Definitions used for VLAN Offload functionality */
626 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
627 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
628 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
629 #define ETH_QINQ_STRIP_OFFLOAD 0x0008
631 /* Definitions used for mask VLAN setting */
632 #define ETH_VLAN_STRIP_MASK 0x0001
633 #define ETH_VLAN_FILTER_MASK 0x0002
634 #define ETH_VLAN_EXTEND_MASK 0x0004
635 #define ETH_QINQ_STRIP_MASK 0x0008
636 #define ETH_VLAN_ID_MAX 0x0FFF
638 /* Definitions used for receive MAC address */
639 #define ETH_NUM_RECEIVE_MAC_ADDR 128
641 /* Definitions used for unicast hash */
642 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
644 /* Definitions used for VMDQ pool rx mode setting */
645 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
646 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
647 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
648 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
649 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
652 #define ETH_MIRROR_MAX_VLANS 64
653 
654 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
655 #define ETH_MIRROR_UPLINK_PORT 0x02
656 #define ETH_MIRROR_DOWNLINK_PORT 0x04
657 #define ETH_MIRROR_VLAN 0x08
658 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
/**
 * A structure used to configure VLAN traffic mirroring.
 */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< Bitmap selecting which vlan_id[] entries are valid. */
	/** VLAN IDs to be mirrored, up to ETH_MIRROR_MAX_VLANS entries. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
};
668 
673  uint8_t rule_type;
674  uint8_t dst_pool;
675  uint64_t pool_mask;
678 };
679 
687  uint64_t mask;
689  uint16_t reta[RTE_RETA_GROUP_SIZE];
691 };
692 
698  ETH_4_TCS = 4,
700 };
701 
711 };
712 
713 /* This structure may be extended in future. */
/** A structure used to configure DCB (Data Center Bridging) in Rx. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes in the Rx path. */
	/** Traffic class each user priority (index 0..7) is mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
719 
/** A structure used to configure combined VMDq+DCB operation in Tx. */
struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools. */
	/** Traffic class each user priority (index 0..7) is mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
725 
/** A structure used to configure DCB (Data Center Bridging) in Tx. */
struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes in the Tx path. */
	/** Traffic class each user priority (index 0..7) is mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
731 
/** A structure used to configure VMDq operation in Tx. */
struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools. */
};
735 
750  uint8_t default_pool;
751  uint8_t nb_pool_maps;
752  struct {
753  uint16_t vlan_id;
754  uint64_t pools;
758 };
759 
781  uint8_t default_pool;
783  uint8_t nb_pool_maps;
784  uint32_t rx_mode;
785  struct {
786  uint16_t vlan_id;
787  uint64_t pools;
789 };
790 
801  uint64_t offloads;
802 
803  /* For i40e specifically */
804  uint16_t pvid;
805  __extension__
806  uint8_t hw_vlan_reject_tagged : 1,
813  uint64_t reserved_64s[2];
814  void *reserved_ptrs[2];
815 };
816 
822  uint16_t rx_free_thresh;
823  uint8_t rx_drop_en;
830  uint64_t offloads;
831 
832  uint64_t reserved_64s[2];
833  void *reserved_ptrs[2];
834 };
835 
841  uint16_t tx_rs_thresh;
842  uint16_t tx_free_thresh;
851  uint64_t offloads;
852 
853  uint64_t reserved_64s[2];
854  void *reserved_ptrs[2];
855 };
856 
865  uint16_t max_nb_queues;
867  uint16_t max_rx_2_tx;
869  uint16_t max_tx_2_rx;
870  uint16_t max_nb_desc;
871 };
872 
873 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
874 
882  uint16_t port;
883  uint16_t queue;
884 };
885 
893  uint16_t peer_count;
894  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
895 };
896 
901  uint16_t nb_max;
902  uint16_t nb_min;
903  uint16_t nb_align;
913  uint16_t nb_seg_max;
914 
926  uint16_t nb_mtu_seg_max;
927 };
928 
937 };
938 
945  uint32_t high_water;
946  uint32_t low_water;
947  uint16_t pause_time;
948  uint16_t send_xon;
951  uint8_t autoneg;
952 };
953 
961  uint8_t priority;
962 };
963 
968  RTE_TUNNEL_TYPE_NONE = 0,
969  RTE_TUNNEL_TYPE_VXLAN,
970  RTE_TUNNEL_TYPE_GENEVE,
971  RTE_TUNNEL_TYPE_TEREDO,
972  RTE_TUNNEL_TYPE_NVGRE,
973  RTE_TUNNEL_TYPE_IP_IN_GRE,
974  RTE_L2_TUNNEL_TYPE_E_TAG,
975  RTE_TUNNEL_TYPE_VXLAN_GPE,
976  RTE_TUNNEL_TYPE_MAX,
977 };
978 
979 /* Deprecated API file for rte_eth_dev_filter_* functions */
980 #include "rte_eth_ctrl.h"
981 
990 };
991 
999 };
1000 
1012  uint8_t drop_queue;
1013  struct rte_eth_fdir_masks mask;
1016 };
1017 
1026  uint16_t udp_port;
1027  uint8_t prot_type;
1028 };
1029 
1035  uint32_t lsc:1;
1037  uint32_t rxq:1;
1039  uint32_t rmv:1;
1040 };
1041 
1048  uint32_t link_speeds;
1057  uint32_t lpbk_mode;
1062  struct {
1066  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1070  } rx_adv_conf;
1071  union {
1072  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1074  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1076  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1078  } tx_adv_conf;
1084 };
1085 
1089 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
1090 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
1091 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
1092 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
1093 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
1094 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
1095 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
1096 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
1097 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
1098 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
1099 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
1100 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
1101 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
1102 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
1103 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
1104 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
1105 #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
1106 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
1107 #define DEV_RX_OFFLOAD_RSS_HASH 0x00080000
1108 
1109 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
1110  DEV_RX_OFFLOAD_UDP_CKSUM | \
1111  DEV_RX_OFFLOAD_TCP_CKSUM)
1112 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
1113  DEV_RX_OFFLOAD_VLAN_FILTER | \
1114  DEV_RX_OFFLOAD_VLAN_EXTEND | \
1115  DEV_RX_OFFLOAD_QINQ_STRIP)
1116 
1117 /*
1118  * If new Rx offload capabilities are defined, they also must be
1119  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1120  */
1121 
1125 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
1126 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
1127 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
1128 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
1129 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
1130 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
1131 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
1132 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
1133 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
1134 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
1135 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
1136 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
1137 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
1138 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
1139 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
1140 
1143 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
1144 
1145 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
1146 
1150 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
1151 
1156 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
1157 
1162 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
1163 
1164 #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
1165 
1166 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
1167 
1168 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
1169 
1171 /*
1172  * If new Tx offload capabilities are defined, they also must be
1173  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1174  */
1175 
1176 /*
1177  * Fallback default preferred Rx/Tx port parameters.
1178  * These are used if an application requests default parameters
1179  * but the PMD does not provide preferred values.
1180  */
1181 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1182 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1183 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1184 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1185 
1192  uint16_t burst_size;
1193  uint16_t ring_size;
1194  uint16_t nb_queues;
1195 };
1196 
1201 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1202 
1207  const char *name;
1208  uint16_t domain_id;
1209  uint16_t port_id;
1217 };
1218 
1229  struct rte_device *device;
1230  const char *driver_name;
1231  unsigned int if_index;
1233  uint16_t min_mtu;
1234  uint16_t max_mtu;
1235  const uint32_t *dev_flags;
1236  uint32_t min_rx_bufsize;
1237  uint32_t max_rx_pktlen;
1240  uint16_t max_rx_queues;
1241  uint16_t max_tx_queues;
1242  uint32_t max_mac_addrs;
1243  uint32_t max_hash_mac_addrs;
1245  uint16_t max_vfs;
1246  uint16_t max_vmdq_pools;
1255  uint16_t reta_size;
1257  uint8_t hash_key_size;
1262  uint16_t vmdq_queue_base;
1263  uint16_t vmdq_queue_num;
1264  uint16_t vmdq_pool_base;
1267  uint32_t speed_capa;
1269  uint16_t nb_rx_queues;
1270  uint16_t nb_tx_queues;
1276  uint64_t dev_capa;
1282 
1283  uint64_t reserved_64s[2];
1284  void *reserved_ptrs[2];
1285 };
1286 
1292  struct rte_mempool *mp;
1294  uint8_t scattered_rx;
1295  uint16_t nb_desc;
1297 
1304  uint16_t nb_desc;
1306 
1307 /* Generic Burst mode flag definition, values can be ORed. */
1308 
1314 #define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)
1315 
1321  uint64_t flags;
1323 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1324  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1325 };
1326 
1328 #define RTE_ETH_XSTATS_NAME_SIZE 64
1329 
1340  uint64_t id;
1341  uint64_t value;
1342 };
1343 
1353 };
1354 
1355 #define ETH_DCB_NUM_TCS 8
1356 #define ETH_MAX_VMDQ_POOL 64
1357 
1364  struct {
1365  uint8_t base;
1366  uint8_t nb_queue;
1367  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1369  struct {
1370  uint8_t base;
1371  uint8_t nb_queue;
1372  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1373 };
1374 
1380  uint8_t nb_tcs;
1381  uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES];
1382  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1385 };
1386 
1387 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1388 
1389 /* Macros to check for valid port */
/**
 * Validate @p port_id; if it is not a valid port, log an error and make
 * the enclosing function return @p retval.
 */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)
1396 
/**
 * Validate @p port_id; if it is not a valid port, log an error and make
 * the enclosing (void) function return.
 */
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)
1403 
1409 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1410 
1411 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1412 
1413 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1414 
1415 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1416 
1439 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1440  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1441  void *user_param);
1442 
1463 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1464  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1465 
1476 };
1477 
/** SR-IOV related state of an Ethernet device. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Non-zero when SR-IOV is active. */
	uint8_t nb_q_per_pool;   /**< Number of Rx queues per pool. */
	uint16_t def_vmdq_idx;   /**< Default pool index used by the PF. */
	uint16_t def_pool_q_idx; /**< Default pool queue start index. */
};
1484 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1485 
1486 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1487 
1488 #define RTE_ETH_DEV_NO_OWNER 0
1489 
1490 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1491 
/** Port ownership structure identifying the owner of an Ethernet port. */
struct rte_eth_dev_owner {
	uint64_t id; /**< Unique owner identifier. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name. */
};
1496 
1501 #define RTE_ETH_DEV_CLOSE_REMOVE 0x0001
1502 
1503 #define RTE_ETH_DEV_INTR_LSC 0x0002
1504 
1505 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1506 
1507 #define RTE_ETH_DEV_INTR_RMV 0x0008
1508 
1509 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1510 
1511 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1512 
1524 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1525  const uint64_t owner_id);
1526 
/**
 * Iterate over all valid Ethernet ports owned by owner id @p o,
 * assigning each port id to @p p in turn. Iteration ends when
 * rte_eth_find_next_owned_by() returns RTE_MAX_ETHPORTS or beyond.
 */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))
1534 
1543 uint16_t rte_eth_find_next(uint16_t port_id);
1544 
1548 #define RTE_ETH_FOREACH_DEV(p) \
1549  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1550 
1565 __rte_experimental
1566 uint16_t
1567 rte_eth_find_next_of(uint16_t port_id_start,
1568  const struct rte_device *parent);
1569 
1578 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
1579  for (port_id = rte_eth_find_next_of(0, parent); \
1580  port_id < RTE_MAX_ETHPORTS; \
1581  port_id = rte_eth_find_next_of(port_id + 1, parent))
1582 
1597 __rte_experimental
1598 uint16_t
1599 rte_eth_find_next_sibling(uint16_t port_id_start,
1600  uint16_t ref_port_id);
1601 
1612 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
1613  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
1614  port_id < RTE_MAX_ETHPORTS; \
1615  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1616 
1630 __rte_experimental
1631 int rte_eth_dev_owner_new(uint64_t *owner_id);
1632 
1646 __rte_experimental
1647 int rte_eth_dev_owner_set(const uint16_t port_id,
1648  const struct rte_eth_dev_owner *owner);
1649 
1663 __rte_experimental
1664 int rte_eth_dev_owner_unset(const uint16_t port_id,
1665  const uint64_t owner_id);
1666 
1678 __rte_experimental
1679 int rte_eth_dev_owner_delete(const uint64_t owner_id);
1680 
1694 __rte_experimental
1695 int rte_eth_dev_owner_get(const uint16_t port_id,
1696  struct rte_eth_dev_owner *owner);
1697 
1708 uint16_t rte_eth_dev_count_avail(void);
1709 
1718 uint16_t rte_eth_dev_count_total(void);
1719 
1731 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1732 
1741 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
1742 
1751 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
1752 
1792 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1793  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1794 
1806 __rte_experimental
1807 int
1808 rte_eth_dev_is_removed(uint16_t port_id);
1809 
1859 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1860  uint16_t nb_rx_desc, unsigned int socket_id,
1861  const struct rte_eth_rxconf *rx_conf,
1862  struct rte_mempool *mb_pool);
1863 
1890 __rte_experimental
1892  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
1893  const struct rte_eth_hairpin_conf *conf);
1894 
1943 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1944  uint16_t nb_tx_desc, unsigned int socket_id,
1945  const struct rte_eth_txconf *tx_conf);
1946 
1971 __rte_experimental
1973  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
1974  const struct rte_eth_hairpin_conf *conf);
1975 
1986 int rte_eth_dev_socket_id(uint16_t port_id);
1987 
1997 int rte_eth_dev_is_valid_port(uint16_t port_id);
1998 
2015 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2016 
2032 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2033 
2050 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2051 
2067 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2068 
2088 int rte_eth_dev_start(uint16_t port_id);
2089 
2097 void rte_eth_dev_stop(uint16_t port_id);
2098 
2111 int rte_eth_dev_set_link_up(uint16_t port_id);
2112 
2122 int rte_eth_dev_set_link_down(uint16_t port_id);
2123 
2132 void rte_eth_dev_close(uint16_t port_id);
2133 
2171 int rte_eth_dev_reset(uint16_t port_id);
2172 
2184 int rte_eth_promiscuous_enable(uint16_t port_id);
2185 
2197 int rte_eth_promiscuous_disable(uint16_t port_id);
2198 
2209 int rte_eth_promiscuous_get(uint16_t port_id);
2210 
2222 int rte_eth_allmulticast_enable(uint16_t port_id);
2223 
2235 int rte_eth_allmulticast_disable(uint16_t port_id);
2236 
2247 int rte_eth_allmulticast_get(uint16_t port_id);
2248 
2264 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2265 
2281 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2282 
2300 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2301 
2313 int rte_eth_stats_reset(uint16_t port_id);
2314 
2344 int rte_eth_xstats_get_names(uint16_t port_id,
2345  struct rte_eth_xstat_name *xstats_names,
2346  unsigned int size);
2347 
2377 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2378  unsigned int n);
2379 
2402 int
2403 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2404  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2405  uint64_t *ids);
2406 
2430 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2431  uint64_t *values, unsigned int size);
2432 
2451 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2452  uint64_t *id);
2453 
2466 int rte_eth_xstats_reset(uint16_t port_id);
2467 
2485 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2486  uint16_t tx_queue_id, uint8_t stat_idx);
2487 
2505 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2506  uint16_t rx_queue_id,
2507  uint8_t stat_idx);
2508 
2521 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
2522 
2565 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2566 
2586 int rte_eth_dev_fw_version_get(uint16_t port_id,
2587  char *fw_version, size_t fw_size);
2588 
2627 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2628  uint32_t *ptypes, int num);
2662 __rte_experimental
2663 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
2664  uint32_t *set_ptypes, unsigned int num);
2665 
2677 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2678 
2696 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2697 
2717 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2718 
2738 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2739  int on);
2740 
2758 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2759  enum rte_vlan_type vlan_type,
2760  uint16_t tag_type);
2761 
2784 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2785 
2799 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2800 
2815 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2816 
2817 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2818  void *userdata);
2819 
2825  buffer_tx_error_fn error_callback;
2826  void *error_userdata;
2827  uint16_t size;
2828  uint16_t length;
2829  struct rte_mbuf *pkts[];
2831 };
2832 
/**
 * Number of bytes needed for a struct rte_eth_dev_tx_buffer able to hold
 * @p sz mbuf pointers: the fixed header plus the trailing pkts[] array.
 */
#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2841 
2852 int
2853 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2854 
2879 int
2881  buffer_tx_error_fn callback, void *userdata);
2882 
2905 void
2906 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2907  void *userdata);
2908 
2932 void
2933 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2934  void *userdata);
2935 
2961 int
2962 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
2963 
2979 };
2980 
2988  uint64_t metadata;
3002 };
3003 
3022 };
3023 
3024 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3025  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3045 int rte_eth_dev_callback_register(uint16_t port_id,
3046  enum rte_eth_event_type event,
3047  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3048 
3067 int rte_eth_dev_callback_unregister(uint16_t port_id,
3068  enum rte_eth_event_type event,
3069  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3070 
3092 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
3093 
3114 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
3115 
3133 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
3134 
3156 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3157  int epfd, int op, void *data);
3158 
3176 __rte_experimental
3177 int
3178 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
3179 
3193 int rte_eth_led_on(uint16_t port_id);
3194 
3208 int rte_eth_led_off(uint16_t port_id);
3209 
3223 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
3224  struct rte_eth_fc_conf *fc_conf);
3225 
3240 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
3241  struct rte_eth_fc_conf *fc_conf);
3242 
3258 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3259  struct rte_eth_pfc_conf *pfc_conf);
3260 
3280 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3281  uint32_t pool);
3282 
3296 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3297  struct rte_ether_addr *mac_addr);
3298 
3312 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3313  struct rte_ether_addr *mac_addr);
3314 
3331 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3332  struct rte_eth_rss_reta_entry64 *reta_conf,
3333  uint16_t reta_size);
3334 
3352 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3353  struct rte_eth_rss_reta_entry64 *reta_conf,
3354  uint16_t reta_size);
3355 
3375 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3376  uint8_t on);
3377 
3396 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3397 
3420 int rte_eth_mirror_rule_set(uint16_t port_id,
3421  struct rte_eth_mirror_conf *mirror_conf,
3422  uint8_t rule_id,
3423  uint8_t on);
3424 
3439 int rte_eth_mirror_rule_reset(uint16_t port_id,
3440  uint8_t rule_id);
3441 
3458 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3459  uint16_t tx_rate);
3460 
3475 int rte_eth_dev_rss_hash_update(uint16_t port_id,
3476  struct rte_eth_rss_conf *rss_conf);
3477 
3492 int
3493 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3494  struct rte_eth_rss_conf *rss_conf);
3495 
3514 int
3515 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3516  struct rte_eth_udp_tunnel *tunnel_udp);
3517 
3537 int
3538 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3539  struct rte_eth_udp_tunnel *tunnel_udp);
3540 
3555 __rte_deprecated
3556 int rte_eth_dev_filter_supported(uint16_t port_id,
3557  enum rte_filter_type filter_type);
3558 
3578 __rte_deprecated
3579 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3580  enum rte_filter_op filter_op, void *arg);
3581 
3595 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3596  struct rte_eth_dcb_info *dcb_info);
3597 
3598 struct rte_eth_rxtx_callback;
3599 
3624 const struct rte_eth_rxtx_callback *
3625 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3626  rte_rx_callback_fn fn, void *user_param);
3627 
3653 const struct rte_eth_rxtx_callback *
3654 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3655  rte_rx_callback_fn fn, void *user_param);
3656 
3681 const struct rte_eth_rxtx_callback *
3682 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3683  rte_tx_callback_fn fn, void *user_param);
3684 
3715 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3716  const struct rte_eth_rxtx_callback *user_cb);
3717 
3748 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3749  const struct rte_eth_rxtx_callback *user_cb);
3750 
3769 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3770  struct rte_eth_rxq_info *qinfo);
3771 
3790 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3791  struct rte_eth_txq_info *qinfo);
3792 
3810 __rte_experimental
3811 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
3812  struct rte_eth_burst_mode *mode);
3813 
3831 __rte_experimental
3832 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
3833  struct rte_eth_burst_mode *mode);
3834 
3852 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
3853 
3866 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
3867 
3883 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3884 
3900 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3901 
3919 __rte_experimental
3920 int
3921 rte_eth_dev_get_module_info(uint16_t port_id,
3922  struct rte_eth_dev_module_info *modinfo);
3923 
3942 __rte_experimental
3943 int
3944 rte_eth_dev_get_module_eeprom(uint16_t port_id,
3945  struct rte_dev_eeprom_info *info);
3946 
3965 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3966  struct rte_ether_addr *mc_addr_set,
3967  uint32_t nb_mc_addr);
3968 
3981 int rte_eth_timesync_enable(uint16_t port_id);
3982 
3995 int rte_eth_timesync_disable(uint16_t port_id);
3996 
4015 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
4016  struct timespec *timestamp, uint32_t flags);
4017 
4033 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4034  struct timespec *timestamp);
4035 
4053 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
4054 
4069 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
4070 
4089 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
4090 
4135 __rte_experimental
4136 int
4137 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
4138 
4154 int
4155 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4156  struct rte_eth_l2_tunnel_conf *l2_tunnel);
4157 
4182 int
4183 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4184  struct rte_eth_l2_tunnel_conf *l2_tunnel,
4185  uint32_t mask,
4186  uint8_t en);
4187 
4203 int
4204 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
4205 
4220 int
4221 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
4222 
4239 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4240  uint16_t *nb_rx_desc,
4241  uint16_t *nb_tx_desc);
4242 
4257 int
4258 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
4259 
4269 void *
4270 rte_eth_dev_get_sec_ctx(uint16_t port_id);
4271 
4286 __rte_experimental
4287 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4288  struct rte_eth_hairpin_cap *cap);
4289 
4290 #include <rte_ethdev_core.h>
4291 
4374 static inline uint16_t
4375 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
4376  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
4377 {
4378  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4379  uint16_t nb_rx;
4380 
4381 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4382  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4383  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
4384 
4385  if (queue_id >= dev->data->nb_rx_queues) {
4386  RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4387  return 0;
4388  }
4389 #endif
4390  nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
4391  rx_pkts, nb_pkts);
4392 
4393 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4394  if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
4395  struct rte_eth_rxtx_callback *cb =
4396  dev->post_rx_burst_cbs[queue_id];
4397 
4398  do {
4399  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
4400  nb_pkts, cb->param);
4401  cb = cb->next;
4402  } while (cb != NULL);
4403  }
4404 #endif
4405 
4406  return nb_rx;
4407 }
4408 
4421 static inline int
4422 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
4423 {
4424  struct rte_eth_dev *dev;
4425 
4426  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4427  dev = &rte_eth_devices[port_id];
4428  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
4429  if (queue_id >= dev->data->nb_rx_queues)
4430  return -EINVAL;
4431 
4432  return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
4433 }
4434 
4450 static inline int
4451 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
4452 {
4453  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4454  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4455  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
4456  return (*dev->dev_ops->rx_descriptor_done)( \
4457  dev->data->rx_queues[queue_id], offset);
4458 }
4459 
4460 #define RTE_ETH_RX_DESC_AVAIL 0
4461 #define RTE_ETH_RX_DESC_DONE 1
4462 #define RTE_ETH_RX_DESC_UNAVAIL 2
4497 static inline int
4498 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
4499  uint16_t offset)
4500 {
4501  struct rte_eth_dev *dev;
4502  void *rxq;
4503 
4504 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4505  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4506 #endif
4507  dev = &rte_eth_devices[port_id];
4508 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4509  if (queue_id >= dev->data->nb_rx_queues)
4510  return -ENODEV;
4511 #endif
4512  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
4513  rxq = dev->data->rx_queues[queue_id];
4514 
4515  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
4516 }
4517 
4518 #define RTE_ETH_TX_DESC_FULL 0
4519 #define RTE_ETH_TX_DESC_DONE 1
4520 #define RTE_ETH_TX_DESC_UNAVAIL 2
4555 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
4556  uint16_t queue_id, uint16_t offset)
4557 {
4558  struct rte_eth_dev *dev;
4559  void *txq;
4560 
4561 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4562  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4563 #endif
4564  dev = &rte_eth_devices[port_id];
4565 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4566  if (queue_id >= dev->data->nb_tx_queues)
4567  return -ENODEV;
4568 #endif
4569  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
4570  txq = dev->data->tx_queues[queue_id];
4571 
4572  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
4573 }
4574 
4641 static inline uint16_t
4642 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
4643  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4644 {
4645  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4646 
4647 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4648  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4649  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
4650 
4651  if (queue_id >= dev->data->nb_tx_queues) {
4652  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4653  return 0;
4654  }
4655 #endif
4656 
4657 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4658  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
4659 
4660  if (unlikely(cb != NULL)) {
4661  do {
4662  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
4663  cb->param);
4664  cb = cb->next;
4665  } while (cb != NULL);
4666  }
4667 #endif
4668 
4669  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
4670 }
4671 
4725 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4726 
4727 static inline uint16_t
4728 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
4729  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4730 {
4731  struct rte_eth_dev *dev;
4732 
4733 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4734  if (!rte_eth_dev_is_valid_port(port_id)) {
4735  RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
4736  rte_errno = EINVAL;
4737  return 0;
4738  }
4739 #endif
4740 
4741  dev = &rte_eth_devices[port_id];
4742 
4743 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4744  if (queue_id >= dev->data->nb_tx_queues) {
4745  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4746  rte_errno = EINVAL;
4747  return 0;
4748  }
4749 #endif
4750 
4751  if (!dev->tx_pkt_prepare)
4752  return nb_pkts;
4753 
4754  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4755  tx_pkts, nb_pkts);
4756 }
4757 
4758 #else
4759 
/*
 * Native NOOP operation for compilation targets which do not require any
 * preparation steps; there, a functional NOOP may introduce an unnecessary
 * performance drop.
 *
 * Generally it is not a good idea to turn this on globally, and it should
 * not be used if the behavior of tx_preparation can change.
 */
4768 
4769 static inline uint16_t
4770 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
4771  __rte_unused uint16_t queue_id,
4772  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4773 {
4774  return nb_pkts;
4775 }
4776 
4777 #endif
4778 
4801 static inline uint16_t
4802 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4803  struct rte_eth_dev_tx_buffer *buffer)
4804 {
4805  uint16_t sent;
4806  uint16_t to_send = buffer->length;
4807 
4808  if (to_send == 0)
4809  return 0;
4810 
4811  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4812 
4813  buffer->length = 0;
4814 
4815  /* All packets sent, or to be dealt with by callback below */
4816  if (unlikely(sent != to_send))
4817  buffer->error_callback(&buffer->pkts[sent],
4818  (uint16_t)(to_send - sent),
4819  buffer->error_userdata);
4820 
4821  return sent;
4822 }
4823 
4854 static __rte_always_inline uint16_t
4855 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4856  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4857 {
4858  buffer->pkts[buffer->length++] = tx_pkt;
4859  if (buffer->length < buffer->size)
4860  return 0;
4861 
4862  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4863 }
4864 
4865 #ifdef __cplusplus
4866 }
4867 #endif
4868 
4869 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1269
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1072
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:616
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1272
struct rte_fdir_conf fdir_conf
Definition: rte_ethdev.h:1082
int rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel)
uint32_t rmv
Definition: rte_ethdev.h:1039
#define __rte_always_inline
Definition: rte_common.h:173
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:841
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1304
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1324
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1247
uint16_t reta[RTE_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:689
const uint32_t * dev_flags
Definition: rte_ethdev.h:1235
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
struct rte_eth_vmdq_dcb_conf::@135 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
void rte_eth_dev_stop(uint16_t port_id)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
#define __rte_cache_min_aligned
Definition: rte_common.h:350
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4728
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:697
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1076
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:263
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:821
uint16_t rte_eth_find_next(uint16_t port_id)
struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:986
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4498
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint64_t imissed
Definition: rte_ethdev.h:248
static int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4451
uint32_t low_water
Definition: rte_ethdev.h:946
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:399
void
Definition: rte_common.h:805
uint8_t rss_key_len
Definition: rte_ethdev.h:452
void rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:331
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1251
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1255
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
void * userdata
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1057
enum rte_fdir_status_mode status
Definition: rte_ethdev.h:1010
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:795
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1048
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1253
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:379
rte_eth_fc_mode
Definition: rte_ethdev.h:932
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:749
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:926
#define __rte_unused
Definition: rte_common.h:104
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:261
uint16_t max_rx_2_tx
Definition: rte_ethdev.h:867
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:245
rte_filter_op
Definition: rte_eth_ctrl.h:46
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1063
uint8_t hash_key_size
Definition: rte_ethdev.h:1257
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:402
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1292
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1081
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:259
const char * name
Definition: rte_ethdev.h:1207
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1281
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
uint32_t rxq
Definition: rte_ethdev.h:1037
int rte_eth_dev_set_link_up(uint16_t port_id)
struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:840
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1265
struct rte_eth_conf::@137 rx_adv_conf
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:756
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1263
union rte_eth_conf::@138 tx_adv_conf
uint8_t rx_deferred_start
Definition: rte_ethdev.h:824
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:2829
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1055
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:255
uint32_t high_water
Definition: rte_ethdev.h:945
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:748
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1303
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
Definition: rte_ethdev.h:1083
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1328
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1266
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:830
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
uint16_t send_xon
Definition: rte_ethdev.h:948
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1261
#define unlikely(x)
uint16_t nb_max
Definition: rte_ethdev.h:901
uint64_t ibytes
Definition: rte_ethdev.h:246
uint64_t offloads
Definition: rte_ethdev.h:851
uint16_t max_nb_queues
Definition: rte_ethdev.h:865
uint64_t oerrors
Definition: rte_ethdev.h:253
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1066
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1068
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
uint16_t max_mtu
Definition: rte_ethdev.h:1234
uint64_t offloads
Definition: rte_ethdev.h:408
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:398
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:779
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:842
uint16_t nb_desc
Definition: rte_ethdev.h:1295
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:4375
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1246
uint8_t scattered_rx
Definition: rte_ethdev.h:1294
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1074
uint64_t offloads
Definition: rte_ethdev.h:801
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1264
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1249
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:813
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:257
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1233
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1284
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:247
uint8_t enable_loop_back
Definition: rte_ethdev.h:782
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:410
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1293
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:617
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1241
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3024
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1469
uint16_t rx_free_thresh
Definition: rte_ethdev.h:822
struct rte_eth_vlan_mirror vlan
Definition: rte_ethdev.h:677
uint64_t dev_capa
Definition: rte_ethdev.h:1276
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1439
uint16_t max_tx_2_rx
Definition: rte_ethdev.h:869
uint64_t ierrors
Definition: rte_ethdev.h:252
uint16_t max_nb_desc
Definition: rte_ethdev.h:870
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:807
uint8_t priority
Definition: rte_ethdev.h:961
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1259
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1463
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1274
rte_vlan_type
Definition: rte_ethdev.h:418
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
uint16_t nb_seg_max
Definition: rte_ethdev.h:913
uint64_t ipackets
Definition: rte_ethdev.h:244
uint16_t max_vfs
Definition: rte_ethdev.h:1245
uint16_t pause_time
Definition: rte_ethdev.h:947
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
rte_filter_type
Definition: rte_eth_ctrl.h:28
int rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en)
uint64_t rx_nombuf
Definition: rte_ethdev.h:254
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:4855
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:807
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:338
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1262
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:2968
rte_eth_nb_pools
Definition: rte_ethdev.h:706
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:652
uint16_t nb_align
Definition: rte_ethdev.h:903
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:346
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
const char * driver_name
Definition: rte_ethdev.h:1230
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:4422
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:780
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1270
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
struct rte_eth_fdir_flex_conf flex_conf
Definition: rte_ethdev.h:1014
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:853
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1242
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:967
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1341
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:542
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
enum rte_fdir_pballoc_type pballoc
Definition: rte_ethdev.h:1009
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1237
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:453
void * reserved_ptrs[2]
Definition: rte_ethdev.h:854
uint64_t id
Definition: rte_ethdev.h:1340
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1283
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:807
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:832
enum rte_fdir_mode mode
Definition: rte_ethdev.h:1008
struct rte_eth_vmdq_rx_conf::@136 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1231
__rte_deprecated int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:950
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:949
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:603
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1064
uint8_t * rss_key
Definition: rte_ethdev.h:451
rte_fdir_status_mode
Definition: rte_ethdev.h:995
__rte_deprecated int rte_eth_dev_filter_supported(uint16_t port_id, enum rte_filter_type filter_type)
void * reserved_ptrs[2]
Definition: rte_ethdev.h:814
uint8_t tx_deferred_start
Definition: rte_ethdev.h:845
uint8_t wthresh
Definition: rte_ethdev.h:332
void * reserved_ptrs[2]
Definition: rte_ethdev.h:833
uint16_t max_rx_queues
Definition: rte_ethdev.h:1240
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:401
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:960
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1056
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:823
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1239
uint16_t nb_min
Definition: rte_ethdev.h:902
void * reserved_ptrs[2]
Definition: rte_ethdev.h:411
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:330
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1260
uint32_t speed_capa
Definition: rte_ethdev.h:1267
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4642
uint8_t drop_queue
Definition: rte_ethdev.h:1012
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
uint8_t autoneg
Definition: rte_ethdev.h:951
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1236
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:1035
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:4802
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:3007