DPDK  19.11.3
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
rte_mbuf.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation.
3  * Copyright 2014 6WIND S.A.
4  */
5 
6 #ifndef _RTE_MBUF_H_
7 #define _RTE_MBUF_H_
8 
34 #include <stdint.h>
35 #include <rte_compat.h>
36 #include <rte_common.h>
37 #include <rte_config.h>
38 #include <rte_mempool.h>
39 #include <rte_memory.h>
40 #include <rte_atomic.h>
41 #include <rte_prefetch.h>
42 #include <rte_branch_prediction.h>
43 #include <rte_byteorder.h>
44 #include <rte_mbuf_ptype.h>
45 #include <rte_mbuf_core.h>
46 
47 #ifdef __cplusplus
48 extern "C" {
49 #endif
50 
59 const char *rte_get_rx_ol_flag_name(uint64_t mask);
60 
73 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
74 
85 const char *rte_get_tx_ol_flag_name(uint64_t mask);
86 
99 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
100 
111 static inline void
113 {
114  rte_prefetch0(&m->cacheline0);
115 }
116 
128 static inline void
130 {
131 #if RTE_CACHE_LINE_SIZE == 64
132  rte_prefetch0(&m->cacheline1);
133 #else
134  RTE_SET_USED(m);
135 #endif
136 }
137 
138 
139 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
140 
149 static inline rte_iova_t
150 rte_mbuf_data_iova(const struct rte_mbuf *mb)
151 {
152  return mb->buf_iova + mb->data_off;
153 }
154 
155 __rte_deprecated
156 static inline phys_addr_t
157 rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
158 {
159  return rte_mbuf_data_iova(mb);
160 }
161 
174 static inline rte_iova_t
176 {
177  return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
178 }
179 
180 __rte_deprecated
181 static inline phys_addr_t
182 rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
183 {
184  return rte_mbuf_data_iova_default(mb);
185 }
186 
195 static inline struct rte_mbuf *
197 {
198  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
199 }
200 
221 __rte_experimental
222 static inline char *
223 rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
224 {
225  return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
226 }
227 
239 __rte_experimental
240 static inline char *
242 {
243  /* gcc complains about calling this experimental function even
244  * when not using it. Hide it with ALLOW_EXPERIMENTAL_API.
245  */
246 #ifdef ALLOW_EXPERIMENTAL_API
247  return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
248 #else
249  return NULL;
250 #endif
251 }
252 
266 static inline char *
268 {
269 #ifdef ALLOW_EXPERIMENTAL_API
270  return rte_mbuf_buf_addr(md, md->pool);
271 #else
272  char *buffer_addr;
273  buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
274  return buffer_addr;
275 #endif
276 }
277 
290 __rte_experimental
291 static inline void *
293 {
294  return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
295 }
296 
/** Private data stored in the mempool when it is a pktmbuf pool. */
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
	uint32_t flags;               /**< Pool flags (RTE_PKTMBUF_POOL_F_*). */
};

/**
 * Return the flags from the private area of a pktmbuf mempool.
 *
 * @param mp The packet mbuf mempool.
 * @return The pool flags.
 */
static inline uint32_t
rte_pktmbuf_priv_flags(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->flags;
}
325 
332 #define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)
333 
341 #define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
342  (rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
343 
344 #ifdef RTE_LIBRTE_MBUF_DEBUG
345 
347 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
348 
349 #else /* RTE_LIBRTE_MBUF_DEBUG */
350 
352 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
353 
354 #endif /* RTE_LIBRTE_MBUF_DEBUG */
355 
356 #ifdef RTE_MBUF_REFCNT_ATOMIC
357 
365 static inline uint16_t
366 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
367 {
368  return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
369 }
370 
378 static inline void
379 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
380 {
381  rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
382 }
383 
384 /* internal */
385 static inline uint16_t
386 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
387 {
388  return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
389 }
390 
400 static inline uint16_t
401 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
402 {
403  /*
404  * The atomic_add is an expensive operation, so we don't want to
405  * call it in the case where we know we are the unique holder of
406  * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
407  * operation has to be used because concurrent accesses on the
408  * reference counter can occur.
409  */
410  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
411  ++value;
412  rte_mbuf_refcnt_set(m, (uint16_t)value);
413  return (uint16_t)value;
414  }
415 
416  return __rte_mbuf_refcnt_update(m, value);
417 }
418 
419 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
420 
421 /* internal */
422 static inline uint16_t
423 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
424 {
425  m->refcnt = (uint16_t)(m->refcnt + value);
426  return m->refcnt;
427 }
428 
432 static inline uint16_t
433 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
434 {
435  return __rte_mbuf_refcnt_update(m, value);
436 }
437 
441 static inline uint16_t
443 {
444  return m->refcnt;
445 }
446 
450 static inline void
451 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
452 {
453  m->refcnt = new_value;
454 }
455 
456 #endif /* RTE_MBUF_REFCNT_ATOMIC */
457 
466 static inline uint16_t
468 {
469  return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
470 }
471 
480 static inline void
482  uint16_t new_value)
483 {
484  rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
485 }
486 
498 static inline uint16_t
500  int16_t value)
501 {
502  if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
503  ++value;
504  rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
505  return (uint16_t)value;
506  }
507 
508  return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
509 }
510 
512 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
513  if ((m) != NULL) \
514  rte_prefetch0(m); \
515 } while (0)
516 
517 
530 void
531 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
532 
552 __rte_experimental
553 int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
554  const char **reason);
555 
556 #define MBUF_RAW_ALLOC_CHECK(m) do { \
557  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
558  RTE_ASSERT((m)->next == NULL); \
559  RTE_ASSERT((m)->nb_segs == 1); \
560  __rte_mbuf_sanity_check(m, 0); \
561 } while (0)
562 
582 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
583 {
584  struct rte_mbuf *m;
585 
586  if (rte_mempool_get(mp, (void **)&m) < 0)
587  return NULL;
588  MBUF_RAW_ALLOC_CHECK(m);
589  return m;
590 }
591 
606 static __rte_always_inline void
608 {
609  RTE_ASSERT(!RTE_MBUF_CLONED(m) &&
611  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
612  RTE_ASSERT(m->next == NULL);
613  RTE_ASSERT(m->nb_segs == 1);
615  rte_mempool_put(m->pool, m);
616 }
617 
637 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
638  void *m, unsigned i);
639 
657 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
658 
693 struct rte_mempool *
694 rte_pktmbuf_pool_create(const char *name, unsigned n,
695  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
696  int socket_id);
697 
735 struct rte_mempool *
736 rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
737  unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
738  int socket_id, const char *ops_name);
739 
742  void *buf_ptr;
744  size_t buf_len;
745  uint16_t elt_size;
746 };
747 
789 __rte_experimental
790 struct rte_mempool *
791 rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
792  unsigned int cache_size, uint16_t priv_size,
793  uint16_t data_room_size, int socket_id,
794  const struct rte_pktmbuf_extmem *ext_mem,
795  unsigned int ext_num);
796 
808 static inline uint16_t
810 {
811  struct rte_pktmbuf_pool_private *mbp_priv;
812 
813  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
814  return mbp_priv->mbuf_data_room_size;
815 }
816 
829 static inline uint16_t
831 {
832  struct rte_pktmbuf_pool_private *mbp_priv;
833 
834  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
835  return mbp_priv->mbuf_priv_size;
836 }
837 
846 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
847 {
848  m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
849  (uint16_t)m->buf_len);
850 }
851 
860 #define MBUF_INVALID_PORT UINT16_MAX
861 
862 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
863 {
864  m->next = NULL;
865  m->pkt_len = 0;
866  m->tx_offload = 0;
867  m->vlan_tci = 0;
868  m->vlan_tci_outer = 0;
869  m->nb_segs = 1;
870  m->port = MBUF_INVALID_PORT;
871 
873  m->packet_type = 0;
875 
876  m->data_len = 0;
878 }
879 
893 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
894 {
895  struct rte_mbuf *m;
896  if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
897  rte_pktmbuf_reset(m);
898  return m;
899 }
900 
/**
 * Allocate a bulk of mbufs and reset each of them.
 *
 * @param pool The mempool to allocate from.
 * @param mbufs Array of pointers to fill (length >= count).
 * @param count Number of mbufs to allocate.
 * @return 0 on success, or the (negative) rte_mempool_get_bulk error;
 *         on failure no mbufs are allocated.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here while() loop is used rather than do() while{} to avoid extra
	 * check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}
956 
989 static inline struct rte_mbuf_ext_shared_info *
990 rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
992 {
993  struct rte_mbuf_ext_shared_info *shinfo;
994  void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
995  void *addr;
996 
997  addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
998  sizeof(uintptr_t));
999  if (addr <= buf_addr)
1000  return NULL;
1001 
1002  shinfo = (struct rte_mbuf_ext_shared_info *)addr;
1003  shinfo->free_cb = free_cb;
1004  shinfo->fcb_opaque = fcb_opaque;
1005  rte_mbuf_ext_refcnt_set(shinfo, 1);
1006 
1007  *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
1008  return shinfo;
1009 }
1010 
1067 static inline void
1068 rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
1069  rte_iova_t buf_iova, uint16_t buf_len,
1070  struct rte_mbuf_ext_shared_info *shinfo)
1071 {
1072  /* mbuf should not be read-only */
1073  RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
1074  RTE_ASSERT(shinfo->free_cb != NULL);
1075 
1076  m->buf_addr = buf_addr;
1077  m->buf_iova = buf_iova;
1078  m->buf_len = buf_len;
1079 
1080  m->data_len = 0;
1081  m->data_off = 0;
1082 
1084  m->shinfo = shinfo;
1085 }
1086 
1094 #define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
1095 
1104 static inline void
1105 rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
1106 {
1107  memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
1108 }
1109 
1110 /* internal */
1111 static inline void
1112 __rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
1113 {
1114  mdst->port = msrc->port;
1115  mdst->vlan_tci = msrc->vlan_tci;
1116  mdst->vlan_tci_outer = msrc->vlan_tci_outer;
1117  mdst->tx_offload = msrc->tx_offload;
1118  mdst->hash = msrc->hash;
1119  mdst->packet_type = msrc->packet_type;
1120  mdst->timestamp = msrc->timestamp;
1121  rte_mbuf_dynfield_copy(mdst, msrc);
1122 }
1123 
1145 static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1146 {
1147  RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1148  rte_mbuf_refcnt_read(mi) == 1);
1149 
1150  if (RTE_MBUF_HAS_EXTBUF(m)) {
1152  mi->ol_flags = m->ol_flags;
1153  mi->shinfo = m->shinfo;
1154  } else {
1155  /* if m is not direct, get the mbuf that embeds the data */
1157  mi->priv_size = m->priv_size;
1158  mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
1159  }
1160 
1161  __rte_pktmbuf_copy_hdr(mi, m);
1162 
1163  mi->data_off = m->data_off;
1164  mi->data_len = m->data_len;
1165  mi->buf_iova = m->buf_iova;
1166  mi->buf_addr = m->buf_addr;
1167  mi->buf_len = m->buf_len;
1168 
1169  mi->next = NULL;
1170  mi->pkt_len = mi->data_len;
1171  mi->nb_segs = 1;
1172 
1173  __rte_mbuf_sanity_check(mi, 1);
1175 }
1176 
1184 static inline void
1185 __rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
1186 {
1187  RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
1188  RTE_ASSERT(m->shinfo != NULL);
1189 
1190  if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
1191  m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
1192 }
1193 
1200 static inline void
1201 __rte_pktmbuf_free_direct(struct rte_mbuf *m)
1202 {
1203  struct rte_mbuf *md;
1204 
1205  RTE_ASSERT(RTE_MBUF_CLONED(m));
1206 
1207  md = rte_mbuf_from_indirect(m);
1208 
1209  if (rte_mbuf_refcnt_update(md, -1) == 0) {
1210  md->next = NULL;
1211  md->nb_segs = 1;
1212  rte_mbuf_refcnt_set(md, 1);
1213  rte_mbuf_raw_free(md);
1214  }
1215 }
1216 
1235 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1236 {
1237  struct rte_mempool *mp = m->pool;
1238  uint32_t mbuf_size, buf_len;
1239  uint16_t priv_size;
1240 
1241  if (RTE_MBUF_HAS_EXTBUF(m)) {
1242  /*
1243  * The mbuf has the external attached buffer,
1244  * we should check the type of the memory pool where
1245  * the mbuf was allocated from to detect the pinned
1246  * external buffer.
1247  */
1248  uint32_t flags = rte_pktmbuf_priv_flags(mp);
1249 
1250  if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
1251  /*
1252  * The pinned external buffer should not be
1253  * detached from its backing mbuf, just exit.
1254  */
1255  return;
1256  }
1257  __rte_pktmbuf_free_extbuf(m);
1258  } else {
1259  __rte_pktmbuf_free_direct(m);
1260  }
1261  priv_size = rte_pktmbuf_priv_size(mp);
1262  mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1263  buf_len = rte_pktmbuf_data_room_size(mp);
1264 
1265  m->priv_size = priv_size;
1266  m->buf_addr = (char *)m + mbuf_size;
1267  m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1268  m->buf_len = (uint16_t)buf_len;
1270  m->data_len = 0;
1271  m->ol_flags = 0;
1272 }
1273 
1287 static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
1288 {
1289  struct rte_mbuf_ext_shared_info *shinfo;
1290 
1291  /* Clear flags, mbuf is being freed. */
1293  shinfo = m->shinfo;
1294 
1295  /* Optimize for performance - do not dec/reinit */
1296  if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1))
1297  return 0;
1298 
1299  /*
1300  * Direct usage of add primitive to avoid
1301  * duplication of comparing with one.
1302  */
1304  (&shinfo->refcnt_atomic, -1)))
1305  return 1;
1306 
1307  /* Reinitialize counter before mbuf freeing. */
1308  rte_mbuf_ext_refcnt_set(shinfo, 1);
1309  return 0;
1310 }
1311 
1326 static __rte_always_inline struct rte_mbuf *
1328 {
1330 
1331  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1332 
1333  if (!RTE_MBUF_DIRECT(m)) {
1334  rte_pktmbuf_detach(m);
1335  if (RTE_MBUF_HAS_EXTBUF(m) &&
1337  __rte_pktmbuf_pinned_extbuf_decref(m))
1338  return NULL;
1339  }
1340 
1341  if (m->next != NULL) {
1342  m->next = NULL;
1343  m->nb_segs = 1;
1344  }
1345 
1346  return m;
1347 
1348  } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1349 
1350  if (!RTE_MBUF_DIRECT(m)) {
1351  rte_pktmbuf_detach(m);
1352  if (RTE_MBUF_HAS_EXTBUF(m) &&
1354  __rte_pktmbuf_pinned_extbuf_decref(m))
1355  return NULL;
1356  }
1357 
1358  if (m->next != NULL) {
1359  m->next = NULL;
1360  m->nb_segs = 1;
1361  }
1362  rte_mbuf_refcnt_set(m, 1);
1363 
1364  return m;
1365  }
1366  return NULL;
1367 }
1368 
1378 static __rte_always_inline void
1380 {
1381  m = rte_pktmbuf_prefree_seg(m);
1382  if (likely(m != NULL))
1383  rte_mbuf_raw_free(m);
1384 }
1385 
1395 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1396 {
1397  struct rte_mbuf *m_next;
1398 
1399  if (m != NULL)
1401 
1402  while (m != NULL) {
1403  m_next = m->next;
1405  m = m_next;
1406  }
1407 }
1408 
1421 __rte_experimental
1422 void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);
1423 
1441 struct rte_mbuf *
1442 rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);
1443 
1465 __rte_experimental
1466 struct rte_mbuf *
1467 rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
1468  uint32_t offset, uint32_t length);
1469 
1481 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1482 {
1484 
1485  do {
1486  rte_mbuf_refcnt_update(m, v);
1487  } while ((m = m->next) != NULL);
1488 }
1489 
1498 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
1499 {
1501  return m->data_off;
1502 }
1503 
1512 static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
1513 {
1515  return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
1516  m->data_len);
1517 }
1518 
1527 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
1528 {
1530  while (m->next != NULL)
1531  m = m->next;
1532  return m;
1533 }
1534 
1535 /* deprecated */
1536 #define rte_pktmbuf_mtophys_offset(m, o) \
1537  rte_pktmbuf_iova_offset(m, o)
1538 
1539 /* deprecated */
1540 #define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
1541 
1550 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1551 
1560 #define rte_pktmbuf_data_len(m) ((m)->data_len)
1561 
1577 static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
1578  uint16_t len)
1579 {
1581 
1582  if (unlikely(len > rte_pktmbuf_headroom(m)))
1583  return NULL;
1584 
1585  /* NB: elaborating the subtraction like this instead of using
1586  * -= allows us to ensure the result type is uint16_t
1587  * avoiding compiler warnings on gcc 8.1 at least */
1588  m->data_off = (uint16_t)(m->data_off - len);
1589  m->data_len = (uint16_t)(m->data_len + len);
1590  m->pkt_len = (m->pkt_len + len);
1591 
1592  return (char *)m->buf_addr + m->data_off;
1593 }
1594 
1610 static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
1611 {
1612  void *tail;
1613  struct rte_mbuf *m_last;
1614 
1616 
1617  m_last = rte_pktmbuf_lastseg(m);
1618  if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
1619  return NULL;
1620 
1621  tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
1622  m_last->data_len = (uint16_t)(m_last->data_len + len);
1623  m->pkt_len = (m->pkt_len + len);
1624  return (char*) tail;
1625 }
1626 
1641 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
1642 {
1644 
1645  if (unlikely(len > m->data_len))
1646  return NULL;
1647 
1648  /* NB: elaborating the addition like this instead of using
1649  * += allows us to ensure the result type is uint16_t
1650  * avoiding compiler warnings on gcc 8.1 at least */
1651  m->data_len = (uint16_t)(m->data_len - len);
1652  m->data_off = (uint16_t)(m->data_off + len);
1653  m->pkt_len = (m->pkt_len - len);
1654  return (char *)m->buf_addr + m->data_off;
1655 }
1656 
1671 static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
1672 {
1673  struct rte_mbuf *m_last;
1674 
1676 
1677  m_last = rte_pktmbuf_lastseg(m);
1678  if (unlikely(len > m_last->data_len))
1679  return -1;
1680 
1681  m_last->data_len = (uint16_t)(m_last->data_len - len);
1682  m->pkt_len = (m->pkt_len - len);
1683  return 0;
1684 }
1685 
1695 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
1696 {
1698  return m->nb_segs == 1;
1699 }
1700 
1704 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
1705  uint32_t len, void *buf);
1706 
1727 static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
1728  uint32_t off, uint32_t len, void *buf)
1729 {
1730  if (likely(off + len <= rte_pktmbuf_data_len(m)))
1731  return rte_pktmbuf_mtod_offset(m, char *, off);
1732  else
1733  return __rte_pktmbuf_read(m, off, len, buf);
1734 }
1735 
1752 static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
1753 {
1754  struct rte_mbuf *cur_tail;
1755 
1756  /* Check for number-of-segments-overflow */
1757  if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
1758  return -EOVERFLOW;
1759 
1760  /* Chain 'tail' onto the old tail */
1761  cur_tail = rte_pktmbuf_lastseg(head);
1762  cur_tail->next = tail;
1763 
1764  /* accumulate number of segments and total length.
1765  * NB: elaborating the addition like this instead of using
1766  * -= allows us to ensure the result type is uint16_t
1767  * avoiding compiler warnings on gcc 8.1 at least */
1768  head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
1769  head->pkt_len += tail->pkt_len;
1770 
1771  /* pkt_len is only set in the head */
1772  tail->pkt_len = tail->data_len;
1773 
1774  return 0;
1775 }
1776 
1777 /*
1778  * @warning
1779  * @b EXPERIMENTAL: This API may change without prior notice.
1780  *
1781  * For given input values generate raw tx_offload value.
1782  * Note that it is caller responsibility to make sure that input parameters
1783  * don't exceed maximum bit-field values.
1784  * @param il2
1785  * l2_len value.
1786  * @param il3
1787  * l3_len value.
1788  * @param il4
1789  * l4_len value.
1790  * @param tso
1791  * tso_segsz value.
1792  * @param ol3
1793  * outer_l3_len value.
1794  * @param ol2
1795  * outer_l2_len value.
1796  * @param unused
1797  * unused value.
1798  * @return
1799  * raw tx_offload value.
1800  */
1801 static __rte_always_inline uint64_t
1802 rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
1803  uint64_t ol3, uint64_t ol2, uint64_t unused)
1804 {
1805  return il2 << RTE_MBUF_L2_LEN_OFS |
1806  il3 << RTE_MBUF_L3_LEN_OFS |
1807  il4 << RTE_MBUF_L4_LEN_OFS |
1808  tso << RTE_MBUF_TSO_SEGSZ_OFS |
1809  ol3 << RTE_MBUF_OUTL3_LEN_OFS |
1810  ol2 << RTE_MBUF_OUTL2_LEN_OFS |
1811  unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
1812 }
1813 
1824 static inline int
1826 {
1827  uint64_t ol_flags = m->ol_flags;
1828 
1829  /* Does packet set any of available offloads? */
1830  if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
1831  return 0;
1832 
1833  /* IP checksum can be counted only for IPv4 packet */
1834  if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
1835  return -EINVAL;
1836 
1837  /* IP type not set when required */
1838  if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
1839  if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
1840  return -EINVAL;
1841 
1842  /* Check requirements for TSO packet */
1843  if (ol_flags & PKT_TX_TCP_SEG)
1844  if ((m->tso_segsz == 0) ||
1845  ((ol_flags & PKT_TX_IPV4) &&
1846  !(ol_flags & PKT_TX_IP_CKSUM)))
1847  return -EINVAL;
1848 
1849  /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
1850  if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
1851  !(ol_flags & PKT_TX_OUTER_IPV4))
1852  return -EINVAL;
1853 
1854  return 0;
1855 }
1856 
1860 int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);
1861 
/**
 * Linearize the packet data: merge all segments into the first one.
 * No-op when the mbuf is already contiguous.
 *
 * @param mbuf The packet mbuf.
 * @return 0 on success, negative value if linearization failed
 *         (not enough tailroom in the first segment).
 */
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;
	return __rte_pktmbuf_linearize(mbuf);
}
1881 
1896 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
1897 
1901 static inline uint32_t
1903 {
1904  return m->hash.sched.queue_id;
1905 }
1906 
1910 static inline uint8_t
1912 {
1913  return m->hash.sched.traffic_class;
1914 }
1915 
1919 static inline uint8_t
1921 {
1922  return m->hash.sched.color;
1923 }
1924 
1937 static inline void
1938 rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
1939  uint8_t *traffic_class,
1940  uint8_t *color)
1941 {
1942  struct rte_mbuf_sched sched = m->hash.sched;
1943 
1944  *queue_id = sched.queue_id;
1945  *traffic_class = sched.traffic_class;
1946  *color = sched.color;
1947 }
1948 
1952 static inline void
1954 {
1955  m->hash.sched.queue_id = queue_id;
1956 }
1957 
1961 static inline void
1963 {
1964  m->hash.sched.traffic_class = traffic_class;
1965 }
1966 
1970 static inline void
1972 {
1973  m->hash.sched.color = color;
1974 }
1975 
1988 static inline void
1990  uint8_t traffic_class,
1991  uint8_t color)
1992 {
1993  m->hash.sched = (struct rte_mbuf_sched){
1994  .queue_id = queue_id,
1995  .traffic_class = traffic_class,
1996  .color = color,
1997  .reserved = 0,
1998  };
1999 }
2000 
2001 #ifdef __cplusplus
2002 }
2003 #endif
2004 
2005 #endif /* _RTE_MBUF_H_ */
struct rte_mbuf_ext_shared_info * shinfo
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:150
struct rte_mbuf * next
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:304
uint64_t timestamp
uint16_t vlan_tci_outer
#define __rte_always_inline
Definition: rte_common.h:173
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
Definition: rte_atomic.h:253
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:893
static void rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
Definition: rte_mbuf.h:1962
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:830
static int rte_validate_tx_offload(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1825
__rte_experimental struct rte_mbuf * rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp, uint32_t offset, uint32_t length)
uint32_t queue_id
#define likely(x)
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1395
#define PKT_TX_OUTER_IP_CKSUM
static uint32_t rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1902
#define RTE_PTR_ALIGN_FLOOR(ptr, align)
Definition: rte_common.h:224
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
uint64_t rte_iova_t
Definition: rte_common.h:365
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1379
void * buf_addr
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:196
uint16_t data_len
rte_mbuf_extbuf_free_callback_t free_cb
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:1752
__rte_experimental void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count)
static uint8_t rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1911
#define PKT_TX_IPV4
static __rte_experimental char * rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
Definition: rte_mbuf.h:223
unsigned int flags
Definition: rte_mempool.h:230
#define __rte_unused
Definition: rte_common.h:104
__rte_experimental int rte_mbuf_check(const struct rte_mbuf *m, int is_header, const char **reason)
uint64_t tso_segsz
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1498
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:915
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:846
uint32_t cache_size
Definition: rte_mempool.h:233
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:129
#define PKT_TX_OUTER_IPV4
static uint16_t rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo, int16_t value)
Definition: rte_mbuf.h:499
uint16_t nb_segs
#define IND_ATTACHED_MBUF
uint16_t port
#define rte_pktmbuf_mtod_offset(m, t, o)
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1327
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1695
#define RTE_PTR_ADD(ptr, x)
Definition: rte_common.h:195
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id, uint8_t traffic_class, uint8_t color)
Definition: rte_mbuf.h:1989
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1512
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:607
static void rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id, uint8_t *traffic_class, uint8_t *color)
Definition: rte_mbuf.h:1938
#define unlikely(x)
uint16_t priv_size
static void rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
Definition: rte_mbuf.h:1953
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
static void rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
Definition: rte_mbuf.h:1971
#define RTE_MIN(a, b)
Definition: rte_common.h:508
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:352
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:442
#define PKT_TX_TCP_SEG
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
Definition: rte_mbuf.h:1875
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1563
uint64_t phys_addr_t
Definition: rte_common.h:355
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
Definition: rte_atomic.h:267
static uint32_t rte_pktmbuf_priv_flags(struct rte_mempool *mp)
Definition: rte_mbuf.h:318
uint16_t elt_size
Definition: rte_mbuf.h:745
uint16_t refcnt
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1641
static __rte_experimental void * rte_mbuf_to_priv(struct rte_mbuf *m)
Definition: rte_mbuf.h:292
#define RTE_MBUF_DIRECT(mb)
#define RTE_MBUF_CLONED(mb)
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1145
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:1535
void(* rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque)
#define PKT_TX_IP_CKSUM
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
Definition: rte_atomic.h:348
uint64_t ol_flags
static void rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
Definition: rte_mbuf.h:1105
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1235
static uint16_t rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:467
uint32_t pkt_len
uint64_t dynfield1[2]
uint16_t buf_len
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1560
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:433
uint32_t packet_type
#define MBUF_INVALID_PORT
Definition: rte_mbuf.h:860
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:809
const char * rte_get_rx_ol_flag_name(uint64_t mask)
static struct rte_mbuf_ext_shared_info * rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
Definition: rte_mbuf.h:990
#define PKT_TX_IPV6
#define PKT_TX_L4_MASK
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
#define RTE_MBUF_HAS_EXTBUF(mb)
struct rte_mempool * pool
static void rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo, uint16_t new_value)
Definition: rte_mbuf.h:481
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1610
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:451
struct rte_mempool * rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name)
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:175
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1671
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:267
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:1727
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1577
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:582
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1481
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:200
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1527
static __rte_experimental char * rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
Definition: rte_mbuf.h:241
__rte_experimental struct rte_mempool * rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const struct rte_pktmbuf_extmem *ext_mem, unsigned int ext_num)
static uint8_t rte_mbuf_sched_color_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1920
#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF
Definition: rte_mbuf.h:332
static rte_iova_t rte_mempool_virt2iova(const void *elt)
Definition: rte_mempool.h:1689
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1404
rte_iova_t buf_iova
Definition: rte_mbuf.h:743
uint8_t traffic_class
#define RTE_PTR_DIFF(ptr1, ptr2)
Definition: rte_common.h:207
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:112
rte_atomic16_t refcnt_atomic
#define PKT_TX_OFFLOAD_MASK
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1717
uint64_t tx_offload
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:222
uint16_t vlan_tci
#define RTE_MBUF_HAS_PINNED_EXTBUF(mb)
Definition: rte_mbuf.h:341
rte_atomic16_t refcnt_atomic
struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
#define RTE_SET_USED(x)
Definition: rte_common.h:110
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
#define EXT_ATTACHED_MBUF
static void rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:1068