DPDK 17.11.0
rte_mbuf.h — generated API-documentation source listing for this file.
Note: hyperlinked identifiers were dropped during extraction; some lines are missing.
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  * Copyright 2014 6WIND S.A.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * * Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  * * Redistributions in binary form must reproduce the above copyright
15  * notice, this list of conditions and the following disclaimer in
16  * the documentation and/or other materials provided with the
17  * distribution.
18  * * Neither the name of Intel Corporation nor the names of its
19  * contributors may be used to endorse or promote products derived
20  * from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #ifndef _RTE_MBUF_H_
36 #define _RTE_MBUF_H_
37 
63 #include <stdint.h>
64 #include <rte_common.h>
65 #include <rte_mempool.h>
66 #include <rte_memory.h>
67 #include <rte_atomic.h>
68 #include <rte_prefetch.h>
69 #include <rte_branch_prediction.h>
70 #include <rte_mbuf_ptype.h>
71 
72 #ifdef __cplusplus
73 extern "C" {
74 #endif
75 
76 /*
77  * Packet Offload Features Flags. It also carry packet type information.
78  * Critical resources. Both rx/tx shared these bits. Be cautious on any change
79  *
80  * - RX flags start at bit position zero, and get added to the left of previous
81  * flags.
82  * - The most-significant 3 bits are reserved for generic mbuf flags
83  * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
84  * added to the right of the previously defined flags i.e. they should count
85  * downwards, not upwards.
86  *
87  * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
88  * rte_get_tx_ol_flag_name().
89  */
90 
98 #define PKT_RX_VLAN (1ULL << 0)
99 
100 #define PKT_RX_RSS_HASH (1ULL << 1)
101 #define PKT_RX_FDIR (1ULL << 2)
110 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
111 
119 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
120 
121 #define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
129 #define PKT_RX_VLAN_STRIPPED (1ULL << 6)
130 
139 #define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
140 
141 #define PKT_RX_IP_CKSUM_UNKNOWN 0
142 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
143 #define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
144 #define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
145 
154 #define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
155 
156 #define PKT_RX_L4_CKSUM_UNKNOWN 0
157 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
158 #define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
159 #define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
160 
161 #define PKT_RX_IEEE1588_PTP (1ULL << 9)
162 #define PKT_RX_IEEE1588_TMST (1ULL << 10)
163 #define PKT_RX_FDIR_ID (1ULL << 13)
164 #define PKT_RX_FDIR_FLX (1ULL << 14)
174 #define PKT_RX_QINQ_STRIPPED (1ULL << 15)
175 
181 #define PKT_RX_LRO (1ULL << 16)
182 
186 #define PKT_RX_TIMESTAMP (1ULL << 17)
187 
191 #define PKT_RX_SEC_OFFLOAD (1ULL << 18)
192 
196 #define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
197 
205 #define PKT_RX_QINQ (1ULL << 20)
206 
207 /* add new RX flags here */
208 
209 /* add new TX flags here */
210 
214 #define PKT_TX_SEC_OFFLOAD (1ULL << 43)
215 
220 #define PKT_TX_MACSEC (1ULL << 44)
221 
227 #define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
228 #define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
229 #define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
230 #define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
231 
232 #define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
233 /* add new TX TUNNEL type here */
234 #define PKT_TX_TUNNEL_MASK (0xFULL << 45)
235 
239 #define PKT_TX_QINQ_PKT (1ULL << 49)
254 #define PKT_TX_TCP_SEG (1ULL << 50)
255 
256 #define PKT_TX_IEEE1588_TMST (1ULL << 51)
269 #define PKT_TX_L4_NO_CKSUM (0ULL << 52)
270 #define PKT_TX_TCP_CKSUM (1ULL << 52)
271 #define PKT_TX_SCTP_CKSUM (2ULL << 52)
272 #define PKT_TX_UDP_CKSUM (3ULL << 52)
273 #define PKT_TX_L4_MASK (3ULL << 52)
282 #define PKT_TX_IP_CKSUM (1ULL << 54)
283 
290 #define PKT_TX_IPV4 (1ULL << 55)
291 
298 #define PKT_TX_IPV6 (1ULL << 56)
299 
300 #define PKT_TX_VLAN_PKT (1ULL << 57)
310 #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
311 
317 #define PKT_TX_OUTER_IPV4 (1ULL << 59)
318 
324 #define PKT_TX_OUTER_IPV6 (1ULL << 60)
325 
330 #define PKT_TX_OFFLOAD_MASK ( \
331  PKT_TX_IP_CKSUM | \
332  PKT_TX_L4_MASK | \
333  PKT_TX_OUTER_IP_CKSUM | \
334  PKT_TX_TCP_SEG | \
335  PKT_TX_IEEE1588_TMST | \
336  PKT_TX_QINQ_PKT | \
337  PKT_TX_VLAN_PKT | \
338  PKT_TX_TUNNEL_MASK | \
339  PKT_TX_MACSEC | \
340  PKT_TX_SEC_OFFLOAD)
341 
342 #define __RESERVED (1ULL << 61)
344 #define IND_ATTACHED_MBUF (1ULL << 62)
346 /* Use final bit of flags to indicate a control mbuf */
347 #define CTRL_MBUF_FLAG (1ULL << 63)
350 #define RTE_MBUF_PRIV_ALIGN 8
351 
360 const char *rte_get_rx_ol_flag_name(uint64_t mask);
361 
374 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
375 
386 const char *rte_get_tx_ol_flag_name(uint64_t mask);
387 
400 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
401 
408 #define RTE_MBUF_DEFAULT_DATAROOM 2048
409 #define RTE_MBUF_DEFAULT_BUF_SIZE \
410  (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
411 
412 /* define a set of marker types that can be used to refer to set points in the
413  * mbuf */
414 __extension__
415 typedef void *MARKER[0];
416 __extension__
417 typedef uint8_t MARKER8[0];
418 __extension__
419 typedef uint64_t MARKER64[0];
425 struct rte_mbuf {
426  MARKER cacheline0;
427 
428  void *buf_addr;
436  union {
437  rte_iova_t buf_iova;
439  } __rte_aligned(sizeof(rte_iova_t));
440 
441  /* next 8 bytes are initialised on RX descriptor rearm */
442  MARKER64 rearm_data;
443  uint16_t data_off;
444 
455  union {
457  uint16_t refcnt;
458  };
459  uint16_t nb_segs;
462  uint16_t port;
463 
464  uint64_t ol_flags;
466  /* remaining bytes are set on RX when pulling packet from descriptor */
467  MARKER rx_descriptor_fields1;
468 
469  /*
470  * The packet type, which is the combination of outer/inner L2, L3, L4
471  * and tunnel types. The packet_type is about data really present in the
472  * mbuf. Example: if vlan stripping is enabled, a received vlan packet
473  * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
474  * vlan is stripped from the data.
475  */
477  union {
478  uint32_t packet_type;
479  struct {
480  uint32_t l2_type:4;
481  uint32_t l3_type:4;
482  uint32_t l4_type:4;
483  uint32_t tun_type:4;
485  union {
491  __extension__
492  struct {
493  uint8_t inner_l2_type:4;
495  uint8_t inner_l3_type:4;
497  };
498  };
499  uint32_t inner_l4_type:4;
500  };
501  };
502 
503  uint32_t pkt_len;
504  uint16_t data_len;
506  uint16_t vlan_tci;
507 
508  union {
509  uint32_t rss;
510  struct {
512  union {
513  struct {
514  uint16_t hash;
515  uint16_t id;
516  };
517  uint32_t lo;
519  };
520  uint32_t hi;
523  } fdir;
524  struct {
525  uint32_t lo;
526  uint32_t hi;
527  } sched;
528  uint32_t usr;
529  } hash;
532  uint16_t vlan_tci_outer;
533 
534  uint16_t buf_len;
539  uint64_t timestamp;
540 
541  /* second cache line - fields only used in slow path or on TX */
542  MARKER cacheline1 __rte_cache_min_aligned;
543 
545  union {
546  void *userdata;
547  uint64_t udata64;
548  };
549 
550  struct rte_mempool *pool;
551  struct rte_mbuf *next;
553  /* fields to support TX offloads */
555  union {
556  uint64_t tx_offload;
557  __extension__
558  struct {
559  uint64_t l2_len:7;
563  uint64_t l3_len:9;
564  uint64_t l4_len:8;
565  uint64_t tso_segsz:16;
567  /* fields for TX offloading of tunnels */
568  uint64_t outer_l3_len:9;
569  uint64_t outer_l2_len:7;
571  /* uint64_t unused:8; */
572  };
573  };
574 
577  uint16_t priv_size;
578 
580  uint16_t timesync;
581 
583  uint32_t seqn;
584 
586 
588 #define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
589 
600 static inline void
602 {
603  rte_prefetch0(&m->cacheline0);
604 }
605 
617 static inline void
619 {
620 #if RTE_CACHE_LINE_SIZE == 64
621  rte_prefetch0(&m->cacheline1);
622 #else
623  RTE_SET_USED(m);
624 #endif
625 }
626 
627 
628 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
629 
/**
 * Return the IO address of the start of the mbuf data.
 *
 * @param mb
 *   The mbuf to query.
 * @return
 *   buf_iova (IO address of the buffer) plus data_off (the headroom),
 *   i.e. the IO address of the first byte of packet data.
 */
static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}
643 
/* Deprecated compatibility alias: use rte_mbuf_data_iova() instead. */
__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova(mb);
}
650 
663 static inline rte_iova_t
665 {
666  return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
667 }
668 
/* Deprecated compatibility alias: use rte_mbuf_data_iova_default() instead. */
__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova_default(mb);
}
675 
684 static inline struct rte_mbuf *
686 {
687  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
688 }
689 
698 static inline char *
700 {
701  char *buffer_addr;
702  buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
703  return buffer_addr;
704 }
705 
709 #define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
710 
714 #define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))
715 
724  uint16_t mbuf_priv_size;
725 };
726 
727 #ifdef RTE_LIBRTE_MBUF_DEBUG
728 
730 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
731 
732 #else /* RTE_LIBRTE_MBUF_DEBUG */
733 
735 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
736 
737 #endif /* RTE_LIBRTE_MBUF_DEBUG */
738 
739 #ifdef RTE_MBUF_REFCNT_ATOMIC
740 
/**
 * Read the reference counter of an mbuf (atomic variant,
 * RTE_MBUF_REFCNT_ATOMIC enabled).
 *
 * @param m
 *   The mbuf.
 * @return
 *   The current reference count of the mbuf.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}
753 
/**
 * Set the reference counter of an mbuf (atomic variant).
 *
 * @param m
 *   The mbuf.
 * @param new_value
 *   The value to store into the reference counter.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic16_set(&m->refcnt_atomic, new_value);
}
766 
/**
 * Add "value" to the mbuf reference counter and return the new count
 * (atomic variant).
 *
 * @param m
 *   The mbuf.
 * @param value
 *   Signed amount to add to the reference counter.
 * @return
 *   The updated reference counter value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the uniq holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		/* Sole owner: a plain store is correct and cheaper. */
		rte_mbuf_refcnt_set(m, 1 + value);
		return 1 + value;
	}

	return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}
793 
794 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
795 
799 static inline uint16_t
800 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
801 {
802  m->refcnt = (uint16_t)(m->refcnt + value);
803  return m->refcnt;
804 }
805 
809 static inline uint16_t
811 {
812  return m->refcnt;
813 }
814 
/**
 * Set the reference counter of an mbuf (non-atomic variant).
 *
 * @param m
 *   The mbuf.
 * @param new_value
 *   The value to store into the reference counter.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}
823 
824 #endif /* RTE_MBUF_REFCNT_ATOMIC */
825 
827 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
828  if ((m) != NULL) \
829  rte_prefetch0(m); \
830 } while (0)
831 
832 
845 void
846 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
847 
848 #define MBUF_RAW_ALLOC_CHECK(m) do { \
849  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
850  RTE_ASSERT((m)->next == NULL); \
851  RTE_ASSERT((m)->nb_segs == 1); \
852  __rte_mbuf_sanity_check(m, 0); \
853 } while (0)
854 
874 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
875 {
876  struct rte_mbuf *m;
877  void *mb = NULL;
878 
879  if (rte_mempool_get(mp, &mb) < 0)
880  return NULL;
881  m = (struct rte_mbuf *)mb;
882  MBUF_RAW_ALLOC_CHECK(m);
883  return m;
884 }
885 
900 static __rte_always_inline void
902 {
903  RTE_ASSERT(RTE_MBUF_DIRECT(m));
904  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
905  RTE_ASSERT(m->next == NULL);
906  RTE_ASSERT(m->nb_segs == 1);
908  rte_mempool_put(m->pool, m);
909 }
910 
911 /* compat with older versions */
912 __rte_deprecated
913 static inline void
914 __rte_mbuf_raw_free(struct rte_mbuf *m)
915 {
917 }
918 
919 /* Operations on ctrl mbuf */
920 
940 void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
941  void *m, unsigned i);
942 
955 #define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
956 
963 #define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)
964 
973 #define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)
974 
983 #define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)
984 
994 static inline int
996 {
997  return !!(m->ol_flags & CTRL_MBUF_FLAG);
998 }
999 
1000 /* Operations on pkt mbuf */
1001 
1021 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
1022  void *m, unsigned i);
1023 
1024 
1042 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
1043 
1078 struct rte_mempool *
1079 rte_pktmbuf_pool_create(const char *name, unsigned n,
1080  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
1081  int socket_id);
1082 
1094 static inline uint16_t
1096 {
1097  struct rte_pktmbuf_pool_private *mbp_priv;
1098 
1099  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1100  return mbp_priv->mbuf_data_room_size;
1101 }
1102 
1115 static inline uint16_t
1117 {
1118  struct rte_pktmbuf_pool_private *mbp_priv;
1119 
1120  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1121  return mbp_priv->mbuf_priv_size;
1122 }
1123 
/**
 * Reset the data offset of the mbuf to the default headroom.
 *
 * Uses the smaller of RTE_PKTMBUF_HEADROOM and the buffer length, so a
 * buffer shorter than the configured headroom is still handled safely.
 *
 * @param m
 *   The mbuf whose data_off is reset.
 */
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
}
1136 
1145 #define MBUF_INVALID_PORT UINT16_MAX
1146 
1147 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
1148 {
1149  m->next = NULL;
1150  m->pkt_len = 0;
1151  m->tx_offload = 0;
1152  m->vlan_tci = 0;
1153  m->vlan_tci_outer = 0;
1154  m->nb_segs = 1;
1155  m->port = MBUF_INVALID_PORT;
1156 
1157  m->ol_flags = 0;
1158  m->packet_type = 0;
1160 
1161  m->data_len = 0;
1163 }
1164 
1178 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1179 {
1180  struct rte_mbuf *m;
1181  if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
1182  rte_pktmbuf_reset(m);
1183  return m;
1184 }
1185 
/**
 * Allocate a bulk of mbufs and initialize each to default values.
 *
 * @param pool
 *   The mempool from which mbufs are allocated.
 * @param mbufs
 *   Array of pointers to be filled with the allocated mbufs.
 * @param count
 *   Number of mbufs to allocate.
 * @return
 *   0 on success, or the (negative) rte_mempool_get_bulk() error code —
 *   bulk allocation is all-or-nothing.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	 struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here while() loop is used rather than do() while{} to avoid extra
	 * check if count is zero.
	 * The case labels are deliberately *inside* the while body: the
	 * switch jumps into the middle of the loop to handle count % 4
	 * leftover iterations, then the loop runs in unrolled groups of 4.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}
1241 
/**
 * Attach mbuf 'mi' to the data of mbuf 'm', making 'mi' an indirect mbuf.
 *
 * The reference counter of the direct mbuf that owns the data is
 * incremented; 'mi' then shares that buffer (IND_ATTACHED_MBUF is set).
 * 'mi' must be direct, unshared (refcnt == 1) and freshly allocated.
 *
 * @param mi
 *   The indirect-to-be mbuf.
 * @param m
 *   The mbuf whose data is attached; may itself be indirect, in which
 *   case the underlying direct mbuf is used.
 */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	/* if m is not direct, get the mbuf that embeds the data */
	if (RTE_MBUF_DIRECT(m))
		md = m;
	else
		md = rte_mbuf_from_indirect(m);

	/* Take a reference on the data owner before publishing it in mi. */
	rte_mbuf_refcnt_update(md, 1);
	mi->priv_size = m->priv_size;
	mi->buf_iova = m->buf_iova;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->port = m->port;
	mi->vlan_tci = m->vlan_tci;
	mi->vlan_tci_outer = m->vlan_tci_outer;
	mi->tx_offload = m->tx_offload;
	mi->hash = m->hash;

	/* mi becomes a single-segment packet covering m's data. */
	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;
	mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
	mi->packet_type = m->packet_type;
	mi->timestamp = m->timestamp;

	__rte_mbuf_sanity_check(mi, 1);
}
1296 
1310 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1311 {
1312  struct rte_mbuf *md = rte_mbuf_from_indirect(m);
1313  struct rte_mempool *mp = m->pool;
1314  uint32_t mbuf_size, buf_len, priv_size;
1315 
1316  priv_size = rte_pktmbuf_priv_size(mp);
1317  mbuf_size = sizeof(struct rte_mbuf) + priv_size;
1318  buf_len = rte_pktmbuf_data_room_size(mp);
1319 
1320  m->priv_size = priv_size;
1321  m->buf_addr = (char *)m + mbuf_size;
1322  m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1323  m->buf_len = (uint16_t)buf_len;
1325  m->data_len = 0;
1326  m->ol_flags = 0;
1327 
1328  if (rte_mbuf_refcnt_update(md, -1) == 0) {
1329  md->next = NULL;
1330  md->nb_segs = 1;
1331  rte_mbuf_refcnt_set(md, 1);
1332  rte_mbuf_raw_free(md);
1333  }
1334 }
1335 
1350 static __rte_always_inline struct rte_mbuf *
1352 {
1354 
1355  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1356 
1357  if (RTE_MBUF_INDIRECT(m))
1358  rte_pktmbuf_detach(m);
1359 
1360  if (m->next != NULL) {
1361  m->next = NULL;
1362  m->nb_segs = 1;
1363  }
1364 
1365  return m;
1366 
1367  } else if (rte_atomic16_add_return(&m->refcnt_atomic, -1) == 0) {
1368 
1369 
1370  if (RTE_MBUF_INDIRECT(m))
1371  rte_pktmbuf_detach(m);
1372 
1373  if (m->next != NULL) {
1374  m->next = NULL;
1375  m->nb_segs = 1;
1376  }
1377  rte_mbuf_refcnt_set(m, 1);
1378 
1379  return m;
1380  }
1381  return NULL;
1382 }
1383 
1384 /* deprecated, replaced by rte_pktmbuf_prefree_seg() */
/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
__rte_deprecated
static inline struct rte_mbuf *
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	/* Thin compatibility forwarder; same contract as the new name. */
	return rte_pktmbuf_prefree_seg(m);
}
1391 
1401 static __rte_always_inline void
1403 {
1404  m = rte_pktmbuf_prefree_seg(m);
1405  if (likely(m != NULL))
1406  rte_mbuf_raw_free(m);
1407 }
1408 
1418 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1419 {
1420  struct rte_mbuf *m_next;
1421 
1423 
1424  while (m != NULL) {
1425  m_next = m->next;
1427  m = m_next;
1428  }
1429 }
1430 
/**
 * Create a "clone" of the given packet mbuf.
 *
 * Allocates one new mbuf from 'mp' per segment of 'md' and attaches it
 * to that segment (indirect mbufs: data is shared, not copied).
 *
 * @param md
 *   The packet mbuf to clone.
 * @param mp
 *   The mempool from which the clone segments are allocated.
 * @return
 *   The cloned chain, or NULL if any allocation failed (in which case
 *   all partially built segments are freed).
 */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
	 struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint16_t nseg;

	if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
		return NULL;

	/* mc is the head; prev always points at the link to fill next. */
	mi = mc;
	prev = &mi->next;
	pktlen = md->pkt_len;
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
	    (mi = rte_pktmbuf_alloc(mp)) != NULL);

	/* Terminate the chain and fix up head totals (attach set per-segment
	 * values only). */
	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely (mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}
1484 
1496 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1497 {
1499 
1500  do {
1501  rte_mbuf_refcnt_update(m, v);
1502  } while ((m = m->next) != NULL);
1503 }
1504 
/**
 * Get the headroom (bytes before the data) in an mbuf.
 *
 * NOTE(review): the listing skips source line 1515 here — the original
 * contains a debug-only sanity check that the extraction dropped.
 *
 * @param m
 *   The packet mbuf.
 * @return
 *   The length of the headroom, i.e. data_off.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	return m->data_off;
}
1518 
/**
 * Get the tailroom (bytes after the data) in an mbuf.
 *
 * NOTE(review): a debug-only sanity-check line (source line 1529) was
 * dropped by the extraction here.
 *
 * @param m
 *   The packet mbuf.
 * @return
 *   buf_len minus headroom minus data_len.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
	    m->data_len);
}
1533 
/**
 * Get the last segment of a packet mbuf chain.
 *
 * O(nb_segs): walks the 'next' links to the end.
 *
 * @param m
 *   The head of the packet mbuf chain.
 * @return
 *   The last segment (m itself for a single-segment packet).
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	struct rte_mbuf *m2 = (struct rte_mbuf *)m;

	while (m2->next != NULL)
		m2 = m2->next;
	return m2;
}
1551 
1566 #define rte_pktmbuf_mtod_offset(m, t, o) \
1567  ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
1568 
1581 #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
1582 
1592 #define rte_pktmbuf_iova_offset(m, o) \
1593  (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
1594 
1595 /* deprecated */
1596 #define rte_pktmbuf_mtophys_offset(m, o) \
1597  rte_pktmbuf_iova_offset(m, o)
1598 
1606 #define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
1607 
1608 /* deprecated */
1609 #define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
1610 
1619 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1620 
1629 #define rte_pktmbuf_data_len(m) ((m)->data_len)
1630 
/**
 * Prepend 'len' bytes to the packet by growing into the headroom.
 *
 * Only the first segment is touched; pkt_len and the first segment's
 * data_len both grow by 'len'. The added bytes are NOT initialized.
 *
 * @param m
 *   The packet mbuf.
 * @param len
 *   Number of bytes to prepend.
 * @return
 *   Pointer to the new data start, or NULL if the headroom is too small.
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
	 uint16_t len)
{
	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	m->data_off -= len;
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}
1660 
1676 static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
1677 {
1678  void *tail;
1679  struct rte_mbuf *m_last;
1680 
1682 
1683  m_last = rte_pktmbuf_lastseg(m);
1684  if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
1685  return NULL;
1686 
1687  tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
1688  m_last->data_len = (uint16_t)(m_last->data_len + len);
1689  m->pkt_len = (m->pkt_len + len);
1690  return (char*) tail;
1691 }
1692 
1707 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
1708 {
1710 
1711  if (unlikely(len > m->data_len))
1712  return NULL;
1713 
1714  m->data_len = (uint16_t)(m->data_len - len);
1715  m->data_off += len;
1716  m->pkt_len = (m->pkt_len - len);
1717  return (char *)m->buf_addr + m->data_off;
1718 }
1719 
/**
 * Remove 'len' bytes of data at the end of the packet (last segment only).
 *
 * @param m
 *   The head of the packet mbuf chain.
 * @param len
 *   Number of bytes to strip from the tail.
 * @return
 *   0 on success, -1 if len exceeds the last segment's data_len.
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	/* pkt_len lives in the head; data_len in the trimmed segment. */
	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len = (m->pkt_len - len);
	return 0;
}
1748 
1758 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
1759 {
1761  return !!(m->nb_segs == 1);
1762 }
1763 
1767 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
1768  uint32_t len, void *buf);
1769 
/**
 * Read 'len' bytes of packet data starting at offset 'off'.
 *
 * Fast path: when the requested range lies entirely within the first
 * segment, a pointer directly into the mbuf is returned and 'buf' is
 * untouched. Otherwise the data is gathered from the segment chain
 * into 'buf' (which must hold at least 'len' bytes).
 *
 * @param m
 *   The packet mbuf.
 * @param off
 *   Byte offset from the start of packet data.
 * @param len
 *   Number of bytes to read.
 * @param buf
 *   Scratch buffer used only in the multi-segment case.
 * @return
 *   Pointer to the data (either in-mbuf or 'buf'), or NULL on bad range.
 */
static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	 uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}
1798 
/**
 * Chain the packet 'tail' onto the end of packet 'head'.
 *
 * On success, head's nb_segs and pkt_len absorb tail's; tail's pkt_len
 * is reduced to its own data_len since it is no longer a packet head.
 *
 * @param head
 *   The head packet mbuf.
 * @param tail
 *   The packet mbuf to append; must itself be a valid chain head.
 * @return
 *   0 on success, -EOVERFLOW if the combined segment count would
 *   exceed RTE_MBUF_MAX_NB_SEGS.
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length. */
	head->nb_segs += tail->nb_segs;
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}
1836 
1847 static inline int
1849 {
1850  uint64_t ol_flags = m->ol_flags;
1851  uint64_t inner_l3_offset = m->l2_len;
1852 
1853  /* Does packet set any of available offloads? */
1854  if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
1855  return 0;
1856 
1857  if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
1858  inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
1859 
1860  /* Headers are fragmented */
1861  if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
1862  return -ENOTSUP;
1863 
1864  /* IP checksum can be counted only for IPv4 packet */
1865  if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
1866  return -EINVAL;
1867 
1868  /* IP type not set when required */
1869  if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
1870  if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
1871  return -EINVAL;
1872 
1873  /* Check requirements for TSO packet */
1874  if (ol_flags & PKT_TX_TCP_SEG)
1875  if ((m->tso_segsz == 0) ||
1876  ((ol_flags & PKT_TX_IPV4) &&
1877  !(ol_flags & PKT_TX_IP_CKSUM)))
1878  return -EINVAL;
1879 
1880  /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
1881  if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
1882  !(ol_flags & PKT_TX_OUTER_IPV4))
1883  return -EINVAL;
1884 
1885  return 0;
1886 }
1887 
1900 static inline int
1902 {
1903  int seg_len, copy_len;
1904  struct rte_mbuf *m;
1905  struct rte_mbuf *m_next;
1906  char *buffer;
1907 
1908  if (rte_pktmbuf_is_contiguous(mbuf))
1909  return 0;
1910 
1911  /* Extend first segment to the total packet length */
1912  copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
1913 
1914  if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
1915  return -1;
1916 
1917  buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
1918  mbuf->data_len = (uint16_t)(mbuf->pkt_len);
1919 
1920  /* Append data from next segments to the first one */
1921  m = mbuf->next;
1922  while (m != NULL) {
1923  m_next = m->next;
1924 
1925  seg_len = rte_pktmbuf_data_len(m);
1926  rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
1927  buffer += seg_len;
1928 
1930  m = m_next;
1931  }
1932 
1933  mbuf->next = NULL;
1934  mbuf->nb_segs = 1;
1935 
1936  return 0;
1937 }
1938 
1953 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
1954 
1955 #ifdef __cplusplus
1956 }
1957 #endif
1958 
1959 #endif /* _RTE_MBUF_H_ */
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:639
struct rte_mbuf * next
Definition: rte_mbuf.h:551
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:723
uint64_t timestamp
Definition: rte_mbuf.h:539
uint16_t vlan_tci_outer
Definition: rte_mbuf.h:532
#define __rte_always_inline
Definition: rte_common.h:137
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
Definition: rte_atomic.h:204
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1178
uint8_t inner_esp_next_proto
Definition: rte_mbuf.h:486
__extension__ typedef void * MARKER[0]
Definition: rte_mbuf.h:415
#define RTE_MBUF_DIRECT(mb)
Definition: rte_mbuf.h:714
#define IND_ATTACHED_MBUF
Definition: rte_mbuf.h:344
rte_iova_t buf_physaddr
Definition: rte_mbuf.h:438
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:1116
static int rte_validate_tx_offload(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1848
#define likely(x)
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1418
uint64_t l2_len
Definition: rte_mbuf.h:559
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
Definition: rte_mbuf.h:1448
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1402
void * buf_addr
Definition: rte_mbuf.h:428
uint32_t l2_type
Definition: rte_mbuf.h:480
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:685
uint16_t data_len
Definition: rte_mbuf.h:504
uint32_t lo
Definition: rte_mbuf.h:517
void * userdata
Definition: rte_mbuf.h:546
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:1815
struct rte_mbuf::@113::@124 fdir
uint8_t inner_l2_type
Definition: rte_mbuf.h:493
uint64_t tso_segsz
Definition: rte_mbuf.h:565
__extension__ typedef uint8_t MARKER8[0]
Definition: rte_mbuf.h:417
uint64_t l4_len
Definition: rte_mbuf.h:564
struct rte_mbuf::@113::@125 sched
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1513
struct rte_mbuf __rte_cache_aligned
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:1200
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:1132
uint32_t cache_size
Definition: rte_mempool.h:240
#define PKT_TX_OUTER_IP_CKSUM
Definition: rte_mbuf.h:310
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:618
#define PKT_TX_IPV6
Definition: rte_mbuf.h:298
uint16_t nb_segs
Definition: rte_mbuf.h:459
uint16_t port
Definition: rte_mbuf.h:462
uint64_t outer_l3_len
Definition: rte_mbuf.h:568
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1351
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1758
uint64_t l3_len
Definition: rte_mbuf.h:563
uint32_t l4_type
Definition: rte_mbuf.h:482
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define PKT_TX_OUTER_IPV4
Definition: rte_mbuf.h:317
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1527
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:901
#define PKT_TX_TCP_SEG
Definition: rte_mbuf.h:254
#define unlikely(x)
uint16_t priv_size
Definition: rte_mbuf.h:577
uint16_t timesync
Definition: rte_mbuf.h:580
uint32_t hi
Definition: rte_mbuf.h:520
__extension__ typedef uint64_t MARKER64[0]
Definition: rte_mbuf.h:419
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
#define RTE_MIN(a, b)
Definition: rte_common.h:317
#define PKT_TX_IPV4
Definition: rte_mbuf.h:290
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:735
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:810
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
Definition: rte_mbuf.h:1901
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1391
uint64_t outer_l2_len
Definition: rte_mbuf.h:569
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
Definition: rte_atomic.h:218
#define CTRL_MBUF_FLAG
Definition: rte_mbuf.h:347
uint16_t refcnt
Definition: rte_mbuf.h:457
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1707
#define rte_pktmbuf_pkt_len(m)
Definition: rte_mbuf.h:1619
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1259
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:1363
uint32_t tun_type
Definition: rte_mbuf.h:483
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
Definition: rte_atomic.h:299
static int rte_is_ctrlmbuf(struct rte_mbuf *m)
Definition: rte_mbuf.h:995
uint64_t ol_flags
Definition: rte_mbuf.h:464
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1310
uint32_t pkt_len
Definition: rte_mbuf.h:503
#define PKT_TX_L4_MASK
Definition: rte_mbuf.h:273
uint16_t buf_len
Definition: rte_mbuf.h:534
uint32_t inner_l4_type
Definition: rte_mbuf.h:499
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1629
#define rte_pktmbuf_mtod(m, t)
Definition: rte_mbuf.h:1581
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:800
uint32_t packet_type
Definition: rte_mbuf.h:478
#define MBUF_INVALID_PORT
Definition: rte_mbuf.h:1145
uint32_t seqn
Definition: rte_mbuf.h:583
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:1095
uint8_t inner_l3_type
Definition: rte_mbuf.h:495
const char * rte_get_rx_ol_flag_name(uint64_t mask)
#define RTE_STD_C11
Definition: rte_common.h:64
#define PKT_TX_IP_CKSUM
Definition: rte_mbuf.h:282
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
Definition: rte_mbuf.h:550
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1676
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:819
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:664
uint32_t rss
Definition: rte_mbuf.h:509
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1734
uint64_t rte_iova_t
Definition: rte_memory.h:106
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:699
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:1790
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1646
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:874
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1496
uint64_t phys_addr_t
Definition: rte_memory.h:97
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:154
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1542
RTE_STD_C11 union rte_mbuf::@110 __rte_aligned
static void * rte_memcpy(void *dst, const void *src, size_t n)
#define PKT_TX_OFFLOAD_MASK
Definition: rte_mbuf.h:330
static rte_iova_t rte_mempool_virt2iova(const void *elt)
Definition: rte_mempool.h:1474
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1232
uint64_t udata64
Definition: rte_mbuf.h:547
uint32_t l3_type
Definition: rte_mbuf.h:481
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:601
rte_atomic16_t refcnt_atomic
Definition: rte_mbuf.h:456
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1509
uint64_t tx_offload
Definition: rte_mbuf.h:556
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:229
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
uint16_t vlan_tci
Definition: rte_mbuf.h:506
#define RTE_MBUF_INDIRECT(mb)
Definition: rte_mbuf.h:709
#define RTE_SET_USED(x)
Definition: rte_common.h:109
#define rte_pktmbuf_mtod_offset(m, t, o)
Definition: rte_mbuf.h:1566
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
uint32_t usr
Definition: rte_mbuf.h:528