/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_CORE_H_
#define _RTE_MBUF_CORE_H_

/**
 * @file
 * This file contains the definition of the RTE mbuf structure itself,
 * the packet offload flags and some related macros.
 * For most DPDK entities, it is not recommended to include this file
 * directly; include <rte_mbuf.h> instead.
 *
 * New fields and flags should fit in the "dynamic space".
 */

#include <stdalign.h>
#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Packet Offload Features Flags. These flags also carry packet type
 * information. They are a critical resource: Rx and Tx share these bits,
 * so be cautious about any change.
 *
 * - RX flags start at bit position zero, and get added to the left of
 *   previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags
 *   get added to the right of the previously defined flags, i.e. they
 *   should count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */

/**
 * The RX packet is an 802.1q VLAN packet, and the TCI has been
 * saved in mbuf->vlan_tci.
 * If the flag RTE_MBUF_F_RX_VLAN_STRIPPED is also present, the VLAN
 * header has been stripped from the mbuf data, else it is still present.
 */
#define RTE_MBUF_F_RX_VLAN (1ULL << 0)

/** RX packet with RSS hash result. */
#define RTE_MBUF_F_RX_RSS_HASH (1ULL << 1)

/** RX packet with FDIR match indication. */
#define RTE_MBUF_F_RX_FDIR (1ULL << 2)

/**
 * This flag is set when the outermost IP header checksum is detected as
 * wrong by the hardware.
 */
#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD (1ULL << 5)

/**
 * A VLAN has been stripped by the hardware and its TCI is saved in
 * mbuf->vlan_tci. This can only happen if VLAN stripping is enabled
 * in the RX configuration of the PMD.
 * When RTE_MBUF_F_RX_VLAN_STRIPPED is set, RTE_MBUF_F_RX_VLAN must also be set.
 */
#define RTE_MBUF_F_RX_VLAN_STRIPPED (1ULL << 6)

/**
 * Mask of bits used to determine the status of the RX IP checksum.
 * - RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
 * - RTE_MBUF_F_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
 * - RTE_MBUF_F_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
 * - RTE_MBUF_F_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
 *   data, but the integrity of the IP header is verified.
 */
#define RTE_MBUF_F_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN 0
#define RTE_MBUF_F_RX_IP_CKSUM_BAD (1ULL << 4)
#define RTE_MBUF_F_RX_IP_CKSUM_GOOD (1ULL << 7)
#define RTE_MBUF_F_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
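/*
 * Illustrative usage sketch, not part of the original header: an application
 * processing received packets could classify the Rx IP checksum status with
 * the mask and values above. The mbuf pointer "m" and the software-fallback
 * helper name are assumptions for this example.
 *
 *   uint64_t ip_cksum = m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK;
 *
 *   if (ip_cksum == RTE_MBUF_F_RX_IP_CKSUM_BAD)
 *       rte_pktmbuf_free(m);           // drop packets with a bad IP checksum
 *   else if (ip_cksum == RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN)
 *       verify_ip_cksum_in_sw(m);      // hypothetical software fallback
 */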
/**
 * Mask of bits used to determine the status of the RX L4 checksum.
 * - RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
 * - RTE_MBUF_F_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
 * - RTE_MBUF_F_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
 * - RTE_MBUF_F_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
 *   data, but the integrity of the L4 data is verified.
 */
#define RTE_MBUF_F_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN 0
#define RTE_MBUF_F_RX_L4_CKSUM_BAD (1ULL << 3)
#define RTE_MBUF_F_RX_L4_CKSUM_GOOD (1ULL << 8)
#define RTE_MBUF_F_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))

/** RX IEEE1588 L2 Ethernet PT Packet. */
#define RTE_MBUF_F_RX_IEEE1588_PTP (1ULL << 9)

/** RX IEEE1588 L2/L4 timestamped packet. */
#define RTE_MBUF_F_RX_IEEE1588_TMST (1ULL << 10)

/** FD id reported if FDIR match. */
#define RTE_MBUF_F_RX_FDIR_ID (1ULL << 13)

/** Flexible bytes reported if FDIR match. */
#define RTE_MBUF_F_RX_FDIR_FLX (1ULL << 14)

/**
 * The outer VLAN has been stripped by the hardware and its TCI is
 * saved in mbuf->vlan_tci_outer.
 * This can only happen if VLAN stripping is enabled in the Rx
 * configuration of the PMD.
 * When RTE_MBUF_F_RX_QINQ_STRIPPED is set, the flags RTE_MBUF_F_RX_VLAN
 * and RTE_MBUF_F_RX_QINQ must also be set.
 *
 * - If both RTE_MBUF_F_RX_QINQ_STRIPPED and RTE_MBUF_F_RX_VLAN_STRIPPED are
 *   set, the two VLANs have been stripped by the hardware and their TCIs are
 *   saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
 * - If RTE_MBUF_F_RX_QINQ_STRIPPED is set and RTE_MBUF_F_RX_VLAN_STRIPPED
 *   is unset, only the outer VLAN is removed from the packet data, but both
 *   TCIs are saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
 */
#define RTE_MBUF_F_RX_QINQ_STRIPPED (1ULL << 15)

/**
 * When packets are coalesced by a hardware or virtual driver, this flag
 * can be set in the RX mbuf, meaning that the m->tso_segsz field is
 * valid and is set to the segment size of the original packets.
 */
#define RTE_MBUF_F_RX_LRO (1ULL << 16)

/* There is no flag defined at offset 17. It is free for any future use. */

/**
 * Indicates that security offload processing was applied on the RX packet.
 */
#define RTE_MBUF_F_RX_SEC_OFFLOAD (1ULL << 18)

/**
 * Indicates that security offload processing failed on the RX packet.
 */
#define RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

/**
 * The RX packet is a double VLAN (QinQ) packet, and the outer TCI has been
 * saved in mbuf->vlan_tci_outer. If this flag is set, RTE_MBUF_F_RX_VLAN
 * must also be set and the inner TCI is saved in mbuf->vlan_tci.
 * If the flag RTE_MBUF_F_RX_QINQ_STRIPPED is also present, both VLAN
 * headers have been stripped from the mbuf data, else they are still present.
 */
#define RTE_MBUF_F_RX_QINQ (1ULL << 20)
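/*
 * Illustrative usage sketch, not part of the original header: reading the
 * VLAN tags according to the flag combinations described above, assuming
 * "m" is a received mbuf.
 *
 *   uint16_t inner_tci = 0, outer_tci = 0;
 *
 *   if (m->ol_flags & RTE_MBUF_F_RX_VLAN)
 *       inner_tci = m->vlan_tci;           // valid for single VLAN and QinQ
 *   if (m->ol_flags & RTE_MBUF_F_RX_QINQ)
 *       outer_tci = m->vlan_tci_outer;     // outer tag of a QinQ packet
 */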
/**
 * Mask of bits used to determine the status of the outer RX L4 checksum.
 * - RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN: no info about the outer RX L4
 *   checksum
 * - RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD: the outer L4 checksum in the packet
 *   is wrong
 * - RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD: the outer L4 checksum in the packet
 *   is valid
 * - RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID: invalid outer L4 checksum state.
 *
 * The detection of RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD shall be based on the
 * given HW capability. At a minimum, the PMD should support the
 * RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN and RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD
 * states if the RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
 */
#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))

#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN 0
#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))

/* Add new RX flags here; don't forget to update RTE_MBUF_F_FIRST_FREE. */

#define RTE_MBUF_F_FIRST_FREE (1ULL << 23)
#define RTE_MBUF_F_LAST_FREE (1ULL << 40)

/* Add new TX flags here; don't forget to update RTE_MBUF_F_LAST_FREE. */

/**
 * Outer UDP checksum offload flag. This flag is used for enabling the
 * outer UDP checksum in the PMD. To use outer UDP checksum, the user needs to:
 * 1) Enable the following in the mbuf:
 *    a) Fill outer_l2_len and outer_l3_len in the mbuf.
 *    b) Set the RTE_MBUF_F_TX_OUTER_UDP_CKSUM flag.
 *    c) Set the RTE_MBUF_F_TX_OUTER_IPV4 or RTE_MBUF_F_TX_OUTER_IPV6 flag.
 * 2) Configure the RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
 */
#define RTE_MBUF_F_TX_OUTER_UDP_CKSUM (1ULL << 41)
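/*
 * Illustrative usage sketch, not part of the original header: requesting
 * outer UDP checksum offload on an IPv4-encapsulated tunnel packet,
 * following the steps listed above. It assumes "m" is the mbuf to transmit,
 * the header sizes match the actual outer headers, and the port has been
 * configured with RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM.
 *
 *   m->ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
 *   m->outer_l2_len = 14;      // outer Ethernet header
 *   m->outer_l3_len = 20;      // outer IPv4 header without options
 */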
/**
 * UDP Fragmentation Offload flag. This flag is used for enabling UDP
 * fragmentation in SW or in HW. When UFO is used, mbuf->tso_segsz is used
 * to store the MSS of the UDP fragments.
 */
#define RTE_MBUF_F_TX_UDP_SEG (1ULL << 42)

/**
 * Request security offload processing on the TX packet.
 * To use Tx security offload, the user needs to fill l2_len in the mbuf,
 * indicating the L2 header size and hence where the L3 header starts.
 * Similarly, l3_len should also be filled, along with ol_flags reflecting
 * the current L3 type.
 */
#define RTE_MBUF_F_TX_SEC_OFFLOAD (1ULL << 43)

/**
 * Offload the MACsec. This flag must be set by the application to enable
 * this offload feature for a packet to be transmitted.
 */
#define RTE_MBUF_F_TX_MACSEC (1ULL << 44)

/**
 * Bits 45:48 are used for the tunnel type.
 * The tunnel type must be specified for TSO or checksum on the inner part
 * of tunnel packets.
 * These flags can be used with RTE_MBUF_F_TX_TCP_SEG for TSO, or
 * RTE_MBUF_F_TX_xxx_CKSUM.
 * The mbuf fields for inner and outer header lengths are required:
 * outer_l2_len, outer_l3_len, l2_len, l3_len, l4_len and tso_segsz for TSO.
 */
#define RTE_MBUF_F_TX_TUNNEL_VXLAN (0x1ULL << 45)
#define RTE_MBUF_F_TX_TUNNEL_GRE (0x2ULL << 45)
#define RTE_MBUF_F_TX_TUNNEL_IPIP (0x3ULL << 45)
#define RTE_MBUF_F_TX_TUNNEL_GENEVE (0x4ULL << 45)
/** TX packet with an MPLS-in-UDP (RFC 7510) header. */
#define RTE_MBUF_F_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
#define RTE_MBUF_F_TX_TUNNEL_GTP (0x7ULL << 45)
#define RTE_MBUF_F_TX_TUNNEL_ESP (0x8ULL << 45)
/**
 * Generic IP encapsulated tunnel type, used for TSO and checksum offload.
 * It can be used for tunnels which are not standard or not listed above.
 * It is preferred to use specific tunnel flags like RTE_MBUF_F_TX_TUNNEL_GRE
 * or RTE_MBUF_F_TX_TUNNEL_IPIP if possible.
 * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_IP_TNL_TSO.
 * Outer and inner checksums are done according to the existing flags like
 * RTE_MBUF_F_TX_xxx_CKSUM.
 * Specific tunnel headers that contain payload length, sequence id
 * or checksum are not expected to be updated.
 */
#define RTE_MBUF_F_TX_TUNNEL_IP (0xDULL << 45)
/**
 * Generic UDP encapsulated tunnel type, used for TSO and checksum offload.
 * The UDP tunnel type implies an outer IP layer.
 * It can be used for tunnels which are not standard or not listed above.
 * It is preferred to use specific tunnel flags like RTE_MBUF_F_TX_TUNNEL_VXLAN
 * if possible.
 * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO.
 * Outer and inner checksums are done according to the existing flags like
 * RTE_MBUF_F_TX_xxx_CKSUM.
 * Specific tunnel headers that contain payload length, sequence id
 * or checksum are not expected to be updated.
 */
#define RTE_MBUF_F_TX_TUNNEL_UDP (0xEULL << 45)
/* Add new TX TUNNEL types here. */
#define RTE_MBUF_F_TX_TUNNEL_MASK (0xFULL << 45)

/**
 * Double VLAN insertion (QinQ) request to the driver; the driver may offload
 * the insertion based on the device capability.
 * The mbuf 'vlan_tci' and 'vlan_tci_outer' fields must be valid when this
 * flag is set.
 */
#define RTE_MBUF_F_TX_QINQ (1ULL << 49)

/**
 * TCP segmentation offload. To enable this offload feature for a
 * packet to be transmitted on hardware supporting TSO:
 * - set the RTE_MBUF_F_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
 *   RTE_MBUF_F_TX_TCP_CKSUM)
 * - set the flag RTE_MBUF_F_TX_IPV4 or RTE_MBUF_F_TX_IPV6
 * - if it's IPv4, set the RTE_MBUF_F_TX_IP_CKSUM flag
 * - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
 */
#define RTE_MBUF_F_TX_TCP_SEG (1ULL << 50)
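/*
 * Illustrative usage sketch, not part of the original header: requesting TSO
 * on a plain IPv4/TCP packet, following the steps listed above. It assumes
 * "m" is the mbuf to transmit, the header sizes match the actual packet
 * headers, and the port has been configured with RTE_ETH_TX_OFFLOAD_TCP_TSO.
 *
 *   m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *                  RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG;
 *   m->l2_len = 14;            // Ethernet header
 *   m->l3_len = 20;            // IPv4 header without options
 *   m->l4_len = 20;            // TCP header without options
 *   m->tso_segsz = 1460;       // MSS of the produced segments
 */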
/** TX IEEE1588 packet to timestamp. */
#define RTE_MBUF_F_TX_IEEE1588_TMST (1ULL << 51)

/*
 * Bits 52 and 53 are used for the L4 packet type with checksum enabled:
 * 00: Reserved, 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum.
 * To use hardware L4 checksum offload, the user needs to:
 * - fill l2_len and l3_len in the mbuf
 * - set the flag RTE_MBUF_F_TX_TCP_CKSUM, RTE_MBUF_F_TX_SCTP_CKSUM or
 *   RTE_MBUF_F_TX_UDP_CKSUM
 * - set the flag RTE_MBUF_F_TX_IPV4 or RTE_MBUF_F_TX_IPV6
 */

/** Disable L4 cksum of TX pkt. */
#define RTE_MBUF_F_TX_L4_NO_CKSUM (0ULL << 52)

/** TCP cksum of TX pkt, computed by NIC. */
#define RTE_MBUF_F_TX_TCP_CKSUM (1ULL << 52)

/** SCTP cksum of TX pkt, computed by NIC. */
#define RTE_MBUF_F_TX_SCTP_CKSUM (2ULL << 52)

/** UDP cksum of TX pkt, computed by NIC. */
#define RTE_MBUF_F_TX_UDP_CKSUM (3ULL << 52)

/** Mask for L4 cksum offload request. */
#define RTE_MBUF_F_TX_L4_MASK (3ULL << 52)

/**
 * Offload the IP checksum in the hardware. The flag RTE_MBUF_F_TX_IPV4 should
 * also be set by the application, although a PMD will only check
 * RTE_MBUF_F_TX_IP_CKSUM.
 * - fill the mbuf offload information: l2_len, l3_len
 */
#define RTE_MBUF_F_TX_IP_CKSUM (1ULL << 54)

/**
 * Packet is IPv4. This flag must be set when using any offload feature
 * (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4
 * packet. If the packet is a tunneled packet, this flag is related to
 * the inner headers.
 */
#define RTE_MBUF_F_TX_IPV4 (1ULL << 55)

/**
 * Packet is IPv6. This flag must be set when using an offload feature
 * (TSO or L4 checksum) to tell the NIC that the packet is an IPv6
 * packet. If the packet is a tunneled packet, this flag is related to
 * the inner headers.
 */
#define RTE_MBUF_F_TX_IPV6 (1ULL << 56)

/**
 * VLAN tag insertion request to the driver; the driver may offload the
 * insertion based on the device capability.
 * The mbuf 'vlan_tci' field must be valid when this flag is set.
 */
#define RTE_MBUF_F_TX_VLAN (1ULL << 57)

/**
 * Offload the IP checksum of an external header in the hardware. The
 * flag RTE_MBUF_F_TX_OUTER_IPV4 should also be set by the application,
 * although a PMD will only check RTE_MBUF_F_TX_OUTER_IP_CKSUM.
 * - fill the mbuf offload information: outer_l2_len, outer_l3_len
 */
#define RTE_MBUF_F_TX_OUTER_IP_CKSUM (1ULL << 58)

/**
 * Packet outer header is IPv4. This flag must be set when using any
 * outer offload feature (L3 or L4 checksum) to tell the NIC that the
 * outer header of the tunneled packet is an IPv4 packet.
 */
#define RTE_MBUF_F_TX_OUTER_IPV4 (1ULL << 59)

/**
 * Packet outer header is IPv6. This flag must be set when using any
 * outer offload feature (L4 checksum) to tell the NIC that the outer
 * header of the tunneled packet is an IPv6 packet.
 */
#define RTE_MBUF_F_TX_OUTER_IPV6 (1ULL << 60)

/**
 * Bitmask of all supported packet Tx offload feature flags
 * which can be set for a packet.
 */
#define RTE_MBUF_F_TX_OFFLOAD_MASK (    \
		RTE_MBUF_F_TX_OUTER_IPV6 |      \
		RTE_MBUF_F_TX_OUTER_IPV4 |      \
		RTE_MBUF_F_TX_OUTER_IP_CKSUM |  \
		RTE_MBUF_F_TX_VLAN |            \
		RTE_MBUF_F_TX_IPV6 |            \
		RTE_MBUF_F_TX_IPV4 |            \
		RTE_MBUF_F_TX_IP_CKSUM |        \
		RTE_MBUF_F_TX_L4_MASK |         \
		RTE_MBUF_F_TX_IEEE1588_TMST |   \
		RTE_MBUF_F_TX_TCP_SEG |         \
		RTE_MBUF_F_TX_QINQ |            \
		RTE_MBUF_F_TX_TUNNEL_MASK |     \
		RTE_MBUF_F_TX_MACSEC |          \
		RTE_MBUF_F_TX_SEC_OFFLOAD |     \
		RTE_MBUF_F_TX_UDP_SEG |         \
		RTE_MBUF_F_TX_OUTER_UDP_CKSUM)

/**
 * Mbuf having an external buffer attached. shinfo in the mbuf must be filled.
 */
#define RTE_MBUF_F_EXTERNAL (1ULL << 61)

#define RTE_MBUF_F_INDIRECT (1ULL << 62) /**< Indirect attached mbuf */

/** Alignment constraint of the mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8

/**
 * Some NICs need at least a 2KB buffer to RX a standard Ethernet frame
 * without splitting it into multiple segments.
 * So, for mbufs that are planned to be involved in RX/TX, the recommended
 * minimum buffer length is 2KB + RTE_PKTMBUF_HEADROOM.
 */
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
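/*
 * Illustrative usage sketch, not part of the original header: creating a
 * packet mbuf pool with the recommended default buffer size, using
 * rte_pktmbuf_pool_create() from rte_mbuf.h. The pool name, element count
 * and cache size below are arbitrary example values.
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("example_pool",
 *           8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 */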
struct rte_mbuf_sched {
	uint32_t queue_id;        /**< Queue ID. */
	uint8_t traffic_class;
	/**< Traffic class ID. Traffic class 0
	 * is the highest priority traffic class.
	 */
	uint8_t color;
	/**< Color. @see enum rte_color. */
	uint16_t reserved;        /**< Reserved. */
}; /**< Hierarchical scheduler */

/**
 * Enum for the tx_offload bit-field lengths and offsets.
 * It defines the layout of the rte_mbuf tx_offload field.
 */
enum {
	RTE_MBUF_L2_LEN_BITS = 7,
	RTE_MBUF_L3_LEN_BITS = 9,
	RTE_MBUF_L4_LEN_BITS = 8,
	RTE_MBUF_TSO_SEGSZ_BITS = 16,
	RTE_MBUF_OUTL3_LEN_BITS = 9,
	RTE_MBUF_OUTL2_LEN_BITS = 7,
	RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
		RTE_MBUF_L2_LEN_BITS -
		RTE_MBUF_L3_LEN_BITS -
		RTE_MBUF_L4_LEN_BITS -
		RTE_MBUF_TSO_SEGSZ_BITS -
		RTE_MBUF_OUTL3_LEN_BITS -
		RTE_MBUF_OUTL2_LEN_BITS,
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	RTE_MBUF_L2_LEN_OFS =
		sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
	RTE_MBUF_OUTL3_LEN_OFS =
		RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
	RTE_MBUF_OUTL2_LEN_OFS =
		RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
	RTE_MBUF_TXOFLD_UNUSED_OFS =
		RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
#else
	RTE_MBUF_L2_LEN_OFS = 0,
	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
	RTE_MBUF_OUTL3_LEN_OFS =
		RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
	RTE_MBUF_OUTL2_LEN_OFS =
		RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
	RTE_MBUF_TXOFLD_UNUSED_OFS =
		RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
#endif
};
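/*
 * Illustrative usage sketch, not part of the original header: the *_OFS and
 * *_BITS constants above describe the bit layout of the 64-bit tx_offload
 * word, so the individual lengths can be combined and stored in a single
 * write instead of assigning each bit-field separately (rte_mbuf.h builds a
 * helper on top of these constants). "m" and the length values below are
 * assumptions for this example.
 *
 *   m->tx_offload = ((uint64_t)14 << RTE_MBUF_L2_LEN_OFS) |
 *                   ((uint64_t)20 << RTE_MBUF_L3_LEN_OFS) |
 *                   ((uint64_t)20 << RTE_MBUF_L4_LEN_OFS) |
 *                   ((uint64_t)1460 << RTE_MBUF_TSO_SEGSZ_OFS);
 */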
/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
	RTE_MARKER cacheline0;

	void *buf_addr;           /**< Virtual address of segment buffer. */
#if RTE_IOVA_IN_MBUF
	/**
	 * Physical address of segment buffer.
	 * This field is undefined if the build is configured to use only
	 * virtual address as IOVA (i.e. RTE_IOVA_IN_MBUF is 0).
	 * Force alignment to 8 bytes, so as to ensure we have the exact
	 * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
	 * working on vector drivers easier.
	 */
	alignas(sizeof(rte_iova_t)) rte_iova_t buf_iova;
#else
	/**
	 * Next segment of scattered packet.
	 * This field is valid when the physical address field is undefined.
	 * Otherwise, the next pointer in the second cache line will be used.
	 */
	struct rte_mbuf *next;
#endif

	/* next 8 bytes are initialised on RX descriptor rearm */
	RTE_MARKER64 rearm_data;
	uint16_t data_off;

	/**
	 * Reference counter. Its size should be at least equal to the size
	 * of the port field (16 bits), to support zero-copy broadcast.
	 * It should only be accessed using the following functions:
	 * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
	 * rte_mbuf_refcnt_set(). The functionality of these functions (atomic
	 * or non-atomic) is controlled by the RTE_MBUF_REFCNT_ATOMIC flag.
	 */
	RTE_ATOMIC(uint16_t) refcnt;

	/**
	 * Number of segments. Only valid for the first segment of an mbuf
	 * chain.
	 */
	uint16_t nb_segs;

	/** Input port (16 bits to support more than 256 virtual ports).
	 * The event eth Tx adapter uses this field to specify the output port.
	 */
	uint16_t port;

	uint64_t ol_flags;        /**< Offload features. */

	/* remaining bytes are set on RX when pulling the packet from the descriptor */
	RTE_MARKER rx_descriptor_fields1;

	/*
	 * The packet type, which is the combination of outer/inner L2, L3, L4
	 * and tunnel types. The packet_type describes the data actually
	 * present in the mbuf. Example: if VLAN stripping is enabled, a
	 * received VLAN packet would have RTE_PTYPE_L2_ETHER and not
	 * RTE_PTYPE_L2_VLAN, because the VLAN header is stripped from the data.
	 */
	union {
		uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
		__extension__
		struct {
			uint8_t l2_type:4;  /**< (Outer) L2 type. */
			uint8_t l3_type:4;  /**< (Outer) L3 type. */
			uint8_t l4_type:4;  /**< (Outer) L4 type. */
			uint8_t tun_type:4; /**< Tunnel type. */
			union {
				uint8_t inner_esp_next_proto;
				/**< ESP next protocol type, valid if
				 * RTE_PTYPE_TUNNEL_ESP tunnel type is set
				 * on both Tx and Rx.
				 */
				__extension__
				struct {
					uint8_t inner_l2_type:4;
					/**< Inner L2 type. */
					uint8_t inner_l3_type:4;
					/**< Inner L3 type. */
				};
			};
			uint8_t inner_l4_type:4; /**< Inner L4 type. */
		};
	};

	uint32_t pkt_len;    /**< Total pkt len: sum of all segments. */
	uint16_t data_len;   /**< Amount of data in segment buffer. */
	/** VLAN TCI (CPU order), valid if RTE_MBUF_F_RX_VLAN is set. */
	uint16_t vlan_tci;

	union {
		union {
			uint32_t rss;     /**< RSS hash result if RSS enabled */
			struct {
				union {
					struct {
						uint16_t hash;
						uint16_t id;
					};
					uint32_t lo;
					/**< Second 4 flexible bytes */
				};
				uint32_t hi;
				/**< First 4 flexible bytes or FD ID, dependent
				 * on RTE_MBUF_F_RX_FDIR_* flag in ol_flags.
				 */
			} fdir;   /**< Filter identifier if FDIR enabled */
			struct rte_mbuf_sched sched;
			/**< Hierarchical scheduler : 8 bytes */
			struct {
				uint32_t reserved1;
				uint16_t reserved2;
				uint16_t txq;
				/**< The event eth Tx adapter uses this field
				 * to store the Tx queue id.
				 * @see rte_event_eth_tx_adapter_txq_set()
				 */
			} txadapter; /**< Eventdev ethdev Tx adapter */
			uint32_t usr;
			/**< User defined tags. See rte_distributor_process() */
		} hash;  /**< hash information */
	};

	/** Outer VLAN TCI (CPU order), valid if RTE_MBUF_F_RX_QINQ is set. */
	uint16_t vlan_tci_outer;

	uint16_t buf_len;    /**< Length of segment buffer. */

	struct rte_mempool *pool; /**< Pool from which the mbuf was allocated. */

	/* second cache line - fields only used in slow path or on TX */
	alignas(RTE_CACHE_LINE_MIN_SIZE) RTE_MARKER cacheline1;

#if RTE_IOVA_IN_MBUF
	/**
	 * Next segment of scattered packet. Must be NULL in the last
	 * segment or in case of a non-segmented packet.
	 */
	struct rte_mbuf *next;
#else
	/**
	 * Reserved for dynamic fields
	 * when the next pointer is in the first cache line
	 * (i.e. RTE_IOVA_IN_MBUF is 0).
	 */
	uint64_t dynfield2;
#endif

	/* fields to support TX offloads */
	union {
		uint64_t tx_offload; /**< combined for easy fetch */
		__extension__
		struct {
			uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
			/**< L2 (MAC) Header Length for non-tunneling pkt.
			 * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
			 */
			uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
			/**< L3 (IP) Header Length. */
			uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
			/**< L4 (TCP/UDP) Header Length. */
			uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
			/**< TCP TSO segment size */

			/*
			 * Fields for Tx offloading of tunnels.
			 * These are undefined for packets which don't request
			 * any tunnel offloads (outer IP or UDP checksum,
			 * tunnel TSO).
			 *
			 * PMDs should not use these fields unconditionally
			 * when calculating offsets.
			 *
			 * Applications are expected to set the appropriate
			 * tunnel offload flags when they fill in these fields.
			 */
			uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS;
			/**< Outer L3 (IP) Hdr Length. */
			uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS;
			/**< Outer L2 (MAC) Hdr Length. */

			/* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
		};
	};

	/** Shared data for external buffer attached to the mbuf. See
	 * rte_pktmbuf_attach_extbuf().
	 */
	struct rte_mbuf_ext_shared_info *shinfo;

	/** Size of the application private data. In case of an indirect
	 * mbuf, it stores the direct mbuf private data size.
	 */
	uint16_t priv_size;

	/** Timesync flags for use with IEEE1588. */
	uint16_t timesync;

	uint32_t dynfield1[9]; /**< Reserved for dynamic fields. */
} __rte_cache_aligned;
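/*
 * Illustrative usage sketch, not part of the original header: reading the Rx
 * "hash" union according to the ol_flags that report which member is valid,
 * assuming "m" is a received mbuf.
 *
 *   uint32_t rss_hash = 0, flow_id = 0;
 *
 *   if (m->ol_flags & RTE_MBUF_F_RX_RSS_HASH)
 *       rss_hash = m->hash.rss;        // RSS hash computed by the NIC
 *   else if (m->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
 *       flow_id = m->hash.fdir.hi;     // flow director filter ID
 */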
/**
 * Function typedef of the callback to free an externally attached buffer.
 */
typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);

/**
 * Shared data at the end of an external buffer.
 */
struct rte_mbuf_ext_shared_info {
	rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */
	void *fcb_opaque;                        /**< Free callback argument */
	RTE_ATOMIC(uint16_t) refcnt;
};

/** Maximum number of nb_segs allowed. */
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX

/**
 * Returns TRUE if the given mbuf is cloned by mbuf indirection, or FALSE
 * otherwise.
 *
 * If an mbuf has its data in another mbuf and references it by mbuf
 * indirection, this mbuf can be defined as a cloned mbuf.
 */
#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & RTE_MBUF_F_INDIRECT)

/**
 * Returns TRUE if the given mbuf has an external buffer, or FALSE otherwise.
 *
 * An external buffer is a user-provided anonymous buffer.
 */
#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & RTE_MBUF_F_EXTERNAL)

/**
 * Returns TRUE if the given mbuf is direct, or FALSE otherwise.
 *
 * If an mbuf embeds its own data after the rte_mbuf structure, this mbuf
 * can be defined as a direct mbuf.
 */
#define RTE_MBUF_DIRECT(mb) \
	(!((mb)->ol_flags & (RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL)))

/** Uninitialized or unspecified port. */
#define RTE_MBUF_PORT_INVALID UINT16_MAX
/** For backwards compatibility. */
#define MBUF_INVALID_PORT RTE_MBUF_PORT_INVALID

/**
 * A macro that points to an offset into the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this
 * macro, the user must ensure that the first segment is large
 * enough to accommodate its data.
 *
 * @param m
 *   The packet mbuf.
 * @param o
 *   The offset into the mbuf data.
 * @param t
 *   The type to cast the result into.
 */
#define rte_pktmbuf_mtod_offset(m, t, o) \
	((t)(void *)((char *)(m)->buf_addr + (m)->data_off + (o)))
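/*
 * Illustrative usage sketch, not part of the original header: using the
 * macro above to access packet headers, assuming the first segment of "m"
 * holds at least an Ethernet and an IPv4 header (struct rte_ether_hdr and
 * struct rte_ipv4_hdr come from rte_ether.h and rte_ip.h).
 *
 *   struct rte_ether_hdr *eth =
 *           rte_pktmbuf_mtod_offset(m, struct rte_ether_hdr *, 0);
 *   struct rte_ipv4_hdr *ip =
 *           rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
 *                                   sizeof(struct rte_ether_hdr));
 */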
/**
 * A macro that points to the start of the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this
 * macro, the user must ensure that the first segment is large
 * enough to accommodate its data.
 *
 * @param m
 *   The packet mbuf.
 * @param t
 *   The type to cast the result into.
 */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

/**
 * A macro that returns the IO address that points to an offset of the
 * start of the data in the mbuf.
 *
 * @param m
 *   The packet mbuf.
 * @param o
 *   The offset into the data to calculate the address from.
 */
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)(rte_mbuf_iova_get(m) + (m)->data_off + (o))

/**
 * A macro that returns the IO address that points to the start of the
 * data in the mbuf.
 *
 * @param m
 *   The packet mbuf.
 */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_CORE_H_ */