/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_CORE_H_
#define _RTE_MBUF_CORE_H_

/**
 * @file
 * This file contains the definition of the RTE mbuf structure itself,
 * packet offload flags and some related macros.
 * For the majority of DPDK entities, it is not recommended to include
 * this file directly; include <rte_mbuf.h> instead.
 *
 * New fields and flags should fit in the "dynamic space".
 */

#include <stdint.h>
#include <limits.h>

#include <rte_compat.h>
#include <rte_byteorder.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Packet Offload Features Flags. They also carry packet type information.
 * Critical resources: both Rx and Tx share these bits, so be cautious
 * about any change.
 *
 * - RX flags start at bit position zero, and get added to the left of
 *   previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
 *   added to the right of the previously defined flags, i.e. they should count
 *   downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */

/**
 * The RX packet is an 802.1q VLAN packet, and the TCI has been
 * saved in mbuf->vlan_tci.
 * If the flag PKT_RX_VLAN_STRIPPED is also present, the VLAN
 * header has been stripped from mbuf data, else it is still
 * present.
 */
#define PKT_RX_VLAN          (1ULL << 0)

/** RX packet with RSS hash result. */
#define PKT_RX_RSS_HASH      (1ULL << 1)

/** RX packet with FDIR match indication. */
#define PKT_RX_FDIR          (1ULL << 2)

/**
 * Deprecated.
 * Checking this flag alone is deprecated: check the 2 bits of
 * PKT_RX_L4_CKSUM_MASK.
 * This flag was set when the L4 checksum of a packet was detected as
 * wrong by the hardware.
 */
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)

/**
 * Deprecated.
 * Checking this flag alone is deprecated: check the 2 bits of
 * PKT_RX_IP_CKSUM_MASK.
 * This flag was set when the IP checksum of a packet was detected as
 * wrong by the hardware.
 */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)

/**
 * This flag is set when the outermost IP header checksum is detected as
 * wrong by the hardware.
 */
#define PKT_RX_OUTER_IP_CKSUM_BAD (1ULL << 5)

/**
 * Deprecated.
 * This flag has been renamed, use PKT_RX_OUTER_IP_CKSUM_BAD instead.
 */
#define PKT_RX_EIP_CKSUM_BAD \
	RTE_DEPRECATED(PKT_RX_EIP_CKSUM_BAD) PKT_RX_OUTER_IP_CKSUM_BAD

/**
 * A VLAN has been stripped by the hardware and its TCI is saved in
 * mbuf->vlan_tci. This can only happen if VLAN stripping is enabled
 * in the RX configuration of the PMD.
 * When PKT_RX_VLAN_STRIPPED is set, PKT_RX_VLAN must also be set.
 */
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

/**
 * Mask of bits used to determine the status of RX IP checksum.
 * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
 * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
 * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
 * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
 *   data, but the integrity of the IP header is verified.
 */
#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD     (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7))
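
/*
 * Example (illustrative sketch, not part of this header's API): applications
 * normally compare the whole two-bit Rx IP checksum status against one of the
 * values above rather than testing PKT_RX_IP_CKSUM_BAD alone. Assuming 'm' is
 * a received mbuf and the helpers below are hypothetical application code:
 *
 *	uint64_t st = m->ol_flags & PKT_RX_IP_CKSUM_MASK;
 *	if (st == PKT_RX_IP_CKSUM_BAD)
 *		handle_bad_l3_cksum(m);     // hypothetical application helper
 *	else if (st == PKT_RX_IP_CKSUM_UNKNOWN)
 *		verify_l3_cksum_in_sw(m);   // hypothetical software fallback
 */
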
/**
 * Mask of bits used to determine the status of RX L4 checksum.
 * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
 * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
 * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
 * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
 *   data, but the integrity of the L4 data is verified.
 */
#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD     (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8))

/** RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_PTP  (1ULL << 9)

/** RX IEEE1588 L2/L4 timestamped packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10)

/** FD ID reported if FDIR match. */
#define PKT_RX_FDIR_ID       (1ULL << 13)

/** Flexible bytes reported if FDIR match. */
#define PKT_RX_FDIR_FLX      (1ULL << 14)

/**
 * The outer VLAN has been stripped by the hardware and its TCI is
 * saved in mbuf->vlan_tci_outer.
 * This can only happen if VLAN stripping is enabled in the Rx
 * configuration of the PMD.
 * When PKT_RX_QINQ_STRIPPED is set, the flags PKT_RX_VLAN and PKT_RX_QINQ
 * must also be set.
 *
 * - If both PKT_RX_QINQ_STRIPPED and PKT_RX_VLAN_STRIPPED are set, the 2 VLANs
 *   have been stripped by the hardware and their TCIs are saved in
 *   mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
 * - If PKT_RX_QINQ_STRIPPED is set and PKT_RX_VLAN_STRIPPED is unset, only the
 *   outer VLAN is removed from the packet data, but both TCIs are saved in
 *   mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
 */
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)

/**
 * When packets are coalesced by a hardware or virtual driver, this flag
 * can be set in the RX mbuf, meaning that the m->tso_segsz field is
 * valid and is set to the segment size of the original packets.
 */
#define PKT_RX_LRO           (1ULL << 16)

/* There is no flag defined at offset 17. It is free for any future use. */

/**
 * Indicate that security offload processing was applied on the RX packet.
 */
#define PKT_RX_SEC_OFFLOAD   (1ULL << 18)

/**
 * Indicate that security offload processing failed on the RX packet.
 */
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

/**
 * The RX packet is a double VLAN packet, and the outer TCI has been
 * saved in mbuf->vlan_tci_outer. If this flag is set, PKT_RX_VLAN
 * must also be set and the inner TCI is saved in mbuf->vlan_tci.
 * If the flag PKT_RX_QINQ_STRIPPED is also present, both VLAN
 * headers have been stripped from mbuf data, else they are still
 * present.
 */
#define PKT_RX_QINQ          (1ULL << 20)
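
/*
 * Example (illustrative sketch, not part of this header's API): reading the
 * tags of a received double-VLAN packet, assuming 'm' is a received mbuf:
 *
 *	if (m->ol_flags & PKT_RX_QINQ) {
 *		uint16_t outer_tci = m->vlan_tci_outer;
 *		uint16_t inner_tci = m->vlan_tci;
 *		// The tags have been removed from the packet data only if
 *		// PKT_RX_QINQ_STRIPPED / PKT_RX_VLAN_STRIPPED are also set.
 *	}
 */
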
/**
 * Mask of bits used to determine the status of outer RX L4 checksum.
 * - PKT_RX_OUTER_L4_CKSUM_UNKNOWN: no info about the outer RX L4 checksum
 * - PKT_RX_OUTER_L4_CKSUM_BAD: the outer L4 checksum in the packet is wrong
 * - PKT_RX_OUTER_L4_CKSUM_GOOD: the outer L4 checksum in the packet is valid
 * - PKT_RX_OUTER_L4_CKSUM_INVALID: invalid outer L4 checksum state.
 *
 * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
 * HW capability. At minimum, the PMD should support the
 * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
 * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
 */
#define PKT_RX_OUTER_L4_CKSUM_MASK	((1ULL << 21) | (1ULL << 22))

#define PKT_RX_OUTER_L4_CKSUM_UNKNOWN	0
#define PKT_RX_OUTER_L4_CKSUM_BAD	(1ULL << 21)
#define PKT_RX_OUTER_L4_CKSUM_GOOD	(1ULL << 22)
#define PKT_RX_OUTER_L4_CKSUM_INVALID	((1ULL << 21) | (1ULL << 22))

/* add new RX flags here, don't forget to update PKT_FIRST_FREE */

#define PKT_FIRST_FREE (1ULL << 23)
#define PKT_LAST_FREE (1ULL << 40)

/* add new TX flags here, don't forget to update PKT_LAST_FREE */

/**
 * Outer UDP checksum offload flag. This flag is used for enabling the
 * outer UDP checksum in a PMD. To use outer UDP checksum, the user needs to:
 * 1) Enable the following in the mbuf:
 *    a) Fill outer_l2_len and outer_l3_len in the mbuf.
 *    b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
 *    c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
 * 2) Configure the DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
 */
#define PKT_TX_OUTER_UDP_CKSUM     (1ULL << 41)

/**
 * UDP Fragmentation Offload flag. This flag is used for enabling UDP
 * fragmentation in SW or in HW. When UFO is used, mbuf->tso_segsz is used
 * to store the MSS of UDP fragments.
 */
#define PKT_TX_UDP_SEG	(1ULL << 42)

/**
 * Request security offload processing on the TX packet.
 * To use Tx security offload, the user needs to fill l2_len in the mbuf,
 * indicating the L2 header size and where the L3 header starts.
 */
#define PKT_TX_SEC_OFFLOAD	(1ULL << 43)

/**
 * Offload the MACsec. This flag must be set by the application to enable
 * this offload feature for a packet to be transmitted.
 */
#define PKT_TX_MACSEC        (1ULL << 44)

/**
 * Bits 45:48 are used for the tunnel type.
 * The tunnel type must be specified for TSO or checksum on the inner part
 * of tunnel packets.
 * These flags can be used with PKT_TX_TCP_SEG for TSO, or PKT_TX_xxx_CKSUM.
 * The mbuf fields for inner and outer header lengths are required:
 * outer_l2_len, outer_l3_len, l2_len, l3_len, l4_len and tso_segsz for TSO.
 */
#define PKT_TX_TUNNEL_VXLAN   (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE     (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP    (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE  (0x4ULL << 45)
/** TX packet with MPLS-in-UDP RFC 7510 header. */
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
#define PKT_TX_TUNNEL_GTP       (0x7ULL << 45)
/**
 * Generic IP encapsulated tunnel type, used for TSO and checksum offload.
 * It can be used for tunnels which are not standards or listed above.
 * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
 * or PKT_TX_TUNNEL_IPIP if possible.
 * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
 * Outer and inner checksums are done according to the existing flags like
 * PKT_TX_xxx_CKSUM.
 * Specific tunnel headers that contain payload length, sequence id
 * or checksum are not expected to be updated.
 */
#define PKT_TX_TUNNEL_IP (0xDULL << 45)
/**
 * Generic UDP encapsulated tunnel type, used for TSO and checksum offload.
 * UDP tunnel type implies outer IP layer.
 * It can be used for tunnels which are not standards or listed above.
 * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
 * if possible.
 * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
 * Outer and inner checksums are done according to the existing flags like
 * PKT_TX_xxx_CKSUM.
 * Specific tunnel headers that contain payload length, sequence id
 * or checksum are not expected to be updated.
 */
#define PKT_TX_TUNNEL_UDP (0xEULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK	(0xFULL << 45)
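
/*
 * Example (illustrative sketch, not part of this header's API): requesting
 * inner TSO and checksums for a VXLAN-encapsulated TCP/IPv4 packet, assuming
 * 'm' is the mbuf to transmit and the corresponding DEV_TX_OFFLOAD_*
 * capabilities are enabled on the port. The PKT_TX_* checksum and TSO flags
 * referenced here are defined further below in this file; header lengths are
 * example values for headers without options:
 *
 *	m->outer_l2_len = 14;            // outer Ethernet
 *	m->outer_l3_len = 20;            // outer IPv4
 *	m->l2_len = 8 + 8 + 14;          // outer UDP + VXLAN + inner Ethernet
 *	m->l3_len = 20;                  // inner IPv4
 *	m->l4_len = 20;                  // inner TCP
 *	m->tso_segsz = 1400;             // example MSS
 *	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
 *		PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
 *		PKT_TX_OUTER_UDP_CKSUM |
 *		PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
 *		PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
 */
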
/**
 * Double VLAN insertion (QinQ) request to driver, driver may offload the
 * insertion based on the device capability.
 * mbuf 'vlan_tci' & 'vlan_tci_outer' must be valid when this flag is set.
 */
#define PKT_TX_QINQ        (1ULL << 49)
/** This old name is deprecated. */
#define PKT_TX_QINQ_PKT PKT_TX_QINQ

/**
 * TCP segmentation offload. To enable this offload feature for a
 * packet to be transmitted on hardware supporting TSO:
 *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
 *    PKT_TX_TCP_CKSUM)
 *  - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
 *  - if it's IPv4, set the PKT_TX_IP_CKSUM flag
 *  - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
 * (an illustrative example follows the PKT_TX_IPV6 definition below)
 */
#define PKT_TX_TCP_SEG       (1ULL << 50)

/** TX IEEE1588 packet to timestamp. */
#define PKT_TX_IEEE1588_TMST (1ULL << 51)

/**
 * Bits 52 and 53 are used for the L4 packet type with checksum enabled:
 * 00: Reserved, 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum.
 * To use hardware L4 checksum offload, the user needs to:
 *  - fill l2_len and l3_len in the mbuf
 *  - set the flag PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
 *  - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
 */
#define PKT_TX_L4_NO_CKSUM   (0ULL << 52) /**< Disable L4 cksum of TX pkt. */

/** TCP cksum of TX pkt. computed by NIC. */
#define PKT_TX_TCP_CKSUM     (1ULL << 52)

/** SCTP cksum of TX pkt. computed by NIC. */
#define PKT_TX_SCTP_CKSUM    (2ULL << 52)

/** UDP cksum of TX pkt. computed by NIC. */
#define PKT_TX_UDP_CKSUM     (3ULL << 52)

/** Mask for L4 cksum offload request. */
#define PKT_TX_L4_MASK       (3ULL << 52)

/**
 * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
 * also be set by the application, although a PMD will only check
 * PKT_TX_IP_CKSUM.
 *  - fill the mbuf offload information: l2_len, l3_len
 */
#define PKT_TX_IP_CKSUM      (1ULL << 54)

/**
 * Packet is IPv4. This flag must be set when using any offload feature
 * (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4
 * packet. If the packet is a tunneled packet, this flag is related to
 * the inner headers.
 */
#define PKT_TX_IPV4          (1ULL << 55)

/**
 * Packet is IPv6. This flag must be set when using an offload feature
 * (TSO or L4 checksum) to tell the NIC that the packet is an IPv6
 * packet. If the packet is a tunneled packet, this flag is related to
 * the inner headers.
 */
#define PKT_TX_IPV6          (1ULL << 56)
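
/*
 * Example (illustrative sketch, not part of this header's API): requesting
 * TSO for a plain (non-tunneled) TCP/IPv4 packet, assuming 'm' is the mbuf
 * to transmit and DEV_TX_OFFLOAD_TCP_TSO is enabled on the port; header
 * lengths are example values for headers without options:
 *
 *	m->l2_len = 14;                  // Ethernet header
 *	m->l3_len = 20;                  // IPv4 header
 *	m->l4_len = 20;                  // TCP header
 *	m->tso_segsz = 1460;             // example MSS
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
 */
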
/**
 * VLAN tag insertion request to driver, driver may offload the insertion
 * based on the device capability.
 * mbuf 'vlan_tci' field must be valid when this flag is set.
 */
#define PKT_TX_VLAN          (1ULL << 57)
/* This old name is deprecated. */
#define PKT_TX_VLAN_PKT PKT_TX_VLAN

/**
 * Offload the IP checksum of an external header in the hardware. The
 * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
 * a PMD will only check PKT_TX_OUTER_IP_CKSUM.
 *  - fill the mbuf offload information: outer_l2_len, outer_l3_len
 */
#define PKT_TX_OUTER_IP_CKSUM   (1ULL << 58)

/**
 * Packet outer header is IPv4. This flag must be set when using any
 * outer offload feature (L3 or L4 checksum) to tell the NIC that the
 * outer header of the tunneled packet is an IPv4 packet.
 */
#define PKT_TX_OUTER_IPV4   (1ULL << 59)

/**
 * Packet outer header is IPv6. This flag must be set when using any
 * outer offload feature (L4 checksum) to tell the NIC that the outer
 * header of the tunneled packet is an IPv6 packet.
 */
#define PKT_TX_OUTER_IPV6    (1ULL << 60)

/**
 * Bitmask of all supported packet Tx offload feature flags,
 * which can be set for a packet.
 */
#define PKT_TX_OFFLOAD_MASK (    \
		PKT_TX_OUTER_IPV6 |	 \
		PKT_TX_OUTER_IPV4 |	 \
		PKT_TX_OUTER_IP_CKSUM |  \
		PKT_TX_VLAN_PKT |        \
		PKT_TX_IPV6 |            \
		PKT_TX_IPV4 |            \
		PKT_TX_IP_CKSUM |        \
		PKT_TX_L4_MASK |         \
		PKT_TX_IEEE1588_TMST |	 \
		PKT_TX_TCP_SEG |         \
		PKT_TX_QINQ_PKT |        \
		PKT_TX_TUNNEL_MASK |	 \
		PKT_TX_MACSEC |		 \
		PKT_TX_SEC_OFFLOAD |	 \
		PKT_TX_UDP_SEG |	 \
		PKT_TX_OUTER_UDP_CKSUM)

/**
 * Mbuf has an external buffer attached. The shinfo field in the mbuf
 * must be filled.
 */
#define EXT_ATTACHED_MBUF    (1ULL << 61)

#define IND_ATTACHED_MBUF    (1ULL << 62) /**< Indirect attached mbuf */

/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8

/**
 * Some NICs need at least a 2KB buffer to RX a standard Ethernet frame
 * without splitting it into multiple segments.
 * So, for mbufs that are planned to be used for RX/TX, the recommended
 * minimal buffer length is 2KB + RTE_PKTMBUF_HEADROOM.
 */
#define RTE_MBUF_DEFAULT_DATAROOM	2048
#define RTE_MBUF_DEFAULT_BUF_SIZE	\
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
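
/*
 * Example (illustrative sketch, not part of this header's API): creating a
 * packet mbuf pool with the recommended default buffer size, using
 * rte_pktmbuf_pool_create() declared in <rte_mbuf.h>; the pool name, size
 * and cache values below are arbitrary:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("mbuf_pool",
 *		8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 */
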
struct rte_mbuf_sched {
	uint32_t queue_id;   /**< Queue ID. */
	uint8_t traffic_class;
	/**< Traffic class ID. Traffic class 0
	 * is the highest priority traffic class.
	 */
	uint8_t color;
	/**< Color. @see enum rte_color.*/
	uint16_t reserved;   /**< Reserved. */
}; /**< Hierarchical scheduler */

/**
 * Enum for the tx_offload bit-field lengths and offsets.
 * Defines the layout of the rte_mbuf tx_offload field.
 */
enum {
	RTE_MBUF_L2_LEN_BITS = 7,
	RTE_MBUF_L3_LEN_BITS = 9,
	RTE_MBUF_L4_LEN_BITS = 8,
	RTE_MBUF_TSO_SEGSZ_BITS = 16,
	RTE_MBUF_OUTL3_LEN_BITS = 9,
	RTE_MBUF_OUTL2_LEN_BITS = 7,
	RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
		RTE_MBUF_L2_LEN_BITS -
		RTE_MBUF_L3_LEN_BITS -
		RTE_MBUF_L4_LEN_BITS -
		RTE_MBUF_TSO_SEGSZ_BITS -
		RTE_MBUF_OUTL3_LEN_BITS -
		RTE_MBUF_OUTL2_LEN_BITS,
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	RTE_MBUF_L2_LEN_OFS =
		sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
	RTE_MBUF_OUTL3_LEN_OFS =
		RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
	RTE_MBUF_OUTL2_LEN_OFS =
		RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
	RTE_MBUF_TXOFLD_UNUSED_OFS =
		RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
#else
	RTE_MBUF_L2_LEN_OFS = 0,
	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
	RTE_MBUF_OUTL3_LEN_OFS =
		RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
	RTE_MBUF_OUTL2_LEN_OFS =
		RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
	RTE_MBUF_TXOFLD_UNUSED_OFS =
		RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
#endif
};

/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
	RTE_MARKER cacheline0;

	void *buf_addr;           /**< Virtual address of segment buffer. */
	/**
	 * Physical address of segment buffer.
	 * Force alignment to 8-bytes, so as to ensure we have the exact
	 * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
	 * working on vector drivers easier.
	 */
	rte_iova_t buf_iova __rte_aligned(sizeof(rte_iova_t));

	/* next 8 bytes are initialised on RX descriptor rearm */
	RTE_MARKER64 rearm_data;
	uint16_t data_off;

	/**
	 * Reference counter. Its size should at least equal the size of
	 * the port field (16 bits), to support zero-copy broadcast.
	 * It should only be accessed using the following functions:
	 * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
	 * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
	 * or non-atomic) is controlled by the RTE_MBUF_REFCNT_ATOMIC flag.
	 */
	uint16_t refcnt;
	uint16_t nb_segs;         /**< Number of segments. */

	/** Input port (16 bits to support more than 256 virtual ports).
	 * The event eth Tx adapter uses this field to specify the output port.
	 */
	uint16_t port;

	uint64_t ol_flags;        /**< Offload features. */

	/* remaining bytes are set on RX when pulling packet from descriptor */
	RTE_MARKER rx_descriptor_fields1;

	/*
	 * The packet type, which is the combination of outer/inner L2, L3, L4
	 * and tunnel types. The packet_type describes the data actually
	 * present in the mbuf. Example: if VLAN stripping is enabled, a
	 * received VLAN packet would have RTE_PTYPE_L2_ETHER and not
	 * RTE_PTYPE_L2_VLAN because the VLAN tag is stripped from the data.
	 */
	RTE_STD_C11
	union {
		uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
		__extension__
		struct {
			uint8_t l2_type:4;   /**< (Outer) L2 type. */
			uint8_t l3_type:4;   /**< (Outer) L3 type. */
			uint8_t l4_type:4;   /**< (Outer) L4 type. */
			uint8_t tun_type:4;  /**< Tunnel type. */
			RTE_STD_C11
			union {
				uint8_t inner_esp_next_proto;
				/**< ESP next protocol type, valid if
				 * RTE_PTYPE_TUNNEL_ESP tunnel type is set
				 * on both Tx and Rx.
				 */
				__extension__
				struct {
					uint8_t inner_l2_type:4;
					/**< Inner L2 type. */
					uint8_t inner_l3_type:4;
					/**< Inner L3 type. */
				};
			};
			uint8_t inner_l4_type:4; /**< Inner L4 type. */
		};
	};

	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
	uint16_t data_len;        /**< Amount of data in segment buffer. */
	/** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */
	uint16_t vlan_tci;

	RTE_STD_C11
	union {
		union {
			uint32_t rss;     /**< RSS hash result if RSS enabled */
			struct {
				union {
					struct {
						uint16_t hash;
						uint16_t id;
					};
					uint32_t lo;
					/**< Second 4 flexible bytes */
				};
				uint32_t hi;
				/**< First 4 flexible bytes or FD ID, dependent
				 * on PKT_RX_FDIR_* flag in ol_flags.
				 */
			} fdir;	/**< Filter identifier if FDIR enabled */
			struct rte_mbuf_sched sched;
			/**< Hierarchical scheduler : 8 bytes */
			struct {
				uint32_t reserved1;
				uint16_t reserved2;
				uint16_t txq;
				/**< The event eth Tx adapter uses this field
				 * to store Tx queue id.
				 * @see rte_event_eth_tx_adapter_txq_set()
				 */
			} txadapter; /**< Eventdev ethdev Tx adapter */
			/**< User defined tags. See rte_distributor_process() */
			uint32_t usr;
		} hash;                   /**< hash information */
	};

	/** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */
	uint16_t vlan_tci_outer;

	uint16_t buf_len;         /**< Length of segment buffer. */

	struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */

	/* second cache line - fields only used in slow path or on TX */
	RTE_MARKER cacheline1 __rte_cache_min_aligned;

	struct rte_mbuf *next;    /**< Next segment of scattered packet. */

	/* fields to support TX offloads */
	RTE_STD_C11
	union {
		uint64_t tx_offload;       /**< combined for easy fetch */
		__extension__
		struct {
			uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
			/**< L2 (MAC) Header Length for non-tunneling pkt.
			 * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
			 */
			uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
			/**< L3 (IP) Header Length. */
			uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
			/**< L4 (TCP/UDP) Header Length. */
			uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
			/**< TCP TSO segment size */

			/*
			 * Fields for Tx offloading of tunnels.
			 * These are undefined for packets which don't request
			 * any tunnel offloads (outer IP or UDP checksum,
			 * tunnel TSO).
			 *
			 * PMDs should not use these fields unconditionally
			 * when calculating offsets.
			 *
			 * Applications are expected to set appropriate tunnel
			 * offload flags when they fill in these fields.
			 */
			uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS;
			/**< Outer L3 (IP) Hdr Length. */
			uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS;
			/**< Outer L2 (MAC) Hdr Length. */

			/* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
		};
	};

	/** Shared data for external buffer attached to mbuf. See
	 * rte_pktmbuf_attach_extbuf().
	 */
	struct rte_mbuf_ext_shared_info *shinfo;

	/** Size of the application private data. In case of an indirect
	 * mbuf, it stores the direct mbuf private data size.
	 */
	uint16_t priv_size;

	/** Timesync flags for use with IEEE1588. */
	uint16_t timesync;

	uint32_t dynfield1[9]; /**< Reserved for dynamic fields. */
} __rte_cache_aligned;
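
/*
 * Example (illustrative sketch, not part of this header's API): typical
 * read-only inspection of a received mbuf's metadata, assuming 'm' was
 * returned by rte_eth_rx_burst():
 *
 *	uint32_t plen = m->pkt_len;            // total length over all segments
 *	uint16_t seglen = m->data_len;         // data length in this segment
 *	void *l2_hdr = rte_pktmbuf_mtod(m, void *); // data access macro,
 *	                                            // defined later in this file
 *	if (m->ol_flags & PKT_RX_RSS_HASH)
 *		dispatch_by_hash(m->hash.rss); // hypothetical application helper
 *	if (m->ol_flags & PKT_RX_VLAN)
 *		log_vlan(m->vlan_tci);         // hypothetical application helper
 */
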
/**
 * Function typedef of callback to free externally attached buffer.
 */
typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);

/**
 * Shared data at the end of an external buffer.
 */
struct rte_mbuf_ext_shared_info {
	rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */
	void *fcb_opaque;                        /**< Free callback argument */
	uint16_t refcnt;
};

/** Maximum number of nb_segs allowed. */
#define RTE_MBUF_MAX_NB_SEGS	UINT16_MAX

/**
 * Returns TRUE if the given mbuf is cloned by mbuf indirection, or FALSE
 * otherwise.
 *
 * If an mbuf has its data in another mbuf and references it by mbuf
 * indirection, this mbuf can be defined as a cloned mbuf.
 */
#define RTE_MBUF_CLONED(mb)     ((mb)->ol_flags & IND_ATTACHED_MBUF)

/**
 * Returns TRUE if the given mbuf has an external buffer, or FALSE otherwise.
 *
 * An external buffer is a user-provided anonymous buffer.
 */
#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)

/**
 * Returns TRUE if the given mbuf is direct, or FALSE otherwise.
 *
 * If an mbuf embeds its own data after the rte_mbuf structure, this mbuf
 * can be defined as a direct mbuf.
 */
#define RTE_MBUF_DIRECT(mb) \
	(!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))

/** Uninitialized or unspecified port. */
#define RTE_MBUF_PORT_INVALID UINT16_MAX
/** For backwards compatibility. */
#define MBUF_INVALID_PORT RTE_MBUF_PORT_INVALID

/**
 * A macro that points to an offset into the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this
 * macro, the user must ensure that the first segment is large
 * enough to accommodate its data.
 *
 * @param m
 *   The packet mbuf.
 * @param o
 *   The offset into the mbuf data.
 * @param t
 *   The type to cast the result into.
 */
#define rte_pktmbuf_mtod_offset(m, t, o)	\
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/**
 * A macro that points to the start of the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this
 * macro, the user must ensure that the first segment is large
 * enough to accommodate its data.
 *
 * @param m
 *   The packet mbuf.
 * @param t
 *   The type to cast the result into.
 */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

/**
 * A macro that returns the IO address that points to an offset of the
 * start of the data in the mbuf.
 *
 * @param m
 *   The packet mbuf.
 * @param o
 *   The offset into the data to calculate the address from.
 */
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/**
 * A macro that returns the IO address that points to the start of the
 * data in the mbuf.
 *
 * @param m
 *   The packet mbuf.
 */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_CORE_H_ */