/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#ifndef _RTE_NET_PTYPE_H_
#define _RTE_NET_PTYPE_H_

#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Structure containing the header lengths associated with a packet,
 * filled by rte_net_get_ptype().
 */
struct rte_net_hdr_lens {
	uint8_t l2_len;
	uint8_t inner_l2_len;
	uint16_t l3_len;
	uint16_t inner_l3_len;
	uint16_t tunnel_len;
	uint8_t l4_len;
	uint8_t inner_l4_len;
};

/**
 * Skip IPv6 header extensions.
 *
 * This function skips all IPv6 extension headers, updating the offset to
 * the first byte of the next layer and returning the protocol value that
 * follows the extension headers.
 *
 * @param proto
 *   Protocol field of the IPv6 header.
 * @param m
 *   The packet mbuf to be parsed.
 * @param off
 *   On input, must contain the offset to the first byte following the
 *   IPv6 header. On output, contains the offset to the first byte of the
 *   next layer (after any IPv6 extension header).
 * @param frag
 *   Set to 1 on output if the packet is an IPv6 fragment.
 * @return
 *   The protocol that follows the IPv6 header.
 *   -1 if an error occurs during mbuf parsing.
 */
int
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
	int *frag);

/**
 * Parse an Ethernet packet to get its packet type.
 *
 * This function parses the network headers in the mbuf data and returns
 * its packet type.
 *
 * If provided by the user, it also fills a rte_net_hdr_lens structure
 * that contains the lengths of the parsed network headers. Each length
 * field is valid only if the associated packet type is set. For instance,
 * hdr_lens->l2_len is valid only if
 * (retval & RTE_PTYPE_L2_MASK) != RTE_PTYPE_UNKNOWN.
 *
 * Supported packet types are:
 *   L2: Ether, Vlan, QinQ
 *   L3: IPv4, IPv6
 *   L4: TCP, UDP, SCTP
 *   Tunnels: IPv4, IPv6, Gre, Nvgre
 *
 * @param m
 *   The packet mbuf to be parsed.
 * @param hdr_lens
 *   A pointer to a structure where the header lengths will be returned,
 *   or NULL.
 * @param layers
 *   List of layers to parse. The function will stop at the first
 *   empty layer. Examples:
 *   - To parse all known layers, use RTE_PTYPE_ALL_MASK.
 *   - To parse only L2 and L3, use RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK.
 * @return
 *   The packet type of the packet.
 */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
	struct rte_net_hdr_lens *hdr_lens, uint32_t layers);
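/*
 * Illustrative usage sketch (an addition to this header, not part of the
 * original documentation): parse a received mbuf and use the returned
 * header lengths to locate the TCP header. The DPDK names used below
 * (rte_net_get_ptype, RTE_PTYPE_* masks, rte_pktmbuf_mtod_offset) are
 * real; only the example function itself is hypothetical. Wrapped in
 * "#if 0" so it does not affect compilation of this header.
 */
#if 0
static void
example_locate_tcp(struct rte_mbuf *m)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t ptype;

	/* Parse only L2, L3 and L4; deeper (tunnel) layers are skipped. */
	ptype = rte_net_get_ptype(m, &hdr_lens,
		RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK);

	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		/* l2_len/l3_len are valid because the L2/L3 types are set. */
		struct rte_tcp_hdr *tcp = rte_pktmbuf_mtod_offset(m,
			struct rte_tcp_hdr *,
			hdr_lens.l2_len + hdr_lens.l3_len);
		(void)tcp;
	}
}
#endif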

/**
 * Prepare the pseudo-header checksum
 *
 * This function prepares the pseudo-header checksum for TSO and non-TSO
 * TCP/UDP packets in the provided mbuf's packet data, based on the
 * requested offload flags.
 *
 * - for non-TSO TCP/UDP packets, the full pseudo-header checksum is
 *   computed and set in the packet data,
 * - for TSO, the IP payload length is not included in the pseudo header.
 *
 * This function expects that the headers it uses are in the first data
 * segment of the mbuf, are not split across segments and can be safely
 * modified.
 *
 * @param m
 *   The packet mbuf to be fixed.
 * @param ol_flags
 *   TX offload flags to use with this packet.
 * @return
 *   0 if the checksum is initialized properly, a negative errno value
 *   otherwise (e.g. -ENOTSUP if the headers do not fit in the first
 *   data segment).
 */
static inline int
rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
{
	const uint64_t inner_requests = RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK |
		RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG;
	const uint64_t outer_requests = RTE_MBUF_F_TX_OUTER_IP_CKSUM |
		RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
	/* Initialise ipv4_hdr to avoid false-positive compiler warnings. */
	struct rte_ipv4_hdr *ipv4_hdr = NULL;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint64_t inner_l3_offset = m->l2_len;

	/*
	 * Does the packet request any of the available offloads?
	 * This check mainly avoids the fragmented-headers check below
	 * when no offloads are requested.
	 */
	if (!(ol_flags & (inner_requests | outer_requests)))
		return 0;

	if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) {
		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
		/*
		 * Prepare the outer IPv4 header checksum by setting it to 0,
		 * so that it can be computed by the hardware NIC.
		 */
		if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
			ipv4_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv4_hdr *, m->outer_l2_len);
			ipv4_hdr->hdr_checksum = 0;
		}
		if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM || ol_flags & inner_requests) {
			if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
				ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
						m->outer_l2_len);
				udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
						m->outer_l3_len);
				if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
					udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
							m->ol_flags);
				else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
					udp_hdr->dgram_cksum = 0;
			} else {
				ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
						m->outer_l2_len);
				udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
						m->outer_l2_len + m->outer_l3_len);
				if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
					udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
							m->ol_flags);
				else if (ipv6_hdr->proto == IPPROTO_UDP)
					udp_hdr->dgram_cksum = 0;
			}
		}
	}

	/*
	 * Check if the headers are fragmented.
	 * The check could be less strict depending on which offloads are
	 * requested and which headers are used, but let's keep it simple.
	 */
	if (unlikely(rte_pktmbuf_data_len(m) <
		     inner_l3_offset + m->l3_len + m->l4_len))
		return -ENOTSUP;

	if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
				inner_l3_offset);

		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;
	}

	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
			(ol_flags & RTE_MBUF_F_TX_UDP_SEG)) {
		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
			udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
					m->l3_len);
			udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
					ol_flags);
		} else {
			ipv6_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv6_hdr *, inner_l3_offset);
			/* non-TSO UDP */
			udp_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_udp_hdr *,
					inner_l3_offset + m->l3_len);
			udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
					ol_flags);
		}
	} else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM ||
			(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
			/* non-TSO TCP or TSO */
			tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
					m->l3_len);
			tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
					ol_flags);
		} else {
			ipv6_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv6_hdr *, inner_l3_offset);
			/* non-TSO TCP or TSO */
			tcp_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_tcp_hdr *,
					inner_l3_offset + m->l3_len);
			tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
					ol_flags);
		}
	}

	return 0;
}
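
/*
 * Illustrative usage sketch (an addition to this header, not part of the
 * original documentation): prepare a plain IPv4/TCP packet for hardware
 * checksum offload before handing it to the TX path. The mbuf fields and
 * RTE_MBUF_F_TX_* flags used below are standard DPDK names; only the
 * example function itself is hypothetical, and it assumes fixed-size
 * headers with no VLAN tag or TCP options. Wrapped in "#if 0" so it does
 * not affect compilation of this header.
 */
#if 0
static int
example_prepare_ipv4_tcp(struct rte_mbuf *m)
{
	/* Header lengths must be set before calling the prepare helper. */
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
		RTE_MBUF_F_TX_TCP_CKSUM;

	/*
	 * Zeroes the IPv4 header checksum and writes the TCP pseudo-header
	 * checksum so that the NIC can complete both checksums.
	 */
	return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
}
#endif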

/**
 * Prepare the pseudo-header checksum
 *
 * This function prepares the pseudo-header checksum for TSO and non-TSO
 * TCP/UDP packets in the provided mbuf's packet data, based on the
 * offload flags already set in the mbuf (m->ol_flags).
 *
 * - for non-TSO TCP/UDP packets, the full pseudo-header checksum is
 *   computed and set in the packet data,
 * - for TSO, the IP payload length is not included in the pseudo header.
 *
 * This function expects that the headers it uses are in the first data
 * segment of the mbuf, are not split across segments and can be safely
 * modified.
 *
 * @param m
 *   The packet mbuf to be fixed.
 * @return
 *   0 if the checksum is initialized properly, a negative errno value
 *   otherwise.
 */
static inline int
rte_net_intel_cksum_prepare(struct rte_mbuf *m)
{
	return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_NET_PTYPE_H_ */