/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#ifndef _RTE_NET_PTYPE_H_
#define _RTE_NET_PTYPE_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

/**
 * Structure containing header lengths associated with a packet, filled
 * by rte_net_get_ptype().
 */
struct rte_net_hdr_lens {
	uint8_t l2_len;
	uint8_t inner_l2_len;
	uint16_t l3_len;
	uint16_t inner_l3_len;
	uint16_t tunnel_len;
	uint8_t l4_len;
	uint8_t inner_l4_len;
};

/**
 * Skip IPv6 header extensions.
 *
 * This function skips all IPv6 extension headers, updating the offset so
 * that it covers the complete IPv6 header including its extensions, and
 * returning the final protocol value.
 *
 * @param proto
 *   Protocol field of the IPv6 header.
 * @param m
 *   The packet mbuf to be parsed.
 * @param off
 *   On input, must contain the offset to the first byte following the
 *   fixed IPv6 header; on output, contains the offset to the first byte
 *   of the next layer (after any IPv6 extension header).
 * @param frag
 *   On output, contains 1 if the packet is an IPv6 fragment.
 * @return
 *   The protocol that follows the IPv6 header and its extensions,
 *   or -1 if an error occurs during mbuf parsing.
 */
int
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
	int *frag);

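/*
 * Minimal usage sketch (illustrative only, not part of the API): walk the
 * extension chain of an untagged Ethernet/IPv6 packet. The assumption that
 * the fixed IPv6 header starts right after the Ethernet header, as well as
 * the local variable names, are hypothetical.
 *
 *	const struct rte_ipv6_hdr *ip6h;
 *	uint32_t off = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv6_hdr);
 *	int frag = 0;
 *	int proto;
 *
 *	ip6h = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
 *			sizeof(struct rte_ether_hdr));
 *	proto = rte_net_skip_ip6_ext(ip6h->proto, m, &off, &frag);
 *	if (proto < 0)
 *		return;
 *
 * On success, 'off' is the offset of the first byte of the L4 header and
 * 'frag' tells whether a fragment extension header was seen.
 */
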
/**
 * Parse an Ethernet packet to get its packet type.
 *
 * This function parses the network headers in the mbuf data and returns
 * its packet type.
 *
 * If the user provides one, the function also fills a rte_net_hdr_lens
 * structure with the lengths of the parsed network headers. Each length
 * field is valid only if the associated packet type is set. For instance,
 * hdr_lens->l2_len is valid only if
 * (retval & RTE_PTYPE_L2_MASK) != RTE_PTYPE_UNKNOWN.
 *
 * Supported packet types are:
 *   L2: Ether, Vlan, QinQ
 *   L3: IPv4, IPv6
 *   L4: TCP, UDP, SCTP
 *   Tunnels: IPv4, IPv6, Gre, Nvgre
 *
 * @param m
 *   The packet mbuf to be parsed.
 * @param hdr_lens
 *   A pointer to a structure where the header lengths will be returned,
 *   or NULL.
 * @param layers
 *   List of layers to parse. The function will stop at the first
 *   empty layer. Examples:
 *   - To parse all known layers, use RTE_PTYPE_ALL_MASK.
 *   - To parse only L2 and L3, use RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK.
 * @return
 *   The packet type of the packet.
 */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
	struct rte_net_hdr_lens *hdr_lens, uint32_t layers);

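/*
 * Minimal usage sketch (illustrative only): classify a received mbuf and
 * locate its TCP header from the returned header lengths. The RX context
 * and the local variable names are hypothetical; the rte_net_* and
 * RTE_PTYPE_* identifiers are the ones used by this API.
 *
 *	struct rte_net_hdr_lens hdr_lens;
 *	uint32_t ptype;
 *
 *	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
 *	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
 *		struct rte_tcp_hdr *tcp;
 *
 *		tcp = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
 *				hdr_lens.l2_len + hdr_lens.l3_len);
 *	}
 *
 * hdr_lens.l4_len is only meaningful in that branch because the L4 packet
 * type is known, as described above.
 */
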
/**
 * Prepare the pseudo-header checksum
 *
 * This function prepares the pseudo-header checksum for TSO and non-TSO
 * TCP/UDP packets in the provided mbuf's packet data, based on the
 * requested offload flags.
 *
 * - for non-TSO TCP/UDP packets, the full pseudo-header checksum is
 *   computed and written into the packet data,
 * - for TSO, the IP payload length is not included in the pseudo-header.
 *
 * This function expects that the headers it uses are located in the first
 * data segment of the mbuf, are not fragmented and can be safely modified.
 *
 * @param m
 *   The packet mbuf to be fixed.
 * @param ol_flags
 *   TX offload flags to use with this packet.
 * @return
 *   0 if the checksum is initialized properly, a negative errno value
 *   (-ENOTSUP) if the required headers do not fit in the first data
 *   segment.
 */
static inline int
rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
{
	/* Initialise ipv4_hdr to avoid false positive compiler warnings. */
	struct rte_ipv4_hdr *ipv4_hdr = NULL;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint64_t inner_l3_offset = m->l2_len;

	/*
	 * Does the packet request any of the available offloads?
	 * This check is mainly needed to skip the fragmented-headers check
	 * below when no offloads are requested.
	 */
	if (!(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG |
			  RTE_MBUF_F_TX_OUTER_IP_CKSUM)))
		return 0;

	if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) {
		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
		/*
		 * Prepare the outer IPv4 header checksum by setting it to 0,
		 * so that it can be computed by the hardware NIC.
		 */
		if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
			ipv4_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv4_hdr *, m->outer_l2_len);
			ipv4_hdr->hdr_checksum = 0;
		}
	}

	/*
	 * Check if the headers are fragmented.
	 * The check could be less strict depending on which offloads are
	 * requested and which headers are used, but let's keep it simple.
	 */
	if (unlikely(rte_pktmbuf_data_len(m) <
		     inner_l3_offset + m->l3_len + m->l4_len))
		return -ENOTSUP;

	if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
				inner_l3_offset);

		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;
	}

	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) {
		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
			udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
					m->l3_len);
			udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
					ol_flags);
		} else {
			ipv6_hdr = rte_pktmbuf_mtod_offset(m,
				struct rte_ipv6_hdr *, inner_l3_offset);
			/* non-TSO udp */
			udp_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_udp_hdr *,
					inner_l3_offset + m->l3_len);
			udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
					ol_flags);
		}
	} else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM ||
			(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
			/* non-TSO tcp or TSO */
			tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
					m->l3_len);
			tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
					ol_flags);
		} else {
			ipv6_hdr = rte_pktmbuf_mtod_offset(m,
				struct rte_ipv6_hdr *, inner_l3_offset);
			/* non-TSO tcp or TSO */
			tcp_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_tcp_hdr *,
					inner_l3_offset + m->l3_len);
			tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
					ol_flags);
		}
	}

	return 0;
}

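/*
 * Minimal usage sketch (illustrative only, typically done in a tx_prepare
 * path): the single-segment TCP/IPv4 packet, the assumption that m->l2_len,
 * m->l3_len and m->l4_len are already filled in, and the local variable
 * names are hypothetical.
 *
 *	uint64_t ol_flags = RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *			    RTE_MBUF_F_TX_TCP_CKSUM;
 *	int ret;
 *
 *	m->ol_flags |= ol_flags;
 *	ret = rte_net_intel_cksum_flags_prepare(m, ol_flags);
 *	if (ret != 0)
 *		return ret;
 *
 * On success, the IPv4 checksum field is zeroed and the TCP checksum field
 * holds the pseudo-header checksum that the NIC completes on transmit.
 */
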
/**
 * Prepare the pseudo-header checksum
 *
 * This function prepares the pseudo-header checksum for TSO and non-TSO
 * TCP/UDP packets in the provided mbuf's packet data, based on the mbuf's
 * own offload flags (m->ol_flags).
 *
 * - for non-TSO TCP/UDP packets, the full pseudo-header checksum is
 *   computed and written into the packet data,
 * - for TSO, the IP payload length is not included in the pseudo-header.
 *
 * This function expects that the headers it uses are located in the first
 * data segment of the mbuf, are not fragmented and can be safely modified.
 *
 * @param m
 *   The packet mbuf to be fixed.
 * @return
 *   0 if the checksum is initialized properly, a negative errno value
 *   otherwise.
 */
static inline int
rte_net_intel_cksum_prepare(struct rte_mbuf *m)
{
	return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
}

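/*
 * Minimal usage sketch (illustrative only): fix up a burst of mbufs whose
 * offload flags and l2_len/l3_len/l4_len fields are already set before
 * handing them to the NIC. The burst loop and the 'pkts'/'nb_pkts' names
 * are hypothetical.
 *
 *	uint16_t i;
 *
 *	for (i = 0; i < nb_pkts; i++) {
 *		if (rte_net_intel_cksum_prepare(pkts[i]) != 0)
 *			break;
 *	}
 *
 * Only the first 'i' packets are then ready to transmit; the remaining ones
 * could not be prepared (e.g. their headers are not contiguous in the first
 * data segment).
 */
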
#ifdef __cplusplus
}
#endif

#endif /* _RTE_NET_PTYPE_H_ */