/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright(c) 2017-2018 Linaro Limited.
 */


#ifndef _L3FWD_COMMON_H_
#define _L3FWD_COMMON_H_

#include "pkt_group.h"

#ifdef DO_RFC_1812_CHECKS

#define IPV4_MIN_VER_IHL	0x45
#define IPV4_MAX_VER_IHL	0x4f
#define IPV4_MAX_VER_IHL_DIFF	(IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)

/* Minimum value of IPV4 total length (20B) in network byte order. */
#define IPV4_MIN_LEN_BE	(sizeof(struct rte_ipv4_hdr) << 8)

/*
 * send_packet_multi() specific number of dest ports:
 * due to the implementation we need to allocate the array bigger than
 * the actual max number of elements it will hold.
 */
#define SENDM_PORT_OVERHEAD(x)	(x)

/*
 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
 * - The IP version number must be 4.
 * - The IP header length field must be large enough to hold the
 *   minimum length legal IP datagram (20 bytes = 5 words).
 * - The IP total length field must be large enough to hold the IP
 *   datagram header, whose length is specified in the IP header length
 *   field.
 * If we encounter an invalid IPV4 packet, then set its destination port
 * to the BAD_PORT value.
 */
static __rte_always_inline void
rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
{
	uint8_t ihl;

	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
		ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;

		ipv4_hdr->time_to_live--;
		ipv4_hdr->hdr_checksum++;

		if (ihl > IPV4_MAX_VER_IHL_DIFF ||
				((uint8_t)ipv4_hdr->total_length == 0 &&
				ipv4_hdr->total_length < IPV4_MIN_LEN_BE))
			dp[0] = BAD_PORT;

	}
}

#else
#define rfc1812_process(mb, dp, ptype)	do { } while (0)
#endif /* DO_RFC_1812_CHECKS */
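
/*
 * Illustrative sketch only (kept under #if 0, never compiled): a minimal
 * example of how rfc1812_process() is typically invoked from a per-packet
 * forwarding path once the destination port has been resolved. The helper
 * name example_rfc1812_one_packet() and the fixed Ethernet/IPv4 header
 * layout are assumptions made for this sketch, not part of this header.
 */
#if 0
static inline uint16_t
example_rfc1812_one_packet(struct rte_mbuf *pkt, uint16_t dst_port)
{
	struct rte_ipv4_hdr *ipv4_hdr;

	/* Assume the IPv4 header immediately follows the Ethernet header. */
	ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
			sizeof(struct rte_ether_hdr));

	/*
	 * Validate the header and update the TTL; on an invalid header the
	 * destination port is overwritten with BAD_PORT so the caller can
	 * drop the packet later.
	 */
	rfc1812_process(ipv4_hdr, &dst_port, pkt->packet_type);

	return dst_port;
}
#endif
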
static __rte_always_inline void
send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[],
		uint32_t num)
{
	uint32_t len, j, n;

	len = qconf->tx_mbufs[port].len;

	/*
	 * If the TX buffer for that queue is empty, and we have enough
	 * packets, then send them straight away.
	 */
	if (num >= MAX_TX_BURST && len == 0) {
		n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
		if (unlikely(n < num)) {
			do {
				rte_pktmbuf_free(m[n]);
			} while (++n < num);
		}
		return;
	}

	/*
	 * Put packets into the TX buffer for that queue.
	 */

	n = len + num;
	n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;

	/* Copy n mbufs into the TX table, FWDSTEP per iteration (Duff's device). */
	j = 0;
	switch (n % FWDSTEP) {
	while (j < n) {
	case 0:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
		/* fallthrough */
	case 3:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
		/* fallthrough */
	case 2:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
		/* fallthrough */
	case 1:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	}
	}

	len += n;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {

		send_burst(qconf, MAX_PKT_BURST, port);

		/* copy the rest of the packets into the TX buffer. */
		len = num - n;
		if (len == 0)
			goto exit;

		j = 0;
		switch (len % FWDSTEP) {
		while (j < len) {
		case 0:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
			/* fallthrough */
		case 3:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
			/* fallthrough */
		case 2:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
			/* fallthrough */
		case 1:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		}
		}
	}

exit:
	qconf->tx_mbufs[port].len = len;
}

#endif /* _L3FWD_COMMON_H_ */
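
/*
 * Illustrative sketch only (kept under #if 0, never compiled): a minimal
 * example of how send_packetsx4() might be called from a forwarding loop
 * for a group of packets that were all resolved to the same destination
 * port. The caller name and the way the group was formed are assumptions
 * made for this sketch, not part of this header.
 */
#if 0
static inline void
example_send_group(struct lcore_conf *qconf, uint16_t dst_port,
		struct rte_mbuf *pkts[], uint32_t nb_pkts)
{
	/*
	 * Packets are buffered per output port; an empty buffer with at
	 * least MAX_TX_BURST packets is transmitted immediately, and a
	 * buffer that fills up to MAX_PKT_BURST is flushed via send_burst().
	 */
	send_packetsx4(qconf, dst_port, pkts, nb_pkts);
}
#endif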