/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2024, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file ice_common_txrx.h
 * @brief common Tx/Rx utility functions
 *
 * Contains common utility functions for the Tx/Rx hot path.
 *
 * These functions depend on the if_pkt_info_t structure. A suitable
 * implementation of this structure must be provided if these functions are to
 * be used without the iflib networking stack.
 */

#ifndef _ICE_COMMON_TXRX_H_
#define _ICE_COMMON_TXRX_H_

#include <netinet/udp.h>
#include <netinet/sctp.h>
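/*
 * NOTE: when these helpers are used outside of iflib, the replacement
 * if_pkt_info implementation must provide at least the members referenced
 * in this file. The following is an illustrative summary of that subset,
 * not the authoritative definition (which lives in iflib):
 *
 *	pi->ipi_segs       - bus_dma_segment_t array for the mapped packet
 *	pi->ipi_nsegs      - number of entries in ipi_segs
 *	pi->ipi_len        - total packet length
 *	pi->ipi_pidx       - first descriptor index to use for this packet
 *	pi->ipi_csum_flags - requested CSUM_* offload flags
 *	pi->ipi_etype      - Ethertype, used to select the L3 offload type
 *	pi->ipi_ipproto    - L4 protocol (TCP/UDP/SCTP)
 *	pi->ipi_ehdrlen    - Ethernet (L2) header length
 *	pi->ipi_ip_hlen    - IP header length
 *	pi->ipi_tcp_hlen   - TCP header length
 *	pi->ipi_tso_segsz  - TSO segment size (MSS)
 */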

/**
 * ice_tso_detect_sparse - detect TSO packets with too many segments
 * @pi: packet information
 *
 * Hardware only transmits packets with a maximum of 8 descriptors. For TSO
 * packets, hardware needs to be able to build the split packets using 8 or
 * fewer descriptors. Additionally, the header must be contained within at
 * most 3 descriptors.
 *
 * To verify this, we walk the headers to find out how many descriptors the
 * headers require (usually 1). Then we ensure that, for each TSO segment, its
 * data plus the headers are contained within 8 or fewer descriptors.
 */
static inline int
ice_tso_detect_sparse(if_pkt_info_t pi)
{
	int count, curseg, i, hlen, segsz, seglen, tsolen, hdrs, maxsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	int nsegs = pi->ipi_nsegs;

	curseg = hdrs = 0;

	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	tsolen = pi->ipi_len - hlen;

	/* First, count the number of descriptors for the header.
	 * Additionally, make sure it does not span more than 3 segments.
	 */
	i = 0;
	curseg = segs[0].ds_len;
	while (hlen > 0) {
		hdrs++;
		if (hdrs > ICE_MAX_TSO_HDR_SEGS)
			return (1);
		if (curseg == 0) {
			i++;
			if (__predict_false(i == nsegs))
				return (1);

			curseg = segs[i].ds_len;
		}
		seglen = min(curseg, hlen);
		curseg -= seglen;
		hlen -= seglen;
	}

	maxsegs = ICE_MAX_TX_SEGS - hdrs;

	/* We must count the headers in order to verify that they take up
	 * 3 or fewer descriptors. However, we don't need to check the data
	 * if the total segment count is small.
	 */
	if (nsegs <= maxsegs)
		return (0);

	count = 0;

	/* Now check the data to make sure that each TSO segment is made up of
	 * no more than maxsegs descriptors. This ensures that hardware will
	 * be capable of performing TSO offload.
	 */
	while (tsolen > 0) {
		segsz = pi->ipi_tso_segsz;
		while (segsz > 0 && tsolen != 0) {
			count++;
			if (count > maxsegs) {
				return (1);
			}
			if (curseg == 0) {
				i++;
				if (__predict_false(i == nsegs)) {
					return (1);
				}
				curseg = segs[i].ds_len;
			}
			seglen = min(curseg, segsz);
			segsz -= seglen;
			curseg -= seglen;
			tsolen -= seglen;
		}
		count = 0;
	}

	return (0);
}
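
/*
 * Worked example (illustrative, assuming ICE_MAX_TX_SEGS reflects the
 * 8-descriptor hardware limit described above): if the headers fit in a
 * single descriptor, hdrs is 1 and maxsegs is 7, so each MSS worth of TSO
 * payload may span at most 7 DMA segments. A 4096-byte MSS scattered across
 * eight 512-byte segments is therefore reported as sparse (return 1), while
 * the same MSS held in one or two larger segments passes the check.
 */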

/**
 * ice_tso_setup - Setup a context descriptor to prepare for a TSO packet
 * @txq: the Tx queue to use
 * @pi: the packet info to prepare for
 *
 * Setup a context descriptor in preparation for sending a Tx packet that
 * requires the TSO offload. Returns the index of the descriptor to use when
 * encapsulating the Tx packet data into descriptors.
 */
static inline int
ice_tso_setup(struct ice_tx_queue *txq, if_pkt_info_t pi)
{
	struct ice_tx_ctx_desc *txd;
	u32 cmd, mss, type, tsolen;
	int idx;
	u64 type_cmd_tso_mss;

	idx = pi->ipi_pidx;
	txd = (struct ice_tx_ctx_desc *)&txq->tx_base[idx];
	tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);

	type = ICE_TX_DESC_DTYPE_CTX;
	cmd = ICE_TX_CTX_DESC_TSO;
	/* TSO MSS must not be less than 64 */
	if (pi->ipi_tso_segsz < ICE_MIN_TSO_MSS) {
		txq->stats.mss_too_small++;
		pi->ipi_tso_segsz = ICE_MIN_TSO_MSS;
	}
	mss = pi->ipi_tso_segsz;

	type_cmd_tso_mss = ((u64)type << ICE_TXD_CTX_QW1_DTYPE_S) |
	    ((u64)cmd << ICE_TXD_CTX_QW1_CMD_S) |
	    ((u64)tsolen << ICE_TXD_CTX_QW1_TSO_LEN_S) |
	    ((u64)mss << ICE_TXD_CTX_QW1_MSS_S);
	txd->qw1 = htole64(type_cmd_tso_mss);

	txd->tunneling_params = htole32(0);
	txq->stats.tso++;

	/* Wrap to the next descriptor index; the ring size is a power of two. */
	return ((idx + 1) & (txq->desc_count-1));
}
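
/*
 * Illustrative usage (a sketch, not the driver's actual encap routine): the
 * context descriptor consumes one ring entry, and the returned index is where
 * the caller writes the first data descriptor, e.g.:
 *
 *	if (pi->ipi_csum_flags & CSUM_TSO)
 *		i = ice_tso_setup(txq, pi);
 *	// data descriptors are then filled starting at txq->tx_base[i]
 */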

/**
 * ice_tx_setup_offload - Setup register values for performing a Tx offload
 * @txq: The Tx queue, used to track checksum offload stats
 * @pi: the packet info to program for
 * @cmd: the cmd register value to update
 * @off: the off register value to update
 *
 * Based on the packet info provided, update the cmd and off values for
 * enabling Tx offloads. This depends on the packet type and which offloads
 * have been requested.
 *
 * We also track the total number of times that we've requested that hardware
 * offload a particular type of checksum, for debugging purposes.
 */
static inline void
ice_tx_setup_offload(struct ice_tx_queue *txq, if_pkt_info_t pi, u32 *cmd, u32 *off)
{
	u32 remaining_csum_flags = pi->ipi_csum_flags;

	switch (pi->ipi_etype) {
#ifdef INET
	case ETHERTYPE_IP:
		if (pi->ipi_csum_flags & ICE_CSUM_IP) {
			*cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
			txq->stats.cso[ICE_CSO_STAT_TX_IP4]++;
			remaining_csum_flags &= ~CSUM_IP;
		} else
			*cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		*cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		/*
		 * This indicates that the IIPT flag was set to the IPV6 value;
		 * there's no checksum for IPv6 packets.
		 */
		txq->stats.cso[ICE_CSO_STAT_TX_IP6]++;
		break;
#endif
	default:
		txq->stats.cso[ICE_CSO_STAT_TX_L3_ERR]++;
		break;
	}

	*off |= (pi->ipi_ehdrlen >> 1) << ICE_TX_DESC_LEN_MACLEN_S;
	*off |= (pi->ipi_ip_hlen >> 2) << ICE_TX_DESC_LEN_IPLEN_S;

	if (!(remaining_csum_flags & ~ICE_RX_CSUM_FLAGS))
		return;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & ICE_CSUM_TCP) {
			*cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
			*off |= (pi->ipi_tcp_hlen >> 2) <<
			    ICE_TX_DESC_LEN_L4_LEN_S;
			txq->stats.cso[ICE_CSO_STAT_TX_TCP]++;
		}
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & ICE_CSUM_UDP) {
			*cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
			*off |= (sizeof(struct udphdr) >> 2) <<
			    ICE_TX_DESC_LEN_L4_LEN_S;
			txq->stats.cso[ICE_CSO_STAT_TX_UDP]++;
		}
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & ICE_CSUM_SCTP) {
			*cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
			*off |= (sizeof(struct sctphdr) >> 2) <<
			    ICE_TX_DESC_LEN_L4_LEN_S;
			txq->stats.cso[ICE_CSO_STAT_TX_SCTP]++;
		}
		break;
	default:
		txq->stats.cso[ICE_CSO_STAT_TX_L4_ERR]++;
		break;
	}
}
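
/*
 * Illustrative sketch (not the driver's actual encap routine): the cmd and
 * off values built above are eventually folded into each data descriptor's
 * quad word 1. Assuming the standard ice data-descriptor shift macros
 * (ICE_TXD_QW1_CMD_S, ICE_TXD_QW1_OFFSET_S, ICE_TXD_QW1_TX_BUF_SZ_S) and a
 * struct ice_tx_desc pointer "txd" with a per-descriptor buffer length
 * "seglen", the packing looks roughly like:
 *
 *	txd->cmd_type_offset_bsz =
 *	    htole64(ICE_TX_DESC_DTYPE_DATA |
 *		    ((u64)cmd << ICE_TXD_QW1_CMD_S) |
 *		    ((u64)off << ICE_TXD_QW1_OFFSET_S) |
 *		    ((u64)seglen << ICE_TXD_QW1_TX_BUF_SZ_S));
 */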

/**
 * ice_rx_checksum - verify whether the hardware checksum is valid
 * @rxq: the Rx queue structure
 * @flags: checksum flags to update
 * @data: checksum data to update
 * @status0: descriptor status data
 * @ptype: packet type
 *
 * Determine whether the hardware indicated that the Rx checksum is valid. If
 * so, update the checksum flags and data, informing the stack of the status
 * of the checksum so that it does not spend time verifying it manually.
 */
static void
ice_rx_checksum(struct ice_rx_queue *rxq, uint32_t *flags, uint32_t *data,
		u16 status0, u16 ptype)
{
	const u16 l3_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
	    BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S));
	const u16 l4_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
	    BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));
	const u16 xsum_errors = (l3_error | l4_error |
	    BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S));
	struct ice_rx_ptype_decoded decoded;
	bool is_ipv4, is_ipv6;

	/* No L3 or L4 checksum was calculated */
	if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) {
		return;
	}

	decoded = ice_decode_rx_desc_ptype(ptype);
	*flags = 0;

	if (!(decoded.known && decoded.outer_ip))
		return;

	is_ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	    (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	is_ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	    (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	/* No checksum errors were reported */
	if (!(status0 & xsum_errors)) {
		if (is_ipv4)
			*flags |= CSUM_L3_CALC | CSUM_L3_VALID;

		switch (decoded.inner_prot) {
		case ICE_RX_PTYPE_INNER_PROT_TCP:
		case ICE_RX_PTYPE_INNER_PROT_UDP:
		case ICE_RX_PTYPE_INNER_PROT_SCTP:
			*flags |= CSUM_L4_CALC | CSUM_L4_VALID;
			*data |= htons(0xffff);
			break;
		default:
			break;
		}

		return;
	}

	/*
	 * Certain IPv6 extension headers impact the validity of L4 checksums.
	 * If one of these headers exists, hardware will set the IPV6EXADD bit
	 * in the descriptor. If the bit is set, then pretend that hardware
	 * didn't checksum this packet.
	 */
	if (is_ipv6 && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))) {
		rxq->stats.cso[ICE_CSO_STAT_RX_IP6_ERR]++;
		return;
	}

	/*
	 * At this point, status0 must have at least one of the l3_error or
	 * l4_error bits set.
	 */

	if (status0 & l3_error) {
		if (is_ipv4) {
			rxq->stats.cso[ICE_CSO_STAT_RX_IP4_ERR]++;
			*flags |= CSUM_L3_CALC;
		} else {
			/* Hardware indicated L3 error but this isn't IPv4? */
			rxq->stats.cso[ICE_CSO_STAT_RX_L3_ERR]++;
		}
		/* don't bother reporting L4 errors if we got an L3 error */
		return;
	} else if (is_ipv4) {
		*flags |= CSUM_L3_CALC | CSUM_L3_VALID;
	}

	if (status0 & l4_error) {
		switch (decoded.inner_prot) {
		case ICE_RX_PTYPE_INNER_PROT_TCP:
			rxq->stats.cso[ICE_CSO_STAT_RX_TCP_ERR]++;
			*flags |= CSUM_L4_CALC;
			break;
		case ICE_RX_PTYPE_INNER_PROT_UDP:
			rxq->stats.cso[ICE_CSO_STAT_RX_UDP_ERR]++;
			*flags |= CSUM_L4_CALC;
			break;
		case ICE_RX_PTYPE_INNER_PROT_SCTP:
			rxq->stats.cso[ICE_CSO_STAT_RX_SCTP_ERR]++;
			*flags |= CSUM_L4_CALC;
			break;
		default:
			/*
			 * Hardware indicated L4 error, but this isn't one of
			 * the expected protocols.
			 */
			rxq->stats.cso[ICE_CSO_STAT_RX_L4_ERR]++;
		}
	}
}
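
/*
 * Illustrative usage (a sketch; field names assume iflib's if_rxd_info): the
 * flags and data produced here are typically reported back to the stack
 * through the if_rxd_info structure, e.g.:
 *
 *	ice_rx_checksum(rxq, &ri->iri_csum_flags, &ri->iri_csum_data,
 *			status0, ptype);
 */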

/**
 * ice_ptype_to_hash - Convert packet type to a hash value
 * @ptype: the packet type to convert
 *
 * Given the packet type, convert to a suitable hashtype to report to the
 * upper stack via the iri_rsstype value of the if_rxd_info_t structure.
 *
 * If the hash type is unknown, we'll report M_HASHTYPE_OPAQUE.
 */
static inline int
ice_ptype_to_hash(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;

	if (ptype >= ARRAY_SIZE(ice_ptype_lkup))
		return M_HASHTYPE_OPAQUE;

	decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return M_HASHTYPE_OPAQUE;

	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return M_HASHTYPE_OPAQUE;

	/* Note: anything that gets to this point is IP */
	if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6) {
		switch (decoded.inner_prot) {
		case ICE_RX_PTYPE_INNER_PROT_TCP:
			return M_HASHTYPE_RSS_TCP_IPV6;
		case ICE_RX_PTYPE_INNER_PROT_UDP:
			return M_HASHTYPE_RSS_UDP_IPV6;
		default:
			return M_HASHTYPE_RSS_IPV6;
		}
	}
	if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4) {
		switch (decoded.inner_prot) {
		case ICE_RX_PTYPE_INNER_PROT_TCP:
			return M_HASHTYPE_RSS_TCP_IPV4;
		case ICE_RX_PTYPE_INNER_PROT_UDP:
			return M_HASHTYPE_RSS_UDP_IPV4;
		default:
			return M_HASHTYPE_RSS_IPV4;
		}
	}

	/* We should never get here!! */
	return M_HASHTYPE_OPAQUE;
}
#endif