1fcba820dSWei Hu (Xavier) /* SPDX-License-Identifier: BSD-3-Clause 253e6f86cSMin Hu (Connor) * Copyright(c) 2018-2021 HiSilicon Limited. 3fcba820dSWei Hu (Xavier) */ 4fcba820dSWei Hu (Xavier) 5fcba820dSWei Hu (Xavier) #include <rte_flow_driver.h> 6fcba820dSWei Hu (Xavier) #include <rte_io.h> 7fcba820dSWei Hu (Xavier) #include <rte_malloc.h> 8fcba820dSWei Hu (Xavier) 9fcba820dSWei Hu (Xavier) #include "hns3_ethdev.h" 10fcba820dSWei Hu (Xavier) #include "hns3_logs.h" 113600ffc9SMin Hu (Connor) #include "hns3_flow.h" 12fcba820dSWei Hu (Xavier) 13e3069658SHuisong Li #define NEXT_ITEM_OF_ACTION(act, actions, index) \ 14e3069658SHuisong Li do { \ 15e3069658SHuisong Li (act) = (actions) + (index); \ 16e3069658SHuisong Li while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) { \ 17e3069658SHuisong Li (index)++; \ 18e3069658SHuisong Li (act) = (actions) + (index); \ 19e3069658SHuisong Li } \ 20e3069658SHuisong Li } while (0) 21e3069658SHuisong Li 22e3069658SHuisong Li #define NEXT_ITEM_OF_PATTERN(item, pattern, index) \ 23e3069658SHuisong Li do { \ 24e3069658SHuisong Li (item) = (pattern) + (index); \ 25e3069658SHuisong Li while ((item)->type == RTE_FLOW_ITEM_TYPE_VOID) { \ 26e3069658SHuisong Li (index)++; \ 27e3069658SHuisong Li (item) = (pattern) + (index); \ 28e3069658SHuisong Li } \ 29e3069658SHuisong Li } while (0) 30e3069658SHuisong Li 31e3069658SHuisong Li #define HNS3_HASH_HDR_ETH RTE_BIT64(0) 32e3069658SHuisong Li #define HNS3_HASH_HDR_IPV4 RTE_BIT64(1) 33e3069658SHuisong Li #define HNS3_HASH_HDR_IPV6 RTE_BIT64(2) 34e3069658SHuisong Li #define HNS3_HASH_HDR_TCP RTE_BIT64(3) 35e3069658SHuisong Li #define HNS3_HASH_HDR_UDP RTE_BIT64(4) 36e3069658SHuisong Li #define HNS3_HASH_HDR_SCTP RTE_BIT64(5) 37e3069658SHuisong Li 38e3069658SHuisong Li #define HNS3_HASH_VOID_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH) 39e3069658SHuisong Li 40e3069658SHuisong Li #define HNS3_HASH_ETH_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \ 41e3069658SHuisong Li BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6)) 42e3069658SHuisong Li 43e3069658SHuisong Li #define HNS3_HASH_IP_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \ 44e3069658SHuisong Li BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \ 45e3069658SHuisong Li BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP)) 46e3069658SHuisong Li 47e3069658SHuisong Li static const uint64_t hash_pattern_next_allow_items[] = { 48e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_VOID] = HNS3_HASH_VOID_NEXT_ALLOW, 49e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_ETH] = HNS3_HASH_ETH_NEXT_ALLOW, 50e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_IP_NEXT_ALLOW, 51e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_IP_NEXT_ALLOW, 52e3069658SHuisong Li }; 53e3069658SHuisong Li 54e3069658SHuisong Li static const uint64_t hash_pattern_item_header[] = { 55e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_ETH] = HNS3_HASH_HDR_ETH, 56e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_HDR_IPV4, 57e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_HDR_IPV6, 58e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_TCP] = HNS3_HASH_HDR_TCP, 59e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_UDP] = HNS3_HASH_HDR_UDP, 60e3069658SHuisong Li [RTE_FLOW_ITEM_TYPE_SCTP] = HNS3_HASH_HDR_SCTP, 61e3069658SHuisong Li }; 62e3069658SHuisong Li 63e3069658SHuisong Li #define HNS3_HASH_IPV4 (HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV4) 64e3069658SHuisong Li #define HNS3_HASH_IPV4_TCP (HNS3_HASH_HDR_ETH | \ 65e3069658SHuisong Li HNS3_HASH_HDR_IPV4 | \ 66e3069658SHuisong Li HNS3_HASH_HDR_TCP) 67e3069658SHuisong Li #define HNS3_HASH_IPV4_UDP (HNS3_HASH_HDR_ETH | \ 68e3069658SHuisong Li HNS3_HASH_HDR_IPV4 | \ 69e3069658SHuisong Li HNS3_HASH_HDR_UDP) 70e3069658SHuisong Li #define HNS3_HASH_IPV4_SCTP (HNS3_HASH_HDR_ETH | \ 71e3069658SHuisong Li HNS3_HASH_HDR_IPV4 | \ 72e3069658SHuisong Li HNS3_HASH_HDR_SCTP) 73e3069658SHuisong Li #define HNS3_HASH_IPV6 (HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV6) 74e3069658SHuisong Li #define HNS3_HASH_IPV6_TCP (HNS3_HASH_HDR_ETH | \ 75e3069658SHuisong Li HNS3_HASH_HDR_IPV6 | \ 76e3069658SHuisong Li HNS3_HASH_HDR_TCP) 77e3069658SHuisong Li #define HNS3_HASH_IPV6_UDP (HNS3_HASH_HDR_ETH | \ 78e3069658SHuisong Li HNS3_HASH_HDR_IPV6 | \ 79e3069658SHuisong Li HNS3_HASH_HDR_UDP) 80e3069658SHuisong Li #define HNS3_HASH_IPV6_SCTP (HNS3_HASH_HDR_ETH | \ 81e3069658SHuisong Li HNS3_HASH_HDR_IPV6 | \ 82e3069658SHuisong Li HNS3_HASH_HDR_SCTP) 83e3069658SHuisong Li 84e3069658SHuisong Li static const struct hns3_hash_map_info { 85e3069658SHuisong Li /* flow type specified, zero means action works for all flow types. */ 86e3069658SHuisong Li uint64_t pattern_type; 87e3069658SHuisong Li uint64_t rss_pctype; /* packet type with prefix RTE_ETH_RSS_xxx */ 88e3069658SHuisong Li uint64_t l3l4_types; /* Supported L3/L4 RSS types for this packet type */ 89e3069658SHuisong Li uint64_t hw_pctype; /* packet type in driver */ 90e3069658SHuisong Li uint64_t tuple_mask; /* full tuples of the hw_pctype */ 91e3069658SHuisong Li } hash_map_table[] = { 92e3069658SHuisong Li /* IPV4 */ 93e3069658SHuisong Li { HNS3_HASH_IPV4, 94e3069658SHuisong Li RTE_ETH_RSS_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST, 95e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M }, 96e3069658SHuisong Li { HNS3_HASH_IPV4, 97e3069658SHuisong Li RTE_ETH_RSS_NONFRAG_IPV4_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST, 98e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M }, 99e3069658SHuisong Li { HNS3_HASH_IPV4, 100e3069658SHuisong Li RTE_ETH_RSS_FRAG_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST, 101e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV4_FLAG, HNS3_RSS_TUPLE_IPV4_FLAG_M }, 102e3069658SHuisong Li { HNS3_HASH_IPV4_TCP, 103e3069658SHuisong Li RTE_ETH_RSS_NONFRAG_IPV4_TCP, HNS3_RSS_SUPPORT_L3L4, 104e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV4_TCP, HNS3_RSS_TUPLE_IPV4_TCP_M }, 105e3069658SHuisong Li { HNS3_HASH_IPV4_UDP, 106e3069658SHuisong Li RTE_ETH_RSS_NONFRAG_IPV4_UDP, HNS3_RSS_SUPPORT_L3L4, 107e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV4_UDP, HNS3_RSS_TUPLE_IPV4_UDP_M }, 108e3069658SHuisong Li { HNS3_HASH_IPV4_SCTP, 109e3069658SHuisong Li RTE_ETH_RSS_NONFRAG_IPV4_SCTP, HNS3_RSS_SUPPORT_L3L4, 110e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV4_SCTP, HNS3_RSS_TUPLE_IPV4_SCTP_M }, 111e3069658SHuisong Li /* IPV6 */ 112e3069658SHuisong Li { HNS3_HASH_IPV6, 113e3069658SHuisong Li RTE_ETH_RSS_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST, 114e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M }, 115e3069658SHuisong Li { HNS3_HASH_IPV6, 116e3069658SHuisong Li RTE_ETH_RSS_NONFRAG_IPV6_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST, 117e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M }, 118e3069658SHuisong Li { HNS3_HASH_IPV6, 119e3069658SHuisong Li RTE_ETH_RSS_FRAG_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST, 120e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV6_FLAG, HNS3_RSS_TUPLE_IPV6_FLAG_M }, 121e3069658SHuisong Li { HNS3_HASH_IPV6_TCP, 122e3069658SHuisong Li RTE_ETH_RSS_NONFRAG_IPV6_TCP, HNS3_RSS_SUPPORT_L3L4, 123e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV6_TCP, HNS3_RSS_TUPLE_IPV6_TCP_M }, 124e3069658SHuisong Li { HNS3_HASH_IPV6_UDP, 125e3069658SHuisong Li RTE_ETH_RSS_NONFRAG_IPV6_UDP, HNS3_RSS_SUPPORT_L3L4, 126e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV6_UDP, HNS3_RSS_TUPLE_IPV6_UDP_M }, 127e3069658SHuisong Li { HNS3_HASH_IPV6_SCTP, 128e3069658SHuisong Li RTE_ETH_RSS_NONFRAG_IPV6_SCTP, HNS3_RSS_SUPPORT_L3L4, 129e3069658SHuisong Li HNS3_RSS_PCTYPE_IPV6_SCTP, HNS3_RSS_TUPLE_IPV6_SCTP_M }, 130e3069658SHuisong Li }; 131e3069658SHuisong Li 132fcba820dSWei Hu (Xavier) static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF }; 133fcba820dSWei Hu (Xavier) static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 }; 134fcba820dSWei Hu (Xavier) 135fcba820dSWei Hu (Xavier) /* Special Filter id for non-specific packet flagging. Don't change value */ 136fcba820dSWei Hu (Xavier) #define HNS3_MAX_FILTER_ID 0x0FFF 137fcba820dSWei Hu (Xavier) 138fcba820dSWei Hu (Xavier) #define ETHER_TYPE_MASK 0xFFFF 139fcba820dSWei Hu (Xavier) #define IPPROTO_MASK 0xFF 140fcba820dSWei Hu (Xavier) #define TUNNEL_TYPE_MASK 0xFFFF 141fcba820dSWei Hu (Xavier) 142fcba820dSWei Hu (Xavier) #define HNS3_TUNNEL_TYPE_VXLAN 0x12B5 143fcba820dSWei Hu (Xavier) #define HNS3_TUNNEL_TYPE_VXLAN_GPE 0x12B6 144fcba820dSWei Hu (Xavier) #define HNS3_TUNNEL_TYPE_GENEVE 0x17C1 145fcba820dSWei Hu (Xavier) #define HNS3_TUNNEL_TYPE_NVGRE 0x6558 146fcba820dSWei Hu (Xavier) 147fcba820dSWei Hu (Xavier) static enum rte_flow_item_type first_items[] = { 148fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_ETH, 149fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_IPV4, 150fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_IPV6, 151fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_TCP, 152fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_UDP, 153fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_SCTP, 154fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_ICMP, 155fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_NVGRE, 156fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_VXLAN, 157fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_GENEVE, 15890294fa5SChengwen Feng RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 15990294fa5SChengwen Feng RTE_FLOW_ITEM_TYPE_PTYPE 160fcba820dSWei Hu (Xavier) }; 161fcba820dSWei Hu (Xavier) 162fcba820dSWei Hu (Xavier) static enum rte_flow_item_type L2_next_items[] = { 163fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_VLAN, 164fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_IPV4, 16590294fa5SChengwen Feng RTE_FLOW_ITEM_TYPE_IPV6, 16690294fa5SChengwen Feng RTE_FLOW_ITEM_TYPE_PTYPE 167fcba820dSWei Hu (Xavier) }; 168fcba820dSWei Hu (Xavier) 169fcba820dSWei Hu (Xavier) static enum rte_flow_item_type L3_next_items[] = { 170fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_TCP, 171fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_UDP, 172fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_SCTP, 173fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_NVGRE, 17490294fa5SChengwen Feng RTE_FLOW_ITEM_TYPE_ICMP, 17590294fa5SChengwen Feng RTE_FLOW_ITEM_TYPE_PTYPE 176fcba820dSWei Hu (Xavier) }; 177fcba820dSWei Hu (Xavier) 178fcba820dSWei Hu (Xavier) static enum rte_flow_item_type L4_next_items[] = { 179fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_VXLAN, 180fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_GENEVE, 181e9550856SChengwen Feng RTE_FLOW_ITEM_TYPE_VXLAN_GPE 182fcba820dSWei Hu (Xavier) }; 183fcba820dSWei Hu (Xavier) 184fcba820dSWei Hu (Xavier) static enum rte_flow_item_type tunnel_next_items[] = { 185fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_ETH, 186fcba820dSWei Hu (Xavier) RTE_FLOW_ITEM_TYPE_VLAN 187fcba820dSWei Hu (Xavier) }; 188fcba820dSWei Hu (Xavier) 189fcba820dSWei Hu (Xavier) struct items_step_mngr { 190fcba820dSWei Hu (Xavier) enum rte_flow_item_type *items; 19182c2ca6dSMin Hu (Connor) size_t count; 192fcba820dSWei Hu (Xavier) }; 193fcba820dSWei Hu (Xavier) 194fcba820dSWei Hu (Xavier) static inline void 195fcba820dSWei Hu (Xavier) net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len) 196fcba820dSWei Hu (Xavier) { 197fcba820dSWei Hu (Xavier) size_t i; 198fcba820dSWei Hu (Xavier) 199fcba820dSWei Hu (Xavier) for (i = 0; i < len; i++) 200fcba820dSWei Hu (Xavier) dst[i] = rte_be_to_cpu_32(src[i]); 201fcba820dSWei Hu (Xavier) } 202fcba820dSWei Hu (Xavier) 203f8e7fcbfSChengwen Feng /* 204e3069658SHuisong Li * This function is used to parse filter type. 205f8e7fcbfSChengwen Feng * 1. As we know RSS is used to spread packets among several queues, the flow 206f77b3c3aSLijun Ou * API provide the struct rte_flow_action_rss, user could config its field 207f8e7fcbfSChengwen Feng * sush as: func/level/types/key/queue to control RSS function. 208f77b3c3aSLijun Ou * 2. The flow API also supports queue region configuration for hns3. It was 209f8e7fcbfSChengwen Feng * implemented by FDIR + RSS in hns3 hardware, user can create one FDIR rule 210f8e7fcbfSChengwen Feng * which action is RSS queues region. 211f8e7fcbfSChengwen Feng * 3. When action is RSS, we use the following rule to distinguish: 212e3069658SHuisong Li * Case 1: pattern has ETH and all fields in RSS action except 'queues' are 213e3069658SHuisong Li * zero or default, indicate it is queue region configuration. 214f8e7fcbfSChengwen Feng * Case other: an rss general action. 215f8e7fcbfSChengwen Feng */ 216e3069658SHuisong Li static void 217e3069658SHuisong Li hns3_parse_filter_type(const struct rte_flow_item pattern[], 218e3069658SHuisong Li const struct rte_flow_action actions[], 219e3069658SHuisong Li struct hns3_filter_info *filter_info) 220c37ca66fSWei Hu (Xavier) { 221815c7db5SHuisong Li const struct rte_flow_action_rss *rss_act; 222f8e7fcbfSChengwen Feng const struct rte_flow_action *act = NULL; 223e3069658SHuisong Li bool only_has_queues = false; 224f8e7fcbfSChengwen Feng bool have_eth = false; 225c37ca66fSWei Hu (Xavier) 226f8e7fcbfSChengwen Feng for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 227f8e7fcbfSChengwen Feng if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) { 228f8e7fcbfSChengwen Feng act = actions; 229f8e7fcbfSChengwen Feng break; 230c37ca66fSWei Hu (Xavier) } 231f8e7fcbfSChengwen Feng } 232e3069658SHuisong Li if (act == NULL) { 233e3069658SHuisong Li filter_info->type = RTE_ETH_FILTER_FDIR; 234e3069658SHuisong Li return; 235e3069658SHuisong Li } 236f8e7fcbfSChengwen Feng 237f8e7fcbfSChengwen Feng for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { 238f8e7fcbfSChengwen Feng if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) { 239f8e7fcbfSChengwen Feng have_eth = true; 240f8e7fcbfSChengwen Feng break; 241f8e7fcbfSChengwen Feng } 242f8e7fcbfSChengwen Feng } 243f8e7fcbfSChengwen Feng 244815c7db5SHuisong Li rss_act = act->conf; 245e3069658SHuisong Li only_has_queues = (rss_act->queue_num > 0) && 246e3069658SHuisong Li (rss_act->func == RTE_ETH_HASH_FUNCTION_DEFAULT && 247e3069658SHuisong Li rss_act->types == 0 && rss_act->key_len == 0); 248e3069658SHuisong Li if (have_eth && only_has_queues) { 249f8e7fcbfSChengwen Feng /* 250e3069658SHuisong Li * Pattern has ETH and all fields in RSS action except 'queues' 251e3069658SHuisong Li * are zero or default, which indicates this is queue region 252e3069658SHuisong Li * configuration. 253f8e7fcbfSChengwen Feng */ 254e3069658SHuisong Li filter_info->type = RTE_ETH_FILTER_FDIR; 255e3069658SHuisong Li return; 256f8e7fcbfSChengwen Feng } 257f8e7fcbfSChengwen Feng 258e3069658SHuisong Li filter_info->type = RTE_ETH_FILTER_HASH; 259c37ca66fSWei Hu (Xavier) } 260c37ca66fSWei Hu (Xavier) 261fcba820dSWei Hu (Xavier) static inline struct hns3_flow_counter * 262fcba820dSWei Hu (Xavier) hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id) 263fcba820dSWei Hu (Xavier) { 264fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 265fcba820dSWei Hu (Xavier) struct hns3_pf *pf = &hns->pf; 266fcba820dSWei Hu (Xavier) struct hns3_flow_counter *cnt; 267fcba820dSWei Hu (Xavier) 268fcba820dSWei Hu (Xavier) LIST_FOREACH(cnt, &pf->flow_counters, next) { 269fcba820dSWei Hu (Xavier) if (cnt->id == id) 270fcba820dSWei Hu (Xavier) return cnt; 271fcba820dSWei Hu (Xavier) } 272fcba820dSWei Hu (Xavier) return NULL; 273fcba820dSWei Hu (Xavier) } 274fcba820dSWei Hu (Xavier) 275fcba820dSWei Hu (Xavier) static int 276fdfcb94dSChengwen Feng hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id, 277fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 278fcba820dSWei Hu (Xavier) { 279fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 280fcba820dSWei Hu (Xavier) struct hns3_pf *pf = &hns->pf; 281a06c3b0eSChengwen Feng struct hns3_hw *hw = &hns->hw; 282fcba820dSWei Hu (Xavier) struct hns3_flow_counter *cnt; 283a06c3b0eSChengwen Feng uint64_t value; 284a06c3b0eSChengwen Feng int ret; 285fcba820dSWei Hu (Xavier) 286fcba820dSWei Hu (Xavier) cnt = hns3_counter_lookup(dev, id); 287fcba820dSWei Hu (Xavier) if (cnt) { 288fdfcb94dSChengwen Feng if (!cnt->indirect || cnt->indirect != indirect) 289*585f1f68SDengdui Huang return rte_flow_error_set(error, EINVAL, 290f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ACTION_CONF, 291fcba820dSWei Hu (Xavier) cnt, 292fdfcb94dSChengwen Feng "Counter id is used, indirect flag not match"); 293fdfcb94dSChengwen Feng /* Clear the indirect counter on first use. */ 294fdfcb94dSChengwen Feng if (cnt->indirect && cnt->ref_cnt == 1) 29552600004SHuisong Li (void)hns3_fd_get_count(hw, id, &value); 296fcba820dSWei Hu (Xavier) cnt->ref_cnt++; 297fcba820dSWei Hu (Xavier) return 0; 298fcba820dSWei Hu (Xavier) } 299fcba820dSWei Hu (Xavier) 300a06c3b0eSChengwen Feng /* Clear the counter by read ops because the counter is read-clear */ 30152600004SHuisong Li ret = hns3_fd_get_count(hw, id, &value); 302a06c3b0eSChengwen Feng if (ret) 303a06c3b0eSChengwen Feng return rte_flow_error_set(error, EIO, 304a06c3b0eSChengwen Feng RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 305a06c3b0eSChengwen Feng "Clear counter failed!"); 306a06c3b0eSChengwen Feng 307fcba820dSWei Hu (Xavier) cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0); 308fcba820dSWei Hu (Xavier) if (cnt == NULL) 309fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOMEM, 310f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_HANDLE, cnt, 311fcba820dSWei Hu (Xavier) "Alloc mem for counter failed"); 312fcba820dSWei Hu (Xavier) cnt->id = id; 313fdfcb94dSChengwen Feng cnt->indirect = indirect; 314fcba820dSWei Hu (Xavier) cnt->ref_cnt = 1; 315fcba820dSWei Hu (Xavier) cnt->hits = 0; 316fcba820dSWei Hu (Xavier) LIST_INSERT_HEAD(&pf->flow_counters, cnt, next); 317fcba820dSWei Hu (Xavier) return 0; 318fcba820dSWei Hu (Xavier) } 319fcba820dSWei Hu (Xavier) 320fcba820dSWei Hu (Xavier) static int 321fcba820dSWei Hu (Xavier) hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow, 322fcba820dSWei Hu (Xavier) struct rte_flow_query_count *qc, 323fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 324fcba820dSWei Hu (Xavier) { 325fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 326fcba820dSWei Hu (Xavier) struct hns3_flow_counter *cnt; 327fcba820dSWei Hu (Xavier) uint64_t value; 328fcba820dSWei Hu (Xavier) int ret; 329fcba820dSWei Hu (Xavier) 330fcba820dSWei Hu (Xavier) /* FDIR is available only in PF driver */ 331fcba820dSWei Hu (Xavier) if (hns->is_vf) 332fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 333fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 334fcba820dSWei Hu (Xavier) "Fdir is not supported in VF"); 335fcba820dSWei Hu (Xavier) cnt = hns3_counter_lookup(dev, flow->counter_id); 336fcba820dSWei Hu (Xavier) if (cnt == NULL) 337fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 338f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 339fcba820dSWei Hu (Xavier) "Can't find counter id"); 340fcba820dSWei Hu (Xavier) 34152600004SHuisong Li ret = hns3_fd_get_count(&hns->hw, flow->counter_id, &value); 342fcba820dSWei Hu (Xavier) if (ret) { 343ee160716SLijun Ou rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, 344fcba820dSWei Hu (Xavier) NULL, "Read counter fail."); 345fcba820dSWei Hu (Xavier) return ret; 346fcba820dSWei Hu (Xavier) } 347fcba820dSWei Hu (Xavier) qc->hits_set = 1; 348fcba820dSWei Hu (Xavier) qc->hits = value; 3497ab81664SChengwen Feng qc->bytes_set = 0; 3507ab81664SChengwen Feng qc->bytes = 0; 351fcba820dSWei Hu (Xavier) 352fcba820dSWei Hu (Xavier) return 0; 353fcba820dSWei Hu (Xavier) } 354fcba820dSWei Hu (Xavier) 355fcba820dSWei Hu (Xavier) static int 356fcba820dSWei Hu (Xavier) hns3_counter_release(struct rte_eth_dev *dev, uint32_t id) 357fcba820dSWei Hu (Xavier) { 358fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 359fcba820dSWei Hu (Xavier) struct hns3_hw *hw = &hns->hw; 360fcba820dSWei Hu (Xavier) struct hns3_flow_counter *cnt; 361fcba820dSWei Hu (Xavier) 362fcba820dSWei Hu (Xavier) cnt = hns3_counter_lookup(dev, id); 363fcba820dSWei Hu (Xavier) if (cnt == NULL) { 364fcba820dSWei Hu (Xavier) hns3_err(hw, "Can't find available counter to release"); 365fcba820dSWei Hu (Xavier) return -EINVAL; 366fcba820dSWei Hu (Xavier) } 367fcba820dSWei Hu (Xavier) cnt->ref_cnt--; 368fcba820dSWei Hu (Xavier) if (cnt->ref_cnt == 0) { 369fcba820dSWei Hu (Xavier) LIST_REMOVE(cnt, next); 370fcba820dSWei Hu (Xavier) rte_free(cnt); 371fcba820dSWei Hu (Xavier) } 372fcba820dSWei Hu (Xavier) return 0; 373fcba820dSWei Hu (Xavier) } 374fcba820dSWei Hu (Xavier) 375fcba820dSWei Hu (Xavier) static void 376fcba820dSWei Hu (Xavier) hns3_counter_flush(struct rte_eth_dev *dev) 377fcba820dSWei Hu (Xavier) { 378fdfcb94dSChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 379fdfcb94dSChengwen Feng LIST_HEAD(counters, hns3_flow_counter) indir_counters; 380fcba820dSWei Hu (Xavier) struct hns3_flow_counter *cnt_ptr; 381fcba820dSWei Hu (Xavier) 382fdfcb94dSChengwen Feng LIST_INIT(&indir_counters); 383fcba820dSWei Hu (Xavier) cnt_ptr = LIST_FIRST(&pf->flow_counters); 384fcba820dSWei Hu (Xavier) while (cnt_ptr) { 385fcba820dSWei Hu (Xavier) LIST_REMOVE(cnt_ptr, next); 386fdfcb94dSChengwen Feng if (cnt_ptr->indirect) 387fdfcb94dSChengwen Feng LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next); 388fdfcb94dSChengwen Feng else 389fcba820dSWei Hu (Xavier) rte_free(cnt_ptr); 390fcba820dSWei Hu (Xavier) cnt_ptr = LIST_FIRST(&pf->flow_counters); 391fcba820dSWei Hu (Xavier) } 392fdfcb94dSChengwen Feng 393fdfcb94dSChengwen Feng /* Reset the indirect action and add to pf->flow_counters list. */ 394fdfcb94dSChengwen Feng cnt_ptr = LIST_FIRST(&indir_counters); 395fdfcb94dSChengwen Feng while (cnt_ptr) { 396fdfcb94dSChengwen Feng LIST_REMOVE(cnt_ptr, next); 397fdfcb94dSChengwen Feng cnt_ptr->ref_cnt = 1; 398fdfcb94dSChengwen Feng cnt_ptr->hits = 0; 399fdfcb94dSChengwen Feng LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next); 400fdfcb94dSChengwen Feng cnt_ptr = LIST_FIRST(&indir_counters); 401fdfcb94dSChengwen Feng } 402fcba820dSWei Hu (Xavier) } 403fcba820dSWei Hu (Xavier) 404fcba820dSWei Hu (Xavier) static int 405fcba820dSWei Hu (Xavier) hns3_handle_action_queue(struct rte_eth_dev *dev, 406fcba820dSWei Hu (Xavier) const struct rte_flow_action *action, 407fcba820dSWei Hu (Xavier) struct hns3_fdir_rule *rule, 408fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 409fcba820dSWei Hu (Xavier) { 410fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 411fcba820dSWei Hu (Xavier) const struct rte_flow_action_queue *queue; 412a951c1edSWei Hu (Xavier) struct hns3_hw *hw = &hns->hw; 413fcba820dSWei Hu (Xavier) 414fcba820dSWei Hu (Xavier) queue = (const struct rte_flow_action_queue *)action->conf; 415aa3497d4SChengchang Tang if (queue->index >= hw->data->nb_rx_queues) { 4165bddaf38SHuisong Li hns3_err(hw, "queue ID(%u) is greater than number of available queue (%u) in driver.", 417aa3497d4SChengchang Tang queue->index, hw->data->nb_rx_queues); 418fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 419f8e7fcbfSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, 420f8e7fcbfSChengwen Feng action, "Invalid queue ID in PF"); 421a951c1edSWei Hu (Xavier) } 422a951c1edSWei Hu (Xavier) 423fcba820dSWei Hu (Xavier) rule->queue_id = queue->index; 424f8e7fcbfSChengwen Feng rule->nb_queues = 1; 425f8e7fcbfSChengwen Feng rule->action = HNS3_FD_ACTION_ACCEPT_PACKET; 426f8e7fcbfSChengwen Feng return 0; 427f8e7fcbfSChengwen Feng } 428f8e7fcbfSChengwen Feng 429f8e7fcbfSChengwen Feng static int 430f8e7fcbfSChengwen Feng hns3_handle_action_queue_region(struct rte_eth_dev *dev, 431f8e7fcbfSChengwen Feng const struct rte_flow_action *action, 432f8e7fcbfSChengwen Feng struct hns3_fdir_rule *rule, 433f8e7fcbfSChengwen Feng struct rte_flow_error *error) 434f8e7fcbfSChengwen Feng { 435f8e7fcbfSChengwen Feng struct hns3_adapter *hns = dev->data->dev_private; 436f8e7fcbfSChengwen Feng const struct rte_flow_action_rss *conf = action->conf; 437f8e7fcbfSChengwen Feng struct hns3_hw *hw = &hns->hw; 438f8e7fcbfSChengwen Feng uint16_t idx; 439f8e7fcbfSChengwen Feng 440efcaa81eSChengchang Tang if (!hns3_dev_get_support(hw, FD_QUEUE_REGION)) 441f8e7fcbfSChengwen Feng return rte_flow_error_set(error, ENOTSUP, 442f8e7fcbfSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION, action, 443f8e7fcbfSChengwen Feng "Not support config queue region!"); 444f8e7fcbfSChengwen Feng 445f8e7fcbfSChengwen Feng if ((!rte_is_power_of_2(conf->queue_num)) || 446f8e7fcbfSChengwen Feng conf->queue_num > hw->rss_size_max || 447aa3497d4SChengchang Tang conf->queue[0] >= hw->data->nb_rx_queues || 448aa3497d4SChengchang Tang conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) { 449f8e7fcbfSChengwen Feng return rte_flow_error_set(error, EINVAL, 450f8e7fcbfSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, action, 451f8e7fcbfSChengwen Feng "Invalid start queue ID and queue num! the start queue " 452f8e7fcbfSChengwen Feng "ID must valid, the queue num must be power of 2 and " 453f8e7fcbfSChengwen Feng "<= rss_size_max."); 454f8e7fcbfSChengwen Feng } 455f8e7fcbfSChengwen Feng 456f8e7fcbfSChengwen Feng for (idx = 1; idx < conf->queue_num; idx++) { 457f8e7fcbfSChengwen Feng if (conf->queue[idx] != conf->queue[idx - 1] + 1) 458f8e7fcbfSChengwen Feng return rte_flow_error_set(error, EINVAL, 459f8e7fcbfSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, action, 460f8e7fcbfSChengwen Feng "Invalid queue ID sequence! the queue ID " 461f8e7fcbfSChengwen Feng "must be continuous increment."); 462f8e7fcbfSChengwen Feng } 463f8e7fcbfSChengwen Feng 464f8e7fcbfSChengwen Feng rule->queue_id = conf->queue[0]; 465f8e7fcbfSChengwen Feng rule->nb_queues = conf->queue_num; 466fcba820dSWei Hu (Xavier) rule->action = HNS3_FD_ACTION_ACCEPT_PACKET; 467fcba820dSWei Hu (Xavier) return 0; 468fcba820dSWei Hu (Xavier) } 469fcba820dSWei Hu (Xavier) 470fdfcb94dSChengwen Feng static int 471fdfcb94dSChengwen Feng hns3_handle_action_indirect(struct rte_eth_dev *dev, 472fdfcb94dSChengwen Feng const struct rte_flow_action *action, 473fdfcb94dSChengwen Feng struct hns3_fdir_rule *rule, 474fdfcb94dSChengwen Feng struct rte_flow_error *error) 475fdfcb94dSChengwen Feng { 476fdfcb94dSChengwen Feng const struct rte_flow_action_handle *indir = action->conf; 477fdfcb94dSChengwen Feng 478fdfcb94dSChengwen Feng if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) 479fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 480fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, 481fdfcb94dSChengwen Feng action, "Invalid indirect type"); 482fdfcb94dSChengwen Feng 483fdfcb94dSChengwen Feng if (hns3_counter_lookup(dev, indir->counter_id) == NULL) 484fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 485fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, 486fdfcb94dSChengwen Feng action, "Counter id not exist"); 487fdfcb94dSChengwen Feng 488fdfcb94dSChengwen Feng rule->act_cnt.id = indir->counter_id; 489fdfcb94dSChengwen Feng rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR); 490fdfcb94dSChengwen Feng 491fdfcb94dSChengwen Feng return 0; 492fdfcb94dSChengwen Feng } 493fdfcb94dSChengwen Feng 494fcba820dSWei Hu (Xavier) /* 495fcba820dSWei Hu (Xavier) * Parse actions structure from the provided pattern. 496fcba820dSWei Hu (Xavier) * The pattern is validated as the items are copied. 497fcba820dSWei Hu (Xavier) * 498fcba820dSWei Hu (Xavier) * @param actions[in] 499fcba820dSWei Hu (Xavier) * @param rule[out] 5007be78d02SJosh Soref * NIC specific actions derived from the actions. 501fcba820dSWei Hu (Xavier) * @param error[out] 502fcba820dSWei Hu (Xavier) */ 503fcba820dSWei Hu (Xavier) static int 504fcba820dSWei Hu (Xavier) hns3_handle_actions(struct rte_eth_dev *dev, 505fcba820dSWei Hu (Xavier) const struct rte_flow_action actions[], 506fcba820dSWei Hu (Xavier) struct hns3_fdir_rule *rule, struct rte_flow_error *error) 507fcba820dSWei Hu (Xavier) { 508fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 509fcba820dSWei Hu (Xavier) const struct rte_flow_action_count *act_count; 510fcba820dSWei Hu (Xavier) const struct rte_flow_action_mark *mark; 511fcba820dSWei Hu (Xavier) struct hns3_pf *pf = &hns->pf; 512fcba820dSWei Hu (Xavier) uint32_t counter_num; 513fcba820dSWei Hu (Xavier) int ret; 514fcba820dSWei Hu (Xavier) 515fcba820dSWei Hu (Xavier) for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 516fcba820dSWei Hu (Xavier) switch (actions->type) { 517fcba820dSWei Hu (Xavier) case RTE_FLOW_ACTION_TYPE_QUEUE: 518fcba820dSWei Hu (Xavier) ret = hns3_handle_action_queue(dev, actions, rule, 519fcba820dSWei Hu (Xavier) error); 520fcba820dSWei Hu (Xavier) if (ret) 521fcba820dSWei Hu (Xavier) return ret; 522fcba820dSWei Hu (Xavier) break; 523fcba820dSWei Hu (Xavier) case RTE_FLOW_ACTION_TYPE_DROP: 524fcba820dSWei Hu (Xavier) rule->action = HNS3_FD_ACTION_DROP_PACKET; 525fcba820dSWei Hu (Xavier) break; 526f8e7fcbfSChengwen Feng /* 527f8e7fcbfSChengwen Feng * Here RSS's real action is queue region. 528f8e7fcbfSChengwen Feng * Queue region is implemented by FDIR + RSS in hns3 hardware, 529f8e7fcbfSChengwen Feng * the FDIR's action is one queue region (start_queue_id and 530f8e7fcbfSChengwen Feng * queue_num), then RSS spread packets to the queue region by 5317be78d02SJosh Soref * RSS algorithm. 532f8e7fcbfSChengwen Feng */ 533f8e7fcbfSChengwen Feng case RTE_FLOW_ACTION_TYPE_RSS: 534f8e7fcbfSChengwen Feng ret = hns3_handle_action_queue_region(dev, actions, 535f8e7fcbfSChengwen Feng rule, error); 536f8e7fcbfSChengwen Feng if (ret) 537f8e7fcbfSChengwen Feng return ret; 538f8e7fcbfSChengwen Feng break; 539fcba820dSWei Hu (Xavier) case RTE_FLOW_ACTION_TYPE_MARK: 540fcba820dSWei Hu (Xavier) mark = 541fcba820dSWei Hu (Xavier) (const struct rte_flow_action_mark *)actions->conf; 542fcba820dSWei Hu (Xavier) if (mark->id >= HNS3_MAX_FILTER_ID) 543fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 544f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ACTION_CONF, 545fcba820dSWei Hu (Xavier) actions, 546fcba820dSWei Hu (Xavier) "Invalid Mark ID"); 547fcba820dSWei Hu (Xavier) rule->fd_id = mark->id; 548fcba820dSWei Hu (Xavier) rule->flags |= HNS3_RULE_FLAG_FDID; 549fcba820dSWei Hu (Xavier) break; 550fcba820dSWei Hu (Xavier) case RTE_FLOW_ACTION_TYPE_FLAG: 551fcba820dSWei Hu (Xavier) rule->fd_id = HNS3_MAX_FILTER_ID; 552fcba820dSWei Hu (Xavier) rule->flags |= HNS3_RULE_FLAG_FDID; 553fcba820dSWei Hu (Xavier) break; 554fcba820dSWei Hu (Xavier) case RTE_FLOW_ACTION_TYPE_COUNT: 555fcba820dSWei Hu (Xavier) act_count = 556fcba820dSWei Hu (Xavier) (const struct rte_flow_action_count *)actions->conf; 557fcba820dSWei Hu (Xavier) counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]; 558fcba820dSWei Hu (Xavier) if (act_count->id >= counter_num) 559fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 560f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ACTION_CONF, 561fcba820dSWei Hu (Xavier) actions, 562fcba820dSWei Hu (Xavier) "Invalid counter id"); 563fcba820dSWei Hu (Xavier) rule->act_cnt = *act_count; 564fcba820dSWei Hu (Xavier) rule->flags |= HNS3_RULE_FLAG_COUNTER; 565fdfcb94dSChengwen Feng rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR; 566fdfcb94dSChengwen Feng break; 567fdfcb94dSChengwen Feng case RTE_FLOW_ACTION_TYPE_INDIRECT: 568fdfcb94dSChengwen Feng ret = hns3_handle_action_indirect(dev, actions, rule, 569fdfcb94dSChengwen Feng error); 570fdfcb94dSChengwen Feng if (ret) 571fdfcb94dSChengwen Feng return ret; 572fcba820dSWei Hu (Xavier) break; 573fcba820dSWei Hu (Xavier) case RTE_FLOW_ACTION_TYPE_VOID: 574fcba820dSWei Hu (Xavier) break; 575fcba820dSWei Hu (Xavier) default: 576fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 577fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ACTION, 578fcba820dSWei Hu (Xavier) NULL, "Unsupported action"); 579fcba820dSWei Hu (Xavier) } 580fcba820dSWei Hu (Xavier) } 581fcba820dSWei Hu (Xavier) 582fcba820dSWei Hu (Xavier) return 0; 583fcba820dSWei Hu (Xavier) } 584fcba820dSWei Hu (Xavier) 585fcba820dSWei Hu (Xavier) static int 586fcba820dSWei Hu (Xavier) hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error) 587fcba820dSWei Hu (Xavier) { 588fcba820dSWei Hu (Xavier) if (!attr->ingress) 589fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 590fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 591fcba820dSWei Hu (Xavier) attr, "Ingress can't be zero"); 592fcba820dSWei Hu (Xavier) if (attr->egress) 593fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 594fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 595fcba820dSWei Hu (Xavier) attr, "Not support egress"); 596fcba820dSWei Hu (Xavier) if (attr->transfer) 597fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 598fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 599fcba820dSWei Hu (Xavier) attr, "No support for transfer"); 600fcba820dSWei Hu (Xavier) if (attr->group) 601fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 602fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 603fcba820dSWei Hu (Xavier) attr, "Not support group"); 604fcba820dSWei Hu (Xavier) return 0; 605fcba820dSWei Hu (Xavier) } 606fcba820dSWei Hu (Xavier) 607fcba820dSWei Hu (Xavier) static int 608a4732847SChengwen Feng hns3_check_tuple(const struct rte_eth_dev *dev, const struct hns3_fdir_rule *rule, 609a4732847SChengwen Feng struct rte_flow_error *error) 610a4732847SChengwen Feng { 611a4732847SChengwen Feng const char * const err_msg[] = { 612a4732847SChengwen Feng "Not support outer dst mac", 613a4732847SChengwen Feng "Not support outer src mac", 614a4732847SChengwen Feng "Not support outer vlan1 tag", 615a4732847SChengwen Feng "Not support outer vlan2 tag", 616a4732847SChengwen Feng "Not support outer eth type", 617a4732847SChengwen Feng "Not support outer l2 rsv", 618a4732847SChengwen Feng "Not support outer ip tos", 619a4732847SChengwen Feng "Not support outer ip proto", 620a4732847SChengwen Feng "Not support outer src ip", 621a4732847SChengwen Feng "Not support outer dst ip", 622a4732847SChengwen Feng "Not support outer l3 rsv", 623a4732847SChengwen Feng "Not support outer src port", 624a4732847SChengwen Feng "Not support outer dst port", 625a4732847SChengwen Feng "Not support outer l4 rsv", 626a4732847SChengwen Feng "Not support outer tun vni", 627a4732847SChengwen Feng "Not support outer tun flow id", 628a4732847SChengwen Feng "Not support inner dst mac", 629a4732847SChengwen Feng "Not support inner src mac", 630a4732847SChengwen Feng "Not support inner vlan tag1", 631a4732847SChengwen Feng "Not support inner vlan tag2", 632a4732847SChengwen Feng "Not support inner eth type", 633a4732847SChengwen Feng "Not support inner l2 rsv", 634a4732847SChengwen Feng "Not support inner ip tos", 635a4732847SChengwen Feng "Not support inner ip proto", 636a4732847SChengwen Feng "Not support inner src ip", 637a4732847SChengwen Feng "Not support inner dst ip", 638a4732847SChengwen Feng "Not support inner l3 rsv", 639a4732847SChengwen Feng "Not support inner src port", 640a4732847SChengwen Feng "Not support inner dst port", 641a4732847SChengwen Feng "Not support inner sctp tag", 642a4732847SChengwen Feng }; 643a4732847SChengwen Feng struct hns3_adapter *hns = dev->data->dev_private; 644a4732847SChengwen Feng uint32_t tuple_active = hns->pf.fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1].tuple_active; 645a4732847SChengwen Feng uint32_t i; 646a4732847SChengwen Feng 647a4732847SChengwen Feng for (i = 0; i < MAX_TUPLE; i++) { 648a4732847SChengwen Feng if ((rule->input_set & BIT(i)) == 0) 649a4732847SChengwen Feng continue; 650a4732847SChengwen Feng if (tuple_active & BIT(i)) 651a4732847SChengwen Feng continue; 652a4732847SChengwen Feng return rte_flow_error_set(error, ENOTSUP, 653a4732847SChengwen Feng RTE_FLOW_ERROR_TYPE_ITEM, 654a4732847SChengwen Feng NULL, err_msg[i]); 655a4732847SChengwen Feng } 656a4732847SChengwen Feng 657a4732847SChengwen Feng return 0; 658a4732847SChengwen Feng } 659a4732847SChengwen Feng 660a4732847SChengwen Feng static int 6616f22672fSLijun Ou hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 6626f22672fSLijun Ou struct rte_flow_error *error __rte_unused) 663fcba820dSWei Hu (Xavier) { 664fcba820dSWei Hu (Xavier) const struct rte_flow_item_eth *eth_spec; 665fcba820dSWei Hu (Xavier) const struct rte_flow_item_eth *eth_mask; 666fcba820dSWei Hu (Xavier) 667fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 668fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 669fcba820dSWei Hu (Xavier) return 0; 670fcba820dSWei Hu (Xavier) 671fcba820dSWei Hu (Xavier) eth_mask = item->mask; 672051d4bc9SJie Hai if (eth_mask) { 6738275d5fcSThomas Monjalon if (eth_mask->hdr.ether_type) { 674fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1); 675fcba820dSWei Hu (Xavier) rule->key_conf.mask.ether_type = 6768275d5fcSThomas Monjalon rte_be_to_cpu_16(eth_mask->hdr.ether_type); 677fcba820dSWei Hu (Xavier) } 6788275d5fcSThomas Monjalon if (!rte_is_zero_ether_addr(ð_mask->hdr.src_addr)) { 679fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1); 680fcba820dSWei Hu (Xavier) memcpy(rule->key_conf.mask.src_mac, 6818275d5fcSThomas Monjalon eth_mask->hdr.src_addr.addr_bytes, RTE_ETHER_ADDR_LEN); 682fcba820dSWei Hu (Xavier) } 6838275d5fcSThomas Monjalon if (!rte_is_zero_ether_addr(ð_mask->hdr.dst_addr)) { 684fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_DST_MAC, 1); 685fcba820dSWei Hu (Xavier) memcpy(rule->key_conf.mask.dst_mac, 6868275d5fcSThomas Monjalon eth_mask->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN); 687fcba820dSWei Hu (Xavier) } 688051d4bc9SJie Hai if (eth_mask->has_vlan) 689051d4bc9SJie Hai rule->has_vlan_m = true; 690fcba820dSWei Hu (Xavier) } 691fcba820dSWei Hu (Xavier) 692fcba820dSWei Hu (Xavier) eth_spec = item->spec; 693051d4bc9SJie Hai if (eth_mask && eth_mask->has_vlan && eth_spec->has_vlan) { 694051d4bc9SJie Hai rule->key_conf.vlan_num++; 695051d4bc9SJie Hai rule->has_vlan_v = true; 696051d4bc9SJie Hai } 697051d4bc9SJie Hai 6988275d5fcSThomas Monjalon rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type); 6998275d5fcSThomas Monjalon memcpy(rule->key_conf.spec.src_mac, eth_spec->hdr.src_addr.addr_bytes, 700fcba820dSWei Hu (Xavier) RTE_ETHER_ADDR_LEN); 7018275d5fcSThomas Monjalon memcpy(rule->key_conf.spec.dst_mac, eth_spec->hdr.dst_addr.addr_bytes, 702fcba820dSWei Hu (Xavier) RTE_ETHER_ADDR_LEN); 703fcba820dSWei Hu (Xavier) return 0; 704fcba820dSWei Hu (Xavier) } 705fcba820dSWei Hu (Xavier) 706fcba820dSWei Hu (Xavier) static int 707fcba820dSWei Hu (Xavier) hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 708fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 709fcba820dSWei Hu (Xavier) { 710fcba820dSWei Hu (Xavier) const struct rte_flow_item_vlan *vlan_spec; 711fcba820dSWei Hu (Xavier) const struct rte_flow_item_vlan *vlan_mask; 712fcba820dSWei Hu (Xavier) 713051d4bc9SJie Hai if (rule->has_vlan_m && !rule->has_vlan_v) 714051d4bc9SJie Hai return rte_flow_error_set(error, EINVAL, 715051d4bc9SJie Hai RTE_FLOW_ERROR_TYPE_ITEM, item, 716051d4bc9SJie Hai "VLAN item is conflict with 'has_vlan is 0' in ETH item"); 717051d4bc9SJie Hai 718051d4bc9SJie Hai if (rule->has_more_vlan_m && !rule->has_more_vlan_v) 719051d4bc9SJie Hai return rte_flow_error_set(error, EINVAL, 720051d4bc9SJie Hai RTE_FLOW_ERROR_TYPE_ITEM, item, 721051d4bc9SJie Hai "VLAN item is conflict with 'has_more_vlan is 0' in the previous VLAN item"); 722051d4bc9SJie Hai 723051d4bc9SJie Hai if (rule->has_vlan_m && rule->has_vlan_v) { 724051d4bc9SJie Hai rule->has_vlan_m = false; 725051d4bc9SJie Hai rule->key_conf.vlan_num--; 726051d4bc9SJie Hai } 727051d4bc9SJie Hai 728051d4bc9SJie Hai if (rule->has_more_vlan_m && rule->has_more_vlan_v) { 729051d4bc9SJie Hai rule->has_more_vlan_m = false; 730051d4bc9SJie Hai rule->key_conf.vlan_num--; 731051d4bc9SJie Hai } 732051d4bc9SJie Hai 733fcba820dSWei Hu (Xavier) rule->key_conf.vlan_num++; 734fcba820dSWei Hu (Xavier) if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX) 735fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 736fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM, item, 737fcba820dSWei Hu (Xavier) "Vlan_num is more than 2"); 738fcba820dSWei Hu (Xavier) 739fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 740fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 741fcba820dSWei Hu (Xavier) return 0; 742fcba820dSWei Hu (Xavier) 743fcba820dSWei Hu (Xavier) vlan_mask = item->mask; 744051d4bc9SJie Hai if (vlan_mask) { 7458275d5fcSThomas Monjalon if (vlan_mask->hdr.vlan_tci) { 746fcba820dSWei Hu (Xavier) if (rule->key_conf.vlan_num == 1) { 747fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_VLAN_TAG1, 748fcba820dSWei Hu (Xavier) 1); 749fcba820dSWei Hu (Xavier) rule->key_conf.mask.vlan_tag1 = 7508275d5fcSThomas Monjalon rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci); 751fcba820dSWei Hu (Xavier) } else { 752fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_VLAN_TAG2, 753fcba820dSWei Hu (Xavier) 1); 754fcba820dSWei Hu (Xavier) rule->key_conf.mask.vlan_tag2 = 7558275d5fcSThomas Monjalon rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci); 756fcba820dSWei Hu (Xavier) } 757fcba820dSWei Hu (Xavier) } 758051d4bc9SJie Hai if (vlan_mask->has_more_vlan) 759051d4bc9SJie Hai rule->has_more_vlan_m = true; 760fcba820dSWei Hu (Xavier) } 761fcba820dSWei Hu (Xavier) 762fcba820dSWei Hu (Xavier) vlan_spec = item->spec; 763fcba820dSWei Hu (Xavier) if (rule->key_conf.vlan_num == 1) 764fcba820dSWei Hu (Xavier) rule->key_conf.spec.vlan_tag1 = 7658275d5fcSThomas Monjalon rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci); 766fcba820dSWei Hu (Xavier) else 767fcba820dSWei Hu (Xavier) rule->key_conf.spec.vlan_tag2 = 7688275d5fcSThomas Monjalon rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci); 769051d4bc9SJie Hai 770051d4bc9SJie Hai if (vlan_mask && vlan_mask->has_more_vlan && vlan_spec->has_more_vlan) { 771051d4bc9SJie Hai rule->key_conf.vlan_num++; 772051d4bc9SJie Hai if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX) 773051d4bc9SJie Hai return rte_flow_error_set(error, EINVAL, 774051d4bc9SJie Hai RTE_FLOW_ERROR_TYPE_ITEM, item, 775051d4bc9SJie Hai "Vlan_num is more than 2"); 776051d4bc9SJie Hai rule->has_more_vlan_v = true; 777051d4bc9SJie Hai } 778051d4bc9SJie Hai 779fcba820dSWei Hu (Xavier) return 0; 780fcba820dSWei Hu (Xavier) } 781fcba820dSWei Hu (Xavier) 7820867543fSLijun Ou static bool 7830867543fSLijun Ou hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask) 7840867543fSLijun Ou { 7850867543fSLijun Ou if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id || 7860867543fSLijun Ou ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live || 7870867543fSLijun Ou ipv4_mask->hdr.hdr_checksum) 7880867543fSLijun Ou return false; 7890867543fSLijun Ou 7900867543fSLijun Ou return true; 7910867543fSLijun Ou } 7920867543fSLijun Ou 793fcba820dSWei Hu (Xavier) static int 794fcba820dSWei Hu (Xavier) hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 795fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 796fcba820dSWei Hu (Xavier) { 797fcba820dSWei Hu (Xavier) const struct rte_flow_item_ipv4 *ipv4_spec; 798fcba820dSWei Hu (Xavier) const struct rte_flow_item_ipv4 *ipv4_mask; 799fcba820dSWei Hu (Xavier) 800fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1); 801fcba820dSWei Hu (Xavier) rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4; 802fcba820dSWei Hu (Xavier) rule->key_conf.mask.ether_type = ETHER_TYPE_MASK; 8036f22672fSLijun Ou 804fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 805fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 806fcba820dSWei Hu (Xavier) return 0; 807fcba820dSWei Hu (Xavier) 808fcba820dSWei Hu (Xavier) if (item->mask) { 809fcba820dSWei Hu (Xavier) ipv4_mask = item->mask; 8100867543fSLijun Ou if (!hns3_check_ipv4_mask_supported(ipv4_mask)) { 811fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 812f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, 813fcba820dSWei Hu (Xavier) item, 814fcba820dSWei Hu (Xavier) "Only support src & dst ip,tos,proto in IPV4"); 815fcba820dSWei Hu (Xavier) } 816fcba820dSWei Hu (Xavier) 817fcba820dSWei Hu (Xavier) if (ipv4_mask->hdr.src_addr) { 818fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_SRC_IP, 1); 819fcba820dSWei Hu (Xavier) rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] = 820fcba820dSWei Hu (Xavier) rte_be_to_cpu_32(ipv4_mask->hdr.src_addr); 821fcba820dSWei Hu (Xavier) } 822fcba820dSWei Hu (Xavier) 823fcba820dSWei Hu (Xavier) if (ipv4_mask->hdr.dst_addr) { 824fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_DST_IP, 1); 825fcba820dSWei Hu (Xavier) rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] = 826fcba820dSWei Hu (Xavier) rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr); 827fcba820dSWei Hu (Xavier) } 828fcba820dSWei Hu (Xavier) 829fcba820dSWei Hu (Xavier) if (ipv4_mask->hdr.type_of_service) { 830fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_IP_TOS, 1); 831fcba820dSWei Hu (Xavier) rule->key_conf.mask.ip_tos = 832fcba820dSWei Hu (Xavier) ipv4_mask->hdr.type_of_service; 833fcba820dSWei Hu (Xavier) } 834fcba820dSWei Hu (Xavier) 835fcba820dSWei Hu (Xavier) if (ipv4_mask->hdr.next_proto_id) { 836fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); 837fcba820dSWei Hu (Xavier) rule->key_conf.mask.ip_proto = 838fcba820dSWei Hu (Xavier) ipv4_mask->hdr.next_proto_id; 839fcba820dSWei Hu (Xavier) } 840fcba820dSWei Hu (Xavier) } 841fcba820dSWei Hu (Xavier) 842fcba820dSWei Hu (Xavier) ipv4_spec = item->spec; 843fcba820dSWei Hu (Xavier) rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] = 844fcba820dSWei Hu (Xavier) rte_be_to_cpu_32(ipv4_spec->hdr.src_addr); 845fcba820dSWei Hu (Xavier) rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] = 846fcba820dSWei Hu (Xavier) rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr); 847fcba820dSWei Hu (Xavier) rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service; 848fcba820dSWei Hu (Xavier) rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id; 849fcba820dSWei Hu (Xavier) return 0; 850fcba820dSWei Hu (Xavier) } 851fcba820dSWei Hu (Xavier) 852fcba820dSWei Hu (Xavier) static int 853fcba820dSWei Hu (Xavier) hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 854fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 855fcba820dSWei Hu (Xavier) { 856fcba820dSWei Hu (Xavier) const struct rte_flow_item_ipv6 *ipv6_spec; 857fcba820dSWei Hu (Xavier) const struct rte_flow_item_ipv6 *ipv6_mask; 858fcba820dSWei Hu (Xavier) 859fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1); 860fcba820dSWei Hu (Xavier) rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6; 861fcba820dSWei Hu (Xavier) rule->key_conf.mask.ether_type = ETHER_TYPE_MASK; 862fcba820dSWei Hu (Xavier) 863fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 864fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 865fcba820dSWei Hu (Xavier) return 0; 866fcba820dSWei Hu (Xavier) 867fcba820dSWei Hu (Xavier) if (item->mask) { 868fcba820dSWei Hu (Xavier) ipv6_mask = item->mask; 869ee160716SLijun Ou if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len || 870ee160716SLijun Ou ipv6_mask->hdr.hop_limits) { 871fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 872f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, 873fcba820dSWei Hu (Xavier) item, 874fcba820dSWei Hu (Xavier) "Only support src & dst ip,proto in IPV6"); 875fcba820dSWei Hu (Xavier) } 876fcba820dSWei Hu (Xavier) net_addr_to_host(rule->key_conf.mask.src_ip, 87789b5642dSRobin Jarry (const rte_be32_t *)&ipv6_mask->hdr.src_addr, 878fcba820dSWei Hu (Xavier) IP_ADDR_LEN); 879fcba820dSWei Hu (Xavier) net_addr_to_host(rule->key_conf.mask.dst_ip, 88089b5642dSRobin Jarry (const rte_be32_t *)&ipv6_mask->hdr.dst_addr, 881fcba820dSWei Hu (Xavier) IP_ADDR_LEN); 882fcba820dSWei Hu (Xavier) rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto; 883fcba820dSWei Hu (Xavier) if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID]) 884fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_SRC_IP, 1); 885fcba820dSWei Hu (Xavier) if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID]) 886fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_DST_IP, 1); 887fcba820dSWei Hu (Xavier) if (ipv6_mask->hdr.proto) 888fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); 889fcba820dSWei Hu (Xavier) } 890fcba820dSWei Hu (Xavier) 891fcba820dSWei Hu (Xavier) ipv6_spec = item->spec; 892fcba820dSWei Hu (Xavier) net_addr_to_host(rule->key_conf.spec.src_ip, 89389b5642dSRobin Jarry (const rte_be32_t *)&ipv6_spec->hdr.src_addr, 894fcba820dSWei Hu (Xavier) IP_ADDR_LEN); 895fcba820dSWei Hu (Xavier) net_addr_to_host(rule->key_conf.spec.dst_ip, 89689b5642dSRobin Jarry (const rte_be32_t *)&ipv6_spec->hdr.dst_addr, 897fcba820dSWei Hu (Xavier) IP_ADDR_LEN); 898fcba820dSWei Hu (Xavier) rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto; 899fcba820dSWei Hu (Xavier) 900fcba820dSWei Hu (Xavier) return 0; 901fcba820dSWei Hu (Xavier) } 902fcba820dSWei Hu (Xavier) 9030867543fSLijun Ou static bool 9040867543fSLijun Ou hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask) 9050867543fSLijun Ou { 9060867543fSLijun Ou if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || 9070867543fSLijun Ou tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags || 9080867543fSLijun Ou tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum || 9090867543fSLijun Ou tcp_mask->hdr.tcp_urp) 9100867543fSLijun Ou return false; 9110867543fSLijun Ou 9120867543fSLijun Ou return true; 9130867543fSLijun Ou } 9140867543fSLijun Ou 915fcba820dSWei Hu (Xavier) static int 916fcba820dSWei Hu (Xavier) hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 917fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 918fcba820dSWei Hu (Xavier) { 919fcba820dSWei Hu (Xavier) const struct rte_flow_item_tcp *tcp_spec; 920fcba820dSWei Hu (Xavier) const struct rte_flow_item_tcp *tcp_mask; 921fcba820dSWei Hu (Xavier) 922fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); 923fcba820dSWei Hu (Xavier) rule->key_conf.spec.ip_proto = IPPROTO_TCP; 924fcba820dSWei Hu (Xavier) rule->key_conf.mask.ip_proto = IPPROTO_MASK; 925fcba820dSWei Hu (Xavier) 926fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 927fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 928fcba820dSWei Hu (Xavier) return 0; 929fcba820dSWei Hu (Xavier) 930fcba820dSWei Hu (Xavier) if (item->mask) { 931fcba820dSWei Hu (Xavier) tcp_mask = item->mask; 9320867543fSLijun Ou if (!hns3_check_tcp_mask_supported(tcp_mask)) { 933fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 934f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, 935fcba820dSWei Hu (Xavier) item, 936fcba820dSWei Hu (Xavier) "Only support src & dst port in TCP"); 937fcba820dSWei Hu (Xavier) } 938fcba820dSWei Hu (Xavier) 939fcba820dSWei Hu (Xavier) if (tcp_mask->hdr.src_port) { 940fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); 941fcba820dSWei Hu (Xavier) rule->key_conf.mask.src_port = 942fcba820dSWei Hu (Xavier) rte_be_to_cpu_16(tcp_mask->hdr.src_port); 943fcba820dSWei Hu (Xavier) } 944fcba820dSWei Hu (Xavier) if (tcp_mask->hdr.dst_port) { 945fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_DST_PORT, 1); 946fcba820dSWei Hu (Xavier) rule->key_conf.mask.dst_port = 947fcba820dSWei Hu (Xavier) rte_be_to_cpu_16(tcp_mask->hdr.dst_port); 948fcba820dSWei Hu (Xavier) } 949fcba820dSWei Hu (Xavier) } 950fcba820dSWei Hu (Xavier) 951fcba820dSWei Hu (Xavier) tcp_spec = item->spec; 952fcba820dSWei Hu (Xavier) rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port); 953fcba820dSWei Hu (Xavier) rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port); 954fcba820dSWei Hu (Xavier) 955fcba820dSWei Hu (Xavier) return 0; 956fcba820dSWei Hu (Xavier) } 957fcba820dSWei Hu (Xavier) 958fcba820dSWei Hu (Xavier) static int 959fcba820dSWei Hu (Xavier) hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 960fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 961fcba820dSWei Hu (Xavier) { 962fcba820dSWei Hu (Xavier) const struct rte_flow_item_udp *udp_spec; 963fcba820dSWei Hu (Xavier) const struct rte_flow_item_udp *udp_mask; 964fcba820dSWei Hu (Xavier) 965fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); 966fcba820dSWei Hu (Xavier) rule->key_conf.spec.ip_proto = IPPROTO_UDP; 967fcba820dSWei Hu (Xavier) rule->key_conf.mask.ip_proto = IPPROTO_MASK; 9681d82001aSLijun Ou 969fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 970fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 971fcba820dSWei Hu (Xavier) return 0; 972fcba820dSWei Hu (Xavier) 973fcba820dSWei Hu (Xavier) if (item->mask) { 974fcba820dSWei Hu (Xavier) udp_mask = item->mask; 975fcba820dSWei Hu (Xavier) if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) { 976fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 977f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, 978fcba820dSWei Hu (Xavier) item, 979fcba820dSWei Hu (Xavier) "Only support src & dst port in UDP"); 980fcba820dSWei Hu (Xavier) } 981fcba820dSWei Hu (Xavier) if (udp_mask->hdr.src_port) { 982fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); 983fcba820dSWei Hu (Xavier) rule->key_conf.mask.src_port = 984fcba820dSWei Hu (Xavier) rte_be_to_cpu_16(udp_mask->hdr.src_port); 985fcba820dSWei Hu (Xavier) } 986fcba820dSWei Hu (Xavier) if (udp_mask->hdr.dst_port) { 987fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_DST_PORT, 1); 988fcba820dSWei Hu (Xavier) rule->key_conf.mask.dst_port = 989fcba820dSWei Hu (Xavier) rte_be_to_cpu_16(udp_mask->hdr.dst_port); 990fcba820dSWei Hu (Xavier) } 991fcba820dSWei Hu (Xavier) } 992fcba820dSWei Hu (Xavier) 993fcba820dSWei Hu (Xavier) udp_spec = item->spec; 994fcba820dSWei Hu (Xavier) rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port); 995fcba820dSWei Hu (Xavier) rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port); 996fcba820dSWei Hu (Xavier) 997fcba820dSWei Hu (Xavier) return 0; 998fcba820dSWei Hu (Xavier) } 999fcba820dSWei Hu (Xavier) 1000fcba820dSWei Hu (Xavier) static int 1001fcba820dSWei Hu (Xavier) hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 1002fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 1003fcba820dSWei Hu (Xavier) { 1004fcba820dSWei Hu (Xavier) const struct rte_flow_item_sctp *sctp_spec; 1005fcba820dSWei Hu (Xavier) const struct rte_flow_item_sctp *sctp_mask; 1006fcba820dSWei Hu (Xavier) 1007fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); 1008fcba820dSWei Hu (Xavier) rule->key_conf.spec.ip_proto = IPPROTO_SCTP; 1009fcba820dSWei Hu (Xavier) rule->key_conf.mask.ip_proto = IPPROTO_MASK; 1010fcba820dSWei Hu (Xavier) 1011fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 1012fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 1013fcba820dSWei Hu (Xavier) return 0; 1014fcba820dSWei Hu (Xavier) 1015fcba820dSWei Hu (Xavier) if (item->mask) { 1016fcba820dSWei Hu (Xavier) sctp_mask = item->mask; 1017fcba820dSWei Hu (Xavier) if (sctp_mask->hdr.cksum) 1018fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1019f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, 1020fcba820dSWei Hu (Xavier) item, 1021fdafdca8SJie Hai "Only support src & dst port & v-tag in SCTP"); 1022fcba820dSWei Hu (Xavier) if (sctp_mask->hdr.src_port) { 1023fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); 1024fcba820dSWei Hu (Xavier) rule->key_conf.mask.src_port = 1025fcba820dSWei Hu (Xavier) rte_be_to_cpu_16(sctp_mask->hdr.src_port); 1026fcba820dSWei Hu (Xavier) } 1027fcba820dSWei Hu (Xavier) if (sctp_mask->hdr.dst_port) { 1028fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_DST_PORT, 1); 1029fcba820dSWei Hu (Xavier) rule->key_conf.mask.dst_port = 1030fcba820dSWei Hu (Xavier) rte_be_to_cpu_16(sctp_mask->hdr.dst_port); 1031fcba820dSWei Hu (Xavier) } 1032fcba820dSWei Hu (Xavier) if (sctp_mask->hdr.tag) { 1033fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1); 1034fcba820dSWei Hu (Xavier) rule->key_conf.mask.sctp_tag = 1035fcba820dSWei Hu (Xavier) rte_be_to_cpu_32(sctp_mask->hdr.tag); 1036fcba820dSWei Hu (Xavier) } 1037fcba820dSWei Hu (Xavier) } 1038fcba820dSWei Hu (Xavier) 1039fcba820dSWei Hu (Xavier) sctp_spec = item->spec; 1040fcba820dSWei Hu (Xavier) rule->key_conf.spec.src_port = 1041fcba820dSWei Hu (Xavier) rte_be_to_cpu_16(sctp_spec->hdr.src_port); 1042fcba820dSWei Hu (Xavier) rule->key_conf.spec.dst_port = 1043fcba820dSWei Hu (Xavier) rte_be_to_cpu_16(sctp_spec->hdr.dst_port); 1044fcba820dSWei Hu (Xavier) rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag); 1045fcba820dSWei Hu (Xavier) 1046fcba820dSWei Hu (Xavier) return 0; 1047fcba820dSWei Hu (Xavier) } 1048fcba820dSWei Hu (Xavier) 1049fcba820dSWei Hu (Xavier) /* 1050fcba820dSWei Hu (Xavier) * Check items before tunnel, save inner configs to outer configs, and clear 1051fcba820dSWei Hu (Xavier) * inner configs. 1052fcba820dSWei Hu (Xavier) * The key consists of two parts: meta_data and tuple keys. 1053fcba820dSWei Hu (Xavier) * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel 1054fcba820dSWei Hu (Xavier) * packet(1bit). 1055fcba820dSWei Hu (Xavier) * Tuple keys uses 384bit, including ot_dst-mac(48bit), ot_dst-port(16bit), 1056fcba820dSWei Hu (Xavier) * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit), 1057fcba820dSWei Hu (Xavier) * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit), 1058fcba820dSWei Hu (Xavier) * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit), 1059fcba820dSWei Hu (Xavier) * Vlantag2(16bit) and sctp-tag(32bit). 1060fcba820dSWei Hu (Xavier) */ 1061fcba820dSWei Hu (Xavier) static int 1062fcba820dSWei Hu (Xavier) hns3_handle_tunnel(const struct rte_flow_item *item, 1063fcba820dSWei Hu (Xavier) struct hns3_fdir_rule *rule, struct rte_flow_error *error) 1064fcba820dSWei Hu (Xavier) { 1065fcba820dSWei Hu (Xavier) /* check eth config */ 1066fcba820dSWei Hu (Xavier) if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC))) 1067fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1068fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM, 1069fcba820dSWei Hu (Xavier) item, "Outer eth mac is unsupported"); 1070fcba820dSWei Hu (Xavier) if (rule->input_set & BIT(INNER_ETH_TYPE)) { 1071fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1); 1072fcba820dSWei Hu (Xavier) rule->key_conf.spec.outer_ether_type = 1073fcba820dSWei Hu (Xavier) rule->key_conf.spec.ether_type; 1074fcba820dSWei Hu (Xavier) rule->key_conf.mask.outer_ether_type = 1075fcba820dSWei Hu (Xavier) rule->key_conf.mask.ether_type; 1076fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0); 1077fcba820dSWei Hu (Xavier) rule->key_conf.spec.ether_type = 0; 1078fcba820dSWei Hu (Xavier) rule->key_conf.mask.ether_type = 0; 1079fcba820dSWei Hu (Xavier) } 1080fcba820dSWei Hu (Xavier) 1081a4732847SChengwen Feng if (rule->input_set & BIT(INNER_VLAN_TAG1)) { 1082a4732847SChengwen Feng hns3_set_bit(rule->input_set, OUTER_VLAN_TAG_FST, 1); 1083a4732847SChengwen Feng hns3_set_bit(rule->input_set, INNER_VLAN_TAG1, 0); 1084a4732847SChengwen Feng rule->key_conf.spec.outer_vlan_tag1 = rule->key_conf.spec.vlan_tag1; 1085a4732847SChengwen Feng rule->key_conf.mask.outer_vlan_tag1 = rule->key_conf.mask.vlan_tag1; 1086a4732847SChengwen Feng rule->key_conf.spec.vlan_tag1 = 0; 1087a4732847SChengwen Feng rule->key_conf.mask.vlan_tag1 = 0; 1088a4732847SChengwen Feng } 1089a4732847SChengwen Feng if (rule->input_set & BIT(INNER_VLAN_TAG2)) { 1090a4732847SChengwen Feng hns3_set_bit(rule->input_set, OUTER_VLAN_TAG_SEC, 1); 1091a4732847SChengwen Feng hns3_set_bit(rule->input_set, INNER_VLAN_TAG2, 0); 1092a4732847SChengwen Feng rule->key_conf.spec.outer_vlan_tag2 = rule->key_conf.spec.vlan_tag2; 1093a4732847SChengwen Feng rule->key_conf.mask.outer_vlan_tag2 = rule->key_conf.mask.vlan_tag2; 1094a4732847SChengwen Feng rule->key_conf.spec.vlan_tag2 = 0; 1095a4732847SChengwen Feng rule->key_conf.mask.vlan_tag2 = 0; 1096a4732847SChengwen Feng } 1097fcba820dSWei Hu (Xavier) 1098fcba820dSWei Hu (Xavier) /* clear vlan_num for inner vlan select */ 1099fcba820dSWei Hu (Xavier) rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num; 1100fcba820dSWei Hu (Xavier) rule->key_conf.vlan_num = 0; 1101fcba820dSWei Hu (Xavier) 1102fcba820dSWei Hu (Xavier) /* check L3 config */ 1103fcba820dSWei Hu (Xavier) if (rule->input_set & 1104fcba820dSWei Hu (Xavier) (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS))) 1105fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1106fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM, 1107fcba820dSWei Hu (Xavier) item, "Outer ip is unsupported"); 1108fcba820dSWei Hu (Xavier) if (rule->input_set & BIT(INNER_IP_PROTO)) { 1109fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1); 1110fcba820dSWei Hu (Xavier) rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto; 1111fcba820dSWei Hu (Xavier) rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto; 1112fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0); 1113fcba820dSWei Hu (Xavier) rule->key_conf.spec.ip_proto = 0; 1114fcba820dSWei Hu (Xavier) rule->key_conf.mask.ip_proto = 0; 1115fcba820dSWei Hu (Xavier) } 1116fcba820dSWei Hu (Xavier) 1117fcba820dSWei Hu (Xavier) /* check L4 config */ 1118fcba820dSWei Hu (Xavier) if (rule->input_set & BIT(INNER_SCTP_TAG)) 1119fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1120fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM, item, 1121fcba820dSWei Hu (Xavier) "Outer sctp tag is unsupported"); 1122fcba820dSWei Hu (Xavier) 1123fcba820dSWei Hu (Xavier) if (rule->input_set & BIT(INNER_SRC_PORT)) { 1124fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1); 1125fcba820dSWei Hu (Xavier) rule->key_conf.spec.outer_src_port = 1126fcba820dSWei Hu (Xavier) rule->key_conf.spec.src_port; 1127fcba820dSWei Hu (Xavier) rule->key_conf.mask.outer_src_port = 1128fcba820dSWei Hu (Xavier) rule->key_conf.mask.src_port; 1129fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0); 1130fcba820dSWei Hu (Xavier) rule->key_conf.spec.src_port = 0; 1131fcba820dSWei Hu (Xavier) rule->key_conf.mask.src_port = 0; 1132fcba820dSWei Hu (Xavier) } 1133fcba820dSWei Hu (Xavier) if (rule->input_set & BIT(INNER_DST_PORT)) { 1134fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, INNER_DST_PORT, 0); 1135fcba820dSWei Hu (Xavier) rule->key_conf.spec.dst_port = 0; 1136fcba820dSWei Hu (Xavier) rule->key_conf.mask.dst_port = 0; 1137fcba820dSWei Hu (Xavier) } 1138fcba820dSWei Hu (Xavier) return 0; 1139fcba820dSWei Hu (Xavier) } 1140fcba820dSWei Hu (Xavier) 1141fcba820dSWei Hu (Xavier) static int 1142fcba820dSWei Hu (Xavier) hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 1143fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 1144fcba820dSWei Hu (Xavier) { 1145fcba820dSWei Hu (Xavier) const struct rte_flow_item_vxlan *vxlan_spec; 1146fcba820dSWei Hu (Xavier) const struct rte_flow_item_vxlan *vxlan_mask; 1147fcba820dSWei Hu (Xavier) 1148fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1); 1149fcba820dSWei Hu (Xavier) rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK; 1150fcba820dSWei Hu (Xavier) if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) 1151fcba820dSWei Hu (Xavier) rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN; 1152fcba820dSWei Hu (Xavier) else 1153fcba820dSWei Hu (Xavier) rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE; 1154fcba820dSWei Hu (Xavier) 1155fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 1156fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 1157fcba820dSWei Hu (Xavier) return 0; 1158fcba820dSWei Hu (Xavier) 1159fcba820dSWei Hu (Xavier) vxlan_mask = item->mask; 1160fcba820dSWei Hu (Xavier) vxlan_spec = item->spec; 1161fcba820dSWei Hu (Xavier) 11625ec2a97eSThomas Monjalon if (vxlan_mask->hdr.flags) 1163fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1164f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, 1165fcba820dSWei Hu (Xavier) "Flags is not supported in VxLAN"); 1166fcba820dSWei Hu (Xavier) 1167fcba820dSWei Hu (Xavier) /* VNI must be totally masked or not. */ 11685ec2a97eSThomas Monjalon if (memcmp(vxlan_mask->hdr.vni, full_mask, VNI_OR_TNI_LEN) && 11695ec2a97eSThomas Monjalon memcmp(vxlan_mask->hdr.vni, zero_mask, VNI_OR_TNI_LEN)) 1170fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1171f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, 1172fcba820dSWei Hu (Xavier) "VNI must be totally masked or not in VxLAN"); 11735ec2a97eSThomas Monjalon if (vxlan_mask->hdr.vni[0]) { 1174fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1); 11755ec2a97eSThomas Monjalon memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->hdr.vni, 1176fcba820dSWei Hu (Xavier) VNI_OR_TNI_LEN); 1177fcba820dSWei Hu (Xavier) } 11785ec2a97eSThomas Monjalon memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->hdr.vni, 1179fcba820dSWei Hu (Xavier) VNI_OR_TNI_LEN); 1180fcba820dSWei Hu (Xavier) return 0; 1181fcba820dSWei Hu (Xavier) } 1182fcba820dSWei Hu (Xavier) 1183fcba820dSWei Hu (Xavier) static int 1184fcba820dSWei Hu (Xavier) hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 1185fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 1186fcba820dSWei Hu (Xavier) { 1187fcba820dSWei Hu (Xavier) const struct rte_flow_item_nvgre *nvgre_spec; 1188fcba820dSWei Hu (Xavier) const struct rte_flow_item_nvgre *nvgre_mask; 1189fcba820dSWei Hu (Xavier) 1190fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1); 1191fcba820dSWei Hu (Xavier) rule->key_conf.spec.outer_proto = IPPROTO_GRE; 1192fcba820dSWei Hu (Xavier) rule->key_conf.mask.outer_proto = IPPROTO_MASK; 1193fcba820dSWei Hu (Xavier) 1194fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1); 1195fcba820dSWei Hu (Xavier) rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE; 1196fcba820dSWei Hu (Xavier) rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE; 1197fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 1198fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 1199fcba820dSWei Hu (Xavier) return 0; 1200fcba820dSWei Hu (Xavier) 1201fcba820dSWei Hu (Xavier) nvgre_mask = item->mask; 1202fcba820dSWei Hu (Xavier) nvgre_spec = item->spec; 1203fcba820dSWei Hu (Xavier) 1204fcba820dSWei Hu (Xavier) if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver) 1205fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1206f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, 12077be78d02SJosh Soref "Ver/protocol is not supported in NVGRE"); 1208fcba820dSWei Hu (Xavier) 1209fcba820dSWei Hu (Xavier) /* TNI must be totally masked or not. */ 1210fcba820dSWei Hu (Xavier) if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) && 1211fcba820dSWei Hu (Xavier) memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN)) 1212fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1213f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, 1214fcba820dSWei Hu (Xavier) "TNI must be totally masked or not in NVGRE"); 1215fcba820dSWei Hu (Xavier) 1216fcba820dSWei Hu (Xavier) if (nvgre_mask->tni[0]) { 1217fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1); 1218fcba820dSWei Hu (Xavier) memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni, 1219fcba820dSWei Hu (Xavier) VNI_OR_TNI_LEN); 1220fcba820dSWei Hu (Xavier) } 1221fcba820dSWei Hu (Xavier) memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni, 1222fcba820dSWei Hu (Xavier) VNI_OR_TNI_LEN); 1223fcba820dSWei Hu (Xavier) 1224fcba820dSWei Hu (Xavier) if (nvgre_mask->flow_id) { 1225fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1); 1226fcba820dSWei Hu (Xavier) rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id; 1227fcba820dSWei Hu (Xavier) } 1228fcba820dSWei Hu (Xavier) rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id; 1229fcba820dSWei Hu (Xavier) return 0; 1230fcba820dSWei Hu (Xavier) } 1231fcba820dSWei Hu (Xavier) 1232fcba820dSWei Hu (Xavier) static int 1233fcba820dSWei Hu (Xavier) hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 1234fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 1235fcba820dSWei Hu (Xavier) { 1236fcba820dSWei Hu (Xavier) const struct rte_flow_item_geneve *geneve_spec; 1237fcba820dSWei Hu (Xavier) const struct rte_flow_item_geneve *geneve_mask; 1238fcba820dSWei Hu (Xavier) 1239fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1); 1240fcba820dSWei Hu (Xavier) rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE; 1241fcba820dSWei Hu (Xavier) rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK; 1242fcba820dSWei Hu (Xavier) /* Only used to describe the protocol stack. */ 1243fcba820dSWei Hu (Xavier) if (item->spec == NULL && item->mask == NULL) 1244fcba820dSWei Hu (Xavier) return 0; 1245fcba820dSWei Hu (Xavier) 1246fcba820dSWei Hu (Xavier) geneve_mask = item->mask; 1247fcba820dSWei Hu (Xavier) geneve_spec = item->spec; 1248fcba820dSWei Hu (Xavier) 1249fcba820dSWei Hu (Xavier) if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol) 1250fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1251f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, 12527be78d02SJosh Soref "Ver/protocol is not supported in GENEVE"); 1253fcba820dSWei Hu (Xavier) /* VNI must be totally masked or not. */ 1254fcba820dSWei Hu (Xavier) if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) && 1255fcba820dSWei Hu (Xavier) memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN)) 1256fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1257f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, 1258fcba820dSWei Hu (Xavier) "VNI must be totally masked or not in GENEVE"); 1259fcba820dSWei Hu (Xavier) if (geneve_mask->vni[0]) { 1260fcba820dSWei Hu (Xavier) hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1); 1261fcba820dSWei Hu (Xavier) memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni, 1262fcba820dSWei Hu (Xavier) VNI_OR_TNI_LEN); 1263fcba820dSWei Hu (Xavier) } 1264fcba820dSWei Hu (Xavier) memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni, 1265fcba820dSWei Hu (Xavier) VNI_OR_TNI_LEN); 1266fcba820dSWei Hu (Xavier) return 0; 1267fcba820dSWei Hu (Xavier) } 1268fcba820dSWei Hu (Xavier) 1269fcba820dSWei Hu (Xavier) static int 127090294fa5SChengwen Feng hns3_parse_ptype(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 127190294fa5SChengwen Feng struct rte_flow_error *error) 127290294fa5SChengwen Feng { 127390294fa5SChengwen Feng const struct rte_flow_item_ptype *spec = item->spec; 127490294fa5SChengwen Feng const struct rte_flow_item_ptype *mask = item->mask; 127590294fa5SChengwen Feng 127690294fa5SChengwen Feng if (spec == NULL || mask == NULL) 127790294fa5SChengwen Feng return rte_flow_error_set(error, EINVAL, 127890294fa5SChengwen Feng RTE_FLOW_ERROR_TYPE_ITEM, item, 127990294fa5SChengwen Feng "PTYPE must set spec and mask at the same time!"); 128090294fa5SChengwen Feng 128190294fa5SChengwen Feng if (spec->packet_type != RTE_PTYPE_TUNNEL_MASK || 128290294fa5SChengwen Feng (mask->packet_type & RTE_PTYPE_TUNNEL_MASK) != RTE_PTYPE_TUNNEL_MASK) 128390294fa5SChengwen Feng return rte_flow_error_set(error, EINVAL, 128490294fa5SChengwen Feng RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, 128590294fa5SChengwen Feng "PTYPE only support general tunnel!"); 128690294fa5SChengwen Feng 128790294fa5SChengwen Feng /* 128890294fa5SChengwen Feng * Set tunnel_type to non-zero, so that meta-data's tunnel packet bit 128990294fa5SChengwen Feng * will be set, then hardware will match tunnel packet. 129090294fa5SChengwen Feng */ 129190294fa5SChengwen Feng rule->key_conf.spec.tunnel_type = 1; 129290294fa5SChengwen Feng return 0; 129390294fa5SChengwen Feng } 129490294fa5SChengwen Feng 129590294fa5SChengwen Feng static int 1296fcba820dSWei Hu (Xavier) hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 1297fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 1298fcba820dSWei Hu (Xavier) { 1299fcba820dSWei Hu (Xavier) int ret; 1300fcba820dSWei Hu (Xavier) 13016f22672fSLijun Ou if (item->spec == NULL && item->mask) 13026f22672fSLijun Ou return rte_flow_error_set(error, EINVAL, 13036f22672fSLijun Ou RTE_FLOW_ERROR_TYPE_ITEM, item, 13046f22672fSLijun Ou "Can't configure FDIR with mask " 13056f22672fSLijun Ou "but without spec"); 13066f22672fSLijun Ou else if (item->spec && (item->mask == NULL)) 13076f22672fSLijun Ou return rte_flow_error_set(error, EINVAL, 13086f22672fSLijun Ou RTE_FLOW_ERROR_TYPE_ITEM, item, 13096f22672fSLijun Ou "Tunnel packets must configure " 13106f22672fSLijun Ou "with mask"); 13116f22672fSLijun Ou 13128887c207SChengwen Feng if (rule->key_conf.spec.tunnel_type != 0) 13138887c207SChengwen Feng return rte_flow_error_set(error, EINVAL, 13148887c207SChengwen Feng RTE_FLOW_ERROR_TYPE_ITEM, 13158887c207SChengwen Feng item, "Too many tunnel headers!"); 13168887c207SChengwen Feng 1317fcba820dSWei Hu (Xavier) switch (item->type) { 1318fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_VXLAN: 1319fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 1320fcba820dSWei Hu (Xavier) ret = hns3_parse_vxlan(item, rule, error); 1321fcba820dSWei Hu (Xavier) break; 1322fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_NVGRE: 1323fcba820dSWei Hu (Xavier) ret = hns3_parse_nvgre(item, rule, error); 1324fcba820dSWei Hu (Xavier) break; 1325fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_GENEVE: 1326fcba820dSWei Hu (Xavier) ret = hns3_parse_geneve(item, rule, error); 1327fcba820dSWei Hu (Xavier) break; 132890294fa5SChengwen Feng case RTE_FLOW_ITEM_TYPE_PTYPE: 132990294fa5SChengwen Feng ret = hns3_parse_ptype(item, rule, error); 133090294fa5SChengwen Feng break; 1331fcba820dSWei Hu (Xavier) default: 1332fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 1333f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM, 1334fcba820dSWei Hu (Xavier) NULL, "Unsupported tunnel type!"); 1335fcba820dSWei Hu (Xavier) } 1336fcba820dSWei Hu (Xavier) if (ret) 1337fcba820dSWei Hu (Xavier) return ret; 1338fcba820dSWei Hu (Xavier) return hns3_handle_tunnel(item, rule, error); 1339fcba820dSWei Hu (Xavier) } 1340fcba820dSWei Hu (Xavier) 1341fcba820dSWei Hu (Xavier) static int 1342ee160716SLijun Ou hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, 1343fcba820dSWei Hu (Xavier) struct items_step_mngr *step_mngr, 1344fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 1345fcba820dSWei Hu (Xavier) { 1346fcba820dSWei Hu (Xavier) int ret; 1347fcba820dSWei Hu (Xavier) 13486f22672fSLijun Ou if (item->spec == NULL && item->mask) 13496f22672fSLijun Ou return rte_flow_error_set(error, EINVAL, 13506f22672fSLijun Ou RTE_FLOW_ERROR_TYPE_ITEM, item, 13516f22672fSLijun Ou "Can't configure FDIR with mask " 13526f22672fSLijun Ou "but without spec"); 13536f22672fSLijun Ou 1354fcba820dSWei Hu (Xavier) switch (item->type) { 1355fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_ETH: 1356fcba820dSWei Hu (Xavier) ret = hns3_parse_eth(item, rule, error); 1357fcba820dSWei Hu (Xavier) step_mngr->items = L2_next_items; 135877d1f6b1SChengwen Feng step_mngr->count = RTE_DIM(L2_next_items); 1359fcba820dSWei Hu (Xavier) break; 1360fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_VLAN: 1361fcba820dSWei Hu (Xavier) ret = hns3_parse_vlan(item, rule, error); 1362fcba820dSWei Hu (Xavier) step_mngr->items = L2_next_items; 136377d1f6b1SChengwen Feng step_mngr->count = RTE_DIM(L2_next_items); 1364fcba820dSWei Hu (Xavier) break; 1365fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_IPV4: 1366fcba820dSWei Hu (Xavier) ret = hns3_parse_ipv4(item, rule, error); 1367fcba820dSWei Hu (Xavier) step_mngr->items = L3_next_items; 136877d1f6b1SChengwen Feng step_mngr->count = RTE_DIM(L3_next_items); 1369fcba820dSWei Hu (Xavier) break; 1370fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_IPV6: 1371fcba820dSWei Hu (Xavier) ret = hns3_parse_ipv6(item, rule, error); 1372fcba820dSWei Hu (Xavier) step_mngr->items = L3_next_items; 137377d1f6b1SChengwen Feng step_mngr->count = RTE_DIM(L3_next_items); 1374fcba820dSWei Hu (Xavier) break; 1375fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_TCP: 1376fcba820dSWei Hu (Xavier) ret = hns3_parse_tcp(item, rule, error); 1377fcba820dSWei Hu (Xavier) step_mngr->items = L4_next_items; 137877d1f6b1SChengwen Feng step_mngr->count = RTE_DIM(L4_next_items); 1379fcba820dSWei Hu (Xavier) break; 1380fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_UDP: 1381fcba820dSWei Hu (Xavier) ret = hns3_parse_udp(item, rule, error); 1382fcba820dSWei Hu (Xavier) step_mngr->items = L4_next_items; 138377d1f6b1SChengwen Feng step_mngr->count = RTE_DIM(L4_next_items); 1384fcba820dSWei Hu (Xavier) break; 1385fcba820dSWei Hu (Xavier) case RTE_FLOW_ITEM_TYPE_SCTP: 1386fcba820dSWei Hu (Xavier) ret = hns3_parse_sctp(item, rule, error); 1387fcba820dSWei Hu (Xavier) step_mngr->items = L4_next_items; 138877d1f6b1SChengwen Feng step_mngr->count = RTE_DIM(L4_next_items); 1389fcba820dSWei Hu (Xavier) break; 1390fcba820dSWei Hu (Xavier) default: 1391fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 1392f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM, 1393fcba820dSWei Hu (Xavier) NULL, "Unsupported normal type!"); 1394fcba820dSWei Hu (Xavier) } 1395fcba820dSWei Hu (Xavier) 1396fcba820dSWei Hu (Xavier) return ret; 1397fcba820dSWei Hu (Xavier) } 1398fcba820dSWei Hu (Xavier) 1399fcba820dSWei Hu (Xavier) static int 1400fcba820dSWei Hu (Xavier) hns3_validate_item(const struct rte_flow_item *item, 1401fcba820dSWei Hu (Xavier) struct items_step_mngr step_mngr, 1402fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 1403fcba820dSWei Hu (Xavier) { 140482c2ca6dSMin Hu (Connor) uint32_t i; 1405fcba820dSWei Hu (Xavier) 1406fcba820dSWei Hu (Xavier) if (item->last) 1407fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 1408f2577609SWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_LAST, item, 1409fcba820dSWei Hu (Xavier) "Not supported last point for range"); 1410fcba820dSWei Hu (Xavier) 1411fcba820dSWei Hu (Xavier) for (i = 0; i < step_mngr.count; i++) { 1412fcba820dSWei Hu (Xavier) if (item->type == step_mngr.items[i]) 1413fcba820dSWei Hu (Xavier) break; 1414fcba820dSWei Hu (Xavier) } 1415fcba820dSWei Hu (Xavier) 1416fcba820dSWei Hu (Xavier) if (i == step_mngr.count) { 1417fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 1418fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM, 1419fcba820dSWei Hu (Xavier) item, "Inval or missing item"); 1420fcba820dSWei Hu (Xavier) } 1421fcba820dSWei Hu (Xavier) return 0; 1422fcba820dSWei Hu (Xavier) } 1423fcba820dSWei Hu (Xavier) 1424fcba820dSWei Hu (Xavier) static inline bool 1425fcba820dSWei Hu (Xavier) is_tunnel_packet(enum rte_flow_item_type type) 1426fcba820dSWei Hu (Xavier) { 1427fcba820dSWei Hu (Xavier) if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || 1428fcba820dSWei Hu (Xavier) type == RTE_FLOW_ITEM_TYPE_VXLAN || 1429fcba820dSWei Hu (Xavier) type == RTE_FLOW_ITEM_TYPE_NVGRE || 143090294fa5SChengwen Feng type == RTE_FLOW_ITEM_TYPE_GENEVE || 143190294fa5SChengwen Feng /* 143290294fa5SChengwen Feng * Here treat PTYPE as tunnel type because driver only support PTYPE_TUNNEL, 143390294fa5SChengwen Feng * other PTYPE will return error in hns3_parse_ptype() later. 143490294fa5SChengwen Feng */ 143590294fa5SChengwen Feng type == RTE_FLOW_ITEM_TYPE_PTYPE) 1436fcba820dSWei Hu (Xavier) return true; 1437fcba820dSWei Hu (Xavier) return false; 1438fcba820dSWei Hu (Xavier) } 1439fcba820dSWei Hu (Xavier) 1440ac72aae6SDengdui Huang static int 1441ac72aae6SDengdui Huang hns3_handle_attributes(struct rte_eth_dev *dev, 1442ac72aae6SDengdui Huang const struct rte_flow_attr *attr, 1443ac72aae6SDengdui Huang struct hns3_fdir_rule *rule, 1444ac72aae6SDengdui Huang struct rte_flow_error *error) 1445ac72aae6SDengdui Huang { 1446ac72aae6SDengdui Huang struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1447ac72aae6SDengdui Huang struct hns3_fdir_info fdir = pf->fdir; 1448ac72aae6SDengdui Huang uint32_t rule_num; 1449ac72aae6SDengdui Huang 1450ac72aae6SDengdui Huang if (fdir.index_cfg != HNS3_FDIR_INDEX_CONFIG_PRIORITY) { 1451ac72aae6SDengdui Huang if (attr->priority == 0) 1452ac72aae6SDengdui Huang return 0; 1453ac72aae6SDengdui Huang return rte_flow_error_set(error, ENOTSUP, 1454ac72aae6SDengdui Huang RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1455ac72aae6SDengdui Huang attr, "Not support priority"); 1456ac72aae6SDengdui Huang } 1457ac72aae6SDengdui Huang 1458ac72aae6SDengdui Huang rule_num = fdir.fd_cfg.rule_num[HNS3_FD_STAGE_1]; 1459ac72aae6SDengdui Huang if (attr->priority >= rule_num) 1460ac72aae6SDengdui Huang return rte_flow_error_set(error, EINVAL, 1461ac72aae6SDengdui Huang RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1462ac72aae6SDengdui Huang attr, "Priority out of range"); 1463ac72aae6SDengdui Huang 1464ac72aae6SDengdui Huang if (fdir.hash_map[attr->priority] != NULL) 1465ac72aae6SDengdui Huang return rte_flow_error_set(error, EINVAL, 1466ac72aae6SDengdui Huang RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1467ac72aae6SDengdui Huang attr, "Priority already exists"); 1468ac72aae6SDengdui Huang 1469ac72aae6SDengdui Huang rule->location = attr->priority; 1470ac72aae6SDengdui Huang 1471ac72aae6SDengdui Huang return 0; 1472ac72aae6SDengdui Huang } 1473ac72aae6SDengdui Huang 1474fcba820dSWei Hu (Xavier) /* 1475529017f9SChengwen Feng * Parse the flow director rule. 1476529017f9SChengwen Feng * The supported PATTERN: 1477529017f9SChengwen Feng * case: non-tunnel packet: 1478529017f9SChengwen Feng * ETH : src-mac, dst-mac, ethertype 1479529017f9SChengwen Feng * VLAN: tag1, tag2 1480529017f9SChengwen Feng * IPv4: src-ip, dst-ip, tos, proto 1481529017f9SChengwen Feng * IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto 1482529017f9SChengwen Feng * UDP : src-port, dst-port 1483529017f9SChengwen Feng * TCP : src-port, dst-port 1484529017f9SChengwen Feng * SCTP: src-port, dst-port, tag 1485529017f9SChengwen Feng * case: tunnel packet: 1486529017f9SChengwen Feng * OUTER-ETH: ethertype 1487529017f9SChengwen Feng * OUTER-L3 : proto 1488529017f9SChengwen Feng * OUTER-L4 : src-port, dst-port 1489529017f9SChengwen Feng * TUNNEL : vni, flow-id(only valid when NVGRE) 1490529017f9SChengwen Feng * INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet 1491529017f9SChengwen Feng * The supported ACTION: 1492529017f9SChengwen Feng * QUEUE 1493529017f9SChengwen Feng * DROP 1494529017f9SChengwen Feng * COUNT 1495529017f9SChengwen Feng * MARK: the id range [0, 4094] 1496529017f9SChengwen Feng * FLAG 1497529017f9SChengwen Feng * RSS: only valid if firmware support FD_QUEUE_REGION. 1498fcba820dSWei Hu (Xavier) */ 1499fcba820dSWei Hu (Xavier) static int 1500fcba820dSWei Hu (Xavier) hns3_parse_fdir_filter(struct rte_eth_dev *dev, 1501ac72aae6SDengdui Huang const struct rte_flow_attr *attr, 1502fcba820dSWei Hu (Xavier) const struct rte_flow_item pattern[], 1503fcba820dSWei Hu (Xavier) const struct rte_flow_action actions[], 1504fcba820dSWei Hu (Xavier) struct hns3_fdir_rule *rule, 1505fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 1506fcba820dSWei Hu (Xavier) { 1507fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 1508fcba820dSWei Hu (Xavier) const struct rte_flow_item *item; 1509fcba820dSWei Hu (Xavier) struct items_step_mngr step_mngr; 1510fcba820dSWei Hu (Xavier) int ret; 1511fcba820dSWei Hu (Xavier) 1512fcba820dSWei Hu (Xavier) /* FDIR is available only in PF driver */ 1513fcba820dSWei Hu (Xavier) if (hns->is_vf) 1514fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 1515fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 1516fcba820dSWei Hu (Xavier) "Fdir not supported in VF"); 1517fcba820dSWei Hu (Xavier) 1518ac72aae6SDengdui Huang ret = hns3_handle_attributes(dev, attr, rule, error); 1519ac72aae6SDengdui Huang if (ret) 1520ac72aae6SDengdui Huang return ret; 1521ac72aae6SDengdui Huang 1522fcba820dSWei Hu (Xavier) step_mngr.items = first_items; 152377d1f6b1SChengwen Feng step_mngr.count = RTE_DIM(first_items); 1524fcba820dSWei Hu (Xavier) for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 1525fcba820dSWei Hu (Xavier) if (item->type == RTE_FLOW_ITEM_TYPE_VOID) 1526fcba820dSWei Hu (Xavier) continue; 1527fcba820dSWei Hu (Xavier) 1528fcba820dSWei Hu (Xavier) ret = hns3_validate_item(item, step_mngr, error); 1529fcba820dSWei Hu (Xavier) if (ret) 1530fcba820dSWei Hu (Xavier) return ret; 1531fcba820dSWei Hu (Xavier) 1532fcba820dSWei Hu (Xavier) if (is_tunnel_packet(item->type)) { 1533fcba820dSWei Hu (Xavier) ret = hns3_parse_tunnel(item, rule, error); 1534fcba820dSWei Hu (Xavier) if (ret) 1535fcba820dSWei Hu (Xavier) return ret; 1536fcba820dSWei Hu (Xavier) step_mngr.items = tunnel_next_items; 153777d1f6b1SChengwen Feng step_mngr.count = RTE_DIM(tunnel_next_items); 1538fcba820dSWei Hu (Xavier) } else { 1539fcba820dSWei Hu (Xavier) ret = hns3_parse_normal(item, rule, &step_mngr, error); 1540fcba820dSWei Hu (Xavier) if (ret) 1541fcba820dSWei Hu (Xavier) return ret; 1542fcba820dSWei Hu (Xavier) } 1543fcba820dSWei Hu (Xavier) } 1544fcba820dSWei Hu (Xavier) 1545a4732847SChengwen Feng ret = hns3_check_tuple(dev, rule, error); 1546a4732847SChengwen Feng if (ret) 1547a4732847SChengwen Feng return ret; 1548a4732847SChengwen Feng 1549fcba820dSWei Hu (Xavier) return hns3_handle_actions(dev, actions, rule, error); 1550fcba820dSWei Hu (Xavier) } 1551fcba820dSWei Hu (Xavier) 1552fcba820dSWei Hu (Xavier) static void 1553fcba820dSWei Hu (Xavier) hns3_filterlist_flush(struct rte_eth_dev *dev) 1554fcba820dSWei Hu (Xavier) { 15559b290a3aSChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1556fcba820dSWei Hu (Xavier) struct hns3_fdir_rule_ele *fdir_rule_ptr; 1557fcba820dSWei Hu (Xavier) struct hns3_flow_mem *flow_node; 1558fcba820dSWei Hu (Xavier) 15599b290a3aSChengwen Feng fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list); 1560fcba820dSWei Hu (Xavier) while (fdir_rule_ptr) { 15619b290a3aSChengwen Feng TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries); 1562fcba820dSWei Hu (Xavier) rte_free(fdir_rule_ptr); 15639b290a3aSChengwen Feng fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list); 1564fcba820dSWei Hu (Xavier) } 1565fcba820dSWei Hu (Xavier) 15669b290a3aSChengwen Feng flow_node = TAILQ_FIRST(&hw->flow_list); 1567fcba820dSWei Hu (Xavier) while (flow_node) { 15689b290a3aSChengwen Feng TAILQ_REMOVE(&hw->flow_list, flow_node, entries); 1569fcba820dSWei Hu (Xavier) rte_free(flow_node->flow); 1570fcba820dSWei Hu (Xavier) rte_free(flow_node); 15719b290a3aSChengwen Feng flow_node = TAILQ_FIRST(&hw->flow_list); 1572fcba820dSWei Hu (Xavier) } 1573fcba820dSWei Hu (Xavier) } 1574fcba820dSWei Hu (Xavier) 1575c37ca66fSWei Hu (Xavier) static bool 1576150fd8f8SHuisong Li hns3_flow_rule_key_same(const struct rte_flow_action_rss *comp, 1577150fd8f8SHuisong Li const struct rte_flow_action_rss *with) 1578150fd8f8SHuisong Li { 1579150fd8f8SHuisong Li if (comp->key_len != with->key_len) 1580150fd8f8SHuisong Li return false; 1581150fd8f8SHuisong Li 1582150fd8f8SHuisong Li if (with->key_len == 0) 1583150fd8f8SHuisong Li return true; 1584150fd8f8SHuisong Li 1585150fd8f8SHuisong Li if (comp->key == NULL && with->key == NULL) 1586150fd8f8SHuisong Li return true; 1587150fd8f8SHuisong Li 1588150fd8f8SHuisong Li if (!(comp->key != NULL && with->key != NULL)) 1589150fd8f8SHuisong Li return false; 1590150fd8f8SHuisong Li 1591150fd8f8SHuisong Li return !memcmp(comp->key, with->key, with->key_len); 1592150fd8f8SHuisong Li } 1593150fd8f8SHuisong Li 1594150fd8f8SHuisong Li static bool 1595150fd8f8SHuisong Li hns3_flow_rule_queues_same(const struct rte_flow_action_rss *comp, 1596150fd8f8SHuisong Li const struct rte_flow_action_rss *with) 1597150fd8f8SHuisong Li { 1598150fd8f8SHuisong Li if (comp->queue_num != with->queue_num) 1599150fd8f8SHuisong Li return false; 1600150fd8f8SHuisong Li 1601150fd8f8SHuisong Li if (with->queue_num == 0) 1602150fd8f8SHuisong Li return true; 1603150fd8f8SHuisong Li 1604150fd8f8SHuisong Li if (comp->queue == NULL && with->queue == NULL) 1605150fd8f8SHuisong Li return true; 1606150fd8f8SHuisong Li 1607150fd8f8SHuisong Li if (!(comp->queue != NULL && with->queue != NULL)) 1608150fd8f8SHuisong Li return false; 1609150fd8f8SHuisong Li 1610150fd8f8SHuisong Li return !memcmp(comp->queue, with->queue, with->queue_num); 1611150fd8f8SHuisong Li } 1612150fd8f8SHuisong Li 1613150fd8f8SHuisong Li static bool 1614c37ca66fSWei Hu (Xavier) hns3_action_rss_same(const struct rte_flow_action_rss *comp, 1615c37ca66fSWei Hu (Xavier) const struct rte_flow_action_rss *with) 1616c37ca66fSWei Hu (Xavier) { 1617150fd8f8SHuisong Li bool same_level; 1618150fd8f8SHuisong Li bool same_types; 1619150fd8f8SHuisong Li bool same_func; 1620eb158fc7SLijun Ou 1621150fd8f8SHuisong Li same_level = (comp->level == with->level); 1622150fd8f8SHuisong Li same_types = (comp->types == with->types); 1623150fd8f8SHuisong Li same_func = (comp->func == with->func); 1624eb158fc7SLijun Ou 1625150fd8f8SHuisong Li return same_level && same_types && same_func && 1626150fd8f8SHuisong Li hns3_flow_rule_key_same(comp, with) && 1627150fd8f8SHuisong Li hns3_flow_rule_queues_same(comp, with); 1628c37ca66fSWei Hu (Xavier) } 1629c37ca66fSWei Hu (Xavier) 16300867543fSLijun Ou static bool 1631e3069658SHuisong Li hns3_valid_ipv6_sctp_rss_types(struct hns3_hw *hw, uint64_t types) 16320867543fSLijun Ou { 16330867543fSLijun Ou /* 1634e3069658SHuisong Li * Some hardware don't support to use src/dst port fields to hash 1635e3069658SHuisong Li * for IPV6 SCTP packet type. 16360867543fSLijun Ou */ 1637e3069658SHuisong Li if (types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP && 1638e3069658SHuisong Li types & HNS3_RSS_SUPPORT_L4_SRC_DST && 1639e3069658SHuisong Li !hw->rss_info.ipv6_sctp_offload_supported) 16400867543fSLijun Ou return false; 16410867543fSLijun Ou 16420867543fSLijun Ou return true; 16430867543fSLijun Ou } 16440867543fSLijun Ou 1645e3069658SHuisong Li static int 1646e3069658SHuisong Li hns3_flow_parse_hash_func(const struct rte_flow_action_rss *rss_act, 1647e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1648e3069658SHuisong Li struct rte_flow_error *error) 1649e3069658SHuisong Li { 1650e3069658SHuisong Li if (rss_act->func >= RTE_ETH_HASH_FUNCTION_MAX) 1651e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, 1652e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1653e3069658SHuisong Li NULL, "RSS hash func are not supported"); 1654e3069658SHuisong Li 1655e3069658SHuisong Li rss_conf->conf.func = rss_act->func; 1656e3069658SHuisong Li return 0; 1657e3069658SHuisong Li } 1658e3069658SHuisong Li 1659e3069658SHuisong Li static int 1660e3069658SHuisong Li hns3_flow_parse_hash_key(struct hns3_hw *hw, 1661e3069658SHuisong Li const struct rte_flow_action_rss *rss_act, 1662e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1663e3069658SHuisong Li struct rte_flow_error *error) 1664e3069658SHuisong Li { 1665e3069658SHuisong Li if (rss_act->key_len != hw->rss_key_size) 1666e3069658SHuisong Li return rte_flow_error_set(error, EINVAL, 1667e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1668e3069658SHuisong Li NULL, "invalid RSS key length"); 1669e3069658SHuisong Li 1670e3069658SHuisong Li if (rss_act->key != NULL) 1671e3069658SHuisong Li memcpy(rss_conf->key, rss_act->key, rss_act->key_len); 1672e3069658SHuisong Li else 1673e3069658SHuisong Li memcpy(rss_conf->key, hns3_hash_key, 1674e3069658SHuisong Li RTE_MIN(sizeof(hns3_hash_key), rss_act->key_len)); 1675e3069658SHuisong Li /* Need to record if user sets hash key. */ 1676e3069658SHuisong Li rss_conf->conf.key = rss_act->key; 1677e3069658SHuisong Li rss_conf->conf.key_len = rss_act->key_len; 1678e3069658SHuisong Li 1679e3069658SHuisong Li return 0; 1680e3069658SHuisong Li } 1681e3069658SHuisong Li 1682e3069658SHuisong Li static int 1683e3069658SHuisong Li hns3_flow_parse_queues(struct hns3_hw *hw, 1684e3069658SHuisong Li const struct rte_flow_action_rss *rss_act, 1685e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1686e3069658SHuisong Li struct rte_flow_error *error) 1687e3069658SHuisong Li { 1688e3069658SHuisong Li uint16_t i; 1689e3069658SHuisong Li 1690e3069658SHuisong Li if (rss_act->queue_num > hw->rss_ind_tbl_size) 1691e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, 1692e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1693e3069658SHuisong Li NULL, 1694e3069658SHuisong Li "queue number can not exceed RSS indirection table."); 1695e3069658SHuisong Li 1696e3069658SHuisong Li if (rss_act->queue_num > HNS3_RSS_QUEUES_BUFFER_NUM) 1697e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, 1698e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1699e3069658SHuisong Li NULL, 1700e3069658SHuisong Li "queue number configured exceeds queue buffer size driver supported"); 1701e3069658SHuisong Li 1702e3069658SHuisong Li for (i = 0; i < rss_act->queue_num; i++) { 1703e3069658SHuisong Li if (rss_act->queue[i] >= hw->alloc_rss_size) 1704e3069658SHuisong Li return rte_flow_error_set(error, EINVAL, 1705e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1706e3069658SHuisong Li NULL, 1707e3069658SHuisong Li "queue id must be less than queue number allocated to a TC"); 1708e3069658SHuisong Li } 1709e3069658SHuisong Li 1710e3069658SHuisong Li memcpy(rss_conf->queue, rss_act->queue, 1711e3069658SHuisong Li rss_act->queue_num * sizeof(rss_conf->queue[0])); 1712e3069658SHuisong Li rss_conf->conf.queue = rss_conf->queue; 1713e3069658SHuisong Li rss_conf->conf.queue_num = rss_act->queue_num; 1714e3069658SHuisong Li 1715e3069658SHuisong Li return 0; 1716e3069658SHuisong Li } 1717e3069658SHuisong Li 1718e3069658SHuisong Li static int 1719e3069658SHuisong Li hns3_flow_get_hw_pctype(struct hns3_hw *hw, 1720e3069658SHuisong Li const struct rte_flow_action_rss *rss_act, 1721e3069658SHuisong Li const struct hns3_hash_map_info *map, 1722e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1723e3069658SHuisong Li struct rte_flow_error *error) 1724e3069658SHuisong Li { 1725e3069658SHuisong Li uint64_t l3l4_src_dst, l3l4_refine, left_types; 1726e3069658SHuisong Li 1727e3069658SHuisong Li if (rss_act->types == 0) { 1728e3069658SHuisong Li /* Disable RSS hash of this packet type if types is zero. */ 1729e3069658SHuisong Li rss_conf->hw_pctypes |= map->hw_pctype; 1730e3069658SHuisong Li return 0; 1731e3069658SHuisong Li } 1732e3069658SHuisong Li 1733e3069658SHuisong Li /* 1734e3069658SHuisong Li * Can not have extra types except rss_pctype and l3l4_type in this map. 1735e3069658SHuisong Li */ 1736e3069658SHuisong Li left_types = ~map->rss_pctype & rss_act->types; 1737e3069658SHuisong Li if (left_types & ~map->l3l4_types) 1738e3069658SHuisong Li return rte_flow_error_set(error, EINVAL, 1739e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1740e3069658SHuisong Li "cannot set extra types."); 1741e3069658SHuisong Li 1742e3069658SHuisong Li l3l4_src_dst = left_types; 1743e3069658SHuisong Li /* L3/L4 SRC and DST shouldn't be specified at the same time. */ 1744e3069658SHuisong Li l3l4_refine = rte_eth_rss_hf_refine(l3l4_src_dst); 1745e3069658SHuisong Li if (l3l4_refine != l3l4_src_dst) 1746e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, 1747e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1748e3069658SHuisong Li "cannot specify L3_SRC/DST_ONLY or L4_SRC/DST_ONLY at the same."); 1749e3069658SHuisong Li 1750e3069658SHuisong Li if (!hns3_valid_ipv6_sctp_rss_types(hw, rss_act->types)) 1751e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, 1752e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1753e3069658SHuisong Li "hardware doesn't support to use L4 src/dst to hash for IPV6-SCTP."); 1754e3069658SHuisong Li 1755e3069658SHuisong Li rss_conf->hw_pctypes |= map->hw_pctype; 1756e3069658SHuisong Li 1757e3069658SHuisong Li return 0; 1758e3069658SHuisong Li } 1759e3069658SHuisong Li 1760e3069658SHuisong Li static int 1761e3069658SHuisong Li hns3_flow_parse_rss_types_by_ptype(struct hns3_hw *hw, 1762e3069658SHuisong Li const struct rte_flow_action_rss *rss_act, 1763e3069658SHuisong Li uint64_t pattern_type, 1764e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1765e3069658SHuisong Li struct rte_flow_error *error) 1766e3069658SHuisong Li { 1767e3069658SHuisong Li const struct hns3_hash_map_info *map; 1768e3069658SHuisong Li bool matched = false; 1769e3069658SHuisong Li uint16_t i; 1770e3069658SHuisong Li int ret; 1771e3069658SHuisong Li 1772e3069658SHuisong Li for (i = 0; i < RTE_DIM(hash_map_table); i++) { 1773e3069658SHuisong Li map = &hash_map_table[i]; 1774e3069658SHuisong Li if (map->pattern_type != pattern_type) { 1775e3069658SHuisong Li /* 1776e3069658SHuisong Li * If the target pattern type is already matched with 1777e3069658SHuisong Li * the one before this pattern in the hash map table, 1778e3069658SHuisong Li * no need to continue walk. 1779e3069658SHuisong Li */ 1780e3069658SHuisong Li if (matched) 1781e3069658SHuisong Li break; 1782e3069658SHuisong Li continue; 1783e3069658SHuisong Li } 1784e3069658SHuisong Li matched = true; 1785e3069658SHuisong Li 1786e3069658SHuisong Li /* 1787e3069658SHuisong Li * If pattern type is matched and the 'types' is zero, all packet flow 1788e3069658SHuisong Li * types related to this pattern type disable RSS hash. 1789e3069658SHuisong Li * Otherwise, RSS types must match the pattern type and cannot have no 1790e3069658SHuisong Li * extra or unsupported types. 1791e3069658SHuisong Li */ 1792e3069658SHuisong Li if (rss_act->types != 0 && !(map->rss_pctype & rss_act->types)) 1793e3069658SHuisong Li continue; 1794e3069658SHuisong Li 1795e3069658SHuisong Li ret = hns3_flow_get_hw_pctype(hw, rss_act, map, rss_conf, error); 1796e3069658SHuisong Li if (ret != 0) 1797e3069658SHuisong Li return ret; 1798e3069658SHuisong Li } 1799e3069658SHuisong Li 1800e3069658SHuisong Li if (rss_conf->hw_pctypes != 0) 1801e3069658SHuisong Li return 0; 1802e3069658SHuisong Li 1803e3069658SHuisong Li if (matched) 1804e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, 1805e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1806e3069658SHuisong Li NULL, "RSS types are unsupported"); 1807e3069658SHuisong Li 1808e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, 1809e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1810e3069658SHuisong Li NULL, "Pattern specified is unsupported"); 1811e3069658SHuisong Li } 1812e3069658SHuisong Li 1813e3069658SHuisong Li static uint64_t 1814e3069658SHuisong Li hns3_flow_get_all_hw_pctypes(uint64_t types) 1815e3069658SHuisong Li { 1816e3069658SHuisong Li uint64_t hw_pctypes = 0; 1817e3069658SHuisong Li uint16_t i; 1818e3069658SHuisong Li 1819e3069658SHuisong Li for (i = 0; i < RTE_DIM(hash_map_table); i++) { 1820e3069658SHuisong Li if (types & hash_map_table[i].rss_pctype) 1821e3069658SHuisong Li hw_pctypes |= hash_map_table[i].hw_pctype; 1822e3069658SHuisong Li } 1823e3069658SHuisong Li 1824e3069658SHuisong Li return hw_pctypes; 1825e3069658SHuisong Li } 1826e3069658SHuisong Li 1827e3069658SHuisong Li static int 1828e3069658SHuisong Li hns3_flow_parse_rss_types(struct hns3_hw *hw, 1829e3069658SHuisong Li const struct rte_flow_action_rss *rss_act, 1830e3069658SHuisong Li uint64_t pattern_type, 1831e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1832e3069658SHuisong Li struct rte_flow_error *error) 1833e3069658SHuisong Li { 1834e3069658SHuisong Li rss_conf->conf.types = rss_act->types; 1835e3069658SHuisong Li 1836e3069658SHuisong Li /* no pattern specified to set global RSS types. */ 1837e3069658SHuisong Li if (pattern_type == 0) { 1838eb3ef9e0SHuisong Li if (!hns3_check_rss_types_valid(hw, rss_act->types)) 1839eb3ef9e0SHuisong Li return rte_flow_error_set(error, EINVAL, 1840eb3ef9e0SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1841eb3ef9e0SHuisong Li NULL, "RSS types is invalid."); 1842e3069658SHuisong Li rss_conf->hw_pctypes = 1843e3069658SHuisong Li hns3_flow_get_all_hw_pctypes(rss_act->types); 1844e3069658SHuisong Li return 0; 1845e3069658SHuisong Li } 1846e3069658SHuisong Li 1847e3069658SHuisong Li return hns3_flow_parse_rss_types_by_ptype(hw, rss_act, pattern_type, 1848e3069658SHuisong Li rss_conf, error); 1849e3069658SHuisong Li } 1850e3069658SHuisong Li 1851e3069658SHuisong Li static int 1852e3069658SHuisong Li hns3_flow_parse_hash_global_conf(struct rte_eth_dev *dev, 1853e3069658SHuisong Li const struct rte_flow_action_rss *rss_act, 1854e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1855e3069658SHuisong Li struct rte_flow_error *error) 1856e3069658SHuisong Li { 1857e3069658SHuisong Li struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1858e3069658SHuisong Li int ret; 1859e3069658SHuisong Li 1860e3069658SHuisong Li ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error); 1861e3069658SHuisong Li if (ret != 0) 1862e3069658SHuisong Li return ret; 1863e3069658SHuisong Li 1864e3069658SHuisong Li if (rss_act->queue_num > 0) { 1865e3069658SHuisong Li ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error); 1866e3069658SHuisong Li if (ret != 0) 1867e3069658SHuisong Li return ret; 1868e3069658SHuisong Li } 1869e3069658SHuisong Li 1870e3069658SHuisong Li if (rss_act->key_len > 0) { 1871e3069658SHuisong Li ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error); 1872e3069658SHuisong Li if (ret != 0) 1873e3069658SHuisong Li return ret; 1874e3069658SHuisong Li } 1875e3069658SHuisong Li 1876e3069658SHuisong Li return hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type, 1877e3069658SHuisong Li rss_conf, error); 1878e3069658SHuisong Li } 1879e3069658SHuisong Li 1880e3069658SHuisong Li static int 1881e3069658SHuisong Li hns3_flow_parse_pattern_type(const struct rte_flow_item pattern[], 1882e3069658SHuisong Li uint64_t *ptype, struct rte_flow_error *error) 1883e3069658SHuisong Li { 1884e3069658SHuisong Li enum rte_flow_item_type pre_type = RTE_FLOW_ITEM_TYPE_VOID; 1885e3069658SHuisong Li const char *message = "Pattern specified isn't supported"; 1886e3069658SHuisong Li uint64_t item_hdr, pattern_hdrs = 0; 1887e3069658SHuisong Li enum rte_flow_item_type cur_type; 1888e3069658SHuisong Li 1889e3069658SHuisong Li for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { 1890e3069658SHuisong Li if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) 1891e3069658SHuisong Li continue; 1892e3069658SHuisong Li if (pattern->mask || pattern->spec || pattern->last) { 1893e3069658SHuisong Li message = "Header info shouldn't be specified"; 1894e3069658SHuisong Li goto unsup; 1895e3069658SHuisong Li } 1896e3069658SHuisong Li 1897e3069658SHuisong Li /* Check the sub-item allowed by the previous item . */ 1898e3069658SHuisong Li if (pre_type >= RTE_DIM(hash_pattern_next_allow_items) || 1899e3069658SHuisong Li !(hash_pattern_next_allow_items[pre_type] & 1900e3069658SHuisong Li BIT_ULL(pattern->type))) 1901e3069658SHuisong Li goto unsup; 1902e3069658SHuisong Li 1903e3069658SHuisong Li cur_type = pattern->type; 1904e3069658SHuisong Li /* Unsupported for current type being greater than array size. */ 1905e3069658SHuisong Li if (cur_type >= RTE_DIM(hash_pattern_item_header)) 1906e3069658SHuisong Li goto unsup; 1907e3069658SHuisong Li 1908e3069658SHuisong Li /* The value is zero, which means unsupported current header. */ 1909e3069658SHuisong Li item_hdr = hash_pattern_item_header[cur_type]; 1910e3069658SHuisong Li if (item_hdr == 0) 1911e3069658SHuisong Li goto unsup; 1912e3069658SHuisong Li 1913e3069658SHuisong Li /* Have duplicate pattern header. */ 1914e3069658SHuisong Li if (item_hdr & pattern_hdrs) 1915e3069658SHuisong Li goto unsup; 1916e3069658SHuisong Li pre_type = cur_type; 1917e3069658SHuisong Li pattern_hdrs |= item_hdr; 1918e3069658SHuisong Li } 1919e3069658SHuisong Li 1920e3069658SHuisong Li if (pattern_hdrs != 0) { 1921e3069658SHuisong Li *ptype = pattern_hdrs; 1922e3069658SHuisong Li return 0; 1923e3069658SHuisong Li } 1924e3069658SHuisong Li 1925e3069658SHuisong Li unsup: 1926e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 1927e3069658SHuisong Li pattern, message); 1928e3069658SHuisong Li } 1929e3069658SHuisong Li 1930e3069658SHuisong Li static int 1931e3069658SHuisong Li hns3_flow_parse_pattern_act(struct rte_eth_dev *dev, 1932e3069658SHuisong Li const struct rte_flow_item pattern[], 1933e3069658SHuisong Li const struct rte_flow_action_rss *rss_act, 1934e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1935e3069658SHuisong Li struct rte_flow_error *error) 1936e3069658SHuisong Li { 1937e3069658SHuisong Li struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1938e3069658SHuisong Li int ret; 1939e3069658SHuisong Li 1940e3069658SHuisong Li ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error); 1941e3069658SHuisong Li if (ret != 0) 1942e3069658SHuisong Li return ret; 1943e3069658SHuisong Li 1944e3069658SHuisong Li if (rss_act->key_len > 0) { 1945e3069658SHuisong Li ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error); 1946e3069658SHuisong Li if (ret != 0) 1947e3069658SHuisong Li return ret; 1948e3069658SHuisong Li } 1949e3069658SHuisong Li 1950e3069658SHuisong Li if (rss_act->queue_num > 0) { 1951e3069658SHuisong Li ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error); 1952e3069658SHuisong Li if (ret != 0) 1953e3069658SHuisong Li return ret; 1954e3069658SHuisong Li } 1955e3069658SHuisong Li 1956e3069658SHuisong Li ret = hns3_flow_parse_pattern_type(pattern, &rss_conf->pattern_type, 1957e3069658SHuisong Li error); 1958e3069658SHuisong Li if (ret != 0) 1959e3069658SHuisong Li return ret; 1960e3069658SHuisong Li 1961e3069658SHuisong Li ret = hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type, 1962e3069658SHuisong Li rss_conf, error); 1963e3069658SHuisong Li if (ret != 0) 1964e3069658SHuisong Li return ret; 1965e3069658SHuisong Li 1966e3069658SHuisong Li if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT || 1967e3069658SHuisong Li rss_act->key_len > 0 || rss_act->queue_num > 0) 1968e3069658SHuisong Li hns3_warn(hw, "hash func, key and queues are global config, which work for all flow types. " 1969e3069658SHuisong Li "Recommend: don't set them together with pattern."); 1970e3069658SHuisong Li 1971e3069658SHuisong Li return 0; 1972e3069658SHuisong Li } 1973e3069658SHuisong Li 1974e3069658SHuisong Li static bool 1975e3069658SHuisong Li hns3_rss_action_is_dup(struct hns3_hw *hw, 1976e3069658SHuisong Li const struct hns3_flow_rss_conf *conf) 1977e3069658SHuisong Li { 1978e3069658SHuisong Li struct hns3_rss_conf_ele *filter; 1979e3069658SHuisong Li 1980e3069658SHuisong Li TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) { 1981e3069658SHuisong Li if (conf->pattern_type != filter->filter_info.pattern_type) 1982e3069658SHuisong Li continue; 1983e3069658SHuisong Li 1984e3069658SHuisong Li if (hns3_action_rss_same(&filter->filter_info.conf, &conf->conf)) 1985e3069658SHuisong Li return true; 1986e3069658SHuisong Li } 1987e3069658SHuisong Li 1988e3069658SHuisong Li return false; 1989e3069658SHuisong Li } 1990e3069658SHuisong Li 1991c37ca66fSWei Hu (Xavier) /* 19927be78d02SJosh Soref * This function is used to parse rss action validation. 1993c37ca66fSWei Hu (Xavier) */ 1994c37ca66fSWei Hu (Xavier) static int 1995c37ca66fSWei Hu (Xavier) hns3_parse_rss_filter(struct rte_eth_dev *dev, 1996e3069658SHuisong Li const struct rte_flow_item pattern[], 1997c37ca66fSWei Hu (Xavier) const struct rte_flow_action *actions, 1998e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 1999c37ca66fSWei Hu (Xavier) struct rte_flow_error *error) 2000c37ca66fSWei Hu (Xavier) { 2001c37ca66fSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 2002e3069658SHuisong Li const struct rte_flow_action_rss *rss_act; 2003c37ca66fSWei Hu (Xavier) const struct rte_flow_action *act; 2004e3069658SHuisong Li const struct rte_flow_item *pat; 2005e3069658SHuisong Li struct hns3_hw *hw = &hns->hw; 2006e3069658SHuisong Li uint32_t index = 0; 2007e3069658SHuisong Li int ret; 2008c37ca66fSWei Hu (Xavier) 2009e3069658SHuisong Li NEXT_ITEM_OF_ACTION(act, actions, index); 2010e3069658SHuisong Li if (actions[1].type != RTE_FLOW_ACTION_TYPE_END) 2011c37ca66fSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 2012c37ca66fSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ACTION, 2013e3069658SHuisong Li &actions[1], 2014e3069658SHuisong Li "Only support one action for RSS."); 2015e3069658SHuisong Li 2016e3069658SHuisong Li rss_act = (const struct rte_flow_action_rss *)act->conf; 2017e3069658SHuisong Li if (rss_act == NULL) { 2018e3069658SHuisong Li return rte_flow_error_set(error, EINVAL, 2019e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2020e3069658SHuisong Li act, "lost RSS action configuration"); 2021e3069658SHuisong Li } 2022e3069658SHuisong Li 2023e3069658SHuisong Li if (rss_act->level != 0) 2024e3069658SHuisong Li return rte_flow_error_set(error, ENOTSUP, 2025e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2026e3069658SHuisong Li act, 2027e3069658SHuisong Li "RSS level is not supported"); 2028e3069658SHuisong Li 2029e3069658SHuisong Li index = 0; 2030e3069658SHuisong Li NEXT_ITEM_OF_PATTERN(pat, pattern, index); 2031e3069658SHuisong Li if (pat[0].type == RTE_FLOW_ITEM_TYPE_END) { 2032e3069658SHuisong Li rss_conf->pattern_type = 0; 2033e3069658SHuisong Li ret = hns3_flow_parse_hash_global_conf(dev, rss_act, 2034e3069658SHuisong Li rss_conf, error); 2035e3069658SHuisong Li } else { 2036e3069658SHuisong Li ret = hns3_flow_parse_pattern_act(dev, pat, rss_act, 2037e3069658SHuisong Li rss_conf, error); 2038e3069658SHuisong Li } 2039e3069658SHuisong Li if (ret != 0) 2040e3069658SHuisong Li return ret; 2041e3069658SHuisong Li 2042e3069658SHuisong Li if (hns3_rss_action_is_dup(hw, rss_conf)) 2043e3069658SHuisong Li return rte_flow_error_set(error, EINVAL, 2044e3069658SHuisong Li RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2045e3069658SHuisong Li act, "duplicate RSS rule"); 2046c37ca66fSWei Hu (Xavier) 2047c37ca66fSWei Hu (Xavier) return 0; 2048c37ca66fSWei Hu (Xavier) } 2049c37ca66fSWei Hu (Xavier) 2050c37ca66fSWei Hu (Xavier) static int 20511042ed40SHuisong Li hns3_update_indir_table(struct hns3_hw *hw, 2052c37ca66fSWei Hu (Xavier) const struct rte_flow_action_rss *conf, uint16_t num) 2053c37ca66fSWei Hu (Xavier) { 20540fce2c46SLijun Ou uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; 20555e76dfc3SHuisong Li uint16_t j; 2056c37ca66fSWei Hu (Xavier) uint32_t i; 2057c37ca66fSWei Hu (Xavier) 2058c37ca66fSWei Hu (Xavier) /* Fill in redirection table */ 20590fce2c46SLijun Ou for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) { 2060c37ca66fSWei Hu (Xavier) j %= num; 20615e76dfc3SHuisong Li if (conf->queue[j] >= hw->alloc_rss_size) { 20625e76dfc3SHuisong Li hns3_err(hw, "queue id(%u) set to redirection table " 20635e76dfc3SHuisong Li "exceeds queue number(%u) allocated to a TC.", 20645e76dfc3SHuisong Li conf->queue[j], hw->alloc_rss_size); 2065c37ca66fSWei Hu (Xavier) return -EINVAL; 2066c37ca66fSWei Hu (Xavier) } 20679a7d3af2SHuisong Li indir_tbl[i] = conf->queue[j]; 2068c37ca66fSWei Hu (Xavier) } 2069c37ca66fSWei Hu (Xavier) 20700fce2c46SLijun Ou return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size); 2071c37ca66fSWei Hu (Xavier) } 2072c37ca66fSWei Hu (Xavier) 2073e3069658SHuisong Li static uint64_t 2074e3069658SHuisong Li hns3_flow_get_pctype_tuple_mask(uint64_t hw_pctype) 20751c3aeb2bSHuisong Li { 2076e3069658SHuisong Li uint64_t tuple_mask = 0; 2077e3069658SHuisong Li uint16_t i; 20781c3aeb2bSHuisong Li 2079e3069658SHuisong Li for (i = 0; i < RTE_DIM(hash_map_table); i++) { 2080e3069658SHuisong Li if (hw_pctype == hash_map_table[i].hw_pctype) { 2081e3069658SHuisong Li tuple_mask = hash_map_table[i].tuple_mask; 2082e3069658SHuisong Li break; 2083e3069658SHuisong Li } 2084e3069658SHuisong Li } 20851c3aeb2bSHuisong Li 2086e3069658SHuisong Li return tuple_mask; 20871c3aeb2bSHuisong Li } 20881c3aeb2bSHuisong Li 20891c3aeb2bSHuisong Li static int 2090e3069658SHuisong Li hns3_flow_set_rss_ptype_tuple(struct hns3_hw *hw, 2091e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf) 2092e3069658SHuisong Li { 2093e3069658SHuisong Li uint64_t old_tuple_fields, new_tuple_fields; 2094e3069658SHuisong Li uint64_t hw_pctypes, tuples, tuple_mask = 0; 2095e3069658SHuisong Li bool cfg_global_tuple; 2096e3069658SHuisong Li int ret; 2097e3069658SHuisong Li 2098e3069658SHuisong Li cfg_global_tuple = (rss_conf->pattern_type == 0); 2099e3069658SHuisong Li if (!cfg_global_tuple) { 2100e3069658SHuisong Li /* 2101e3069658SHuisong Li * To ensure that different packets do not affect each other, 2102e3069658SHuisong Li * we have to first read all tuple fields, and then only modify 2103e3069658SHuisong Li * the tuples for the specified packet type. 2104e3069658SHuisong Li */ 2105e3069658SHuisong Li ret = hns3_get_rss_tuple_field(hw, &old_tuple_fields); 2106e3069658SHuisong Li if (ret != 0) 2107e3069658SHuisong Li return ret; 2108e3069658SHuisong Li 2109e3069658SHuisong Li new_tuple_fields = old_tuple_fields; 2110e3069658SHuisong Li hw_pctypes = rss_conf->hw_pctypes; 2111e3069658SHuisong Li while (hw_pctypes > 0) { 2112e3069658SHuisong Li uint32_t idx = rte_bsf64(hw_pctypes); 2113e3069658SHuisong Li uint64_t pctype = BIT_ULL(idx); 2114e3069658SHuisong Li 2115e3069658SHuisong Li tuple_mask = hns3_flow_get_pctype_tuple_mask(pctype); 2116eb3ef9e0SHuisong Li tuples = hns3_rss_calc_tuple_filed(rss_conf->conf.types); 2117e3069658SHuisong Li new_tuple_fields &= ~tuple_mask; 2118e3069658SHuisong Li new_tuple_fields |= tuples; 2119e3069658SHuisong Li hw_pctypes &= ~pctype; 2120e3069658SHuisong Li } 2121e3069658SHuisong Li } else { 2122e3069658SHuisong Li new_tuple_fields = 2123eb3ef9e0SHuisong Li hns3_rss_calc_tuple_filed(rss_conf->conf.types); 2124e3069658SHuisong Li } 2125e3069658SHuisong Li 2126e3069658SHuisong Li ret = hns3_set_rss_tuple_field(hw, new_tuple_fields); 2127e3069658SHuisong Li if (ret != 0) 2128e3069658SHuisong Li return ret; 2129e3069658SHuisong Li 2130156496daSJie Hai if (!cfg_global_tuple) 2131e3069658SHuisong Li hns3_info(hw, "RSS tuple fields changed from 0x%" PRIx64 " to 0x%" PRIx64, 2132e3069658SHuisong Li old_tuple_fields, new_tuple_fields); 2133e3069658SHuisong Li 2134e3069658SHuisong Li return 0; 2135e3069658SHuisong Li } 2136e3069658SHuisong Li 2137e3069658SHuisong Li static int 2138e3069658SHuisong Li hns3_config_rss_filter(struct hns3_hw *hw, 2139e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf) 2140c37ca66fSWei Hu (Xavier) { 2141218a119aSHuisong Li struct rte_flow_action_rss *rss_act; 2142c37ca66fSWei Hu (Xavier) int ret; 2143c37ca66fSWei Hu (Xavier) 2144e3069658SHuisong Li rss_act = &rss_conf->conf; 2145e3069658SHuisong Li if (rss_act->queue_num > 0) { 2146e3069658SHuisong Li ret = hns3_update_indir_table(hw, rss_act, rss_act->queue_num); 2147e3069658SHuisong Li if (ret) { 2148e3069658SHuisong Li hns3_err(hw, "set queues action failed, ret = %d", ret); 2149a3579962SHuisong Li return ret; 2150b03ddaadSWei Hu (Xavier) } 2151e3069658SHuisong Li } 2152c37ca66fSWei Hu (Xavier) 2153e3069658SHuisong Li if (rss_act->key_len > 0 || 2154e3069658SHuisong Li rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT) { 2155e3069658SHuisong Li ret = hns3_update_rss_algo_key(hw, rss_act->func, rss_conf->key, 2156e3069658SHuisong Li rss_act->key_len); 2157e3069658SHuisong Li if (ret != 0) { 2158e3069658SHuisong Li hns3_err(hw, "set func or hash key action failed, ret = %d", 2159e3069658SHuisong Li ret); 2160e3069658SHuisong Li return ret; 2161e3069658SHuisong Li } 2162e3069658SHuisong Li } 2163e3069658SHuisong Li 2164e3069658SHuisong Li if (rss_conf->hw_pctypes > 0) { 2165e3069658SHuisong Li ret = hns3_flow_set_rss_ptype_tuple(hw, rss_conf); 2166e3069658SHuisong Li if (ret != 0) { 2167e3069658SHuisong Li hns3_err(hw, "set types action failed, ret = %d", ret); 2168e3069658SHuisong Li return ret; 2169e3069658SHuisong Li } 2170e3069658SHuisong Li } 2171e3069658SHuisong Li 2172e3069658SHuisong Li return 0; 2173c37ca66fSWei Hu (Xavier) } 2174c37ca66fSWei Hu (Xavier) 2175c37ca66fSWei Hu (Xavier) static int 2176c37ca66fSWei Hu (Xavier) hns3_clear_rss_filter(struct rte_eth_dev *dev) 2177c37ca66fSWei Hu (Xavier) { 2178c37ca66fSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 2179ec674cb7SLijun Ou struct hns3_rss_conf_ele *rss_filter_ptr; 2180c37ca66fSWei Hu (Xavier) struct hns3_hw *hw = &hns->hw; 2181c37ca66fSWei Hu (Xavier) 21829b290a3aSChengwen Feng rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); 2183ec674cb7SLijun Ou while (rss_filter_ptr) { 21849b290a3aSChengwen Feng TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); 2185ec674cb7SLijun Ou rte_free(rss_filter_ptr); 21869b290a3aSChengwen Feng rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); 2187ec674cb7SLijun Ou } 2188ec674cb7SLijun Ou 2189e3069658SHuisong Li return hns3_config_rss(hns); 2190ec674cb7SLijun Ou } 2191ec674cb7SLijun Ou 2192e3069658SHuisong Li static int 2193e3069658SHuisong Li hns3_reconfig_all_rss_filter(struct hns3_hw *hw) 2194e3069658SHuisong Li { 2195e3069658SHuisong Li struct hns3_rss_conf_ele *filter; 2196e3069658SHuisong Li uint32_t rule_no = 0; 2197e3069658SHuisong Li int ret; 2198e3069658SHuisong Li 2199e3069658SHuisong Li TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) { 2200e3069658SHuisong Li ret = hns3_config_rss_filter(hw, &filter->filter_info); 2201e3069658SHuisong Li if (ret != 0) { 2202e3069658SHuisong Li hns3_err(hw, "config %uth RSS filter failed, ret = %d", 2203e3069658SHuisong Li rule_no, ret); 2204ec674cb7SLijun Ou return ret; 2205c37ca66fSWei Hu (Xavier) } 2206e3069658SHuisong Li rule_no++; 2207e3069658SHuisong Li } 2208e3069658SHuisong Li 2209e3069658SHuisong Li return 0; 2210e3069658SHuisong Li } 2211c37ca66fSWei Hu (Xavier) 221243d8adf3SHuisong Li static int 22131042ed40SHuisong Li hns3_restore_rss_filter(struct hns3_hw *hw) 2214920be799SLijun Ou { 2215e3069658SHuisong Li int ret; 2216920be799SLijun Ou 2217a3579962SHuisong Li pthread_mutex_lock(&hw->flows_lock); 2218e3069658SHuisong Li ret = hns3_reconfig_all_rss_filter(hw); 2219a3579962SHuisong Li pthread_mutex_unlock(&hw->flows_lock); 2220a3579962SHuisong Li 2221705a5080SHuisong Li return ret; 2222920be799SLijun Ou } 2223920be799SLijun Ou 222443d8adf3SHuisong Li int 22251042ed40SHuisong Li hns3_restore_filter(struct hns3_adapter *hns) 222643d8adf3SHuisong Li { 22271042ed40SHuisong Li struct hns3_hw *hw = &hns->hw; 2228860ed851SHuisong Li int ret; 2229860ed851SHuisong Li 2230860ed851SHuisong Li ret = hns3_restore_all_fdir_filter(hns); 2231860ed851SHuisong Li if (ret != 0) 2232860ed851SHuisong Li return ret; 2233860ed851SHuisong Li 22341042ed40SHuisong Li return hns3_restore_rss_filter(hw); 223543d8adf3SHuisong Li } 223643d8adf3SHuisong Li 2237fcba820dSWei Hu (Xavier) static int 2238fcba820dSWei Hu (Xavier) hns3_flow_args_check(const struct rte_flow_attr *attr, 2239fcba820dSWei Hu (Xavier) const struct rte_flow_item pattern[], 2240fcba820dSWei Hu (Xavier) const struct rte_flow_action actions[], 2241fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 2242fcba820dSWei Hu (Xavier) { 2243fcba820dSWei Hu (Xavier) if (pattern == NULL) 2244fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 2245fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ITEM_NUM, 2246fcba820dSWei Hu (Xavier) NULL, "NULL pattern."); 2247fcba820dSWei Hu (Xavier) 2248fcba820dSWei Hu (Xavier) if (actions == NULL) 2249fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 2250fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ACTION_NUM, 2251fcba820dSWei Hu (Xavier) NULL, "NULL action."); 2252fcba820dSWei Hu (Xavier) 2253fcba820dSWei Hu (Xavier) if (attr == NULL) 2254fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 2255fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ATTR, 2256fcba820dSWei Hu (Xavier) NULL, "NULL attribute."); 2257fcba820dSWei Hu (Xavier) 2258fcba820dSWei Hu (Xavier) return hns3_check_attr(attr, error); 2259fcba820dSWei Hu (Xavier) } 2260fcba820dSWei Hu (Xavier) 2261fcba820dSWei Hu (Xavier) /* 2262fcba820dSWei Hu (Xavier) * Check if the flow rule is supported by hns3. 22637be78d02SJosh Soref * It only checks the format. Don't guarantee the rule can be programmed into 2264fcba820dSWei Hu (Xavier) * the HW. Because there can be no enough room for the rule. 2265fcba820dSWei Hu (Xavier) */ 2266fcba820dSWei Hu (Xavier) static int 2267fcba820dSWei Hu (Xavier) hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 2268fcba820dSWei Hu (Xavier) const struct rte_flow_item pattern[], 2269fcba820dSWei Hu (Xavier) const struct rte_flow_action actions[], 2270e3069658SHuisong Li struct rte_flow_error *error, 2271e3069658SHuisong Li struct hns3_filter_info *filter_info) 2272fcba820dSWei Hu (Xavier) { 2273e3069658SHuisong Li union hns3_filter_conf *conf; 2274fcba820dSWei Hu (Xavier) int ret; 2275fcba820dSWei Hu (Xavier) 2276fcba820dSWei Hu (Xavier) ret = hns3_flow_args_check(attr, pattern, actions, error); 2277fcba820dSWei Hu (Xavier) if (ret) 2278fcba820dSWei Hu (Xavier) return ret; 2279fcba820dSWei Hu (Xavier) 2280e3069658SHuisong Li hns3_parse_filter_type(pattern, actions, filter_info); 2281e3069658SHuisong Li conf = &filter_info->conf; 2282e3069658SHuisong Li if (filter_info->type == RTE_ETH_FILTER_HASH) 2283e3069658SHuisong Li return hns3_parse_rss_filter(dev, pattern, actions, 2284e3069658SHuisong Li &conf->rss_conf, error); 2285c37ca66fSWei Hu (Xavier) 2286ac72aae6SDengdui Huang return hns3_parse_fdir_filter(dev, attr, pattern, actions, 2287e3069658SHuisong Li &conf->fdir_conf, error); 2288e3069658SHuisong Li } 2289e3069658SHuisong Li 2290e3069658SHuisong Li static int 2291e3069658SHuisong Li hns3_flow_rebuild_all_rss_filter(struct hns3_adapter *hns) 2292e3069658SHuisong Li { 2293e3069658SHuisong Li struct hns3_hw *hw = &hns->hw; 2294e3069658SHuisong Li int ret; 2295e3069658SHuisong Li 2296e3069658SHuisong Li ret = hns3_config_rss(hns); 2297e3069658SHuisong Li if (ret != 0) { 2298e3069658SHuisong Li hns3_err(hw, "restore original RSS configuration failed, ret = %d.", 2299e3069658SHuisong Li ret); 2300e3069658SHuisong Li return ret; 2301e3069658SHuisong Li } 2302e3069658SHuisong Li ret = hns3_reconfig_all_rss_filter(hw); 2303e3069658SHuisong Li if (ret != 0) 2304e3069658SHuisong Li hns3_err(hw, "rebuild all RSS filter failed, ret = %d.", ret); 2305e3069658SHuisong Li 2306e3069658SHuisong Li return ret; 2307fcba820dSWei Hu (Xavier) } 2308fcba820dSWei Hu (Xavier) 2309c4849917SHuisong Li static int 2310c4849917SHuisong Li hns3_flow_create_rss_rule(struct rte_eth_dev *dev, 2311e3069658SHuisong Li struct hns3_flow_rss_conf *rss_conf, 2312c4849917SHuisong Li struct rte_flow *flow) 2313c4849917SHuisong Li { 2314c4849917SHuisong Li struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2315e3069658SHuisong Li struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2316c4849917SHuisong Li struct hns3_rss_conf_ele *rss_filter_ptr; 2317b93ad0ccSHuisong Li struct hns3_flow_rss_conf *new_conf; 2318e3069658SHuisong Li struct rte_flow_action_rss *rss_act; 2319c4849917SHuisong Li int ret; 2320c4849917SHuisong Li 2321c4849917SHuisong Li rss_filter_ptr = rte_zmalloc("hns3 rss filter", 2322c4849917SHuisong Li sizeof(struct hns3_rss_conf_ele), 0); 2323c4849917SHuisong Li if (rss_filter_ptr == NULL) { 2324c4849917SHuisong Li hns3_err(hw, "failed to allocate hns3_rss_filter memory"); 2325c4849917SHuisong Li return -ENOMEM; 2326c4849917SHuisong Li } 2327c4849917SHuisong Li 2328815c7db5SHuisong Li new_conf = &rss_filter_ptr->filter_info; 2329e3069658SHuisong Li memcpy(new_conf, rss_conf, sizeof(*new_conf)); 2330e3069658SHuisong Li rss_act = &new_conf->conf; 2331e3069658SHuisong Li if (rss_act->queue_num > 0) 2332815c7db5SHuisong Li new_conf->conf.queue = new_conf->queue; 2333e3069658SHuisong Li /* 2334e3069658SHuisong Li * There are two ways to deliver hash key action: 2335e3069658SHuisong Li * 1> 'key_len' is greater than zero and 'key' isn't NULL. 2336e3069658SHuisong Li * 2> 'key_len' is greater than zero, but 'key' is NULL. 2337e3069658SHuisong Li * For case 2, we need to keep 'key' of the new_conf is NULL so as to 2338e3069658SHuisong Li * inherit the configuration from user in case of failing to verify 2339e3069658SHuisong Li * duplicate rule later. 2340e3069658SHuisong Li */ 2341e3069658SHuisong Li if (rss_act->key_len > 0 && rss_act->key != NULL) 2342815c7db5SHuisong Li new_conf->conf.key = new_conf->key; 2343815c7db5SHuisong Li 2344e3069658SHuisong Li ret = hns3_config_rss_filter(hw, new_conf); 2345c4849917SHuisong Li if (ret != 0) { 2346c4849917SHuisong Li rte_free(rss_filter_ptr); 2347e3069658SHuisong Li (void)hns3_flow_rebuild_all_rss_filter(hns); 2348c4849917SHuisong Li return ret; 2349c4849917SHuisong Li } 23500d81da25SHuisong Li 2351c4849917SHuisong Li TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries); 2352c4849917SHuisong Li flow->rule = rss_filter_ptr; 2353c4849917SHuisong Li flow->filter_type = RTE_ETH_FILTER_HASH; 2354c4849917SHuisong Li 2355c4849917SHuisong Li return 0; 2356c4849917SHuisong Li } 2357c4849917SHuisong Li 2358c4849917SHuisong Li static int 2359c4849917SHuisong Li hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, 2360e3069658SHuisong Li struct hns3_fdir_rule *fdir_rule, 2361c4849917SHuisong Li struct rte_flow_error *error, 2362c4849917SHuisong Li struct rte_flow *flow) 2363c4849917SHuisong Li { 2364c4849917SHuisong Li struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2365c4849917SHuisong Li struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2366c4849917SHuisong Li struct hns3_fdir_rule_ele *fdir_rule_ptr; 2367fdfcb94dSChengwen Feng bool indir; 2368c4849917SHuisong Li int ret; 2369c4849917SHuisong Li 2370e3069658SHuisong Li indir = !!(fdir_rule->flags & HNS3_RULE_FLAG_COUNTER_INDIR); 2371e3069658SHuisong Li if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) { 2372e3069658SHuisong Li ret = hns3_counter_new(dev, indir, fdir_rule->act_cnt.id, 2373fdfcb94dSChengwen Feng error); 2374c4849917SHuisong Li if (ret != 0) 2375c4849917SHuisong Li return ret; 2376c4849917SHuisong Li 2377e3069658SHuisong Li flow->counter_id = fdir_rule->act_cnt.id; 2378c4849917SHuisong Li } 2379c4849917SHuisong Li 2380c4849917SHuisong Li fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", 2381c4849917SHuisong Li sizeof(struct hns3_fdir_rule_ele), 0); 2382c4849917SHuisong Li if (fdir_rule_ptr == NULL) { 2383c4849917SHuisong Li hns3_err(hw, "failed to allocate fdir_rule memory."); 2384c4849917SHuisong Li ret = -ENOMEM; 2385c4849917SHuisong Li goto err_malloc; 2386c4849917SHuisong Li } 2387c4849917SHuisong Li 2388c4849917SHuisong Li /* 2389c4849917SHuisong Li * After all the preceding tasks are successfully configured, configure 2390c4849917SHuisong Li * rules to the hardware to simplify the rollback of rules in the 2391c4849917SHuisong Li * hardware. 2392c4849917SHuisong Li */ 2393e3069658SHuisong Li ret = hns3_fdir_filter_program(hns, fdir_rule, false); 2394c4849917SHuisong Li if (ret != 0) 2395c4849917SHuisong Li goto err_fdir_filter; 2396c4849917SHuisong Li 2397e3069658SHuisong Li memcpy(&fdir_rule_ptr->fdir_conf, fdir_rule, 2398c4849917SHuisong Li sizeof(struct hns3_fdir_rule)); 2399c4849917SHuisong Li TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries); 2400c4849917SHuisong Li flow->rule = fdir_rule_ptr; 2401c4849917SHuisong Li flow->filter_type = RTE_ETH_FILTER_FDIR; 2402c4849917SHuisong Li 2403c4849917SHuisong Li return 0; 2404c4849917SHuisong Li 2405c4849917SHuisong Li err_fdir_filter: 2406c4849917SHuisong Li rte_free(fdir_rule_ptr); 2407c4849917SHuisong Li err_malloc: 2408e3069658SHuisong Li if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) 2409e3069658SHuisong Li hns3_counter_release(dev, fdir_rule->act_cnt.id); 2410c4849917SHuisong Li 2411c4849917SHuisong Li return ret; 2412c4849917SHuisong Li } 2413c4849917SHuisong Li 2414fcba820dSWei Hu (Xavier) /* 2415fcba820dSWei Hu (Xavier) * Create or destroy a flow rule. 2416fcba820dSWei Hu (Xavier) * Theorically one rule can match more than one filters. 2417f77b3c3aSLijun Ou * We will let it use the filter which it hit first. 2418fcba820dSWei Hu (Xavier) * So, the sequence matters. 2419fcba820dSWei Hu (Xavier) */ 2420fcba820dSWei Hu (Xavier) static struct rte_flow * 2421fcba820dSWei Hu (Xavier) hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 2422fcba820dSWei Hu (Xavier) const struct rte_flow_item pattern[], 2423fcba820dSWei Hu (Xavier) const struct rte_flow_action actions[], 2424fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 2425fcba820dSWei Hu (Xavier) { 2426fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 2427e3069658SHuisong Li struct hns3_filter_info filter_info = {0}; 2428fcba820dSWei Hu (Xavier) struct hns3_flow_mem *flow_node; 2429e3069658SHuisong Li struct hns3_hw *hw = &hns->hw; 2430e3069658SHuisong Li union hns3_filter_conf *conf; 2431fcba820dSWei Hu (Xavier) struct rte_flow *flow; 2432fcba820dSWei Hu (Xavier) int ret; 2433fcba820dSWei Hu (Xavier) 2434e3069658SHuisong Li ret = hns3_flow_validate(dev, attr, pattern, actions, error, 2435e3069658SHuisong Li &filter_info); 2436fcba820dSWei Hu (Xavier) if (ret) 2437fcba820dSWei Hu (Xavier) return NULL; 2438fcba820dSWei Hu (Xavier) 2439fcba820dSWei Hu (Xavier) flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0); 2440fcba820dSWei Hu (Xavier) if (flow == NULL) { 2441ee160716SLijun Ou rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, 2442ee160716SLijun Ou NULL, "Failed to allocate flow memory"); 2443fcba820dSWei Hu (Xavier) return NULL; 2444fcba820dSWei Hu (Xavier) } 2445fcba820dSWei Hu (Xavier) flow_node = rte_zmalloc("hns3 flow node", 2446fcba820dSWei Hu (Xavier) sizeof(struct hns3_flow_mem), 0); 2447fcba820dSWei Hu (Xavier) if (flow_node == NULL) { 2448ee160716SLijun Ou rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, 2449ee160716SLijun Ou NULL, "Failed to allocate flow list memory"); 2450fcba820dSWei Hu (Xavier) rte_free(flow); 2451fcba820dSWei Hu (Xavier) return NULL; 2452fcba820dSWei Hu (Xavier) } 2453fcba820dSWei Hu (Xavier) 2454fcba820dSWei Hu (Xavier) flow_node->flow = flow; 2455e3069658SHuisong Li conf = &filter_info.conf; 24569b290a3aSChengwen Feng TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries); 2457e3069658SHuisong Li if (filter_info.type == RTE_ETH_FILTER_HASH) 2458e3069658SHuisong Li ret = hns3_flow_create_rss_rule(dev, &conf->rss_conf, flow); 2459c4849917SHuisong Li else 2460e3069658SHuisong Li ret = hns3_flow_create_fdir_rule(dev, &conf->fdir_conf, 2461c4849917SHuisong Li error, flow); 2462c4849917SHuisong Li if (ret == 0) 2463c37ca66fSWei Hu (Xavier) return flow; 2464c37ca66fSWei Hu (Xavier) 2465fcba820dSWei Hu (Xavier) rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2466fcba820dSWei Hu (Xavier) "Failed to create flow"); 24679b290a3aSChengwen Feng TAILQ_REMOVE(&hw->flow_list, flow_node, entries); 2468fcba820dSWei Hu (Xavier) rte_free(flow_node); 2469fcba820dSWei Hu (Xavier) rte_free(flow); 2470c4849917SHuisong Li 2471fcba820dSWei Hu (Xavier) return NULL; 2472fcba820dSWei Hu (Xavier) } 2473fcba820dSWei Hu (Xavier) 2474fcba820dSWei Hu (Xavier) /* Destroy a flow rule on hns3. */ 2475fcba820dSWei Hu (Xavier) static int 2476fcba820dSWei Hu (Xavier) hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, 2477fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 2478fcba820dSWei Hu (Xavier) { 2479fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 2480fcba820dSWei Hu (Xavier) struct hns3_fdir_rule_ele *fdir_rule_ptr; 2481c37ca66fSWei Hu (Xavier) struct hns3_rss_conf_ele *rss_filter_ptr; 2482fcba820dSWei Hu (Xavier) struct hns3_flow_mem *flow_node; 2483fcba820dSWei Hu (Xavier) enum rte_filter_type filter_type; 2484fcba820dSWei Hu (Xavier) struct hns3_fdir_rule fdir_rule; 24859b290a3aSChengwen Feng struct hns3_hw *hw = &hns->hw; 2486fcba820dSWei Hu (Xavier) int ret; 2487fcba820dSWei Hu (Xavier) 2488fcba820dSWei Hu (Xavier) if (flow == NULL) 2489fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 2490fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_HANDLE, 2491fcba820dSWei Hu (Xavier) flow, "Flow is NULL"); 24921d82001aSLijun Ou 2493fcba820dSWei Hu (Xavier) filter_type = flow->filter_type; 2494fcba820dSWei Hu (Xavier) switch (filter_type) { 2495fcba820dSWei Hu (Xavier) case RTE_ETH_FILTER_FDIR: 2496fcba820dSWei Hu (Xavier) fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule; 2497fcba820dSWei Hu (Xavier) memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf, 2498fcba820dSWei Hu (Xavier) sizeof(struct hns3_fdir_rule)); 2499fcba820dSWei Hu (Xavier) 2500fcba820dSWei Hu (Xavier) ret = hns3_fdir_filter_program(hns, &fdir_rule, true); 2501fcba820dSWei Hu (Xavier) if (ret) 2502fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EIO, 2503fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_HANDLE, 2504fcba820dSWei Hu (Xavier) flow, 2505fcba820dSWei Hu (Xavier) "Destroy FDIR fail.Try again"); 2506fcba820dSWei Hu (Xavier) if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) 2507fcba820dSWei Hu (Xavier) hns3_counter_release(dev, fdir_rule.act_cnt.id); 25089b290a3aSChengwen Feng TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries); 2509fcba820dSWei Hu (Xavier) rte_free(fdir_rule_ptr); 2510fcba820dSWei Hu (Xavier) fdir_rule_ptr = NULL; 2511fcba820dSWei Hu (Xavier) break; 2512c37ca66fSWei Hu (Xavier) case RTE_ETH_FILTER_HASH: 2513c37ca66fSWei Hu (Xavier) rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule; 25149b290a3aSChengwen Feng TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); 2515c37ca66fSWei Hu (Xavier) rte_free(rss_filter_ptr); 2516c37ca66fSWei Hu (Xavier) rss_filter_ptr = NULL; 2517e3069658SHuisong Li (void)hns3_flow_rebuild_all_rss_filter(hns); 2518c37ca66fSWei Hu (Xavier) break; 2519fcba820dSWei Hu (Xavier) default: 2520fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, EINVAL, 2521fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_HANDLE, flow, 2522fcba820dSWei Hu (Xavier) "Unsupported filter type"); 2523fcba820dSWei Hu (Xavier) } 2524fcba820dSWei Hu (Xavier) 25259b290a3aSChengwen Feng TAILQ_FOREACH(flow_node, &hw->flow_list, entries) { 2526fcba820dSWei Hu (Xavier) if (flow_node->flow == flow) { 25279b290a3aSChengwen Feng TAILQ_REMOVE(&hw->flow_list, flow_node, entries); 2528fcba820dSWei Hu (Xavier) rte_free(flow_node); 2529fcba820dSWei Hu (Xavier) flow_node = NULL; 2530fcba820dSWei Hu (Xavier) break; 2531fcba820dSWei Hu (Xavier) } 2532fcba820dSWei Hu (Xavier) } 2533fcba820dSWei Hu (Xavier) rte_free(flow); 2534fcba820dSWei Hu (Xavier) 2535fcba820dSWei Hu (Xavier) return 0; 2536fcba820dSWei Hu (Xavier) } 2537fcba820dSWei Hu (Xavier) 2538fcba820dSWei Hu (Xavier) /* Destroy all flow rules associated with a port on hns3. */ 2539fcba820dSWei Hu (Xavier) static int 2540fcba820dSWei Hu (Xavier) hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) 2541fcba820dSWei Hu (Xavier) { 2542fcba820dSWei Hu (Xavier) struct hns3_adapter *hns = dev->data->dev_private; 2543fcba820dSWei Hu (Xavier) int ret; 2544fcba820dSWei Hu (Xavier) 2545fcba820dSWei Hu (Xavier) /* FDIR is available only in PF driver */ 2546fcba820dSWei Hu (Xavier) if (!hns->is_vf) { 2547fcba820dSWei Hu (Xavier) ret = hns3_clear_all_fdir_filter(hns); 2548fcba820dSWei Hu (Xavier) if (ret) { 2549fcba820dSWei Hu (Xavier) rte_flow_error_set(error, ret, 2550fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_HANDLE, 2551fcba820dSWei Hu (Xavier) NULL, "Failed to flush rule"); 2552fcba820dSWei Hu (Xavier) return ret; 2553fcba820dSWei Hu (Xavier) } 2554fcba820dSWei Hu (Xavier) hns3_counter_flush(dev); 2555fcba820dSWei Hu (Xavier) } 2556fcba820dSWei Hu (Xavier) 2557c37ca66fSWei Hu (Xavier) ret = hns3_clear_rss_filter(dev); 2558c064f691SChengwen Feng if (ret) { 2559c064f691SChengwen Feng rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, 2560c064f691SChengwen Feng NULL, "Failed to flush rss filter"); 2561c37ca66fSWei Hu (Xavier) return ret; 2562c064f691SChengwen Feng } 2563c37ca66fSWei Hu (Xavier) 2564fcba820dSWei Hu (Xavier) hns3_filterlist_flush(dev); 2565fcba820dSWei Hu (Xavier) 2566fcba820dSWei Hu (Xavier) return 0; 2567fcba820dSWei Hu (Xavier) } 2568fcba820dSWei Hu (Xavier) 2569fcba820dSWei Hu (Xavier) /* Query an existing flow rule. */ 2570fcba820dSWei Hu (Xavier) static int 2571fcba820dSWei Hu (Xavier) hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, 2572fcba820dSWei Hu (Xavier) const struct rte_flow_action *actions, void *data, 2573fcba820dSWei Hu (Xavier) struct rte_flow_error *error) 2574fcba820dSWei Hu (Xavier) { 2575a22bff98SLijun Ou struct rte_flow_action_rss *rss_conf; 2576a22bff98SLijun Ou struct hns3_rss_conf_ele *rss_rule; 2577fcba820dSWei Hu (Xavier) struct rte_flow_query_count *qc; 2578fcba820dSWei Hu (Xavier) int ret; 2579fcba820dSWei Hu (Xavier) 2580a22bff98SLijun Ou if (!flow->rule) 2581a22bff98SLijun Ou return rte_flow_error_set(error, EINVAL, 2582a22bff98SLijun Ou RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule"); 2583a22bff98SLijun Ou 2584fcba820dSWei Hu (Xavier) for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2585fcba820dSWei Hu (Xavier) switch (actions->type) { 2586fcba820dSWei Hu (Xavier) case RTE_FLOW_ACTION_TYPE_VOID: 2587fcba820dSWei Hu (Xavier) break; 2588fcba820dSWei Hu (Xavier) case RTE_FLOW_ACTION_TYPE_COUNT: 2589fcba820dSWei Hu (Xavier) qc = (struct rte_flow_query_count *)data; 2590fcba820dSWei Hu (Xavier) ret = hns3_counter_query(dev, flow, qc, error); 2591fcba820dSWei Hu (Xavier) if (ret) 2592fcba820dSWei Hu (Xavier) return ret; 2593fcba820dSWei Hu (Xavier) break; 2594a22bff98SLijun Ou case RTE_FLOW_ACTION_TYPE_RSS: 2595a22bff98SLijun Ou if (flow->filter_type != RTE_ETH_FILTER_HASH) { 2596a22bff98SLijun Ou return rte_flow_error_set(error, ENOTSUP, 2597a22bff98SLijun Ou RTE_FLOW_ERROR_TYPE_ACTION, 2598a22bff98SLijun Ou actions, "action is not supported"); 2599a22bff98SLijun Ou } 2600a22bff98SLijun Ou rss_conf = (struct rte_flow_action_rss *)data; 2601a22bff98SLijun Ou rss_rule = (struct hns3_rss_conf_ele *)flow->rule; 2602a22bff98SLijun Ou rte_memcpy(rss_conf, &rss_rule->filter_info.conf, 2603a22bff98SLijun Ou sizeof(struct rte_flow_action_rss)); 2604a22bff98SLijun Ou break; 2605fcba820dSWei Hu (Xavier) default: 2606fcba820dSWei Hu (Xavier) return rte_flow_error_set(error, ENOTSUP, 2607fcba820dSWei Hu (Xavier) RTE_FLOW_ERROR_TYPE_ACTION, 2608a22bff98SLijun Ou actions, "action is not supported"); 2609fcba820dSWei Hu (Xavier) } 2610fcba820dSWei Hu (Xavier) } 2611a22bff98SLijun Ou 2612fcba820dSWei Hu (Xavier) return 0; 2613fcba820dSWei Hu (Xavier) } 2614fcba820dSWei Hu (Xavier) 26151bdcca80SChengwen Feng static int 26161bdcca80SChengwen Feng hns3_flow_validate_wrap(struct rte_eth_dev *dev, 26171bdcca80SChengwen Feng const struct rte_flow_attr *attr, 26181bdcca80SChengwen Feng const struct rte_flow_item pattern[], 26191bdcca80SChengwen Feng const struct rte_flow_action actions[], 26201bdcca80SChengwen Feng struct rte_flow_error *error) 26211bdcca80SChengwen Feng { 26221bdcca80SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2623e3069658SHuisong Li struct hns3_filter_info filter_info = {0}; 26241bdcca80SChengwen Feng int ret; 26251bdcca80SChengwen Feng 26261bdcca80SChengwen Feng pthread_mutex_lock(&hw->flows_lock); 2627e3069658SHuisong Li ret = hns3_flow_validate(dev, attr, pattern, actions, error, 2628e3069658SHuisong Li &filter_info); 26291bdcca80SChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 26301bdcca80SChengwen Feng 26311bdcca80SChengwen Feng return ret; 26321bdcca80SChengwen Feng } 26331bdcca80SChengwen Feng 26341bdcca80SChengwen Feng static struct rte_flow * 26351bdcca80SChengwen Feng hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 26361bdcca80SChengwen Feng const struct rte_flow_item pattern[], 26371bdcca80SChengwen Feng const struct rte_flow_action actions[], 26381bdcca80SChengwen Feng struct rte_flow_error *error) 26391bdcca80SChengwen Feng { 26401bdcca80SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 26411bdcca80SChengwen Feng struct rte_flow *flow; 26421bdcca80SChengwen Feng 26431bdcca80SChengwen Feng pthread_mutex_lock(&hw->flows_lock); 26441bdcca80SChengwen Feng flow = hns3_flow_create(dev, attr, pattern, actions, error); 26451bdcca80SChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 26461bdcca80SChengwen Feng 26471bdcca80SChengwen Feng return flow; 26481bdcca80SChengwen Feng } 26491bdcca80SChengwen Feng 26501bdcca80SChengwen Feng static int 26511bdcca80SChengwen Feng hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow, 26521bdcca80SChengwen Feng struct rte_flow_error *error) 26531bdcca80SChengwen Feng { 26541bdcca80SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 26551bdcca80SChengwen Feng int ret; 26561bdcca80SChengwen Feng 26571bdcca80SChengwen Feng pthread_mutex_lock(&hw->flows_lock); 26581bdcca80SChengwen Feng ret = hns3_flow_destroy(dev, flow, error); 26591bdcca80SChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 26601bdcca80SChengwen Feng 26611bdcca80SChengwen Feng return ret; 26621bdcca80SChengwen Feng } 26631bdcca80SChengwen Feng 26641bdcca80SChengwen Feng static int 26651bdcca80SChengwen Feng hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error) 26661bdcca80SChengwen Feng { 26671bdcca80SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 26681bdcca80SChengwen Feng int ret; 26691bdcca80SChengwen Feng 26701bdcca80SChengwen Feng pthread_mutex_lock(&hw->flows_lock); 26711bdcca80SChengwen Feng ret = hns3_flow_flush(dev, error); 26721bdcca80SChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 26731bdcca80SChengwen Feng 26741bdcca80SChengwen Feng return ret; 26751bdcca80SChengwen Feng } 26761bdcca80SChengwen Feng 26771bdcca80SChengwen Feng static int 26781bdcca80SChengwen Feng hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow, 26791bdcca80SChengwen Feng const struct rte_flow_action *actions, void *data, 26801bdcca80SChengwen Feng struct rte_flow_error *error) 26811bdcca80SChengwen Feng { 26821bdcca80SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 26831bdcca80SChengwen Feng int ret; 26841bdcca80SChengwen Feng 26851bdcca80SChengwen Feng pthread_mutex_lock(&hw->flows_lock); 26861bdcca80SChengwen Feng ret = hns3_flow_query(dev, flow, actions, data, error); 26871bdcca80SChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 26881bdcca80SChengwen Feng 26891bdcca80SChengwen Feng return ret; 26901bdcca80SChengwen Feng } 26911bdcca80SChengwen Feng 2692fdfcb94dSChengwen Feng static int 2693fdfcb94dSChengwen Feng hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf, 2694fdfcb94dSChengwen Feng const struct rte_flow_action *action, 2695fdfcb94dSChengwen Feng struct rte_flow_error *error) 2696fdfcb94dSChengwen Feng { 2697fdfcb94dSChengwen Feng if (!conf->ingress) 2698fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 2699fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION, 2700fdfcb94dSChengwen Feng NULL, "Indir action ingress can't be zero"); 2701fdfcb94dSChengwen Feng 2702fdfcb94dSChengwen Feng if (conf->egress) 2703fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 2704fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION, 2705fdfcb94dSChengwen Feng NULL, "Indir action not support egress"); 2706fdfcb94dSChengwen Feng 2707fdfcb94dSChengwen Feng if (conf->transfer) 2708fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 2709fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION, 2710fdfcb94dSChengwen Feng NULL, "Indir action not support transfer"); 2711fdfcb94dSChengwen Feng 2712fdfcb94dSChengwen Feng if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) 2713fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 2714fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION, 2715fdfcb94dSChengwen Feng NULL, "Indir action only support count"); 2716fdfcb94dSChengwen Feng 2717fdfcb94dSChengwen Feng return 0; 2718fdfcb94dSChengwen Feng } 2719fdfcb94dSChengwen Feng 2720fdfcb94dSChengwen Feng static struct rte_flow_action_handle * 2721fdfcb94dSChengwen Feng hns3_flow_action_create(struct rte_eth_dev *dev, 2722fdfcb94dSChengwen Feng const struct rte_flow_indir_action_conf *conf, 2723fdfcb94dSChengwen Feng const struct rte_flow_action *action, 2724fdfcb94dSChengwen Feng struct rte_flow_error *error) 2725fdfcb94dSChengwen Feng { 2726fdfcb94dSChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2727fdfcb94dSChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2728fdfcb94dSChengwen Feng const struct rte_flow_action_count *act_count; 2729fdfcb94dSChengwen Feng struct rte_flow_action_handle *handle = NULL; 2730fdfcb94dSChengwen Feng struct hns3_flow_counter *counter; 2731fdfcb94dSChengwen Feng 2732fdfcb94dSChengwen Feng if (hns3_check_indir_action(conf, action, error)) 2733fdfcb94dSChengwen Feng return NULL; 2734fdfcb94dSChengwen Feng 2735fdfcb94dSChengwen Feng handle = rte_zmalloc("hns3 action handle", 2736fdfcb94dSChengwen Feng sizeof(struct rte_flow_action_handle), 0); 2737fdfcb94dSChengwen Feng if (handle == NULL) { 2738fdfcb94dSChengwen Feng rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, 2739fdfcb94dSChengwen Feng NULL, "Failed to allocate action memory"); 2740fdfcb94dSChengwen Feng return NULL; 2741fdfcb94dSChengwen Feng } 2742fdfcb94dSChengwen Feng 2743fdfcb94dSChengwen Feng pthread_mutex_lock(&hw->flows_lock); 2744fdfcb94dSChengwen Feng 2745fdfcb94dSChengwen Feng act_count = (const struct rte_flow_action_count *)action->conf; 2746fdfcb94dSChengwen Feng if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) { 2747fdfcb94dSChengwen Feng rte_flow_error_set(error, EINVAL, 2748fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2749fdfcb94dSChengwen Feng action, "Invalid counter id"); 2750fdfcb94dSChengwen Feng goto err_exit; 2751fdfcb94dSChengwen Feng } 2752fdfcb94dSChengwen Feng 2753fdfcb94dSChengwen Feng if (hns3_counter_new(dev, false, act_count->id, error)) 2754fdfcb94dSChengwen Feng goto err_exit; 2755fdfcb94dSChengwen Feng 2756fdfcb94dSChengwen Feng counter = hns3_counter_lookup(dev, act_count->id); 2757fdfcb94dSChengwen Feng if (counter == NULL) { 2758fdfcb94dSChengwen Feng rte_flow_error_set(error, EINVAL, 2759fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2760fdfcb94dSChengwen Feng action, "Counter id not found"); 2761fdfcb94dSChengwen Feng goto err_exit; 2762fdfcb94dSChengwen Feng } 2763fdfcb94dSChengwen Feng 2764fdfcb94dSChengwen Feng counter->indirect = true; 2765fdfcb94dSChengwen Feng handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT; 2766fdfcb94dSChengwen Feng handle->counter_id = counter->id; 2767fdfcb94dSChengwen Feng 2768fdfcb94dSChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 2769fdfcb94dSChengwen Feng return handle; 2770fdfcb94dSChengwen Feng 2771fdfcb94dSChengwen Feng err_exit: 2772fdfcb94dSChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 2773fdfcb94dSChengwen Feng rte_free(handle); 2774fdfcb94dSChengwen Feng return NULL; 2775fdfcb94dSChengwen Feng } 2776fdfcb94dSChengwen Feng 2777fdfcb94dSChengwen Feng static int 2778fdfcb94dSChengwen Feng hns3_flow_action_destroy(struct rte_eth_dev *dev, 2779fdfcb94dSChengwen Feng struct rte_flow_action_handle *handle, 2780fdfcb94dSChengwen Feng struct rte_flow_error *error) 2781fdfcb94dSChengwen Feng { 2782fdfcb94dSChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2783fdfcb94dSChengwen Feng struct hns3_flow_counter *counter; 2784fdfcb94dSChengwen Feng 2785fdfcb94dSChengwen Feng pthread_mutex_lock(&hw->flows_lock); 2786fdfcb94dSChengwen Feng 2787fdfcb94dSChengwen Feng if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) { 2788fdfcb94dSChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 2789fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 2790fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2791fdfcb94dSChengwen Feng handle, "Invalid indirect type"); 2792fdfcb94dSChengwen Feng } 2793fdfcb94dSChengwen Feng 2794fdfcb94dSChengwen Feng counter = hns3_counter_lookup(dev, handle->counter_id); 2795fdfcb94dSChengwen Feng if (counter == NULL) { 2796fdfcb94dSChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 2797fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 2798fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2799fdfcb94dSChengwen Feng handle, "Counter id not exist"); 2800fdfcb94dSChengwen Feng } 2801fdfcb94dSChengwen Feng 2802fdfcb94dSChengwen Feng if (counter->ref_cnt > 1) { 2803fdfcb94dSChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 2804fdfcb94dSChengwen Feng return rte_flow_error_set(error, EBUSY, 2805fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_HANDLE, 2806fdfcb94dSChengwen Feng handle, "Counter id in use"); 2807fdfcb94dSChengwen Feng } 2808fdfcb94dSChengwen Feng 2809fdfcb94dSChengwen Feng (void)hns3_counter_release(dev, handle->counter_id); 2810fdfcb94dSChengwen Feng rte_free(handle); 2811fdfcb94dSChengwen Feng 2812fdfcb94dSChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 2813fdfcb94dSChengwen Feng return 0; 2814fdfcb94dSChengwen Feng } 2815fdfcb94dSChengwen Feng 2816fdfcb94dSChengwen Feng static int 2817fdfcb94dSChengwen Feng hns3_flow_action_query(struct rte_eth_dev *dev, 2818fdfcb94dSChengwen Feng const struct rte_flow_action_handle *handle, 2819fdfcb94dSChengwen Feng void *data, 2820fdfcb94dSChengwen Feng struct rte_flow_error *error) 2821fdfcb94dSChengwen Feng { 2822fdfcb94dSChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2823fdfcb94dSChengwen Feng struct rte_flow flow; 2824fdfcb94dSChengwen Feng int ret; 2825fdfcb94dSChengwen Feng 2826fdfcb94dSChengwen Feng pthread_mutex_lock(&hw->flows_lock); 2827fdfcb94dSChengwen Feng 2828fdfcb94dSChengwen Feng if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) { 2829fdfcb94dSChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 2830fdfcb94dSChengwen Feng return rte_flow_error_set(error, EINVAL, 2831fdfcb94dSChengwen Feng RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2832fdfcb94dSChengwen Feng handle, "Invalid indirect type"); 2833fdfcb94dSChengwen Feng } 2834fdfcb94dSChengwen Feng 2835fdfcb94dSChengwen Feng memset(&flow, 0, sizeof(flow)); 2836fdfcb94dSChengwen Feng flow.counter_id = handle->counter_id; 2837fdfcb94dSChengwen Feng ret = hns3_counter_query(dev, &flow, 2838fdfcb94dSChengwen Feng (struct rte_flow_query_count *)data, error); 2839fdfcb94dSChengwen Feng pthread_mutex_unlock(&hw->flows_lock); 2840fdfcb94dSChengwen Feng return ret; 2841fdfcb94dSChengwen Feng } 2842fdfcb94dSChengwen Feng 2843fcba820dSWei Hu (Xavier) static const struct rte_flow_ops hns3_flow_ops = { 28441bdcca80SChengwen Feng .validate = hns3_flow_validate_wrap, 28451bdcca80SChengwen Feng .create = hns3_flow_create_wrap, 28461bdcca80SChengwen Feng .destroy = hns3_flow_destroy_wrap, 28471bdcca80SChengwen Feng .flush = hns3_flow_flush_wrap, 28481bdcca80SChengwen Feng .query = hns3_flow_query_wrap, 2849fcba820dSWei Hu (Xavier) .isolate = NULL, 2850fdfcb94dSChengwen Feng .action_handle_create = hns3_flow_action_create, 2851fdfcb94dSChengwen Feng .action_handle_destroy = hns3_flow_action_destroy, 2852fdfcb94dSChengwen Feng .action_handle_query = hns3_flow_action_query, 2853fcba820dSWei Hu (Xavier) }; 2854fcba820dSWei Hu (Xavier) 2855fcba820dSWei Hu (Xavier) int 2856fb7ad441SThomas Monjalon hns3_dev_flow_ops_get(struct rte_eth_dev *dev, 2857fb7ad441SThomas Monjalon const struct rte_flow_ops **ops) 2858fcba820dSWei Hu (Xavier) { 2859fcba820dSWei Hu (Xavier) struct hns3_hw *hw; 2860fcba820dSWei Hu (Xavier) 2861fcba820dSWei Hu (Xavier) hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2862fcba820dSWei Hu (Xavier) if (hw->adapter_state >= HNS3_NIC_CLOSED) 2863fcba820dSWei Hu (Xavier) return -ENODEV; 2864fcba820dSWei Hu (Xavier) 2865fb7ad441SThomas Monjalon *ops = &hns3_flow_ops; 2866fb7ad441SThomas Monjalon return 0; 2867fcba820dSWei Hu (Xavier) } 28689b290a3aSChengwen Feng 28699b290a3aSChengwen Feng void 28709b290a3aSChengwen Feng hns3_flow_init(struct rte_eth_dev *dev) 28719b290a3aSChengwen Feng { 28729b290a3aSChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 28739b290a3aSChengwen Feng pthread_mutexattr_t attr; 28749b290a3aSChengwen Feng 28759b290a3aSChengwen Feng if (rte_eal_process_type() != RTE_PROC_PRIMARY) 28769b290a3aSChengwen Feng return; 28779b290a3aSChengwen Feng 28789b290a3aSChengwen Feng pthread_mutexattr_init(&attr); 28799b290a3aSChengwen Feng pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED); 28809b290a3aSChengwen Feng pthread_mutex_init(&hw->flows_lock, &attr); 28819b290a3aSChengwen Feng dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 28829b290a3aSChengwen Feng 28839b290a3aSChengwen Feng TAILQ_INIT(&hw->flow_fdir_list); 28849b290a3aSChengwen Feng TAILQ_INIT(&hw->flow_rss_list); 28859b290a3aSChengwen Feng TAILQ_INIT(&hw->flow_list); 28869b290a3aSChengwen Feng } 28879b290a3aSChengwen Feng 28889b290a3aSChengwen Feng void 28899b290a3aSChengwen Feng hns3_flow_uninit(struct rte_eth_dev *dev) 28909b290a3aSChengwen Feng { 28919b290a3aSChengwen Feng struct rte_flow_error error; 28929b290a3aSChengwen Feng if (rte_eal_process_type() == RTE_PROC_PRIMARY) 28939b290a3aSChengwen Feng hns3_flow_flush_wrap(dev, &error); 28949b290a3aSChengwen Feng } 2895