182092c87SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 246d5736aSVasily Philipov * Copyright 2017 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2017 Mellanox Technologies, Ltd 446d5736aSVasily Philipov */ 546d5736aSVasily Philipov 637491c7fSAdrien Mazarguil /** 737491c7fSAdrien Mazarguil * @file 837491c7fSAdrien Mazarguil * Flow API operations for mlx4 driver. 937491c7fSAdrien Mazarguil */ 1037491c7fSAdrien Mazarguil 11af745cd6SAdrien Mazarguil #include <arpa/inet.h> 12af745cd6SAdrien Mazarguil #include <errno.h> 13100fe44bSAdrien Mazarguil #include <stdalign.h> 14af745cd6SAdrien Mazarguil #include <stddef.h> 15af745cd6SAdrien Mazarguil #include <stdint.h> 16af745cd6SAdrien Mazarguil #include <string.h> 17af745cd6SAdrien Mazarguil #include <sys/queue.h> 1846d5736aSVasily Philipov 19af745cd6SAdrien Mazarguil /* Verbs headers do not support -pedantic. */ 20af745cd6SAdrien Mazarguil #ifdef PEDANTIC 21af745cd6SAdrien Mazarguil #pragma GCC diagnostic ignored "-Wpedantic" 22af745cd6SAdrien Mazarguil #endif 23af745cd6SAdrien Mazarguil #include <infiniband/verbs.h> 24af745cd6SAdrien Mazarguil #ifdef PEDANTIC 25af745cd6SAdrien Mazarguil #pragma GCC diagnostic error "-Wpedantic" 26af745cd6SAdrien Mazarguil #endif 27af745cd6SAdrien Mazarguil 28267d07daSAdrien Mazarguil #include <rte_byteorder.h> 29af745cd6SAdrien Mazarguil #include <rte_errno.h> 30df96fd0dSBruce Richardson #include <ethdev_driver.h> 311437784bSAdrien Mazarguil #include <rte_ether.h> 3246d5736aSVasily Philipov #include <rte_flow.h> 3346d5736aSVasily Philipov #include <rte_flow_driver.h> 3446d5736aSVasily Philipov #include <rte_malloc.h> 3546d5736aSVasily Philipov 3646d5736aSVasily Philipov /* PMD headers. 
*/ 3746d5736aSVasily Philipov #include "mlx4.h" 384eba244bSAdrien Mazarguil #include "mlx4_glue.h" 3946d5736aSVasily Philipov #include "mlx4_flow.h" 403d555728SAdrien Mazarguil #include "mlx4_rxtx.h" 4176df01ffSAdrien Mazarguil #include "mlx4_utils.h" 4246d5736aSVasily Philipov 43809d8a6cSAdrien Mazarguil /** Static initializer for a list of subsequent item types. */ 44809d8a6cSAdrien Mazarguil #define NEXT_ITEM(...) \ 4546d5736aSVasily Philipov (const enum rte_flow_item_type []){ \ 4646d5736aSVasily Philipov __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \ 4746d5736aSVasily Philipov } 4846d5736aSVasily Philipov 49809d8a6cSAdrien Mazarguil /** Processor structure associated with a flow item. */ 50809d8a6cSAdrien Mazarguil struct mlx4_flow_proc_item { 51680d5280SAdrien Mazarguil /** Bit-mask for fields supported by this PMD. */ 52680d5280SAdrien Mazarguil const void *mask_support; 53680d5280SAdrien Mazarguil /** Bit-mask to use when @p item->mask is not provided. */ 54680d5280SAdrien Mazarguil const void *mask_default; 55680d5280SAdrien Mazarguil /** Size in bytes for @p mask_support and @p mask_default. */ 5646d5736aSVasily Philipov const unsigned int mask_sz; 57680d5280SAdrien Mazarguil /** Merge a pattern item into a flow rule handle. */ 58680d5280SAdrien Mazarguil int (*merge)(struct rte_flow *flow, 59680d5280SAdrien Mazarguil const struct rte_flow_item *item, 60680d5280SAdrien Mazarguil const struct mlx4_flow_proc_item *proc, 61680d5280SAdrien Mazarguil struct rte_flow_error *error); 6246d5736aSVasily Philipov /** Size in bytes of the destination structure. */ 6346d5736aSVasily Philipov const unsigned int dst_sz; 64809d8a6cSAdrien Mazarguil /** List of possible subsequent items. */ 65809d8a6cSAdrien Mazarguil const enum rte_flow_item_type *const next_item; 6646d5736aSVasily Philipov }; 6746d5736aSVasily Philipov 68d3a7e092SAdrien Mazarguil /** Shared resources for drop flow rules. 
*/ 69d3a7e092SAdrien Mazarguil struct mlx4_drop { 70d3a7e092SAdrien Mazarguil struct ibv_qp *qp; /**< QP target. */ 71d3a7e092SAdrien Mazarguil struct ibv_cq *cq; /**< CQ associated with above QP. */ 72dbeba4cfSThomas Monjalon struct mlx4_priv *priv; /**< Back pointer to private data. */ 73d3a7e092SAdrien Mazarguil uint32_t refcnt; /**< Reference count. */ 74642fe56aSVasily Philipov }; 75642fe56aSVasily Philipov 7646d5736aSVasily Philipov /** 77b7abc67cSAdrien Mazarguil * Convert supported RSS hash field types between DPDK and Verbs formats. 78078b8b45SAdrien Mazarguil * 79ac8d22deSAdrien Mazarguil * This function returns the supported (default) set when @p types has 8027bdbbefSAdrien Mazarguil * special value 0. 8127563725SAdrien Mazarguil * 82024e87beSAdrien Mazarguil * @param priv 83024e87beSAdrien Mazarguil * Pointer to private structure. 84ac8d22deSAdrien Mazarguil * @param types 85b7abc67cSAdrien Mazarguil * Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct 86b7abc67cSAdrien Mazarguil * rte_eth_rss_conf) or Verbs format. 87b7abc67cSAdrien Mazarguil * @param verbs_to_dpdk 88b7abc67cSAdrien Mazarguil * A zero value converts @p types from DPDK to Verbs, a nonzero value 89b7abc67cSAdrien Mazarguil * performs the reverse operation. 90078b8b45SAdrien Mazarguil * 91078b8b45SAdrien Mazarguil * @return 92b7abc67cSAdrien Mazarguil * Converted RSS hash fields on success, (uint64_t)-1 otherwise and 93b7abc67cSAdrien Mazarguil * rte_errno is set. 
94078b8b45SAdrien Mazarguil */ 95024e87beSAdrien Mazarguil uint64_t 96dbeba4cfSThomas Monjalon mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk) 97078b8b45SAdrien Mazarguil { 98b7abc67cSAdrien Mazarguil enum { 99b7abc67cSAdrien Mazarguil INNER, 100b7abc67cSAdrien Mazarguil IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3, 101b7abc67cSAdrien Mazarguil TCP, UDP, 102b7abc67cSAdrien Mazarguil IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1, 103078b8b45SAdrien Mazarguil }; 104b7abc67cSAdrien Mazarguil enum { 105b7abc67cSAdrien Mazarguil VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4, 106b7abc67cSAdrien Mazarguil VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6, 107b7abc67cSAdrien Mazarguil VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP, 108b7abc67cSAdrien Mazarguil VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP, 109078b8b45SAdrien Mazarguil }; 110b7abc67cSAdrien Mazarguil static const uint64_t dpdk[] = { 111b7abc67cSAdrien Mazarguil [INNER] = 0, 112295968d1SFerruh Yigit [IPV4] = RTE_ETH_RSS_IPV4, 113295968d1SFerruh Yigit [IPV4_1] = RTE_ETH_RSS_FRAG_IPV4, 114295968d1SFerruh Yigit [IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER, 115295968d1SFerruh Yigit [IPV6] = RTE_ETH_RSS_IPV6, 116295968d1SFerruh Yigit [IPV6_1] = RTE_ETH_RSS_FRAG_IPV6, 117295968d1SFerruh Yigit [IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER, 118295968d1SFerruh Yigit [IPV6_3] = RTE_ETH_RSS_IPV6_EX, 119b7abc67cSAdrien Mazarguil [TCP] = 0, 120b7abc67cSAdrien Mazarguil [UDP] = 0, 121295968d1SFerruh Yigit [IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP, 122295968d1SFerruh Yigit [IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP, 123295968d1SFerruh Yigit [IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP, 124295968d1SFerruh Yigit [IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX, 125295968d1SFerruh Yigit [IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP, 126295968d1SFerruh Yigit [IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX, 127b7abc67cSAdrien Mazarguil }; 
128b7abc67cSAdrien Mazarguil static const uint64_t verbs[RTE_DIM(dpdk)] = { 129b7abc67cSAdrien Mazarguil [INNER] = IBV_RX_HASH_INNER, 130b7abc67cSAdrien Mazarguil [IPV4] = VERBS_IPV4, 131b7abc67cSAdrien Mazarguil [IPV4_1] = VERBS_IPV4, 132b7abc67cSAdrien Mazarguil [IPV4_2] = VERBS_IPV4, 133b7abc67cSAdrien Mazarguil [IPV6] = VERBS_IPV6, 134b7abc67cSAdrien Mazarguil [IPV6_1] = VERBS_IPV6, 135b7abc67cSAdrien Mazarguil [IPV6_2] = VERBS_IPV6, 136b7abc67cSAdrien Mazarguil [IPV6_3] = VERBS_IPV6, 137b7abc67cSAdrien Mazarguil [TCP] = VERBS_TCP, 138b7abc67cSAdrien Mazarguil [UDP] = VERBS_UDP, 139b7abc67cSAdrien Mazarguil [IPV4_TCP] = VERBS_IPV4 | VERBS_TCP, 140b7abc67cSAdrien Mazarguil [IPV4_UDP] = VERBS_IPV4 | VERBS_UDP, 141b7abc67cSAdrien Mazarguil [IPV6_TCP] = VERBS_IPV6 | VERBS_TCP, 142b7abc67cSAdrien Mazarguil [IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP, 143b7abc67cSAdrien Mazarguil [IPV6_UDP] = VERBS_IPV6 | VERBS_UDP, 144b7abc67cSAdrien Mazarguil [IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP, 145b7abc67cSAdrien Mazarguil }; 146b7abc67cSAdrien Mazarguil const uint64_t *in = verbs_to_dpdk ? verbs : dpdk; 147b7abc67cSAdrien Mazarguil const uint64_t *out = verbs_to_dpdk ? 
dpdk : verbs; 148078b8b45SAdrien Mazarguil uint64_t seen = 0; 149078b8b45SAdrien Mazarguil uint64_t conv = 0; 150078b8b45SAdrien Mazarguil unsigned int i; 151078b8b45SAdrien Mazarguil 152b7abc67cSAdrien Mazarguil if (!types) { 153b7abc67cSAdrien Mazarguil if (!verbs_to_dpdk) 1541d173da8SAdrien Mazarguil return priv->hw_rss_sup; 155b7abc67cSAdrien Mazarguil types = priv->hw_rss_sup; 156b7abc67cSAdrien Mazarguil } 157b7abc67cSAdrien Mazarguil for (i = 0; i != RTE_DIM(dpdk); ++i) 158b7abc67cSAdrien Mazarguil if (in[i] && (types & in[i]) == in[i]) { 159ac8d22deSAdrien Mazarguil seen |= types & in[i]; 160078b8b45SAdrien Mazarguil conv |= out[i]; 161078b8b45SAdrien Mazarguil } 162b7abc67cSAdrien Mazarguil if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) && 163b7abc67cSAdrien Mazarguil !(types & ~seen)) 16427563725SAdrien Mazarguil return conv; 165078b8b45SAdrien Mazarguil rte_errno = ENOTSUP; 166078b8b45SAdrien Mazarguil return (uint64_t)-1; 167078b8b45SAdrien Mazarguil } 168078b8b45SAdrien Mazarguil 169078b8b45SAdrien Mazarguil /** 170680d5280SAdrien Mazarguil * Merge Ethernet pattern item into flow rule handle. 17146d5736aSVasily Philipov * 172680d5280SAdrien Mazarguil * Additional mlx4-specific constraints on supported fields: 173680d5280SAdrien Mazarguil * 1743e49f870SAdrien Mazarguil * - No support for partial masks, except in the specific case of matching 1753e49f870SAdrien Mazarguil * all multicast traffic (@p spec->dst and @p mask->dst equal to 1763e49f870SAdrien Mazarguil * 01:00:00:00:00:00). 177680d5280SAdrien Mazarguil * - Not providing @p item->spec or providing an empty @p mask->dst is 178680d5280SAdrien Mazarguil * *only* supported if the rule doesn't specify additional matching 179680d5280SAdrien Mazarguil * criteria (i.e. rule is promiscuous-like). 180680d5280SAdrien Mazarguil * 181680d5280SAdrien Mazarguil * @param[in, out] flow 182100fe44bSAdrien Mazarguil * Flow rule handle to update. 
183680d5280SAdrien Mazarguil * @param[in] item 184680d5280SAdrien Mazarguil * Pattern item to merge. 185680d5280SAdrien Mazarguil * @param[in] proc 186680d5280SAdrien Mazarguil * Associated item-processing object. 187680d5280SAdrien Mazarguil * @param[out] error 188680d5280SAdrien Mazarguil * Perform verbose error reporting if not NULL. 189680d5280SAdrien Mazarguil * 190680d5280SAdrien Mazarguil * @return 191680d5280SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 19246d5736aSVasily Philipov */ 19346d5736aSVasily Philipov static int 194680d5280SAdrien Mazarguil mlx4_flow_merge_eth(struct rte_flow *flow, 195680d5280SAdrien Mazarguil const struct rte_flow_item *item, 196680d5280SAdrien Mazarguil const struct mlx4_flow_proc_item *proc, 197680d5280SAdrien Mazarguil struct rte_flow_error *error) 19846d5736aSVasily Philipov { 19946d5736aSVasily Philipov const struct rte_flow_item_eth *spec = item->spec; 200680d5280SAdrien Mazarguil const struct rte_flow_item_eth *mask = 201680d5280SAdrien Mazarguil spec ? (item->mask ? 
item->mask : proc->mask_default) : NULL; 20246d5736aSVasily Philipov struct ibv_flow_spec_eth *eth; 203680d5280SAdrien Mazarguil const char *msg; 20446d5736aSVasily Philipov unsigned int i; 20546d5736aSVasily Philipov 206c0d23926SDekel Peled if (mask) { 207680d5280SAdrien Mazarguil uint32_t sum_dst = 0; 208680d5280SAdrien Mazarguil uint32_t sum_src = 0; 209680d5280SAdrien Mazarguil 2108275d5fcSThomas Monjalon for (i = 0; i != sizeof(mask->hdr.dst_addr.addr_bytes); ++i) { 2118275d5fcSThomas Monjalon sum_dst += mask->hdr.dst_addr.addr_bytes[i]; 2128275d5fcSThomas Monjalon sum_src += mask->hdr.src_addr.addr_bytes[i]; 213680d5280SAdrien Mazarguil } 214680d5280SAdrien Mazarguil if (sum_src) { 215680d5280SAdrien Mazarguil msg = "mlx4 does not support source MAC matching"; 216680d5280SAdrien Mazarguil goto error; 217680d5280SAdrien Mazarguil } else if (!sum_dst) { 218680d5280SAdrien Mazarguil flow->promisc = 1; 2198275d5fcSThomas Monjalon } else if (sum_dst == 1 && mask->hdr.dst_addr.addr_bytes[0] == 1) { 2208275d5fcSThomas Monjalon if (!(spec->hdr.dst_addr.addr_bytes[0] & 1)) { 2213e49f870SAdrien Mazarguil msg = "mlx4 does not support the explicit" 2223e49f870SAdrien Mazarguil " exclusion of all multicast traffic"; 2233e49f870SAdrien Mazarguil goto error; 2243e49f870SAdrien Mazarguil } 2253e49f870SAdrien Mazarguil flow->allmulti = 1; 22635b2d13fSOlivier Matz } else if (sum_dst != (UINT8_C(0xff) * RTE_ETHER_ADDR_LEN)) { 227680d5280SAdrien Mazarguil msg = "mlx4 does not support matching partial" 228680d5280SAdrien Mazarguil " Ethernet fields"; 229680d5280SAdrien Mazarguil goto error; 230680d5280SAdrien Mazarguil } 231680d5280SAdrien Mazarguil } 232680d5280SAdrien Mazarguil if (!flow->ibv_attr) 233680d5280SAdrien Mazarguil return 0; 234680d5280SAdrien Mazarguil if (flow->promisc) { 235680d5280SAdrien Mazarguil flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT; 236680d5280SAdrien Mazarguil return 0; 237680d5280SAdrien Mazarguil } 2383e49f870SAdrien Mazarguil if 
(flow->allmulti) { 2393e49f870SAdrien Mazarguil flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT; 2403e49f870SAdrien Mazarguil return 0; 2413e49f870SAdrien Mazarguil } 24246d5736aSVasily Philipov ++flow->ibv_attr->num_of_specs; 243100fe44bSAdrien Mazarguil eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); 24446d5736aSVasily Philipov *eth = (struct ibv_flow_spec_eth) { 24546d5736aSVasily Philipov .type = IBV_FLOW_SPEC_ETH, 246680d5280SAdrien Mazarguil .size = sizeof(*eth), 24746d5736aSVasily Philipov }; 248c0d23926SDekel Peled if (!mask) { 249105add6bSDekel Peled eth->val.dst_mac[0] = 0xff; 250c0d23926SDekel Peled flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT; 251efad0db1SDekel Peled flow->promisc = 1; 252c0d23926SDekel Peled return 0; 253c0d23926SDekel Peled } 2548275d5fcSThomas Monjalon memcpy(eth->val.dst_mac, spec->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN); 2558275d5fcSThomas Monjalon memcpy(eth->mask.dst_mac, mask->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN); 25646d5736aSVasily Philipov /* Remove unwanted bits from values. */ 25735b2d13fSOlivier Matz for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) 25846d5736aSVasily Philipov eth->val.dst_mac[i] &= eth->mask.dst_mac[i]; 25935b2d13fSOlivier Matz 26046d5736aSVasily Philipov return 0; 261680d5280SAdrien Mazarguil error: 262680d5280SAdrien Mazarguil return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 263680d5280SAdrien Mazarguil item, msg); 26446d5736aSVasily Philipov } 26546d5736aSVasily Philipov 26646d5736aSVasily Philipov /** 267680d5280SAdrien Mazarguil * Merge VLAN pattern item into flow rule handle. 26846d5736aSVasily Philipov * 269680d5280SAdrien Mazarguil * Additional mlx4-specific constraints on supported fields: 270680d5280SAdrien Mazarguil * 271680d5280SAdrien Mazarguil * - Matching *all* VLAN traffic by omitting @p item->spec or providing an 272680d5280SAdrien Mazarguil * empty @p item->mask would also include non-VLAN traffic. 
Doing so is 273680d5280SAdrien Mazarguil * therefore unsupported. 274680d5280SAdrien Mazarguil * - No support for partial masks. 275680d5280SAdrien Mazarguil * 276680d5280SAdrien Mazarguil * @param[in, out] flow 277100fe44bSAdrien Mazarguil * Flow rule handle to update. 278680d5280SAdrien Mazarguil * @param[in] item 279680d5280SAdrien Mazarguil * Pattern item to merge. 280680d5280SAdrien Mazarguil * @param[in] proc 281680d5280SAdrien Mazarguil * Associated item-processing object. 282680d5280SAdrien Mazarguil * @param[out] error 283680d5280SAdrien Mazarguil * Perform verbose error reporting if not NULL. 284680d5280SAdrien Mazarguil * 285680d5280SAdrien Mazarguil * @return 286680d5280SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 28746d5736aSVasily Philipov */ 28846d5736aSVasily Philipov static int 289680d5280SAdrien Mazarguil mlx4_flow_merge_vlan(struct rte_flow *flow, 290680d5280SAdrien Mazarguil const struct rte_flow_item *item, 291680d5280SAdrien Mazarguil const struct mlx4_flow_proc_item *proc, 292680d5280SAdrien Mazarguil struct rte_flow_error *error) 29346d5736aSVasily Philipov { 29446d5736aSVasily Philipov const struct rte_flow_item_vlan *spec = item->spec; 295680d5280SAdrien Mazarguil const struct rte_flow_item_vlan *mask = 296680d5280SAdrien Mazarguil spec ? (item->mask ? 
item->mask : proc->mask_default) : NULL; 29746d5736aSVasily Philipov struct ibv_flow_spec_eth *eth; 298680d5280SAdrien Mazarguil const char *msg; 29946d5736aSVasily Philipov 3008275d5fcSThomas Monjalon if (!mask || !mask->hdr.vlan_tci) { 301680d5280SAdrien Mazarguil msg = "mlx4 cannot match all VLAN traffic while excluding" 302680d5280SAdrien Mazarguil " non-VLAN traffic, TCI VID must be specified"; 303680d5280SAdrien Mazarguil goto error; 304680d5280SAdrien Mazarguil } 3058275d5fcSThomas Monjalon if (mask->hdr.vlan_tci != RTE_BE16(0x0fff)) { 306680d5280SAdrien Mazarguil msg = "mlx4 does not support partial TCI VID matching"; 307680d5280SAdrien Mazarguil goto error; 308680d5280SAdrien Mazarguil } 309680d5280SAdrien Mazarguil if (!flow->ibv_attr) 31046d5736aSVasily Philipov return 0; 311680d5280SAdrien Mazarguil eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size - 312680d5280SAdrien Mazarguil sizeof(*eth)); 3138275d5fcSThomas Monjalon eth->val.vlan_tag = spec->hdr.vlan_tci; 3148275d5fcSThomas Monjalon eth->mask.vlan_tag = mask->hdr.vlan_tci; 31546d5736aSVasily Philipov eth->val.vlan_tag &= eth->mask.vlan_tag; 316c0d23926SDekel Peled if (flow->ibv_attr->type == IBV_FLOW_ATTR_ALL_DEFAULT) 317c0d23926SDekel Peled flow->ibv_attr->type = IBV_FLOW_ATTR_NORMAL; 31846d5736aSVasily Philipov return 0; 319680d5280SAdrien Mazarguil error: 320680d5280SAdrien Mazarguil return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 321680d5280SAdrien Mazarguil item, msg); 32246d5736aSVasily Philipov } 32346d5736aSVasily Philipov 32446d5736aSVasily Philipov /** 325680d5280SAdrien Mazarguil * Merge IPv4 pattern item into flow rule handle. 32646d5736aSVasily Philipov * 327680d5280SAdrien Mazarguil * Additional mlx4-specific constraints on supported fields: 328680d5280SAdrien Mazarguil * 329680d5280SAdrien Mazarguil * - No support for partial masks. 
330680d5280SAdrien Mazarguil * 331680d5280SAdrien Mazarguil * @param[in, out] flow 332100fe44bSAdrien Mazarguil * Flow rule handle to update. 333680d5280SAdrien Mazarguil * @param[in] item 334680d5280SAdrien Mazarguil * Pattern item to merge. 335680d5280SAdrien Mazarguil * @param[in] proc 336680d5280SAdrien Mazarguil * Associated item-processing object. 337680d5280SAdrien Mazarguil * @param[out] error 338680d5280SAdrien Mazarguil * Perform verbose error reporting if not NULL. 339680d5280SAdrien Mazarguil * 340680d5280SAdrien Mazarguil * @return 341680d5280SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 34246d5736aSVasily Philipov */ 34346d5736aSVasily Philipov static int 344680d5280SAdrien Mazarguil mlx4_flow_merge_ipv4(struct rte_flow *flow, 345680d5280SAdrien Mazarguil const struct rte_flow_item *item, 346680d5280SAdrien Mazarguil const struct mlx4_flow_proc_item *proc, 347680d5280SAdrien Mazarguil struct rte_flow_error *error) 34846d5736aSVasily Philipov { 34946d5736aSVasily Philipov const struct rte_flow_item_ipv4 *spec = item->spec; 350680d5280SAdrien Mazarguil const struct rte_flow_item_ipv4 *mask = 351680d5280SAdrien Mazarguil spec ? (item->mask ? 
item->mask : proc->mask_default) : NULL; 35246d5736aSVasily Philipov struct ibv_flow_spec_ipv4 *ipv4; 353680d5280SAdrien Mazarguil const char *msg; 35446d5736aSVasily Philipov 355680d5280SAdrien Mazarguil if (mask && 356680d5280SAdrien Mazarguil ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) || 357680d5280SAdrien Mazarguil (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) { 358680d5280SAdrien Mazarguil msg = "mlx4 does not support matching partial IPv4 fields"; 359680d5280SAdrien Mazarguil goto error; 360680d5280SAdrien Mazarguil } 361680d5280SAdrien Mazarguil if (!flow->ibv_attr) 362680d5280SAdrien Mazarguil return 0; 36346d5736aSVasily Philipov ++flow->ibv_attr->num_of_specs; 364100fe44bSAdrien Mazarguil ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); 36546d5736aSVasily Philipov *ipv4 = (struct ibv_flow_spec_ipv4) { 36646d5736aSVasily Philipov .type = IBV_FLOW_SPEC_IPV4, 367680d5280SAdrien Mazarguil .size = sizeof(*ipv4), 36846d5736aSVasily Philipov }; 36946d5736aSVasily Philipov if (!spec) 37046d5736aSVasily Philipov return 0; 37146d5736aSVasily Philipov ipv4->val = (struct ibv_flow_ipv4_filter) { 37246d5736aSVasily Philipov .src_ip = spec->hdr.src_addr, 37346d5736aSVasily Philipov .dst_ip = spec->hdr.dst_addr, 37446d5736aSVasily Philipov }; 37546d5736aSVasily Philipov ipv4->mask = (struct ibv_flow_ipv4_filter) { 37646d5736aSVasily Philipov .src_ip = mask->hdr.src_addr, 37746d5736aSVasily Philipov .dst_ip = mask->hdr.dst_addr, 37846d5736aSVasily Philipov }; 37946d5736aSVasily Philipov /* Remove unwanted bits from values. 
*/ 38046d5736aSVasily Philipov ipv4->val.src_ip &= ipv4->mask.src_ip; 38146d5736aSVasily Philipov ipv4->val.dst_ip &= ipv4->mask.dst_ip; 38246d5736aSVasily Philipov return 0; 383680d5280SAdrien Mazarguil error: 384680d5280SAdrien Mazarguil return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 385680d5280SAdrien Mazarguil item, msg); 38646d5736aSVasily Philipov } 38746d5736aSVasily Philipov 38846d5736aSVasily Philipov /** 389680d5280SAdrien Mazarguil * Merge UDP pattern item into flow rule handle. 39046d5736aSVasily Philipov * 391680d5280SAdrien Mazarguil * Additional mlx4-specific constraints on supported fields: 392680d5280SAdrien Mazarguil * 393680d5280SAdrien Mazarguil * - No support for partial masks. 394bf959ec5SAdrien Mazarguil * - Due to HW/FW limitation, flow rule priority is not taken into account 395bf959ec5SAdrien Mazarguil * when matching UDP destination ports, doing is therefore only supported 396bf959ec5SAdrien Mazarguil * at the highest priority level (0). 397680d5280SAdrien Mazarguil * 398680d5280SAdrien Mazarguil * @param[in, out] flow 399100fe44bSAdrien Mazarguil * Flow rule handle to update. 400680d5280SAdrien Mazarguil * @param[in] item 401680d5280SAdrien Mazarguil * Pattern item to merge. 402680d5280SAdrien Mazarguil * @param[in] proc 403680d5280SAdrien Mazarguil * Associated item-processing object. 404680d5280SAdrien Mazarguil * @param[out] error 405680d5280SAdrien Mazarguil * Perform verbose error reporting if not NULL. 406680d5280SAdrien Mazarguil * 407680d5280SAdrien Mazarguil * @return 408680d5280SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 
40946d5736aSVasily Philipov */ 41046d5736aSVasily Philipov static int 411680d5280SAdrien Mazarguil mlx4_flow_merge_udp(struct rte_flow *flow, 412680d5280SAdrien Mazarguil const struct rte_flow_item *item, 413680d5280SAdrien Mazarguil const struct mlx4_flow_proc_item *proc, 414680d5280SAdrien Mazarguil struct rte_flow_error *error) 41546d5736aSVasily Philipov { 41646d5736aSVasily Philipov const struct rte_flow_item_udp *spec = item->spec; 417680d5280SAdrien Mazarguil const struct rte_flow_item_udp *mask = 418680d5280SAdrien Mazarguil spec ? (item->mask ? item->mask : proc->mask_default) : NULL; 41946d5736aSVasily Philipov struct ibv_flow_spec_tcp_udp *udp; 420680d5280SAdrien Mazarguil const char *msg; 42146d5736aSVasily Philipov 422cad92582SAdrien Mazarguil if (mask && 423680d5280SAdrien Mazarguil ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) || 424680d5280SAdrien Mazarguil (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) { 425680d5280SAdrien Mazarguil msg = "mlx4 does not support matching partial UDP fields"; 426680d5280SAdrien Mazarguil goto error; 427680d5280SAdrien Mazarguil } 428bf959ec5SAdrien Mazarguil if (mask && mask->hdr.dst_port && flow->priority) { 429bf959ec5SAdrien Mazarguil msg = "combining UDP destination port matching with a nonzero" 430bf959ec5SAdrien Mazarguil " priority level is not supported"; 431bf959ec5SAdrien Mazarguil goto error; 432bf959ec5SAdrien Mazarguil } 433680d5280SAdrien Mazarguil if (!flow->ibv_attr) 434680d5280SAdrien Mazarguil return 0; 43546d5736aSVasily Philipov ++flow->ibv_attr->num_of_specs; 436100fe44bSAdrien Mazarguil udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); 43746d5736aSVasily Philipov *udp = (struct ibv_flow_spec_tcp_udp) { 43846d5736aSVasily Philipov .type = IBV_FLOW_SPEC_UDP, 439680d5280SAdrien Mazarguil .size = sizeof(*udp), 44046d5736aSVasily Philipov }; 44146d5736aSVasily Philipov if (!spec) 44246d5736aSVasily Philipov return 0; 44346d5736aSVasily Philipov udp->val.dst_port = 
spec->hdr.dst_port; 44446d5736aSVasily Philipov udp->val.src_port = spec->hdr.src_port; 44546d5736aSVasily Philipov udp->mask.dst_port = mask->hdr.dst_port; 44646d5736aSVasily Philipov udp->mask.src_port = mask->hdr.src_port; 44746d5736aSVasily Philipov /* Remove unwanted bits from values. */ 44846d5736aSVasily Philipov udp->val.src_port &= udp->mask.src_port; 44946d5736aSVasily Philipov udp->val.dst_port &= udp->mask.dst_port; 45046d5736aSVasily Philipov return 0; 451680d5280SAdrien Mazarguil error: 452680d5280SAdrien Mazarguil return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 453680d5280SAdrien Mazarguil item, msg); 45446d5736aSVasily Philipov } 45546d5736aSVasily Philipov 45646d5736aSVasily Philipov /** 457680d5280SAdrien Mazarguil * Merge TCP pattern item into flow rule handle. 45846d5736aSVasily Philipov * 459680d5280SAdrien Mazarguil * Additional mlx4-specific constraints on supported fields: 460680d5280SAdrien Mazarguil * 461680d5280SAdrien Mazarguil * - No support for partial masks. 462680d5280SAdrien Mazarguil * 463680d5280SAdrien Mazarguil * @param[in, out] flow 464100fe44bSAdrien Mazarguil * Flow rule handle to update. 465680d5280SAdrien Mazarguil * @param[in] item 466680d5280SAdrien Mazarguil * Pattern item to merge. 467680d5280SAdrien Mazarguil * @param[in] proc 468680d5280SAdrien Mazarguil * Associated item-processing object. 469680d5280SAdrien Mazarguil * @param[out] error 470680d5280SAdrien Mazarguil * Perform verbose error reporting if not NULL. 471680d5280SAdrien Mazarguil * 472680d5280SAdrien Mazarguil * @return 473680d5280SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 
47446d5736aSVasily Philipov */ 47546d5736aSVasily Philipov static int 476680d5280SAdrien Mazarguil mlx4_flow_merge_tcp(struct rte_flow *flow, 477680d5280SAdrien Mazarguil const struct rte_flow_item *item, 478680d5280SAdrien Mazarguil const struct mlx4_flow_proc_item *proc, 479680d5280SAdrien Mazarguil struct rte_flow_error *error) 48046d5736aSVasily Philipov { 48146d5736aSVasily Philipov const struct rte_flow_item_tcp *spec = item->spec; 482680d5280SAdrien Mazarguil const struct rte_flow_item_tcp *mask = 483680d5280SAdrien Mazarguil spec ? (item->mask ? item->mask : proc->mask_default) : NULL; 48446d5736aSVasily Philipov struct ibv_flow_spec_tcp_udp *tcp; 485680d5280SAdrien Mazarguil const char *msg; 48646d5736aSVasily Philipov 487cad92582SAdrien Mazarguil if (mask && 488680d5280SAdrien Mazarguil ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) || 489680d5280SAdrien Mazarguil (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) { 490680d5280SAdrien Mazarguil msg = "mlx4 does not support matching partial TCP fields"; 491680d5280SAdrien Mazarguil goto error; 492680d5280SAdrien Mazarguil } 493680d5280SAdrien Mazarguil if (!flow->ibv_attr) 494680d5280SAdrien Mazarguil return 0; 49546d5736aSVasily Philipov ++flow->ibv_attr->num_of_specs; 496100fe44bSAdrien Mazarguil tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); 49746d5736aSVasily Philipov *tcp = (struct ibv_flow_spec_tcp_udp) { 49846d5736aSVasily Philipov .type = IBV_FLOW_SPEC_TCP, 499680d5280SAdrien Mazarguil .size = sizeof(*tcp), 50046d5736aSVasily Philipov }; 50146d5736aSVasily Philipov if (!spec) 50246d5736aSVasily Philipov return 0; 50346d5736aSVasily Philipov tcp->val.dst_port = spec->hdr.dst_port; 50446d5736aSVasily Philipov tcp->val.src_port = spec->hdr.src_port; 50546d5736aSVasily Philipov tcp->mask.dst_port = mask->hdr.dst_port; 50646d5736aSVasily Philipov tcp->mask.src_port = mask->hdr.src_port; 50746d5736aSVasily Philipov /* Remove unwanted bits from values. 
*/ 50846d5736aSVasily Philipov tcp->val.src_port &= tcp->mask.src_port; 50946d5736aSVasily Philipov tcp->val.dst_port &= tcp->mask.dst_port; 51046d5736aSVasily Philipov return 0; 511680d5280SAdrien Mazarguil error: 512680d5280SAdrien Mazarguil return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 513680d5280SAdrien Mazarguil item, msg); 51446d5736aSVasily Philipov } 51546d5736aSVasily Philipov 51646d5736aSVasily Philipov /** 517680d5280SAdrien Mazarguil * Perform basic sanity checks on a pattern item. 51846d5736aSVasily Philipov * 519680d5280SAdrien Mazarguil * @param[in] item 52046d5736aSVasily Philipov * Item specification. 521680d5280SAdrien Mazarguil * @param[in] proc 522680d5280SAdrien Mazarguil * Associated item-processing object. 523680d5280SAdrien Mazarguil * @param[out] error 524680d5280SAdrien Mazarguil * Perform verbose error reporting if not NULL. 52546d5736aSVasily Philipov * 52646d5736aSVasily Philipov * @return 527680d5280SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 52846d5736aSVasily Philipov */ 52946d5736aSVasily Philipov static int 530680d5280SAdrien Mazarguil mlx4_flow_item_check(const struct rte_flow_item *item, 531680d5280SAdrien Mazarguil const struct mlx4_flow_proc_item *proc, 532680d5280SAdrien Mazarguil struct rte_flow_error *error) 53346d5736aSVasily Philipov { 534680d5280SAdrien Mazarguil const uint8_t *mask; 535680d5280SAdrien Mazarguil unsigned int i; 53646d5736aSVasily Philipov 537680d5280SAdrien Mazarguil /* item->last and item->mask cannot exist without item->spec. */ 53846d5736aSVasily Philipov if (!item->spec && (item->mask || item->last)) 539680d5280SAdrien Mazarguil return rte_flow_error_set 540680d5280SAdrien Mazarguil (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, 541680d5280SAdrien Mazarguil "\"mask\" or \"last\" field provided without a" 542680d5280SAdrien Mazarguil " corresponding \"spec\""); 543680d5280SAdrien Mazarguil /* No spec, no mask, no problem. 
*/ 544680d5280SAdrien Mazarguil if (!item->spec) 545680d5280SAdrien Mazarguil return 0; 546680d5280SAdrien Mazarguil mask = item->mask ? 547680d5280SAdrien Mazarguil (const uint8_t *)item->mask : 548680d5280SAdrien Mazarguil (const uint8_t *)proc->mask_default; 5498e08df22SAlexander Kozyrev MLX4_ASSERT(mask); 550680d5280SAdrien Mazarguil /* 551680d5280SAdrien Mazarguil * Single-pass check to make sure that: 552680d5280SAdrien Mazarguil * - Mask is supported, no bits are set outside proc->mask_support. 553680d5280SAdrien Mazarguil * - Both item->spec and item->last are included in mask. 554680d5280SAdrien Mazarguil */ 555680d5280SAdrien Mazarguil for (i = 0; i != proc->mask_sz; ++i) { 556680d5280SAdrien Mazarguil if (!mask[i]) 557680d5280SAdrien Mazarguil continue; 558680d5280SAdrien Mazarguil if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) != 559680d5280SAdrien Mazarguil ((const uint8_t *)proc->mask_support)[i]) 560680d5280SAdrien Mazarguil return rte_flow_error_set 561680d5280SAdrien Mazarguil (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 562680d5280SAdrien Mazarguil item, "unsupported field found in \"mask\""); 563680d5280SAdrien Mazarguil if (item->last && 564680d5280SAdrien Mazarguil (((const uint8_t *)item->spec)[i] & mask[i]) != 565680d5280SAdrien Mazarguil (((const uint8_t *)item->last)[i] & mask[i])) 566680d5280SAdrien Mazarguil return rte_flow_error_set 567680d5280SAdrien Mazarguil (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 568680d5280SAdrien Mazarguil item, 569680d5280SAdrien Mazarguil "range between \"spec\" and \"last\"" 570680d5280SAdrien Mazarguil " is larger than \"mask\""); 57146d5736aSVasily Philipov } 572680d5280SAdrien Mazarguil return 0; 57346d5736aSVasily Philipov } 57446d5736aSVasily Philipov 57546d5736aSVasily Philipov /** Graph of supported items and associated actions. 
 */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		/* Patterns must start with an Ethernet item. */
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
				       RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_eth){
			/* Only destination MAC can be matched. */
			.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		},
		.mask_default = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.merge = mlx4_flow_merge_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_vlan){
			/* Only TCI VID matching is supported. */
			.hdr.vlan_tci = RTE_BE16(0x0fff),
		},
		.mask_default = &rte_flow_item_vlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.merge = mlx4_flow_merge_vlan,
		/* VLAN is folded into the Ethernet spec, no extra room. */
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
				       RTE_FLOW_ITEM_TYPE_TCP),
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = mlx4_flow_merge_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = mlx4_flow_merge_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = mlx4_flow_merge_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
};

/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct mlx4_priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct rte_flow **addr)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	const struct mlx4_flow_proc_item *proc;
	/*
	 * First pass runs against this stack-allocated handle to size the
	 * Verbs attribute buffer; the second pass (after "goto fill") runs
	 * against the heap-allocated handle.
	 */
	struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
	struct rte_flow *flow = &temp;
	const char *msg = NULL;
	int overlap;

	if (attr->group)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			 NULL, "groups are not supported");
	if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			 NULL, "maximum priority level is "
			 MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
	if (attr->egress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			 NULL, "egress is not supported");
	if (attr->transfer)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			 NULL, "transfer is not supported");
	if (!attr->ingress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			 NULL, "only ingress is supported");
fill:
	overlap = 0;
	proc = mlx4_flow_proc_item_list;
	flow->priority = attr->priority;
	/* Go over pattern. */
	for (item = pattern; item->type; ++item) {
		const struct mlx4_flow_proc_item *next = NULL;
		unsigned int i;
		int err;

		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
			flow->internal = 1;
			continue;
		}
		if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN && flow->promisc) ||
		    flow->allmulti) {
			msg = "mlx4 does not support additional matching"
				" criteria combined with indiscriminate"
				" matching on Ethernet headers";
			goto exit_item_not_supported;
		}
		/* The item must be a valid successor of the previous one. */
		for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
			if (proc->next_item[i] == item->type) {
				next = &mlx4_flow_proc_item_list[item->type];
				break;
			}
		}
		if (!next)
			goto exit_item_not_supported;
		proc = next;
		/*
		 * Perform basic sanity checks only once, while handle is
		 * not allocated.
		 */
		if (flow == &temp) {
			err = mlx4_flow_item_check(item, proc, error);
			if (err)
				return err;
		}
		if (proc->merge) {
			err = proc->merge(flow, item, proc, error);
			if (err)
				return err;
		}
		/* Account for the room this item needs in ibv_attr. */
		flow->ibv_attr_size += proc->dst_sz;
	}
	/* Go over actions list. */
	for (action = actions; action->type; ++action) {
		/* This one may appear anywhere multiple times. */
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		/* Fate-deciding actions may appear exactly once. */
		if (overlap) {
			msg = "cannot combine several fate-deciding actions,"
				" choose between DROP, QUEUE or RSS";
			goto exit_action_not_supported;
		}
		overlap = 1;
		switch (action->type) {
			const struct rte_flow_action_queue *queue;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;
			uint32_t rss_key_len;
			uint64_t fields;
			unsigned int i;

		case RTE_FLOW_ACTION_TYPE_DROP:
			flow->drop = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			/* Second pass reuses the context from the first. */
			if (flow->rss)
				break;
			queue = action->conf;
			if (queue->index >= ETH_DEV(priv)->data->nb_rx_queues) {
				msg = "queue target index beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			/* A lone queue is a single-entry RSS context. */
			flow->rss = mlx4_rss_get
				(priv, 0, mlx4_rss_hash_key_default, 1,
				 &queue->index);
			if (!flow->rss) {
				msg = "not enough resources for additional"
					" single-queue RSS context";
				goto exit_action_not_supported;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			/* Second pass reuses the context from the first. */
			if (flow->rss)
				break;
			rss = action->conf;
			/* Default RSS configuration if none is provided. */
			if (rss->key_len) {
				rss_key = rss->key ?
					rss->key : mlx4_rss_hash_key_default;
				rss_key_len = rss->key_len;
			} else {
				rss_key = mlx4_rss_hash_key_default;
				rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
			}
			/* Sanity checks. */
			for (i = 0; i < rss->queue_num; ++i)
				if (rss->queue[i] >=
				    ETH_DEV(priv)->data->nb_rx_queues)
					break;
			if (i != rss->queue_num) {
				msg = "queue index target beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			if (!rte_is_power_of_2(rss->queue_num)) {
				msg = "for RSS, mlx4 requires the number of"
					" queues to be a power of two";
				goto exit_action_not_supported;
			}
			if (rss_key_len != sizeof(flow->rss->key)) {
				msg = "mlx4 supports exactly one RSS hash key"
					" length: "
					MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
				goto exit_action_not_supported;
			}
			/* Queue indices must be consecutive. */
			for (i = 1; i < rss->queue_num; ++i)
				if (rss->queue[i] - rss->queue[i - 1] != 1)
					break;
			if (i != rss->queue_num) {
				msg = "mlx4 requires RSS contexts to use"
					" consecutive queue indices only";
				goto exit_action_not_supported;
			}
			if (rss->queue[0] % rss->queue_num) {
				msg = "mlx4 requires the first queue of a RSS"
					" context to be aligned on a multiple"
					" of the context size";
				goto exit_action_not_supported;
			}
			if (rss->func &&
			    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
				msg = "the only supported RSS hash function"
					" is Toeplitz";
				goto exit_action_not_supported;
			}
			if (rss->level) {
				msg = "a nonzero RSS encapsulation level is"
					" not supported";
				goto exit_action_not_supported;
			}
			/*
			 * mlx4_conv_rss_types() can legitimately return -1;
			 * rte_errno disambiguates failure from that value.
			 */
			rte_errno = 0;
			fields = mlx4_conv_rss_types(priv, rss->types, 0);
			if (fields == (uint64_t)-1 && rte_errno) {
				msg = "unsupported RSS hash type requested";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv, fields, rss_key, rss->queue_num,
				 rss->queue);
			if (!flow->rss) {
				msg = "either invalid parameters or not enough"
					" resources for additional multi-queue"
					" RSS context";
				goto exit_action_not_supported;
			}
			break;
		default:
			goto exit_action_not_supported;
		}
	}
	/* When fate is unknown, drop traffic. */
	if (!overlap)
		flow->drop = 1;
	/* Validation ends here. */
	if (!addr) {
		if (flow->rss)
			mlx4_rss_put(flow->rss);
		return 0;
	}
	if (flow == &temp) {
		/* Allocate proper handle based on collected data. */
		const struct mlx4_malloc_vec vec[] = {
			{
				.align = alignof(struct rte_flow),
				.size = sizeof(*flow),
				.addr = (void **)&flow,
			},
			{
				.align = alignof(struct ibv_flow_attr),
				.size = temp.ibv_attr_size,
				.addr = (void **)&temp.ibv_attr,
			},
		};

		if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
			if (temp.rss)
				mlx4_rss_put(temp.rss);
			return rte_flow_error_set
				(error, -rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "flow rule handle allocation failure");
		}
		/* Most fields will be updated by second pass. */
		*flow = (struct rte_flow){
			.ibv_attr = temp.ibv_attr,
			.ibv_attr_size = sizeof(*flow->ibv_attr),
			.rss = temp.rss,
		};
		*flow->ibv_attr = (struct ibv_flow_attr){
			.type = IBV_FLOW_ATTR_NORMAL,
			.size = sizeof(*flow->ibv_attr),
			.priority = attr->priority,
			.port = priv->port,
		};
		/* Run the second pass to fill the Verbs attributes. */
		goto fill;
	}
	*addr = flow;
	return 0;
exit_item_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg ? msg : "item not supported");
exit_action_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				  action, msg ? msg : "action not supported");
}

/**
 * Validate a flow supported by the NIC.
92646d5736aSVasily Philipov * 92746d5736aSVasily Philipov * @see rte_flow_validate() 92846d5736aSVasily Philipov * @see rte_flow_ops 92946d5736aSVasily Philipov */ 930af745cd6SAdrien Mazarguil static int 93146d5736aSVasily Philipov mlx4_flow_validate(struct rte_eth_dev *dev, 93246d5736aSVasily Philipov const struct rte_flow_attr *attr, 933809d8a6cSAdrien Mazarguil const struct rte_flow_item pattern[], 93446d5736aSVasily Philipov const struct rte_flow_action actions[], 93546d5736aSVasily Philipov struct rte_flow_error *error) 93646d5736aSVasily Philipov { 937dbeba4cfSThomas Monjalon struct mlx4_priv *priv = dev->data->dev_private; 93846d5736aSVasily Philipov 939100fe44bSAdrien Mazarguil return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL); 94046d5736aSVasily Philipov } 94146d5736aSVasily Philipov 94246d5736aSVasily Philipov /** 943d3a7e092SAdrien Mazarguil * Get a drop flow rule resources instance. 944642fe56aSVasily Philipov * 945642fe56aSVasily Philipov * @param priv 946642fe56aSVasily Philipov * Pointer to private structure. 947642fe56aSVasily Philipov * 948642fe56aSVasily Philipov * @return 949d3a7e092SAdrien Mazarguil * Pointer to drop flow resources on success, NULL otherwise and rte_errno 950d3a7e092SAdrien Mazarguil * is set. 
951642fe56aSVasily Philipov */ 952d3a7e092SAdrien Mazarguil static struct mlx4_drop * 953dbeba4cfSThomas Monjalon mlx4_drop_get(struct mlx4_priv *priv) 954642fe56aSVasily Philipov { 955d3a7e092SAdrien Mazarguil struct mlx4_drop *drop = priv->drop; 956642fe56aSVasily Philipov 957d3a7e092SAdrien Mazarguil if (drop) { 9588e08df22SAlexander Kozyrev MLX4_ASSERT(drop->refcnt); 9598e08df22SAlexander Kozyrev MLX4_ASSERT(drop->priv == priv); 960d3a7e092SAdrien Mazarguil ++drop->refcnt; 961d3a7e092SAdrien Mazarguil return drop; 962642fe56aSVasily Philipov } 963d3a7e092SAdrien Mazarguil drop = rte_malloc(__func__, sizeof(*drop), 0); 964d3a7e092SAdrien Mazarguil if (!drop) 965d3a7e092SAdrien Mazarguil goto error; 966d3a7e092SAdrien Mazarguil *drop = (struct mlx4_drop){ 967d3a7e092SAdrien Mazarguil .priv = priv, 968d3a7e092SAdrien Mazarguil .refcnt = 1, 969d3a7e092SAdrien Mazarguil }; 9704eba244bSAdrien Mazarguil drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); 971d3a7e092SAdrien Mazarguil if (!drop->cq) 972d3a7e092SAdrien Mazarguil goto error; 9734eba244bSAdrien Mazarguil drop->qp = mlx4_glue->create_qp 9744eba244bSAdrien Mazarguil (priv->pd, 97572ba7fadSAdrien Mazarguil &(struct ibv_qp_init_attr){ 976d3a7e092SAdrien Mazarguil .send_cq = drop->cq, 977d3a7e092SAdrien Mazarguil .recv_cq = drop->cq, 978642fe56aSVasily Philipov .qp_type = IBV_QPT_RAW_PACKET, 979642fe56aSVasily Philipov }); 980d3a7e092SAdrien Mazarguil if (!drop->qp) 981d3a7e092SAdrien Mazarguil goto error; 982d3a7e092SAdrien Mazarguil priv->drop = drop; 983d3a7e092SAdrien Mazarguil return drop; 984d3a7e092SAdrien Mazarguil error: 9856f155c0bSMichael Baum if (drop) { 986d3a7e092SAdrien Mazarguil if (drop->qp) 9874eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_qp(drop->qp)); 988d3a7e092SAdrien Mazarguil if (drop->cq) 9894eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_cq(drop->cq)); 990d3a7e092SAdrien Mazarguil rte_free(drop); 9916f155c0bSMichael Baum } 992d3a7e092SAdrien Mazarguil 
rte_errno = ENOMEM; 993d3a7e092SAdrien Mazarguil return NULL; 994642fe56aSVasily Philipov } 995d3a7e092SAdrien Mazarguil 996d3a7e092SAdrien Mazarguil /** 997d3a7e092SAdrien Mazarguil * Give back a drop flow rule resources instance. 998d3a7e092SAdrien Mazarguil * 999d3a7e092SAdrien Mazarguil * @param drop 1000d3a7e092SAdrien Mazarguil * Pointer to drop flow rule resources. 1001d3a7e092SAdrien Mazarguil */ 1002d3a7e092SAdrien Mazarguil static void 1003d3a7e092SAdrien Mazarguil mlx4_drop_put(struct mlx4_drop *drop) 1004d3a7e092SAdrien Mazarguil { 10058e08df22SAlexander Kozyrev MLX4_ASSERT(drop->refcnt); 1006d3a7e092SAdrien Mazarguil if (--drop->refcnt) 1007d3a7e092SAdrien Mazarguil return; 1008d3a7e092SAdrien Mazarguil drop->priv->drop = NULL; 10094eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_qp(drop->qp)); 10104eba244bSAdrien Mazarguil claim_zero(mlx4_glue->destroy_cq(drop->cq)); 1011d3a7e092SAdrien Mazarguil rte_free(drop); 1012642fe56aSVasily Philipov } 1013642fe56aSVasily Philipov 1014642fe56aSVasily Philipov /** 1015100fe44bSAdrien Mazarguil * Toggle a configured flow rule. 101646d5736aSVasily Philipov * 101746d5736aSVasily Philipov * @param priv 101846d5736aSVasily Philipov * Pointer to private structure. 1019100fe44bSAdrien Mazarguil * @param flow 1020100fe44bSAdrien Mazarguil * Flow rule handle to toggle. 1021100fe44bSAdrien Mazarguil * @param enable 1022100fe44bSAdrien Mazarguil * Whether associated Verbs flow must be created or removed. 102346d5736aSVasily Philipov * @param[out] error 102446d5736aSVasily Philipov * Perform verbose error reporting if not NULL. 102546d5736aSVasily Philipov * 102646d5736aSVasily Philipov * @return 1027100fe44bSAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 
 */
static int
mlx4_flow_toggle(struct mlx4_priv *priv,
		 struct rte_flow *flow,
		 int enable,
		 struct rte_flow_error *error)
{
	struct ibv_qp *qp = NULL;
	const char *msg;
	int err;

	/* Disabling: tear down the Verbs flow and release fate resources. */
	if (!enable) {
		if (!flow->ibv_flow)
			return 0;
		claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		if (flow->drop)
			mlx4_drop_put(priv->drop);
		else if (flow->rss)
			mlx4_rss_detach(flow->rss);
		return 0;
	}
	MLX4_ASSERT(flow->ibv_attr);
	/*
	 * The last priority level is reserved for internal rules outside
	 * isolated mode; refuse (and disable) user rules that claim it.
	 */
	if (!flow->internal &&
	    !priv->isolated &&
	    flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
		if (flow->ibv_flow) {
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else if (flow->rss)
				mlx4_rss_detach(flow->rss);
		}
		err = EACCES;
		msg = ("priority level "
		       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
		       " is reserved when not in isolated mode");
		goto error;
	}
	if (flow->rss) {
		struct mlx4_rss *rss = flow->rss;
		int missing = 0;
		unsigned int i;

		/* Stop at the first nonexistent target queue. */
		for (i = 0; i != rss->queues; ++i)
			if (rss->queue_id[i] >=
			    ETH_DEV(priv)->data->nb_rx_queues ||
			    !ETH_DEV(priv)->data->rx_queues[rss->queue_id[i]]) {
				missing = 1;
				break;
			}
		if (flow->ibv_flow) {
			/*
			 * Nothing to do when the existing Verbs flow already
			 * matches the required fate (drop iff missing).
			 */
			if (missing ^ !flow->drop)
				return 0;
			/* Verbs flow needs updating. */
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else
				mlx4_rss_detach(rss);
		}
		if (!missing) {
			err = mlx4_rss_attach(rss);
			if (err) {
				err = -err;
				msg = "cannot create indirection table or hash"
					" QP to associate flow rule with";
				goto error;
			}
			qp = rss->qp;
		}
		/* A missing target queue drops traffic implicitly. */
		flow->drop = missing;
	}
	if (flow->drop) {
		if (flow->ibv_flow)
			return 0;
		/* Shared drop resources; success leaves priv->drop set. */
		mlx4_drop_get(priv);
		if (!priv->drop) {
			err = rte_errno;
			msg = "resources for drop flow rule cannot be created";
			goto error;
		}
		qp = priv->drop->qp;
	}
	MLX4_ASSERT(qp);
	if (flow->ibv_flow)
		return 0;
	flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
	if (flow->ibv_flow)
		return 0;
	/* Creation failed: give back whatever fate resource was taken. */
	if (flow->drop)
		mlx4_drop_put(priv->drop);
	else if (flow->rss)
		mlx4_rss_detach(flow->rss);
	err = errno;
	msg = "flow rule rejected by device";
error:
	return rte_flow_error_set
		(error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}

/**
 * Create a flow.
113546d5736aSVasily Philipov * 11363cf06ceaSAdrien Mazarguil * @see rte_flow_create() 11373cf06ceaSAdrien Mazarguil * @see rte_flow_ops 113846d5736aSVasily Philipov */ 1139af745cd6SAdrien Mazarguil static struct rte_flow * 11403cf06ceaSAdrien Mazarguil mlx4_flow_create(struct rte_eth_dev *dev, 114146d5736aSVasily Philipov const struct rte_flow_attr *attr, 1142809d8a6cSAdrien Mazarguil const struct rte_flow_item pattern[], 114346d5736aSVasily Philipov const struct rte_flow_action actions[], 114446d5736aSVasily Philipov struct rte_flow_error *error) 114546d5736aSVasily Philipov { 1146dbeba4cfSThomas Monjalon struct mlx4_priv *priv = dev->data->dev_private; 1147100fe44bSAdrien Mazarguil struct rte_flow *flow; 114846d5736aSVasily Philipov int err; 114946d5736aSVasily Philipov 1150809d8a6cSAdrien Mazarguil err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow); 115146d5736aSVasily Philipov if (err) 115246d5736aSVasily Philipov return NULL; 1153100fe44bSAdrien Mazarguil err = mlx4_flow_toggle(priv, flow, priv->started, error); 1154100fe44bSAdrien Mazarguil if (!err) { 1155bdcad2f4SAdrien Mazarguil struct rte_flow *curr = LIST_FIRST(&priv->flows); 1156bdcad2f4SAdrien Mazarguil 1157bdcad2f4SAdrien Mazarguil /* New rules are inserted after internal ones. 
*/ 1158bdcad2f4SAdrien Mazarguil if (!curr || !curr->internal) { 1159100fe44bSAdrien Mazarguil LIST_INSERT_HEAD(&priv->flows, flow, next); 1160bdcad2f4SAdrien Mazarguil } else { 1161bdcad2f4SAdrien Mazarguil while (LIST_NEXT(curr, next) && 1162bdcad2f4SAdrien Mazarguil LIST_NEXT(curr, next)->internal) 1163bdcad2f4SAdrien Mazarguil curr = LIST_NEXT(curr, next); 1164bdcad2f4SAdrien Mazarguil LIST_INSERT_AFTER(curr, flow, next); 1165bdcad2f4SAdrien Mazarguil } 1166100fe44bSAdrien Mazarguil return flow; 116746d5736aSVasily Philipov } 1168078b8b45SAdrien Mazarguil if (flow->rss) 1169078b8b45SAdrien Mazarguil mlx4_rss_put(flow->rss); 1170100fe44bSAdrien Mazarguil rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1171100fe44bSAdrien Mazarguil error->message); 1172100fe44bSAdrien Mazarguil rte_free(flow); 117346d5736aSVasily Philipov return NULL; 117446d5736aSVasily Philipov } 117546d5736aSVasily Philipov 117646d5736aSVasily Philipov /** 1177dcafc2a6SAdrien Mazarguil * Configure isolated mode. 
1178dcafc2a6SAdrien Mazarguil * 1179ae7954ddSVasily Philipov * @see rte_flow_isolate() 1180dcafc2a6SAdrien Mazarguil * @see rte_flow_ops 1181ae7954ddSVasily Philipov */ 1182af745cd6SAdrien Mazarguil static int 1183ae7954ddSVasily Philipov mlx4_flow_isolate(struct rte_eth_dev *dev, 1184ae7954ddSVasily Philipov int enable, 1185ae7954ddSVasily Philipov struct rte_flow_error *error) 1186ae7954ddSVasily Philipov { 1187dbeba4cfSThomas Monjalon struct mlx4_priv *priv = dev->data->dev_private; 1188ae7954ddSVasily Philipov 1189dcafc2a6SAdrien Mazarguil if (!!enable == !!priv->isolated) 1190dcafc2a6SAdrien Mazarguil return 0; 1191ae7954ddSVasily Philipov priv->isolated = !!enable; 1192fee75e14SAdrien Mazarguil if (mlx4_flow_sync(priv, error)) { 1193bdcad2f4SAdrien Mazarguil priv->isolated = !enable; 1194fee75e14SAdrien Mazarguil return -rte_errno; 1195dcafc2a6SAdrien Mazarguil } 1196ae7954ddSVasily Philipov return 0; 1197ae7954ddSVasily Philipov } 1198ae7954ddSVasily Philipov 1199ae7954ddSVasily Philipov /** 1200100fe44bSAdrien Mazarguil * Destroy a flow rule. 
120146d5736aSVasily Philipov * 120246d5736aSVasily Philipov * @see rte_flow_destroy() 120346d5736aSVasily Philipov * @see rte_flow_ops 120446d5736aSVasily Philipov */ 1205af745cd6SAdrien Mazarguil static int 120646d5736aSVasily Philipov mlx4_flow_destroy(struct rte_eth_dev *dev, 120746d5736aSVasily Philipov struct rte_flow *flow, 120846d5736aSVasily Philipov struct rte_flow_error *error) 120946d5736aSVasily Philipov { 1210dbeba4cfSThomas Monjalon struct mlx4_priv *priv = dev->data->dev_private; 1211100fe44bSAdrien Mazarguil int err = mlx4_flow_toggle(priv, flow, 0, error); 1212100fe44bSAdrien Mazarguil 1213100fe44bSAdrien Mazarguil if (err) 1214100fe44bSAdrien Mazarguil return err; 12153cf06ceaSAdrien Mazarguil LIST_REMOVE(flow, next); 1216078b8b45SAdrien Mazarguil if (flow->rss) 1217078b8b45SAdrien Mazarguil mlx4_rss_put(flow->rss); 12183cf06ceaSAdrien Mazarguil rte_free(flow); 121946d5736aSVasily Philipov return 0; 122046d5736aSVasily Philipov } 122146d5736aSVasily Philipov 122246d5736aSVasily Philipov /** 1223bdcad2f4SAdrien Mazarguil * Destroy user-configured flow rules. 1224bdcad2f4SAdrien Mazarguil * 1225bdcad2f4SAdrien Mazarguil * This function skips internal flows rules. 
122646d5736aSVasily Philipov * 122746d5736aSVasily Philipov * @see rte_flow_flush() 122846d5736aSVasily Philipov * @see rte_flow_ops 122946d5736aSVasily Philipov */ 1230af745cd6SAdrien Mazarguil static int 123146d5736aSVasily Philipov mlx4_flow_flush(struct rte_eth_dev *dev, 123246d5736aSVasily Philipov struct rte_flow_error *error) 123346d5736aSVasily Philipov { 1234dbeba4cfSThomas Monjalon struct mlx4_priv *priv = dev->data->dev_private; 1235bdcad2f4SAdrien Mazarguil struct rte_flow *flow = LIST_FIRST(&priv->flows); 123646d5736aSVasily Philipov 1237bdcad2f4SAdrien Mazarguil while (flow) { 1238bdcad2f4SAdrien Mazarguil struct rte_flow *next = LIST_NEXT(flow, next); 12393cf06ceaSAdrien Mazarguil 1240bdcad2f4SAdrien Mazarguil if (!flow->internal) 12413cf06ceaSAdrien Mazarguil mlx4_flow_destroy(dev, flow, error); 1242bdcad2f4SAdrien Mazarguil flow = next; 12433cf06ceaSAdrien Mazarguil } 124446d5736aSVasily Philipov return 0; 124546d5736aSVasily Philipov } 124646d5736aSVasily Philipov 124746d5736aSVasily Philipov /** 124830695adbSAdrien Mazarguil * Helper function to determine the next configured VLAN filter. 124930695adbSAdrien Mazarguil * 125030695adbSAdrien Mazarguil * @param priv 125130695adbSAdrien Mazarguil * Pointer to private structure. 125230695adbSAdrien Mazarguil * @param vlan 125330695adbSAdrien Mazarguil * VLAN ID to use as a starting point. 125430695adbSAdrien Mazarguil * 125530695adbSAdrien Mazarguil * @return 125630695adbSAdrien Mazarguil * Next configured VLAN ID or a high value (>= 4096) if there is none. 
125730695adbSAdrien Mazarguil */ 125830695adbSAdrien Mazarguil static uint16_t 1259dbeba4cfSThomas Monjalon mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan) 126030695adbSAdrien Mazarguil { 126130695adbSAdrien Mazarguil while (vlan < 4096) { 1262099c2c53SYongseok Koh if (ETH_DEV(priv)->data->vlan_filter_conf.ids[vlan / 64] & 126330695adbSAdrien Mazarguil (UINT64_C(1) << (vlan % 64))) 126430695adbSAdrien Mazarguil return vlan; 126530695adbSAdrien Mazarguil ++vlan; 126630695adbSAdrien Mazarguil } 126730695adbSAdrien Mazarguil return vlan; 126830695adbSAdrien Mazarguil } 126930695adbSAdrien Mazarguil 127030695adbSAdrien Mazarguil /** 1271bdcad2f4SAdrien Mazarguil * Generate internal flow rules. 1272bdcad2f4SAdrien Mazarguil * 1273eacaac7bSAdrien Mazarguil * Various flow rules are created depending on the mode the device is in: 1274eacaac7bSAdrien Mazarguil * 1275643958cfSMoti Haimovsky * 1. Promiscuous: 1276643958cfSMoti Haimovsky * port MAC + broadcast + catch-all (VLAN filtering is ignored). 1277643958cfSMoti Haimovsky * 2. All multicast: 1278643958cfSMoti Haimovsky * port MAC/VLAN + broadcast + catch-all multicast. 1279643958cfSMoti Haimovsky * 3. Otherwise: 1280643958cfSMoti Haimovsky * port MAC/VLAN + broadcast MAC/VLAN. 1281eacaac7bSAdrien Mazarguil * 1282eacaac7bSAdrien Mazarguil * About MAC flow rules: 1283eacaac7bSAdrien Mazarguil * 12841437784bSAdrien Mazarguil * - MAC flow rules are generated from @p dev->data->mac_addrs 12851437784bSAdrien Mazarguil * (@p priv->mac array). 12861437784bSAdrien Mazarguil * - An additional flow rule for Ethernet broadcasts is also generated. 1287295968d1SFerruh Yigit * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER 128830695adbSAdrien Mazarguil * is enabled and VLAN filters are configured. 12891437784bSAdrien Mazarguil * 1290bdcad2f4SAdrien Mazarguil * @param priv 1291bdcad2f4SAdrien Mazarguil * Pointer to private structure. 
1292bdcad2f4SAdrien Mazarguil * @param[out] error 1293bdcad2f4SAdrien Mazarguil * Perform verbose error reporting if not NULL. 1294bdcad2f4SAdrien Mazarguil * 1295bdcad2f4SAdrien Mazarguil * @return 1296bdcad2f4SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 1297bdcad2f4SAdrien Mazarguil */ 1298bdcad2f4SAdrien Mazarguil static int 1299dbeba4cfSThomas Monjalon mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error) 1300bdcad2f4SAdrien Mazarguil { 1301bdcad2f4SAdrien Mazarguil struct rte_flow_attr attr = { 1302fc49cbb7SAdrien Mazarguil .priority = MLX4_FLOW_PRIORITY_LAST, 1303bdcad2f4SAdrien Mazarguil .ingress = 1, 1304bdcad2f4SAdrien Mazarguil }; 13051437784bSAdrien Mazarguil struct rte_flow_item_eth eth_spec; 13061437784bSAdrien Mazarguil const struct rte_flow_item_eth eth_mask = { 1307*e0d947a1SFerruh Yigit .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 13081437784bSAdrien Mazarguil }; 1309eacaac7bSAdrien Mazarguil const struct rte_flow_item_eth eth_allmulti = { 1310*e0d947a1SFerruh Yigit .hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }, 1311eacaac7bSAdrien Mazarguil }; 131230695adbSAdrien Mazarguil struct rte_flow_item_vlan vlan_spec; 131330695adbSAdrien Mazarguil const struct rte_flow_item_vlan vlan_mask = { 13148275d5fcSThomas Monjalon .hdr.vlan_tci = RTE_BE16(0x0fff), 131530695adbSAdrien Mazarguil }; 1316bdcad2f4SAdrien Mazarguil struct rte_flow_item pattern[] = { 1317bdcad2f4SAdrien Mazarguil { 1318bdcad2f4SAdrien Mazarguil .type = MLX4_FLOW_ITEM_TYPE_INTERNAL, 1319bdcad2f4SAdrien Mazarguil }, 1320bdcad2f4SAdrien Mazarguil { 1321bdcad2f4SAdrien Mazarguil .type = RTE_FLOW_ITEM_TYPE_ETH, 13221437784bSAdrien Mazarguil .spec = ð_spec, 13231437784bSAdrien Mazarguil .mask = ð_mask, 1324bdcad2f4SAdrien Mazarguil }, 1325bdcad2f4SAdrien Mazarguil { 132630695adbSAdrien Mazarguil /* Replaced with VLAN if filtering is enabled. 
*/ 132730695adbSAdrien Mazarguil .type = RTE_FLOW_ITEM_TYPE_END, 132830695adbSAdrien Mazarguil }, 132930695adbSAdrien Mazarguil { 1330bdcad2f4SAdrien Mazarguil .type = RTE_FLOW_ITEM_TYPE_END, 1331bdcad2f4SAdrien Mazarguil }, 1332bdcad2f4SAdrien Mazarguil }; 13337d867595SAdrien Mazarguil /* 13347d867595SAdrien Mazarguil * Round number of queues down to their previous power of 2 to 13357d867595SAdrien Mazarguil * comply with RSS context limitations. Extra queues silently do not 13367d867595SAdrien Mazarguil * get RSS by default. 13377d867595SAdrien Mazarguil */ 13387d867595SAdrien Mazarguil uint32_t queues = 1339099c2c53SYongseok Koh rte_align32pow2(ETH_DEV(priv)->data->nb_rx_queues + 1) >> 1; 134019b3bc47SAdrien Mazarguil uint16_t queue[queues]; 134119b3bc47SAdrien Mazarguil struct rte_flow_action_rss action_rss = { 1342929e3319SAdrien Mazarguil .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 134318aee286SAdrien Mazarguil .level = 0, 134427bdbbefSAdrien Mazarguil .types = 0, 1345ac8d22deSAdrien Mazarguil .key_len = MLX4_RSS_HASH_KEY_SIZE, 1346ac8d22deSAdrien Mazarguil .queue_num = queues, 1347ac8d22deSAdrien Mazarguil .key = mlx4_rss_hash_key_default, 134819b3bc47SAdrien Mazarguil .queue = queue, 134919b3bc47SAdrien Mazarguil }; 1350bdcad2f4SAdrien Mazarguil struct rte_flow_action actions[] = { 1351bdcad2f4SAdrien Mazarguil { 13527d867595SAdrien Mazarguil .type = RTE_FLOW_ACTION_TYPE_RSS, 135319b3bc47SAdrien Mazarguil .conf = &action_rss, 1354bdcad2f4SAdrien Mazarguil }, 1355bdcad2f4SAdrien Mazarguil { 1356bdcad2f4SAdrien Mazarguil .type = RTE_FLOW_ACTION_TYPE_END, 1357bdcad2f4SAdrien Mazarguil }, 1358bdcad2f4SAdrien Mazarguil }; 13598275d5fcSThomas Monjalon struct rte_ether_addr *rule_mac = ð_spec.hdr.dst_addr; 136030695adbSAdrien Mazarguil rte_be16_t *rule_vlan = 1361099c2c53SYongseok Koh (ETH_DEV(priv)->data->dev_conf.rxmode.offloads & 1362295968d1SFerruh Yigit RTE_ETH_RX_OFFLOAD_VLAN_FILTER) && 1363099c2c53SYongseok Koh !ETH_DEV(priv)->data->promiscuous ? 
13648275d5fcSThomas Monjalon &vlan_spec.hdr.vlan_tci : 136530695adbSAdrien Mazarguil NULL; 136630695adbSAdrien Mazarguil uint16_t vlan = 0; 13671437784bSAdrien Mazarguil struct rte_flow *flow; 13681437784bSAdrien Mazarguil unsigned int i; 13691437784bSAdrien Mazarguil int err = 0; 1370bdcad2f4SAdrien Mazarguil 1371a76bec52SMatan Azrad /* Nothing to be done if there are no Rx queues. */ 1372a76bec52SMatan Azrad if (!queues) 1373a76bec52SMatan Azrad goto error; 13747d867595SAdrien Mazarguil /* Prepare default RSS configuration. */ 13757d867595SAdrien Mazarguil for (i = 0; i != queues; ++i) 137619b3bc47SAdrien Mazarguil queue[i] = i; 137730695adbSAdrien Mazarguil /* 137830695adbSAdrien Mazarguil * Set up VLAN item if filtering is enabled and at least one VLAN 137930695adbSAdrien Mazarguil * filter is configured. 138030695adbSAdrien Mazarguil */ 138130695adbSAdrien Mazarguil if (rule_vlan) { 138230695adbSAdrien Mazarguil vlan = mlx4_flow_internal_next_vlan(priv, 0); 138330695adbSAdrien Mazarguil if (vlan < 4096) { 138430695adbSAdrien Mazarguil pattern[2] = (struct rte_flow_item){ 138530695adbSAdrien Mazarguil .type = RTE_FLOW_ITEM_TYPE_VLAN, 138630695adbSAdrien Mazarguil .spec = &vlan_spec, 138730695adbSAdrien Mazarguil .mask = &vlan_mask, 138830695adbSAdrien Mazarguil }; 138930695adbSAdrien Mazarguil next_vlan: 139030695adbSAdrien Mazarguil *rule_vlan = rte_cpu_to_be_16(vlan); 139130695adbSAdrien Mazarguil } else { 139230695adbSAdrien Mazarguil rule_vlan = NULL; 139330695adbSAdrien Mazarguil } 139430695adbSAdrien Mazarguil } 1395643958cfSMoti Haimovsky for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) { 13966d13ea8eSOlivier Matz const struct rte_ether_addr *mac; 13971437784bSAdrien Mazarguil 13981437784bSAdrien Mazarguil /* Broadcasts are handled by an extra iteration. 
*/ 13991437784bSAdrien Mazarguil if (i < RTE_DIM(priv->mac)) 14001437784bSAdrien Mazarguil mac = &priv->mac[i]; 14011437784bSAdrien Mazarguil else 14028275d5fcSThomas Monjalon mac = ð_mask.hdr.dst_addr; 1403538da7a1SOlivier Matz if (rte_is_zero_ether_addr(mac)) 14041437784bSAdrien Mazarguil continue; 14051437784bSAdrien Mazarguil /* Check if MAC flow rule is already present. */ 14061437784bSAdrien Mazarguil for (flow = LIST_FIRST(&priv->flows); 14071437784bSAdrien Mazarguil flow && flow->internal; 14081437784bSAdrien Mazarguil flow = LIST_NEXT(flow, next)) { 14091437784bSAdrien Mazarguil const struct ibv_flow_spec_eth *eth = 14101437784bSAdrien Mazarguil (const void *)((uintptr_t)flow->ibv_attr + 14111437784bSAdrien Mazarguil sizeof(*flow->ibv_attr)); 14121437784bSAdrien Mazarguil unsigned int j; 14131437784bSAdrien Mazarguil 14141437784bSAdrien Mazarguil if (!flow->mac) 14151437784bSAdrien Mazarguil continue; 14168e08df22SAlexander Kozyrev MLX4_ASSERT(flow->ibv_attr->type == 14178e08df22SAlexander Kozyrev IBV_FLOW_ATTR_NORMAL); 14188e08df22SAlexander Kozyrev MLX4_ASSERT(flow->ibv_attr->num_of_specs == 1); 14198e08df22SAlexander Kozyrev MLX4_ASSERT(eth->type == IBV_FLOW_SPEC_ETH); 14208e08df22SAlexander Kozyrev MLX4_ASSERT(flow->rss); 142130695adbSAdrien Mazarguil if (rule_vlan && 142230695adbSAdrien Mazarguil (eth->val.vlan_tag != *rule_vlan || 142330695adbSAdrien Mazarguil eth->mask.vlan_tag != RTE_BE16(0x0fff))) 142430695adbSAdrien Mazarguil continue; 142530695adbSAdrien Mazarguil if (!rule_vlan && eth->mask.vlan_tag) 142630695adbSAdrien Mazarguil continue; 14271437784bSAdrien Mazarguil for (j = 0; j != sizeof(mac->addr_bytes); ++j) 14281437784bSAdrien Mazarguil if (eth->val.dst_mac[j] != mac->addr_bytes[j] || 14291437784bSAdrien Mazarguil eth->mask.dst_mac[j] != UINT8_C(0xff) || 14301437784bSAdrien Mazarguil eth->val.src_mac[j] != UINT8_C(0x00) || 14311437784bSAdrien Mazarguil eth->mask.src_mac[j] != UINT8_C(0x00)) 14321437784bSAdrien Mazarguil break; 
1433a9b3568eSAdrien Mazarguil if (j != sizeof(mac->addr_bytes)) 1434a9b3568eSAdrien Mazarguil continue; 1435a9b3568eSAdrien Mazarguil if (flow->rss->queues != queues || 143619b3bc47SAdrien Mazarguil memcmp(flow->rss->queue_id, action_rss.queue, 1437a9b3568eSAdrien Mazarguil queues * sizeof(flow->rss->queue_id[0]))) 1438a9b3568eSAdrien Mazarguil continue; 14391437784bSAdrien Mazarguil break; 14401437784bSAdrien Mazarguil } 14411437784bSAdrien Mazarguil if (!flow || !flow->internal) { 14421437784bSAdrien Mazarguil /* Not found, create a new flow rule. */ 14431437784bSAdrien Mazarguil memcpy(rule_mac, mac, sizeof(*mac)); 1444099c2c53SYongseok Koh flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern, 14451437784bSAdrien Mazarguil actions, error); 14461437784bSAdrien Mazarguil if (!flow) { 14471437784bSAdrien Mazarguil err = -rte_errno; 1448eacaac7bSAdrien Mazarguil goto error; 14491437784bSAdrien Mazarguil } 14501437784bSAdrien Mazarguil } 14511437784bSAdrien Mazarguil flow->select = 1; 14521437784bSAdrien Mazarguil flow->mac = 1; 14531437784bSAdrien Mazarguil } 1454eacaac7bSAdrien Mazarguil if (rule_vlan) { 145530695adbSAdrien Mazarguil vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1); 145630695adbSAdrien Mazarguil if (vlan < 4096) 145730695adbSAdrien Mazarguil goto next_vlan; 145830695adbSAdrien Mazarguil } 1459eacaac7bSAdrien Mazarguil /* Take care of promiscuous and all multicast flow rules. 
*/ 1460099c2c53SYongseok Koh if (ETH_DEV(priv)->data->promiscuous || 1461099c2c53SYongseok Koh ETH_DEV(priv)->data->all_multicast) { 1462eacaac7bSAdrien Mazarguil for (flow = LIST_FIRST(&priv->flows); 1463eacaac7bSAdrien Mazarguil flow && flow->internal; 1464eacaac7bSAdrien Mazarguil flow = LIST_NEXT(flow, next)) { 1465099c2c53SYongseok Koh if (ETH_DEV(priv)->data->promiscuous) { 1466eacaac7bSAdrien Mazarguil if (flow->promisc) 1467eacaac7bSAdrien Mazarguil break; 1468eacaac7bSAdrien Mazarguil } else { 14698e08df22SAlexander Kozyrev MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast); 1470eacaac7bSAdrien Mazarguil if (flow->allmulti) 1471eacaac7bSAdrien Mazarguil break; 1472eacaac7bSAdrien Mazarguil } 1473eacaac7bSAdrien Mazarguil } 1474a9b3568eSAdrien Mazarguil if (flow && flow->internal) { 14758e08df22SAlexander Kozyrev MLX4_ASSERT(flow->rss); 1476a9b3568eSAdrien Mazarguil if (flow->rss->queues != queues || 147719b3bc47SAdrien Mazarguil memcmp(flow->rss->queue_id, action_rss.queue, 1478a9b3568eSAdrien Mazarguil queues * sizeof(flow->rss->queue_id[0]))) 1479a9b3568eSAdrien Mazarguil flow = NULL; 1480a9b3568eSAdrien Mazarguil } 1481eacaac7bSAdrien Mazarguil if (!flow || !flow->internal) { 1482eacaac7bSAdrien Mazarguil /* Not found, create a new flow rule. 
*/ 1483099c2c53SYongseok Koh if (ETH_DEV(priv)->data->promiscuous) { 1484eacaac7bSAdrien Mazarguil pattern[1].spec = NULL; 1485eacaac7bSAdrien Mazarguil pattern[1].mask = NULL; 1486eacaac7bSAdrien Mazarguil } else { 14878e08df22SAlexander Kozyrev MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast); 1488eacaac7bSAdrien Mazarguil pattern[1].spec = ð_allmulti; 1489eacaac7bSAdrien Mazarguil pattern[1].mask = ð_allmulti; 1490eacaac7bSAdrien Mazarguil } 1491eacaac7bSAdrien Mazarguil pattern[2] = pattern[3]; 1492099c2c53SYongseok Koh flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern, 1493eacaac7bSAdrien Mazarguil actions, error); 1494eacaac7bSAdrien Mazarguil if (!flow) { 1495eacaac7bSAdrien Mazarguil err = -rte_errno; 1496eacaac7bSAdrien Mazarguil goto error; 1497eacaac7bSAdrien Mazarguil } 1498eacaac7bSAdrien Mazarguil } 14998e08df22SAlexander Kozyrev MLX4_ASSERT(flow->promisc || flow->allmulti); 1500eacaac7bSAdrien Mazarguil flow->select = 1; 1501eacaac7bSAdrien Mazarguil } 1502eacaac7bSAdrien Mazarguil error: 1503eacaac7bSAdrien Mazarguil /* Clear selection and clean up stale internal flow rules. */ 15041437784bSAdrien Mazarguil flow = LIST_FIRST(&priv->flows); 15051437784bSAdrien Mazarguil while (flow && flow->internal) { 15061437784bSAdrien Mazarguil struct rte_flow *next = LIST_NEXT(flow, next); 15071437784bSAdrien Mazarguil 1508eacaac7bSAdrien Mazarguil if (!flow->select) 1509099c2c53SYongseok Koh claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow, 1510099c2c53SYongseok Koh error)); 15111437784bSAdrien Mazarguil else 15121437784bSAdrien Mazarguil flow->select = 0; 15131437784bSAdrien Mazarguil flow = next; 15141437784bSAdrien Mazarguil } 15151437784bSAdrien Mazarguil return err; 1516bdcad2f4SAdrien Mazarguil } 1517bdcad2f4SAdrien Mazarguil 1518bdcad2f4SAdrien Mazarguil /** 1519bdcad2f4SAdrien Mazarguil * Synchronize flow rules. 
1520bdcad2f4SAdrien Mazarguil * 1521bdcad2f4SAdrien Mazarguil * This function synchronizes flow rules with the state of the device by 1522bdcad2f4SAdrien Mazarguil * taking into account isolated mode and whether target queues are 1523bdcad2f4SAdrien Mazarguil * configured. 1524bdcad2f4SAdrien Mazarguil * 1525bdcad2f4SAdrien Mazarguil * @param priv 1526bdcad2f4SAdrien Mazarguil * Pointer to private structure. 1527fee75e14SAdrien Mazarguil * @param[out] error 1528fee75e14SAdrien Mazarguil * Perform verbose error reporting if not NULL. 1529bdcad2f4SAdrien Mazarguil * 1530bdcad2f4SAdrien Mazarguil * @return 1531bdcad2f4SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 1532bdcad2f4SAdrien Mazarguil */ 1533bdcad2f4SAdrien Mazarguil int 1534dbeba4cfSThomas Monjalon mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error) 1535bdcad2f4SAdrien Mazarguil { 1536bdcad2f4SAdrien Mazarguil struct rte_flow *flow; 1537bdcad2f4SAdrien Mazarguil int ret; 1538bdcad2f4SAdrien Mazarguil 1539bdcad2f4SAdrien Mazarguil /* Internal flow rules are guaranteed to come first in the list. */ 1540bdcad2f4SAdrien Mazarguil if (priv->isolated) { 1541bdcad2f4SAdrien Mazarguil /* 1542bdcad2f4SAdrien Mazarguil * Get rid of them in isolated mode, stop at the first 1543bdcad2f4SAdrien Mazarguil * non-internal rule found. 1544bdcad2f4SAdrien Mazarguil */ 1545bdcad2f4SAdrien Mazarguil for (flow = LIST_FIRST(&priv->flows); 1546bdcad2f4SAdrien Mazarguil flow && flow->internal; 1547bdcad2f4SAdrien Mazarguil flow = LIST_FIRST(&priv->flows)) 1548099c2c53SYongseok Koh claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow, 1549099c2c53SYongseok Koh error)); 15501437784bSAdrien Mazarguil } else { 15511437784bSAdrien Mazarguil /* Refresh internal rules. 
*/ 1552fee75e14SAdrien Mazarguil ret = mlx4_flow_internal(priv, error); 1553bdcad2f4SAdrien Mazarguil if (ret) 1554bdcad2f4SAdrien Mazarguil return ret; 1555bdcad2f4SAdrien Mazarguil } 1556fee75e14SAdrien Mazarguil /* Toggle the remaining flow rules . */ 1557ed4724c8SAdrien Mazarguil LIST_FOREACH(flow, &priv->flows, next) { 1558fee75e14SAdrien Mazarguil ret = mlx4_flow_toggle(priv, flow, priv->started, error); 1559fee75e14SAdrien Mazarguil if (ret) 1560fee75e14SAdrien Mazarguil return ret; 1561fee75e14SAdrien Mazarguil } 1562fee75e14SAdrien Mazarguil if (!priv->started) 15638e08df22SAlexander Kozyrev MLX4_ASSERT(!priv->drop); 1564bdcad2f4SAdrien Mazarguil return 0; 1565bdcad2f4SAdrien Mazarguil } 1566bdcad2f4SAdrien Mazarguil 1567bdcad2f4SAdrien Mazarguil /** 1568bdcad2f4SAdrien Mazarguil * Clean up all flow rules. 1569bdcad2f4SAdrien Mazarguil * 1570bdcad2f4SAdrien Mazarguil * Unlike mlx4_flow_flush(), this function takes care of all remaining flow 1571bdcad2f4SAdrien Mazarguil * rules regardless of whether they are internal or user-configured. 1572bdcad2f4SAdrien Mazarguil * 1573bdcad2f4SAdrien Mazarguil * @param priv 1574bdcad2f4SAdrien Mazarguil * Pointer to private structure. 
1575bdcad2f4SAdrien Mazarguil */ 1576bdcad2f4SAdrien Mazarguil void 1577dbeba4cfSThomas Monjalon mlx4_flow_clean(struct mlx4_priv *priv) 1578bdcad2f4SAdrien Mazarguil { 1579bdcad2f4SAdrien Mazarguil struct rte_flow *flow; 1580bdcad2f4SAdrien Mazarguil 1581bdcad2f4SAdrien Mazarguil while ((flow = LIST_FIRST(&priv->flows))) 1582099c2c53SYongseok Koh mlx4_flow_destroy(ETH_DEV(priv), flow, NULL); 15838e08df22SAlexander Kozyrev MLX4_ASSERT(LIST_EMPTY(&priv->rss)); 1584bdcad2f4SAdrien Mazarguil } 1585bdcad2f4SAdrien Mazarguil 1586af745cd6SAdrien Mazarguil static const struct rte_flow_ops mlx4_flow_ops = { 1587af745cd6SAdrien Mazarguil .validate = mlx4_flow_validate, 1588af745cd6SAdrien Mazarguil .create = mlx4_flow_create, 1589af745cd6SAdrien Mazarguil .destroy = mlx4_flow_destroy, 1590af745cd6SAdrien Mazarguil .flush = mlx4_flow_flush, 1591af745cd6SAdrien Mazarguil .isolate = mlx4_flow_isolate, 1592af745cd6SAdrien Mazarguil }; 1593af745cd6SAdrien Mazarguil 1594af745cd6SAdrien Mazarguil /** 1595fb7ad441SThomas Monjalon * Get rte_flow callbacks. 1596af745cd6SAdrien Mazarguil * 1597af745cd6SAdrien Mazarguil * @param dev 1598af745cd6SAdrien Mazarguil * Pointer to Ethernet device structure. 1599fb7ad441SThomas Monjalon * @param ops 1600af745cd6SAdrien Mazarguil * Pointer to operation-specific structure. 1601af745cd6SAdrien Mazarguil * 1602fb7ad441SThomas Monjalon * @return 0 1603af745cd6SAdrien Mazarguil */ 1604af745cd6SAdrien Mazarguil int 1605fb7ad441SThomas Monjalon mlx4_flow_ops_get(struct rte_eth_dev *dev __rte_unused, 1606fb7ad441SThomas Monjalon const struct rte_flow_ops **ops) 1607af745cd6SAdrien Mazarguil { 1608fb7ad441SThomas Monjalon *ops = &mlx4_flow_ops; 1609af745cd6SAdrien Mazarguil return 0; 1610af745cd6SAdrien Mazarguil } 1611