1 /*- 2 * BSD LICENSE 3 * 4 * Copyright 2015 6WIND S.A. 5 * Copyright 2015 Mellanox. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of 6WIND S.A. nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stddef.h> 35 #include <errno.h> 36 #include <string.h> 37 38 /* Verbs header. */ 39 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
 */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_ethdev.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/*
 * Initialization data for special flows (promiscuous, allmulticast,
 * broadcast, IPv6 multicast): the destination MAC value/mask pair to
 * match, the hash RX queue types the flow applies to, and whether one
 * flow per configured VLAN filter is required (per_vlan).
 */
static const struct special_flow_init special_flow_init[] = {
	[HASH_RXQ_FLOW_TYPE_PROMISC] = {
		/* All-zero mask: matches any destination MAC address. */
		.dst_mac_val = "\x00\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_TCPV4 |
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
			1 << HASH_RXQ_TCPV6 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	[HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
		/* Match only the group (multicast) bit of the first byte. */
		.dst_mac_val = "\x01\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	[HASH_RXQ_FLOW_TYPE_BROADCAST] = {
		/* Exact match on the Ethernet broadcast address. */
		.dst_mac_val = "\xff\xff\xff\xff\xff\xff",
		.dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
#ifdef HAVE_FLOW_SPEC_IPV6
	[HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
		/* Match the 33:33 IPv6 multicast MAC prefix only. */
		.dst_mac_val = "\x33\x33\x00\x00\x00\x00",
		.dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
#endif /* HAVE_FLOW_SPEC_IPV6 */
};

/**
 * Enable a special flow in a hash RX queue for a given VLAN index.
 *
 * No-op (returns 0) when the flow type is not relevant for this queue's
 * hash type or when the flow already exists.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
				  enum hash_rxq_flow_type flow_type,
				  unsigned int vlan_index)
{
	struct priv *priv = hash_rxq->priv;
	struct ibv_exp_flow *flow;
	/*
	 * Storage for the flow attribute header plus one Ethernet spec;
	 * presumably sized by the first priv_flow_attr(priv, NULL, 0, ...)
	 * call — see FLOW_ATTR_SPEC_ETH in the driver headers (TODO confirm).
	 */
	FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
	struct ibv_exp_flow_attr *attr = &data->attr;
	struct ibv_exp_flow_spec_eth *spec = &data->spec;
	const uint8_t *mac;
	const uint8_t *mask;
	unsigned int vlan_enabled = (priv->vlan_filter_n &&
				     special_flow_init[flow_type].per_vlan);
	/*
	 * NOTE(review): vlan_filter[vlan_index] is read unconditionally but
	 * only used when vlan_enabled is nonzero (see vlan_tag below).
	 */
	unsigned int vlan_id = priv->vlan_filter[vlan_index];

	/* Check if flow is relevant for this hash_rxq. */
	if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
		return 0;
	/* Check if flow already exists. */
	if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
		return 0;

	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	/* Second call actually fills in the attribute header. */
	priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
	/* The first specification must be Ethernet. */
	assert(spec->type == IBV_EXP_FLOW_SPEC_ETH);
	assert(spec->size == sizeof(*spec));

	mac = special_flow_init[flow_type].dst_mac_val;
	mask = special_flow_init[flow_type].dst_mac_mask;
	/* Overwrite the Ethernet spec with this flow type's MAC/VLAN match. */
	*spec = (struct ibv_exp_flow_spec_eth){
		.type = IBV_EXP_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				mac[0], mac[1], mac[2],
				mac[3], mac[4], mac[5],
			},
			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
		},
		.mask = {
			.dst_mac = {
				mask[0], mask[1], mask[2],
				mask[3], mask[4], mask[5],
			},
			/* 0xfff masks the 12-bit VLAN ID field. */
			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
		},
	};

	errno = 0;
	flow = ibv_exp_create_flow(hash_rxq->qp, attr);
	if (flow == NULL) {
		/* It's not clear whether errno is always set in this case. */
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)hash_rxq, errno,
		      (errno ? strerror(errno) : "Unknown error"));
		if (errno)
			return errno;
		/* Fall back to a generic error when errno was not set. */
		return EINVAL;
	}
	hash_rxq->special_flow[flow_type][vlan_index] = flow;
	DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
	      (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
	      vlan_id, vlan_index);
	return 0;
}

/**
 * Disable a special flow in a hash RX queue for a given VLAN index.
 *
 * No-op when the flow does not exist.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
215 */ 216 static void 217 hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq, 218 enum hash_rxq_flow_type flow_type, 219 unsigned int vlan_index) 220 { 221 struct ibv_exp_flow *flow = 222 hash_rxq->special_flow[flow_type][vlan_index]; 223 224 if (flow == NULL) 225 return; 226 claim_zero(ibv_exp_destroy_flow(flow)); 227 hash_rxq->special_flow[flow_type][vlan_index] = NULL; 228 DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled", 229 (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type, 230 hash_rxq->priv->vlan_filter[vlan_index], vlan_index); 231 } 232 233 /** 234 * Enable a special flow in a hash RX queue. 235 * 236 * @param hash_rxq 237 * Pointer to hash RX queue structure. 238 * @param flow_type 239 * Special flow type. 240 * @param vlan_index 241 * VLAN index to use. 242 * 243 * @return 244 * 0 on success, errno value on failure. 245 */ 246 static int 247 hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq, 248 enum hash_rxq_flow_type flow_type) 249 { 250 struct priv *priv = hash_rxq->priv; 251 unsigned int i = 0; 252 int ret; 253 254 assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow)); 255 assert(RTE_DIM(hash_rxq->special_flow[flow_type]) == 256 RTE_DIM(priv->vlan_filter)); 257 /* Add a special flow for each VLAN filter when relevant. */ 258 do { 259 ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i); 260 if (ret) { 261 /* Failure, rollback. */ 262 while (i != 0) 263 hash_rxq_special_flow_disable_vlan(hash_rxq, 264 flow_type, 265 --i); 266 return ret; 267 } 268 } while (special_flow_init[flow_type].per_vlan && 269 ++i < priv->vlan_filter_n); 270 return 0; 271 } 272 273 /** 274 * Disable a special flow in a hash RX queue. 275 * 276 * @param hash_rxq 277 * Pointer to hash RX queue structure. 278 * @param flow_type 279 * Special flow type. 
280 */ 281 static void 282 hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq, 283 enum hash_rxq_flow_type flow_type) 284 { 285 unsigned int i; 286 287 assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow)); 288 for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i) 289 hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i); 290 } 291 292 /** 293 * Enable a special flow in all hash RX queues. 294 * 295 * @param priv 296 * Private structure. 297 * @param flow_type 298 * Special flow type. 299 * 300 * @return 301 * 0 on success, errno value on failure. 302 */ 303 int 304 priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type) 305 { 306 unsigned int i; 307 308 if (!priv_allow_flow_type(priv, flow_type)) 309 return 0; 310 for (i = 0; (i != priv->hash_rxqs_n); ++i) { 311 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i]; 312 int ret; 313 314 ret = hash_rxq_special_flow_enable(hash_rxq, flow_type); 315 if (!ret) 316 continue; 317 /* Failure, rollback. */ 318 while (i != 0) { 319 hash_rxq = &(*priv->hash_rxqs)[--i]; 320 hash_rxq_special_flow_disable(hash_rxq, flow_type); 321 } 322 return ret; 323 } 324 return 0; 325 } 326 327 /** 328 * Disable a special flow in all hash RX queues. 329 * 330 * @param priv 331 * Private structure. 332 * @param flow_type 333 * Special flow type. 334 */ 335 void 336 priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type) 337 { 338 unsigned int i; 339 340 for (i = 0; (i != priv->hash_rxqs_n); ++i) { 341 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i]; 342 343 hash_rxq_special_flow_disable(hash_rxq, flow_type); 344 } 345 } 346 347 /** 348 * Enable all special flows in all hash RX queues. 349 * 350 * @param priv 351 * Private structure. 
352 */ 353 int 354 priv_special_flow_enable_all(struct priv *priv) 355 { 356 enum hash_rxq_flow_type flow_type; 357 358 for (flow_type = 0; flow_type != HASH_RXQ_FLOW_TYPE_MAC; ++flow_type) { 359 int ret; 360 361 ret = priv_special_flow_enable(priv, flow_type); 362 if (!ret) 363 continue; 364 /* Failure, rollback. */ 365 while (flow_type) 366 priv_special_flow_disable(priv, --flow_type); 367 return ret; 368 } 369 return 0; 370 } 371 372 /** 373 * Disable all special flows in all hash RX queues. 374 * 375 * @param priv 376 * Private structure. 377 */ 378 void 379 priv_special_flow_disable_all(struct priv *priv) 380 { 381 enum hash_rxq_flow_type flow_type; 382 383 for (flow_type = 0; flow_type != HASH_RXQ_FLOW_TYPE_MAC; ++flow_type) 384 priv_special_flow_disable(priv, flow_type); 385 } 386 387 /** 388 * DPDK callback to enable promiscuous mode. 389 * 390 * @param dev 391 * Pointer to Ethernet device structure. 392 */ 393 void 394 mlx5_promiscuous_enable(struct rte_eth_dev *dev) 395 { 396 struct priv *priv = dev->data->dev_private; 397 int ret; 398 399 if (mlx5_is_secondary()) 400 return; 401 402 priv_lock(priv); 403 priv->promisc_req = 1; 404 ret = priv_rehash_flows(priv); 405 if (ret) 406 ERROR("error while enabling promiscuous mode: %s", 407 strerror(ret)); 408 priv_unlock(priv); 409 } 410 411 /** 412 * DPDK callback to disable promiscuous mode. 413 * 414 * @param dev 415 * Pointer to Ethernet device structure. 416 */ 417 void 418 mlx5_promiscuous_disable(struct rte_eth_dev *dev) 419 { 420 struct priv *priv = dev->data->dev_private; 421 int ret; 422 423 if (mlx5_is_secondary()) 424 return; 425 426 priv_lock(priv); 427 priv->promisc_req = 0; 428 ret = priv_rehash_flows(priv); 429 if (ret) 430 ERROR("error while disabling promiscuous mode: %s", 431 strerror(ret)); 432 priv_unlock(priv); 433 } 434 435 /** 436 * DPDK callback to enable allmulti mode. 437 * 438 * @param dev 439 * Pointer to Ethernet device structure. 
440 */ 441 void 442 mlx5_allmulticast_enable(struct rte_eth_dev *dev) 443 { 444 struct priv *priv = dev->data->dev_private; 445 int ret; 446 447 if (mlx5_is_secondary()) 448 return; 449 450 priv_lock(priv); 451 priv->allmulti_req = 1; 452 ret = priv_rehash_flows(priv); 453 if (ret) 454 ERROR("error while enabling allmulticast mode: %s", 455 strerror(ret)); 456 priv_unlock(priv); 457 } 458 459 /** 460 * DPDK callback to disable allmulti mode. 461 * 462 * @param dev 463 * Pointer to Ethernet device structure. 464 */ 465 void 466 mlx5_allmulticast_disable(struct rte_eth_dev *dev) 467 { 468 struct priv *priv = dev->data->dev_private; 469 int ret; 470 471 if (mlx5_is_secondary()) 472 return; 473 474 priv_lock(priv); 475 priv->allmulti_req = 0; 476 ret = priv_rehash_flows(priv); 477 if (ret) 478 ERROR("error while disabling allmulticast mode: %s", 479 strerror(ret)); 480 priv_unlock(priv); 481 } 482