/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_ethdev.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Initialization data for special flows. */
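/*
 * Each entry is matched on the destination MAC address: an all-zero mask
 * accepts any address (promiscuous), the 0x01 group bit in the first octet
 * selects all multicast frames, ff:ff:ff:ff:ff:ff matches broadcast only and
 * the 33:33:00:00:00:00 prefix covers IPv6 multicast addresses.
 */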
static const struct special_flow_init special_flow_init[] = {
	[HASH_RXQ_FLOW_TYPE_PROMISC] = {
		.dst_mac_val = "\x00\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_TCPV4 |
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_TCPV6 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	[HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
		.dst_mac_val = "\x01\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	[HASH_RXQ_FLOW_TYPE_BROADCAST] = {
		.dst_mac_val = "\xff\xff\xff\xff\xff\xff",
		.dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
	[HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
		.dst_mac_val = "\x33\x33\x00\x00\x00\x00",
		.dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
};

/**
 * Enable a special flow in a hash RX queue for a given VLAN index.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
				  enum hash_rxq_flow_type flow_type,
				  unsigned int vlan_index)
{
	struct priv *priv = hash_rxq->priv;
	struct ibv_exp_flow *flow;
	FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
	struct ibv_exp_flow_attr *attr = &data->attr;
	struct ibv_exp_flow_spec_eth *spec = &data->spec;
	const uint8_t *mac;
	const uint8_t *mask;
	unsigned int vlan_enabled = (priv->vlan_filter_n &&
				     special_flow_init[flow_type].per_vlan);
	unsigned int vlan_id = priv->vlan_filter[vlan_index];

	/* Check if flow is relevant for this hash_rxq. */
	if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
		return 0;
	/* Check if flow already exists. */
	if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
		return 0;

	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
	/* The first specification must be Ethernet. */
	assert(spec->type == IBV_EXP_FLOW_SPEC_ETH);
	assert(spec->size == sizeof(*spec));

	mac = special_flow_init[flow_type].dst_mac_val;
	mask = special_flow_init[flow_type].dst_mac_mask;
	*spec = (struct ibv_exp_flow_spec_eth){
		.type = IBV_EXP_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				mac[0], mac[1], mac[2],
				mac[3], mac[4], mac[5],
			},
			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
		},
		.mask = {
			.dst_mac = {
				mask[0], mask[1], mask[2],
				mask[3], mask[4], mask[5],
			},
			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
		},
	};

	errno = 0;
	flow = ibv_exp_create_flow(hash_rxq->qp, attr);
	if (flow == NULL) {
		/* It's not clear whether errno is always set in this case. */
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)hash_rxq, errno,
		      (errno ? strerror(errno) : "Unknown error"));
		if (errno)
			return errno;
		return EINVAL;
	}
	hash_rxq->special_flow[flow_type][vlan_index] = flow;
	DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
	      (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
	      vlan_id, vlan_index);
	return 0;
}

/**
 * Disable a special flow in a hash RX queue for a given VLAN index.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 */
static void
hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,
				   enum hash_rxq_flow_type flow_type,
				   unsigned int vlan_index)
{
	struct ibv_exp_flow *flow =
		hash_rxq->special_flow[flow_type][vlan_index];

	if (flow == NULL)
		return;
	claim_zero(ibv_exp_destroy_flow(flow));
	hash_rxq->special_flow[flow_type][vlan_index] = NULL;
	DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled",
	      (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
	      hash_rxq->priv->vlan_filter[vlan_index], vlan_index);
}

/**
 * Enable a special flow in a hash RX queue.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
			     enum hash_rxq_flow_type flow_type)
{
	struct priv *priv = hash_rxq->priv;
	unsigned int i = 0;
	int ret;

	assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
	assert(RTE_DIM(hash_rxq->special_flow[flow_type]) ==
	       RTE_DIM(priv->vlan_filter));
	/* Add a special flow for each VLAN filter when relevant. */
	do {
		ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i);
		if (ret) {
			/* Failure, rollback. */
			while (i != 0)
				hash_rxq_special_flow_disable_vlan(hash_rxq,
								   flow_type,
								   --i);
			return ret;
		}
	} while (special_flow_init[flow_type].per_vlan &&
		 ++i < priv->vlan_filter_n);
	return 0;
}

/**
 * Disable a special flow in a hash RX queue.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 */
static void
hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
			      enum hash_rxq_flow_type flow_type)
{
	unsigned int i;

	assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
	for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i)
		hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i);
}

/**
 * Enable a special flow in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 * @param flow_type
 *   Special flow type.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
{
	unsigned int i;

	if (!priv_allow_flow_type(priv, flow_type))
		return 0;
	for (i = 0; (i != priv->hash_rxqs_n); ++i) {
		struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
		int ret;

		ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
		if (!ret)
			continue;
		/* Failure, rollback. */
		while (i != 0) {
			hash_rxq = &(*priv->hash_rxqs)[--i];
			hash_rxq_special_flow_disable(hash_rxq, flow_type);
		}
		return ret;
	}
	return 0;
}

/**
 * Disable a special flow in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 * @param flow_type
 *   Special flow type.
 */
void
priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
{
	unsigned int i;

	for (i = 0; (i != priv->hash_rxqs_n); ++i) {
		struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];

		hash_rxq_special_flow_disable(hash_rxq, flow_type);
	}
}

/**
 * Enable all special flows in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_special_flow_enable_all(struct priv *priv)
{
	enum hash_rxq_flow_type flow_type;

	for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
	     flow_type != HASH_RXQ_FLOW_TYPE_MAC;
	     ++flow_type) {
		int ret;

		ret = priv_special_flow_enable(priv, flow_type);
		if (!ret)
			continue;
		/* Failure, rollback. */
		while (flow_type)
			priv_special_flow_disable(priv, --flow_type);
		return ret;
	}
	return 0;
}

/**
 * Disable all special flows in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 */
void
priv_special_flow_disable_all(struct priv *priv)
{
	enum hash_rxq_flow_type flow_type;

	for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
	     flow_type != HASH_RXQ_FLOW_TYPE_MAC;
	     ++flow_type)
		priv_special_flow_disable(priv, flow_type);
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	if (mlx5_is_secondary())
		return;

	priv_lock(priv);
	priv->promisc_req = 1;
	ret = priv_rehash_flows(priv);
	if (ret)
		ERROR("error while enabling promiscuous mode: %s",
		      strerror(ret));
	priv_unlock(priv);
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	if (mlx5_is_secondary())
		return;

	priv_lock(priv);
	priv->promisc_req = 0;
	ret = priv_rehash_flows(priv);
	if (ret)
		ERROR("error while disabling promiscuous mode: %s",
		      strerror(ret));
	priv_unlock(priv);
}

/**
 * DPDK callback to enable allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	if (mlx5_is_secondary())
		return;

	priv_lock(priv);
	priv->allmulti_req = 1;
	ret = priv_rehash_flows(priv);
	if (ret)
		ERROR("error while enabling allmulticast mode: %s",
		      strerror(ret));
	priv_unlock(priv);
}

/**
 * DPDK callback to disable allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	if (mlx5_is_secondary())
		return;

	priv_lock(priv);
	priv->allmulti_req = 0;
	ret = priv_rehash_flows(priv);
	if (ret)
		ERROR("error while disabling allmulticast mode: %s",
		      strerror(ret));
	priv_unlock(priv);
}