/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 */
static void
priv_txq_stop(struct priv *priv)
{
        unsigned int i;

        for (i = 0; i != priv->txqs_n; ++i)
                mlx5_priv_txq_release(priv, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */
static int
priv_txq_start(struct priv *priv)
{
        unsigned int i;
        int ret = 0;

        /* Add memory regions to Tx queues. */
        for (i = 0; i != priv->txqs_n; ++i) {
                unsigned int idx = 0;
                struct mlx5_mr *mr;
                struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);

                if (!txq_ctrl)
                        continue;
                /* Register all known memory regions with this queue. */
                LIST_FOREACH(mr, &priv->mr, next)
                        priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp,
                                           idx++);
                txq_alloc_elts(txq_ctrl);
                txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
                if (!txq_ctrl->ibv) {
                        ret = ENOMEM;
                        goto error;
                }
        }
        return 0;
error:
        priv_txq_stop(priv);
        return ret;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 */
static void
priv_rxq_stop(struct priv *priv)
{
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i)
                mlx5_priv_rxq_release(priv, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */
static int
priv_rxq_start(struct priv *priv)
{
        unsigned int i;
        int ret = 0;

        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);

                if (!rxq_ctrl)
                        continue;
                ret = rxq_alloc_elts(rxq_ctrl);
                if (ret)
                        goto error;
                rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
                if (!rxq_ctrl->ibv) {
                        ret = ENOMEM;
                        goto error;
                }
        }
        return 0;
error:
        priv_rxq_stop(priv);
        return ret;
}
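/*
 * The helpers above are used pairwise by mlx5_dev_start() and
 * mlx5_dev_stop() below; on failure each *_start() function rolls back
 * through its matching *_stop() counterpart before returning.
 */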
/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr = NULL;
        struct mlx5_mr *mr_next;
        int err;

        if (mlx5_is_secondary())
                return -E_RTE_SECONDARY;

        dev->data->dev_started = 1;
        priv_lock(priv);
        err = priv_flow_create_drop_queue(priv);
        if (err) {
                ERROR("%p: Drop queue allocation failed: %s",
                      (void *)dev, strerror(err));
                goto error;
        }
        DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
        /* Register the memory regions of all existing mempools. */
        rte_mempool_walk(mlx5_mp2mr_iter, priv);
        err = priv_txq_start(priv);
        if (err) {
                ERROR("%p: TXQ allocation failed: %s",
                      (void *)dev, strerror(err));
                goto error;
        }
        /* Update send callback. */
        priv_dev_select_tx_function(priv, dev);
        err = priv_rxq_start(priv);
        if (err) {
                ERROR("%p: RXQ allocation failed: %s",
                      (void *)dev, strerror(err));
                goto error;
        }
        /* Update receive callback. */
        priv_dev_select_rx_function(priv, dev);
        err = priv_dev_traffic_enable(priv, dev);
        if (err) {
                ERROR("%p: an error occurred while configuring control flows:"
                      " %s",
                      (void *)priv, strerror(err));
                goto error;
        }
        err = priv_flow_start(priv, &priv->flows);
        if (err) {
                ERROR("%p: an error occurred while configuring flows:"
                      " %s",
                      (void *)priv, strerror(err));
                goto error;
        }
        err = priv_rx_intr_vec_enable(priv);
        if (err) {
                ERROR("%p: RX interrupt vector creation failed",
                      (void *)priv);
                goto error;
        }
        priv_dev_interrupt_handler_install(priv, dev);
        priv_xstats_init(priv);
        priv_unlock(priv);
        return 0;
error:
        /* Rollback. */
        dev->data->dev_started = 0;
        /*
         * Release memory regions; fetch the successor first as a release
         * may unlink and free the current entry.
         */
        for (mr = LIST_FIRST(&priv->mr); mr; mr = mr_next) {
                mr_next = LIST_NEXT(mr, next);
                priv_mr_release(priv, mr);
        }
        priv_flow_stop(priv, &priv->flows);
        priv_dev_traffic_disable(priv, dev);
        priv_txq_stop(priv);
        priv_rxq_stop(priv);
        priv_flow_delete_drop_queue(priv);
        priv_unlock(priv);
        return -err;
}
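/*
 * Usage sketch (not part of the driver): applications reach
 * mlx5_dev_start() through the generic ethdev API. A minimal,
 * hypothetical bring-up of port 0 with one queue pair could look like
 * this, error handling omitted:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(0, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(0, 0, 512, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(0, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(0); // ends up in mlx5_dev_start()
 *
 * where "mp" stands for an application-provided struct rte_mempool
 * pointer.
 */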
/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr;
        struct mlx5_mr *mr_next;

        if (mlx5_is_secondary())
                return;

        priv_lock(priv);
        dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
        /* Give in-flight Rx/Tx burst calls a chance to complete. */
        usleep(1000 * priv->rxqs_n);
        DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
        priv_flow_stop(priv, &priv->flows);
        priv_dev_traffic_disable(priv, dev);
        priv_rx_intr_vec_disable(priv);
        priv_dev_interrupt_handler_uninstall(priv, dev);
        priv_txq_stop(priv);
        priv_rxq_stop(priv);
        /*
         * Release memory regions; fetch the successor first as a release
         * may unlink and free the current entry.
         */
        for (mr = LIST_FIRST(&priv->mr); mr; mr = mr_next) {
                mr_next = LIST_NEXT(mr, next);
                priv_mr_release(priv, mr);
        }
        priv_flow_delete_drop_queue(priv);
        priv_unlock(priv);
}
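/*
 * Summary of the control flows installed by priv_dev_traffic_enable()
 * below, as implemented:
 *
 * - isolated mode: no control flow is created;
 * - promiscuous: a single flow matching every frame;
 * - all_multicast: a single flow matching any multicast frame;
 * - otherwise: broadcast and IPv6 multicast flows, one pair per
 *   configured VLAN filter, or a single unfiltered pair when no VLAN
 *   filter is set;
 * - except in promiscuous mode, one unicast flow per configured MAC
 *   address, again combined with each VLAN filter when present.
 */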
/**
 * Enable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */
int
priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
{
        struct rte_flow_item_eth bcast = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        struct rte_flow_item_eth ipv6_multi_spec = {
                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth ipv6_multi_mask = {
                .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast = {
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        const unsigned int vlan_filter_n = priv->vlan_filter_n;
        const struct ether_addr cmp = {
                .addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        unsigned int i;
        unsigned int j;
        int ret;

        if (priv->isolated)
                return 0;
        if (dev->data->promiscuous) {
                struct rte_flow_item_eth promisc = {
                        .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

                claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
                return 0;
        }
        if (dev->data->all_multicast) {
                struct rte_flow_item_eth multicast = {
                        .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

                claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
        } else {
                /* Add broadcast/multicast flows. */
                for (i = 0; i != vlan_filter_n; ++i) {
                        uint16_t vlan = priv->vlan_filter[i];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask = {
                                .tci = 0xffff,
                        };

                        ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
                                                  &ipv6_multi_mask,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
                                             &ipv6_multi_mask);
                        if (ret)
                                goto error;
                }
        }
        /* Add MAC address flows. */
        for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
                struct ether_addr *mac = &dev->data->mac_addrs[i];

                if (!memcmp(mac, &cmp, sizeof(*mac)))
                        continue;
                memcpy(&unicast.dst.addr_bytes,
                       mac->addr_bytes,
                       ETHER_ADDR_LEN);
                for (j = 0; j != vlan_filter_n; ++j) {
                        uint16_t vlan = priv->vlan_filter[j];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask = {
                                .tci = 0xffff,
                        };

                        ret = mlx5_ctrl_flow_vlan(dev, &unicast,
                                                  &unicast_mask,
                                                  &vlan_spec,
                                                  &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &unicast,
                                             &unicast_mask);
                        if (ret)
                                goto error;
                }
        }
        return 0;
error:
        return rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
{
        (void)dev;
        priv_flow_flush(priv, &priv->ctrl_flows);
        return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                priv_dev_traffic_disable(priv, dev);
                priv_dev_traffic_enable(priv, dev);
        }
        return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;

        priv_lock(priv);
        priv_dev_traffic_restart(priv, dev);
        priv_unlock(priv);
        return 0;
}
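/*
 * Usage sketch (hypothetical): control-path handlers that change the
 * set of wanted control flows, such as a promiscuous mode toggle, can
 * rebuild them all in one call:
 *
 *	dev->data->promiscuous = 1;
 *	mlx5_traffic_restart(dev);
 *
 * While the port is stopped this is a no-op, as priv_dev_traffic_restart()
 * only acts when dev->data->dev_started is set; the control flows are then
 * installed by the next mlx5_dev_start().
 */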