/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 */
static void
priv_txq_stop(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_priv_txq_release(priv, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */
static int
priv_txq_start(struct priv *priv)
{
	unsigned int i;
	int ret = 0;

	/* Add memory regions to Tx queues. */
	for (i = 0; i != priv->txqs_n; ++i) {
		unsigned int idx = 0;
		struct mlx5_mr *mr;
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);

		if (!txq_ctrl)
			continue;
		LIST_FOREACH(mr, &priv->mr, next) {
			priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
			if (idx == MLX5_PMD_TX_MP_CACHE)
				break;
		}
		txq_alloc_elts(txq_ctrl);
		txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
		if (!txq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	priv_txq_stop(priv);
	return ret;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 */
static void
priv_rxq_stop(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_priv_rxq_release(priv, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */
static int
priv_rxq_start(struct priv *priv)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);

		if (!rxq_ctrl)
			continue;
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
		if (!rxq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	priv_rxq_stop(priv);
	return ret;
}
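
/*
 * Illustrative sketch (hypothetical application code, not part of this
 * driver): the priv_*q_start()/priv_*q_stop() helpers above are never
 * called by applications directly. They run from the dev_start/dev_stop
 * callbacks below, which the ethdev layer invokes on the application's
 * behalf:
 *
 *	uint16_t port_id = 0;			// assumed port number
 *	struct rte_eth_conf conf = { 0 };	// minimal configuration
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	// ... rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() ...
 *	rte_eth_dev_start(port_id);	// reaches mlx5_dev_start()
 *	// ... traffic ...
 *	rte_eth_dev_stop(port_id);	// reaches mlx5_dev_stop()
 */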

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr = NULL;
	int err;

	dev->data->dev_started = 1;
	priv_lock(priv);
	err = priv_flow_create_drop_queue(priv);
	if (err) {
		ERROR("%p: Drop queue allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
	rte_mempool_walk(mlx5_mp2mr_iter, priv);
	err = priv_txq_start(priv);
	if (err) {
		ERROR("%p: TXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	/* Update send callback. */
	priv_dev_select_tx_function(priv, dev);
	err = priv_rxq_start(priv);
	if (err) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	/* Update receive callback. */
	priv_dev_select_rx_function(priv, dev);
	err = priv_dev_traffic_enable(priv, dev);
	if (err) {
		ERROR("%p: an error occurred while configuring control flows:"
		      " %s",
		      (void *)priv, strerror(err));
		goto error;
	}
	err = priv_flow_start(priv, &priv->flows);
	if (err) {
		ERROR("%p: an error occurred while configuring flows:"
		      " %s",
		      (void *)priv, strerror(err));
		goto error;
	}
	err = priv_rx_intr_vec_enable(priv);
	if (err) {
		ERROR("%p: RX interrupt vector creation failed",
		      (void *)priv);
		goto error;
	}
	priv_dev_interrupt_handler_install(priv, dev);
	priv_xstats_init(priv);
	priv_unlock(priv);
	return 0;
error:
	/* Rollback. */
	dev->data->dev_started = 0;
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		priv_mr_release(priv, mr);
	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	priv_txq_stop(priv);
	priv_rxq_stop(priv);
	priv_flow_delete_drop_queue(priv);
	priv_unlock(priv);
	return -err;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	priv_lock(priv);
	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Give in-flight Rx/Tx bursts a chance to complete. */
	usleep(1000 * priv->rxqs_n);
	DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	priv_rx_intr_vec_disable(priv);
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_txq_stop(priv);
	priv_rxq_stop(priv);
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		priv_mr_release(priv, mr);
	priv_flow_delete_drop_queue(priv);
	priv_unlock(priv);
}
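
/*
 * Reading aid for priv_dev_traffic_enable() below: summary of the
 * control flows it installs depending on the device Rx mode (isolated
 * mode installs none).
 *
 *	Rx mode		Matched destination MAC addresses
 *	-------------	--------------------------------------------------
 *	promiscuous	any (empty spec and mask)
 *	all_multicast	any address with the group bit set
 *			(spec and mask both 01:00:00:00:00:00)
 *	default		ff:ff:ff:ff:ff:ff (broadcast),
 *			33:33:xx:xx:xx:xx (IPv6 multicast) and every
 *			configured unicast MAC, each optionally combined
 *			with every configured VLAN filter.
 */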

/**
 * Enable traffic flows configured by the control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */
int
priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/* In isolated mode, flow rules are managed solely by the application. */
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
		return 0;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast,
					     &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	return rte_errno;
}

/**
 * Disable traffic flows configured by the control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
{
	(void)dev;
	priv_flow_flush(priv, &priv->ctrl_flows);
	return 0;
}
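
/*
 * Illustrative sketch (hypothetical caller, for documentation only):
 * per the naming convention used throughout this file, priv_*()
 * functions such as priv_dev_traffic_restart() below expect the caller
 * to hold the private structure lock, e.g.:
 *
 *	priv_lock(priv);
 *	priv->vlan_filter[priv->vlan_filter_n++] = vlan_id; // assumed update
 *	if (dev->data->dev_started)
 *		priv_dev_traffic_restart(priv, dev);
 *	priv_unlock(priv);
 */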

/**
 * Restart traffic flows configured by the control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
{
	/*
	 * Nothing to do on a stopped device: control flows are recreated
	 * by the next call to mlx5_dev_start().
	 */
	if (dev->data->dev_started) {
		priv_dev_traffic_disable(priv, dev);
		priv_dev_traffic_enable(priv, dev);
	}
	return 0;
}

/**
 * Restart traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	priv_dev_traffic_restart(priv, dev);
	priv_unlock(priv);
	return 0;
}
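
/*
 * Usage sketch: mlx5_traffic_restart() is the lock-taking entry point
 * intended for other control-path callbacks. An Rx mode handler (shown
 * here in simplified, illustrative form) would call it after flipping
 * the relevant device flag:
 *
 *	void
 *	mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 *	{
 *		dev->data->promiscuous = 1;
 *		mlx5_traffic_restart(dev);
 *	}
 */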