/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 */
static void
priv_txq_stop(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_priv_txq_release(priv, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, errno on error.
 */
static int
priv_txq_start(struct priv *priv)
{
	unsigned int i;
	int ret = 0;

	/* Add memory regions to Tx queues. */
	for (i = 0; i != priv->txqs_n; ++i) {
		unsigned int idx = 0;
		struct mlx5_mr *mr;
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);

		if (!txq_ctrl)
			continue;
		LIST_FOREACH(mr, &priv->mr, next) {
			priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp,
					   idx++);
			if (idx == MLX5_PMD_TX_MP_CACHE)
				break;
		}
		txq_alloc_elts(txq_ctrl);
		txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
		if (!txq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
	if (ret)
		goto error;
	return ret;
error:
	priv_txq_stop(priv);
	return ret;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 */
static void
priv_rxq_stop(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_priv_rxq_release(priv, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, errno on error.
 */
static int
priv_rxq_start(struct priv *priv)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);

		if (!rxq_ctrl)
			continue;
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
		if (!rxq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	return ret;
error:
	priv_rxq_stop(priv);
	/* Return a positive errno value, matching priv_txq_start(). */
	return ret;
}
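
/*
 * Illustrative pairing of the helpers above (a sketch, not driver code):
 * a failed priv_rxq_start() releases only Rx resources, so a caller that
 * has already started the Tx side must unwind it itself, which is what
 * the error path of mlx5_dev_start() below does:
 *
 *	int err = priv_txq_start(priv);
 *
 *	if (!err)
 *		err = priv_rxq_start(priv);
 *	if (err)
 *		priv_txq_stop(priv);
 */
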
/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr = NULL;
	int err;

	dev->data->dev_started = 1;
	priv_lock(priv);
	err = priv_flow_create_drop_queue(priv);
	if (err) {
		ERROR("%p: Drop queue allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
	rte_mempool_walk(mlx5_mp2mr_iter, priv);
	err = priv_txq_start(priv);
	if (err) {
		ERROR("%p: TXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	err = priv_rxq_start(priv);
	if (err) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	err = priv_rx_intr_vec_enable(priv);
	if (err) {
		ERROR("%p: RX interrupt vector creation failed",
		      (void *)priv);
		goto error;
	}
	priv_xstats_init(priv);
	/* Update link status and Tx/Rx callbacks for the first time. */
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	INFO("Forcing port %u link to be up", dev->data->port_id);
	err = priv_force_link_status_change(priv, ETH_LINK_UP);
	if (err) {
		DEBUG("Failed to set port %u link to be up",
		      dev->data->port_id);
		goto error;
	}
	priv_dev_interrupt_handler_install(priv, dev);
	priv_unlock(priv);
	return 0;
error:
	/* Rollback. */
	dev->data->dev_started = 0;
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		priv_mr_release(priv, mr);
	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	priv_txq_stop(priv);
	priv_rxq_stop(priv);
	priv_flow_delete_drop_queue(priv);
	priv_unlock(priv);
	/* Negate to match the documented return convention. */
	return -err;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	priv_lock(priv);
	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Give in-flight Rx/Tx bursts time to notice the change. */
	usleep(1000 * priv->rxqs_n);
	DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	priv_rx_intr_vec_disable(priv);
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_txq_stop(priv);
	priv_rxq_stop(priv);
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		priv_mr_release(priv, mr);
	priv_flow_delete_drop_queue(priv);
	priv_unlock(priv);
}
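
/*
 * The two callbacks above are not called directly: applications reach
 * them through the generic ethdev API.  A minimal sketch of the caller
 * side, assuming a single configured port (port_id, nb_rxq and nb_txq
 * are illustrative names, not part of this driver):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	// ... per-queue setup with rte_eth_rx_queue_setup() and
 *	// rte_eth_tx_queue_setup() ...
 *	rte_eth_dev_start(port_id);	// dispatches to mlx5_dev_start()
 *	// ... traffic ...
 *	rte_eth_dev_stop(port_id);	// dispatches to mlx5_dev_stop()
 */
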
/**
 * Enable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, rte_errno on error.
 */
int
priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
		return 0;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast,
					     &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	return rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_disable(struct priv *priv,
			 struct rte_eth_dev *dev __rte_unused)
{
	priv_flow_flush(priv, &priv->ctrl_flows);
	return 0;
}
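
/*
 * For reference, each spec/mask pair passed to mlx5_ctrl_flow() above
 * corresponds to an rte_flow pattern of roughly this shape (a sketch of
 * the equivalent generic API usage, not code from this driver; the
 * attribute and action lists are omitted):
 *
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &bcast, .mask = &bcast },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * That is, the broadcast entry matches frames whose destination MAC is
 * ff:ff:ff:ff:ff:ff, while ipv6_multi_spec/ipv6_multi_mask match the
 * 33:33:xx:xx:xx:xx IPv6 multicast prefix.
 */
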
/**
 * Restart traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		priv_dev_traffic_disable(priv, dev);
		priv_dev_traffic_enable(priv, dev);
	}
	return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	priv_dev_traffic_restart(priv, dev);
	priv_unlock(priv);
	return 0;
}
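
/*
 * Usage sketch: mlx5_traffic_restart() is the lock-taking entry point
 * for configuration callbacks that invalidate the control flows, such
 * as toggling promiscuous or all-multicast mode, or changing MAC/VLAN
 * filters.  A callback of that kind would look roughly like this (an
 * illustration; the real callbacks live elsewhere in this driver):
 *
 *	void
 *	mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 *	{
 *		dev->data->promiscuous = 1;
 *		mlx5_traffic_restart(dev);
 *	}
 */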