/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_txq_stop(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_priv_txq_release(priv, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_txq_start(struct priv *priv)
{
	unsigned int i;
	int ret = 0;

	/* Add memory regions to Tx queues. */
	for (i = 0; i != priv->txqs_n; ++i) {
		unsigned int idx = 0;
		struct mlx5_mr *mr;
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);

		if (!txq_ctrl)
			continue;
		LIST_FOREACH(mr, &priv->mr, next) {
			priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
			if (idx == MLX5_PMD_TX_MP_CACHE)
				break;
		}
		txq_alloc_elts(txq_ctrl);
		txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
		if (!txq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
	if (ret)
		goto error;
	return ret;
error:
	priv_txq_stop(priv);
	return ret;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_rxq_stop(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_priv_rxq_release(priv, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_rxq_start(struct priv *priv)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);

		if (!rxq_ctrl)
			continue;
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
		if (!rxq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	return ret;
error:
	priv_rxq_stop(priv);
	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr = NULL;
	int err;

	dev->data->dev_started = 1;
	priv_lock(priv);
	err = priv_flow_create_drop_queue(priv);
	if (err) {
		ERROR("%p: Drop queue allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
	rte_mempool_walk(mlx5_mp2mr_iter, priv);
	err = priv_txq_start(priv);
	if (err) {
		ERROR("%p: TXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	err = priv_rxq_start(priv);
	if (err) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	err = priv_rx_intr_vec_enable(priv);
	if (err) {
		ERROR("%p: RX interrupt vector creation failed",
		      (void *)priv);
		goto error;
	}
	priv_xstats_init(priv);
	/* Update link status and Tx/Rx callbacks for the first time. */
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	INFO("Forcing port %u link to be up", dev->data->port_id);
	err = priv_force_link_status_change(priv, ETH_LINK_UP);
	if (err) {
		DEBUG("Failed to set port %u link to be up",
		      dev->data->port_id);
		goto error;
	}
	priv_dev_interrupt_handler_install(priv, dev);
	priv_unlock(priv);
	return 0;
error:
	/* Rollback. */
	dev->data->dev_started = 0;
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		priv_mr_release(priv, mr);
	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	priv_txq_stop(priv);
	priv_rxq_stop(priv);
	priv_flow_delete_drop_queue(priv);
	priv_unlock(priv);
	return err;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	priv_lock(priv);
	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	usleep(1000 * priv->rxqs_n);
	DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	priv_rx_intr_vec_disable(priv);
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_txq_stop(priv);
	priv_rxq_stop(priv);
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		priv_mr_release(priv, mr);
	priv_flow_delete_drop_queue(priv);
	priv_unlock(priv);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, rte_errno value on failure.
 */
int
priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
		return 0;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast,
					     &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	return rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
{
	(void)dev;
	priv_flow_flush(priv, &priv->ctrl_flows);
	return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		priv_dev_traffic_disable(priv, dev);
		priv_dev_traffic_enable(priv, dev);
	}
	return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	priv_dev_traffic_restart(priv, dev);
	priv_unlock(priv);
	return 0;
}
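
/*
 * Usage sketch (illustrative only, not part of the driver): none of the
 * callbacks above are called directly; applications reach them through the
 * generic ethdev API. Assuming a hypothetical `port_id` bound to this PMD
 * and a previously created mempool `mp` (both names are placeholders), a
 * minimal start/stop sequence exercising mlx5_dev_start()/mlx5_dev_stop()
 * could look like:
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *	int ret;
 *
 *	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	if (!ret)
 *		ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *					     rte_socket_id(), NULL, mp);
 *	if (!ret)
 *		ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *					     rte_socket_id(), NULL);
 *	if (!ret)
 *		ret = rte_eth_dev_start(port_id);  // -> mlx5_dev_start()
 *	...
 *	rte_eth_promiscuous_enable(port_id);  // -> mlx5_traffic_restart()
 *	rte_eth_dev_stop(port_id);            // -> mlx5_dev_stop()
 */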