/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/vport.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_lib/mlx5.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#ifdef PCI_IOV
#include <sys/nv.h>
#include <dev/pci/pci_iov.h>
#include <sys/iov_schema.h>
#endif

static const char mlx5_version[] = "Mellanox Core driver "
	DRIVER_VERSION " (" DRIVER_RELDATE ")";
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
MODULE_VERSION(mlx5, 1);

SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "mlx5 hardware controls");

int mlx5_core_debug_mask;
SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN,
    &mlx5_core_debug_mask, 0,
    "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define	MLX5_DEFAULT_PROF	2
static int mlx5_prof_sel = MLX5_DEFAULT_PROF;
SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN,
    &mlx5_prof_sel, 0,
    "profile selector. Valid range 0 - 3");

static int mlx5_fast_unload_enabled = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN,
    &mlx5_fast_unload_enabled, 0,
    "Set to enable fast unload. Clear to disable.");

#define	NUMA_NO_NODE	-1

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface	*intf;
	void			*context;
};

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profiles[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 17,
		.mr_cache[0]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[1]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[2]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[3]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[4]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[5]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[6]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[7]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[8]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[9]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[10]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[11]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[12]	= {
			.size	= 64,
			.limit	= 32
		},
		.mr_cache[13]	= {
			.size	= 32,
			.limit	= 16
		},
		.mr_cache[14]	= {
			.size	= 16,
			.limit	= 8
		},
	},
	[3] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 17,
	},
};

#ifdef PCI_IOV
static const char iov_mac_addr_name[] = "mac-addr";
static const char iov_node_guid_name[] = "node-guid";
static const char iov_port_guid_name[] = "port-guid";
#endif

static int set_dma_caps(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		mlx5_core_warn(dev,
		    "couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			mlx5_core_err(dev,
			    "Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

int mlx5_pci_read_power_status(struct mlx5_core_dev *dev,
			       u16 *p_power, u8 *p_status)
{
	u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};
	int err;

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
	    MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0);

	*p_status = MLX5_GET(mpein_reg, out, pwr_status);
	*p_power = MLX5_GET(mpein_reg, out, pci_power);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		mlx5_core_err(dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
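
/*
 * Compute and enable the MSI-X vectors used by the driver: the
 * MLX5_EQ_VEC_COMP_BASE control EQ vectors plus one completion vector
 * per port per online CPU, unless the msix_eqvec sysctl caps the
 * count.  The total is further clamped to the number of EQs the
 * device supports (log_max_eq) and to the firmware API limit of 256
 * vectors.
 */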
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int limit = dev->msix_eqvec;
	int nvec = MLX5_EQ_VEC_COMP_BASE;
	int i;

	if (limit > 0)
		nvec += limit;
	else
		nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();

	if (nvec > num_eqs)
		nvec = num_eqs;
	if (nvec > 256)
		nvec = 256;	/* limit of firmware API */
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);
	if (priv->msix_arr == NULL)
		return -ENOMEM;

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};

#define	CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT |
				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}
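
/*
 * Query device capabilities for one capability type.  The opmod
 * passed to QUERY_HCA_CAP encodes the capability type in its upper
 * bits and the mode (maximum vs. currently enabled values) in bit 0;
 * the result is cached in hca_caps_max[] or hca_caps_cur[]
 * accordingly.
 */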
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (out == NULL)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
		    "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
		    cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		    MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		    MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
		    "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
		    cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}

static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (set_ctx == NULL)
		return err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
	    mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), 128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
	    to_fw_pkey_sz(dev, 128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
		    prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
	    MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
	    MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz);

	kfree(set_ctx);
	return err;
}
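
/*
 * Report the host's endianness to firmware through the
 * HOST_ENDIANNESS register.  Judging from the early return below,
 * this is only needed on IB/RoCE-capable devices; Ethernet-only
 * devices without RoCE support skip it.
 */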
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
	    &he_out, sizeof(he_out), MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
	    query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
			mlx5_core_dbg(dev, "Only ISSI 0 is supported\n");
			return 0;
		}

		mlx5_core_err(dev, "failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
		    set_out, sizeof(set_out));
		if (err) {
			mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n",
			    err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
			    eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
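
/*
 * Create one completion EQ per enabled MSI-X completion vector.  Each
 * EQ holds MLX5_COMP_EQ_SIZE entries and is bound to the vector at
 * offset MLX5_EQ_VEC_COMP_BASE + i; on any failure, the EQs created
 * so far are torn down again through free_comp_eqs().
 */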
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (eq == NULL) {
			err = -ENOMEM;
			goto clean;
		}

		err = mlx5_create_map_eq(dev, eq,
		    i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
		    &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
	int warn = jiffies + msecs_to_jiffies(warn_time_mili);
	int end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	MPASS(max_wait_mili > warn_time_mili);

	while (fw_initializing(dev) == 1) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			mlx5_core_warn(dev,
			    "Waiting for FW initialization, timeout abort in %u s\n",
			    (unsigned int)(jiffies_to_msecs(end - warn) / 1000));
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(FW_INIT_WAIT_MS);
	}

	if (err != 0)
		mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n",
		    ioread32be(&dev->iseg->initializing));

	return err;
}

static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev =
	    container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	CURVNET_SET_QUIET(vnet0);
	dev_ctx->context = intf->add(dev);
	CURVNET_RESTORE();

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev =
	    container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}
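
/*
 * Device/interface registry: intf_mutex protects both dev_list and
 * intf_list, and every (device, interface) pair is represented by one
 * mlx5_device_context.  Registering a device attaches it to all known
 * interfaces; registering an interface attaches it to all known
 * devices.
 */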
int
mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

void
mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_auto_fw_update;
SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &mlx5_auto_fw_update, 0,
    "Allow automatic firmware update on driver start");
static int
mlx5_firmware_update(struct mlx5_core_dev *dev)
{
	const struct firmware *fw;
	int err;

	TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update);
	if (!mlx5_auto_fw_update)
		return (0);
	fw = firmware_get("mlx5fw_mfa");
	if (fw) {
		err = mlx5_firmware_flash(dev, fw);
		firmware_put(fw, FIRMWARE_UNLOAD);
	} else
		return (-ENOENT);

	return err;
}
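
/*
 * Low-level PCI bring-up: enable the device, claim BAR 0, become bus
 * master, program the DMA masks and map the initialization segment
 * through which the firmware handshake is performed.
 */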
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	device_t bsddev;
	int err;

	bsddev = pdev->dev.bsddev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev,
		    "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev,
		    "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
err_dbg:
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
#ifdef PCI_IOV
	if (MLX5_CAP_GEN(dev, eswitch_flow_table))
		pci_iov_detach(dev->pdev->dev.bsddev);
#endif
	iounmap(dev->iseg);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}

static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	int err;

	err = mlx5_vsc_find_cap(dev);
	if (err)
		mlx5_core_err(dev,
		    "Unable to find vendor specific capabilities\n");

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto out;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto out;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	mlx5_init_reserved_gids(dev);
	mlx5_fpga_init(dev);

#ifdef RATELIMIT
	err = mlx5_init_rl_table(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}
#endif
	return 0;

#ifdef RATELIMIT
err_tables_cleanup:
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
#endif

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
#ifdef RATELIMIT
	mlx5_cleanup_rl_table(dev);
#endif
	mlx5_fpga_cleanup(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}
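
/*
 * Bring the device to the fully operational state: wait for firmware
 * readiness, start the command interface, enable the HCA, negotiate
 * the ISSI, hand out boot/init pages, apply the capability fixups,
 * initialize the HCA, then set up EQs, flow steering, MPFS and FPGA
 * support, and finally register the device with the upper-layer
 * interfaces.  Every step unwinds through the error labels at the
 * bottom in reverse order.
 */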
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			 bool boot)
{
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}

	mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n",
	    fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	/*
	 * On load, remove any previous indication of internal error;
	 * the device is up.
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	/* wait for firmware to accept the initialization segment configuration */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI,
	    FW_INIT_WARN_MESSAGE_INTERVAL);
	if (err) {
		dev_err(&dev->pdev->dev,
		    "Firmware over %d MS in pre-initializing state, aborting\n",
		    FW_PRE_INIT_TIMEOUT_MILI);
		goto out_err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev,
		    "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	if (err) {
		mlx5_core_err(dev,
		    "Firmware over %d MS in initializing state, aborting\n",
		    FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);

	if (boot && mlx5_init_once(dev, priv)) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		mlx5_core_err(dev, "enable msix failed\n");
		goto err_cleanup_once;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		goto err_disable_msix;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		mlx5_core_err(dev, "Failed to map blue flame area\n");

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "flow steering init %d\n", err);
		goto err_free_comp_eqs;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "mpfs init failed %d\n", err);
		goto err_fs;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_mpfs;
	}

	err = mlx5_register_device(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_register_device failed %d\n", err);
		goto err_fpga;
	}

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

out:
	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_fpga:
	mlx5_fpga_device_stop(dev);

err_mpfs:
	mlx5_mpfs_destroy(dev);

err_fs:
	mlx5_cleanup_fs(dev);

err_free_comp_eqs:
	free_comp_eqs(dev);
	unmap_bf_area(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_disable_msix:
	mlx5_disable_msix(dev);

err_cleanup_once:
	if (boot)
		mlx5_cleanup_once(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev, boot);
	if (mlx5_cmd_teardown_hca(dev)) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}

static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			   bool cleanup)
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	mlx5_unregister_device(dev);

	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_fpga_device_stop(dev);
	mlx5_mpfs_destroy(dev);
	mlx5_cleanup_fs(dev);
	unmap_bf_area(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev, cleanup);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_cmd_cleanup(dev);

out:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};
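
/*
 * Port module event statistics are declared with an x-macro: each m()
 * entry names a counter field together with its sysctl name and
 * description.  Expanding the list with MLX5_STATS_DESC yields the
 * flat name/description string array below, which init_one() walks
 * when it registers one sysctl per error counter.
 */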
#define	MLX5_STATS_DESC(a, b, c, d, e, ...) d, e,

#define	MLX5_PORT_MODULE_ERROR_STATS(m)				\
m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \
m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \
m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck (I2C or data shorted)") \
m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \
m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \
m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \
m(+1, u64, high_temp, "high_temp", "Module High Temperature") \
m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted") \
m(+1, u64, pmd_type_not_enabled, "pmd_type_not_enabled", "PMD type is not enabled") \
m(+1, u64, laster_tec_failure, "laster_tec_failure", "Laser TEC failure") \
m(+1, u64, high_current, "high_current", "High current") \
m(+1, u64, high_voltage, "high_voltage", "High voltage") \
m(+1, u64, pcie_sys_power_slot_exceeded, "pcie_sys_power_slot_exceeded", "PCIe system power slot Exceeded") \
m(+1, u64, high_power, "high_power", "High power") \
m(+1, u64, module_state_machine_fault, "module_state_machine_fault", "Module State Machine fault")

static const char *mlx5_pme_err_desc[] = {
	MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC)
};

static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	device_t bsddev = pdev->dev.bsddev;
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
	int num_vfs, sriov_pos;
#endif
	int i, err;
	struct sysctl_oid *pme_sysctl_node;
	struct sysctl_oid *pme_err_sysctl_node;
	struct sysctl_oid *cap_sysctl_node;
	struct sysctl_oid *current_cap_sysctl_node;
	struct sysctl_oid *max_cap_sysctl_node;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL)
		return -ENOMEM;
	priv = &dev->priv;
	if (id)
		priv->pci_dev_data = id->driver_data;

	if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
		device_printf(bsddev,
		    "WARN: selected profile out of range, selecting default (%d)\n",
		    MLX5_DEFAULT_PROF);
		mlx5_prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[mlx5_prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	/* Set desc */
	device_set_desc(bsddev, mlx5_version);

	sysctl_ctx_init(&dev->sysctl_ctx);
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
	    "Maximum number of MSIX event queue vectors, if set");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0,
	    "0:Invalid 1:Sufficient 2:Insufficient");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0,
	    "Current power value in Watts");

	pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "pme_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Port module event statistics");
	if (pme_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node),
	    OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Port module event error statistics");
	if (pme_err_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED],
	    0, "Number of times module was plugged");
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED],
	    0, "Number of times module was unplugged");
	for (i = 0; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) {
		SYSCTL_ADD_U64(&dev->sysctl_ctx,
		    SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO,
		    mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE,
		    &dev->priv.pme_stats.error_counters[i],
		    0, mlx5_pme_err_desc[2 * i + 1]);
	}

	cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "caps", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "hardware capabilities raw bitstrings");
	if (cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "current", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "");
	if (current_cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "max", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "");
	if (max_cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_GENERAL],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_GENERAL],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ODP],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ODP],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ATOMIC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
"IU", ""); 1463 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1464 SYSCTL_CHILDREN(max_cap_sysctl_node), 1465 OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE, 1466 &dev->hca_caps_max[MLX5_CAP_ATOMIC], 1467 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1468 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1469 SYSCTL_CHILDREN(current_cap_sysctl_node), 1470 OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE, 1471 &dev->hca_caps_cur[MLX5_CAP_ROCE], 1472 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1473 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1474 SYSCTL_CHILDREN(max_cap_sysctl_node), 1475 OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE, 1476 &dev->hca_caps_max[MLX5_CAP_ROCE], 1477 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1478 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1479 SYSCTL_CHILDREN(current_cap_sysctl_node), 1480 OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE, 1481 &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS], 1482 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1483 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1484 SYSCTL_CHILDREN(max_cap_sysctl_node), 1485 OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE, 1486 &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS], 1487 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1488 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1489 SYSCTL_CHILDREN(current_cap_sysctl_node), 1490 OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE, 1491 &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], 1492 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1493 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1494 SYSCTL_CHILDREN(max_cap_sysctl_node), 1495 OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE, 1496 &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], 1497 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1498 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1499 SYSCTL_CHILDREN(current_cap_sysctl_node), 1500 OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, 1501 &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], 1502 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1503 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1504 SYSCTL_CHILDREN(max_cap_sysctl_node), 1505 OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, 1506 &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE], 1507 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1508 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1509 SYSCTL_CHILDREN(current_cap_sysctl_node), 1510 OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, 1511 &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], 1512 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1513 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1514 SYSCTL_CHILDREN(max_cap_sysctl_node), 1515 OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, 1516 &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], 1517 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1518 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1519 SYSCTL_CHILDREN(current_cap_sysctl_node), 1520 OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE, 1521 &dev->hca_caps_cur[MLX5_CAP_ESWITCH], 1522 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1523 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1524 SYSCTL_CHILDREN(max_cap_sysctl_node), 1525 OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE, 1526 &dev->hca_caps_max[MLX5_CAP_ESWITCH], 1527 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1528 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1529 SYSCTL_CHILDREN(current_cap_sysctl_node), 1530 OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE, 1531 &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT], 1532 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1533 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1534 
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_QOS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_QOS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_DEBUG],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_DEBUG],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "pcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "mcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "qcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "fpga", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", "");

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_health_init failed %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
		goto clean_health;
	}

	mlx5_fwdump_prep(dev);

	mlx5_firmware_update(dev);
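
	/*
	 * SR-IOV setup (when compiled with PCI_IOV): read TotalVFs from
	 * the PCI SR-IOV capability, size the eswitch for the PF plus
	 * that many VFs, and register the PF/VF configuration schemas
	 * (mac-addr, node-guid, port-guid) with the pci_iov(4) framework.
	 */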
#ifdef PCI_IOV
	if (MLX5_CAP_GEN(dev, vport_group_manager)) {
		if (pci_find_extcap(bsddev, PCIZ_SRIOV, &sriov_pos) == 0) {
			num_vfs = pci_read_config(bsddev, sriov_pos +
			    PCIR_SRIOV_TOTAL_VFS, 2);
		} else {
			mlx5_core_info(dev, "cannot find SR-IOV PCIe cap\n");
			num_vfs = 0;
		}
		err = mlx5_eswitch_init(dev, 1 + num_vfs);
		if (err == 0) {
			pf_schema = pci_iov_schema_alloc_node();
			vf_schema = pci_iov_schema_alloc_node();
			pci_iov_schema_add_unicast_mac(vf_schema,
			    iov_mac_addr_name, 0, NULL);
			pci_iov_schema_add_uint64(vf_schema, iov_node_guid_name,
			    0, 0);
			pci_iov_schema_add_uint64(vf_schema, iov_port_guid_name,
			    0, 0);
			err = pci_iov_attach(bsddev, pf_schema, vf_schema);
			if (err != 0) {
				device_printf(bsddev,
				    "Failed to initialize SR-IOV support, error %d\n",
				    err);
			}
		} else {
			mlx5_core_err(dev, "eswitch init failed, error %d\n",
			    err);
		}
	}
#endif

	pci_save_state(bsddev);
	return 0;

clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	mtx_destroy(&dev->dump_lock);
clean_sysctl_ctx:
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	if (mlx5_unload_one(dev, priv, true)) {
		mlx5_core_err(dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_fwdump_clean(dev);
	mlx5_pci_close(dev, priv);
	mtx_destroy(&dev->dump_lock);
	pci_set_drvdata(pdev, NULL);
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	mlx5_core_info(dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, priv, false);

	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
	    PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev,
		    "mlx5_pci_enable_device failed with error code: %d\n",
		    err);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
	pci_restore_state(pdev->dev.bsddev);
	pci_save_state(pdev->dev.bsddev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/*
 * Wait for the device to show vital signs.  For now we check that we
 * can read the device ID and that the health buffer shows a non-zero
 * value different from 0xffffffff.
 */
static void wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 count;
	u16 did;
	int i;

	/* Wait for firmware to be ready after reset */
	msleep(1000);
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			mlx5_core_warn(dev, "failed reading config word\n");
			break;
		}
		if (did == pdev->device) {
			mlx5_core_info(dev,
			    "device ID correctly read after %d iterations\n", i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		mlx5_core_warn(dev, "could not read device ID\n");

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			mlx5_core_info(dev,
			    "Counter value 0x%x after %d iterations\n",
			    count, i);
			break;
		}
		msleep(50);
	}

	if (i == niter)
		mlx5_core_warn(dev, "could not read health counter\n");
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	wait_vital(pdev);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		mlx5_core_err(dev,
		    "mlx5_load_one failed with error code: %d\n", err);
	else
		mlx5_core_info(dev, "device recovered\n");
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected	= mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};

#ifdef PCI_IOV
static int
mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;
	int err;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	if (priv->eswitch == NULL)
		return (ENXIO);
	if (priv->eswitch->total_vports < num_vfs + 1)
		num_vfs = priv->eswitch->total_vports - 1;
	err = mlx5_eswitch_enable_sriov(priv->eswitch, num_vfs);
	return (-err);
}

static void
mlx5_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	mlx5_eswitch_disable_sriov(priv->eswitch);
}
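
/*
 * Per-VF configuration callback.  vfnum is the zero-based VF index
 * handed in by the pci_iov(4) framework; the corresponding eswitch
 * vport is vfnum + 1 because vport 0 is the PF itself.
 */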
static int
mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;
	const void *mac;
	size_t mac_size;
	uint64_t node_guid, port_guid;
	int error;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	if (vfnum + 1 >= priv->eswitch->total_vports)
		return (ENXIO);

	if (nvlist_exists_binary(vf_config, iov_mac_addr_name)) {
		mac = nvlist_get_binary(vf_config, iov_mac_addr_name,
		    &mac_size);
		error = -mlx5_eswitch_set_vport_mac(priv->eswitch,
		    vfnum + 1, __DECONST(u8 *, mac));
		if (error != 0) {
			mlx5_core_err(core_dev,
			    "setting MAC for VF %d failed, error %d\n",
			    vfnum + 1, error);
		}
	}

	if (nvlist_exists_number(vf_config, iov_node_guid_name)) {
		node_guid = nvlist_get_number(vf_config, iov_node_guid_name);
		error = -mlx5_modify_nic_vport_node_guid(core_dev, vfnum + 1,
		    node_guid);
		if (error != 0) {
			mlx5_core_err(core_dev,
			    "modifying node GUID for VF %d failed, error %d\n",
			    vfnum + 1, error);
		}
	}

	if (nvlist_exists_number(vf_config, iov_port_guid_name)) {
		port_guid = nvlist_get_number(vf_config, iov_port_guid_name);
		error = -mlx5_modify_nic_vport_port_guid(core_dev, vfnum + 1,
		    port_guid);
		if (error != 0) {
			mlx5_core_err(core_dev,
			    "modifying port GUID for VF %d failed, error %d\n",
			    vfnum + 1, error);
		}
	}

	error = -mlx5_eswitch_set_vport_state(priv->eswitch, vfnum + 1,
	    VPORT_STATE_FOLLOW);
	if (error != 0) {
		mlx5_core_err(core_dev,
		    "upping vport for VF %d failed, error %d\n",
		    vfnum + 1, error);
	}
	error = -mlx5_core_enable_hca(core_dev, vfnum + 1);
	if (error != 0) {
		mlx5_core_err(core_dev, "enabling VF %d failed, error %d\n",
		    vfnum + 1, error);
	}
	return (error);
}
#endif
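
/*
 * On shutdown, prefer asking firmware to tear itself down instead of
 * running the full unload path: fast teardown first, then the force
 * variant if only that is supported.  Either command stops the PCI
 * bus communication with the HCA, so the health poller is drained and
 * stopped beforehand, and the device is left in the error state on
 * success.
 */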
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown, force_teardown;
	int err;

	if (!mlx5_fast_unload_enabled) {
		mlx5_core_dbg(dev, "fast unload is disabled by user\n");
		return -EOPNOTSUPP;
	}

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n",
	    force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n",
	    fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev,
		    "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/*
	 * The panic teardown firmware command stops the PCI bus
	 * communication with the HCA, so the health poll is no longer
	 * needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	err = mlx5_cmd_fast_teardown_hca(dev);
	if (!err)
		goto done;

	err = mlx5_cmd_force_teardown_hca(dev);
	if (!err)
		goto done;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
	mlx5_start_health_poll(dev);
	return err;
done:
	mlx5_enter_error_state(dev, true);
	return 0;
}

static void mlx5_shutdown_disable_interrupts(struct mlx5_core_dev *mdev)
{
	int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
	int x;

	mdev->priv.disable_irqs = 1;

	/* wait for all IRQ handlers to finish processing */
	for (x = 0; x != nvec; x++)
		synchronize_irq(mdev->priv.msix_arr[x].vector);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	/* enter polling mode */
	mlx5_cmd_use_polling(dev);

	set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state);

	/* disable all interrupts */
	mlx5_shutdown_disable_interrupts(dev);

	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 4121) },
	{ PCI_VDEVICE(MELLANOX, 4122) },
	{ PCI_VDEVICE(MELLANOX, 4123) },
	{ PCI_VDEVICE(MELLANOX, 4124) },
	{ PCI_VDEVICE(MELLANOX, 4125) },
	{ PCI_VDEVICE(MELLANOX, 4126) },
	{ PCI_VDEVICE(MELLANOX, 4127) },
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) },
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) },
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}

struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_core_pci_table,
	.shutdown	= shutdown_one,
	.probe		= init_one,
	.remove		= remove_one,
	.err_handler	= &mlx5_err_handler,
#ifdef PCI_IOV
	.bsd_iov_init	= mlx5_iov_init,
	.bsd_iov_uninit	= mlx5_iov_uninit,
	.bsd_iov_add_vf	= mlx5_iov_add_vf,
#endif
};

static int __init init(void)
{
	int err;

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_ctl_init();
	if (err)
		goto err_ctl;

	return 0;

err_ctl:
	pci_unregister_driver(&mlx5_core_driver);

err_debug:
	return err;
}

static void __exit cleanup(void)
{
	mlx5_ctl_fini();
	pci_unregister_driver(&mlx5_core_driver);
}

module_init_order(init, SI_ORDER_FIRST);
module_exit_order(cleanup, SI_ORDER_FIRST);