/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#define LINUXKPI_PARAM_PREFIX mlx5_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#include "fs_core.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
#endif
MODULE_VERSION(mlx5, 1);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF 2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static int mlx5_core_msix_eqvec;
module_param_named(msix_eqvec, mlx5_core_msix_eqvec, int, 0644);
MODULE_PARM_DESC(msix_eqvec, "Maximum number of MSIX event queue vectors");

#define NUMA_NO_NODE -1

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface	*intf;
	void			*context;
};

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profiles[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 17,
		.mr_cache[0]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[1]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[2]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[3]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[4]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[5]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[6]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[7]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[8]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[9]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[10]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[11]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[12]	= {
			.size	= 64,
			.limit	= 32
		},
		.mr_cache[13]	= {
			.size	= 32,
			.limit	= 16
		},
		.mr_cache[14]	= {
			.size	= 16,
			.limit	= 8
		},
	},
	[3] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 17,
	},
};

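/*
 * Configure DMA addressing for the device: try 64-bit streaming and
 * coherent (consistent) DMA masks first, fall back to 32-bit masks if
 * the platform refuses, and cap the maximum DMA segment size at 2GB.
 */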
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

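/*
 * Allocate MSI-X vectors: MLX5_EQ_VEC_COMP_BASE vectors are reserved for
 * the control/async EQs, plus one completion vector per port per online
 * CPU unless limited by the msix_eqvec tunable; the total is also bounded
 * by the firmware's log_max_eq capability.
 */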
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int limit = mlx5_core_msix_eqvec;
	int nvec = MLX5_EQ_VEC_COMP_BASE;
	int i;

	if (limit > 0)
		nvec += limit;
	else
		nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();

	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);
	if (!priv->msix_arr)
		return -ENOMEM;

	priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL);
	if (!priv->irq_info) {
		kfree(priv->msix_arr);
		priv->msix_arr = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT |
				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};

static u16 to_fw_pkey_sz(u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size);
		return 0;
	}
}

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}

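/*
 * Query a capability group twice, caching the currently enabled values in
 * dev->hca_caps_cur[] and the firmware maxima in dev->hca_caps_max[].
 */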
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}

static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz);

	kfree(set_ctx);
	return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

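/*
 * Negotiate the ISSI level with the firmware: query the supported ISSI
 * mask and switch to ISSI 1 when it is offered, otherwise stay on ISSI 0.
 * Firmware that does not recognize the command at all is treated as ISSI 0.
 */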
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
			pr_debug("Only ISSI 0 is supported\n");
			return 0;
		}

		printf("mlx5_core: ERR: ""failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
		if (err) {
			printf("mlx5_core: ERR: ""failed to set ISSI=1 err(%d)\n", err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == eq_ix) {
			int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE;

			snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME,
				 "%s-%d", name, eq_ix);

			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}

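/*
 * Create one completion EQ per MSI-X completion vector, bind each to its
 * interrupt vector and UAR, and keep them on priv.eq_table.comp_eqs_list.
 */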
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	u64 end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}

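/*
 * Interface (protocol driver) attachment: every registered mlx5_interface
 * is bound to every registered core device through a mlx5_device_context
 * that is created by the interface's add() callback and released by its
 * remove() callback.
 */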
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	CURVNET_SET_QUIET(vnet0);
	dev_ctx->context = intf->add(dev);
	CURVNET_RESTORE();

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

static int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
err_dbg:
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}

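/*
 * One-time software initialization performed on the first load: query the
 * device capabilities and board id, then set up the EQ, CQ, QP, SRQ and MR
 * tables (plus the rate-limit table when RATELIMIT is configured).
 */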
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	err = mlx5_vsc_find_cap(dev);
	if (err)
		dev_err(&pdev->dev, "Unable to find vendor specific capabilities\n");

	err = mlx5_query_hca_caps(dev);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto out;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		dev_err(&pdev->dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto out;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

#ifdef RATELIMIT
	err = mlx5_init_rl_table(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}
#endif
	return 0;

#ifdef RATELIMIT
err_tables_cleanup:
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
#endif

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
#ifdef RATELIMIT
	mlx5_cleanup_rl_table(dev);
#endif
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}

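/*
 * Bring the device up: initialize the command interface, wait for the
 * firmware, enable the HCA, negotiate ISSI, hand out boot and init pages,
 * program HCA capabilities, run INIT_HCA, start health polling, then set
 * up MSI-X, UARs, EQs and flow steering before registering with the
 * attached interfaces. The error labels unwind these steps in reverse.
 */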
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			 bool boot)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
			 __func__);
		goto out;
	}

	device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	/*
	 * On load removing any previous indication of internal error,
	 * device is up
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_cmd_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);

	if (boot) {
		err = mlx5_init_once(dev, priv);
		if (err) {
			dev_err(&pdev->dev, "sw objs init failed\n");
			goto err_stop_poll;
		}
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
		goto err_cleanup_once;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
		goto err_disable_msix;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "flow steering init %d\n", err);
		goto err_free_comp_eqs;
	}

	err = mlx5_register_device(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
		goto err_fs;
	}

	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

out:
	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_fs:
	mlx5_cleanup_fs(dev);

err_free_comp_eqs:
	free_comp_eqs(dev);
	unmap_bf_area(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_disable_msix:
	mlx5_disable_msix(dev);

err_cleanup_once:
	if (boot)
		mlx5_cleanup_once(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}

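/*
 * Tear the device down in the reverse order of mlx5_load_one(); when
 * "cleanup" is set the one-time software state is destroyed as well.
 */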
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			   bool cleanup)
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	mlx5_unregister_device(dev);

	mlx5_cleanup_fs(dev);
	unmap_bf_area(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_cmd_cleanup(dev);

out:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};

static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	priv = &dev->priv;
	if (id)
		priv->pci_dev_data = id->driver_data;

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
		printf("mlx5_core: WARN: ""selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pci_init failed %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_health_init failed %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_load_one failed %d\n", err);
		goto clean_health;
	}

	mlx5_fwdump_prep(dev);

	pci_save_state(pdev->dev.bsddev);
	return 0;

clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	if (mlx5_unload_one(dev, priv, true)) {
		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_fwdump_clean(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_pci_close(dev, priv);
	pci_set_drvdata(pdev, NULL);
	kfree(dev);
}

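/*
 * PCI error recovery: on a detected channel error the device is put into
 * error state and unloaded; after a slot reset it is re-enabled and, once
 * it shows vital signs again, reloaded by mlx5_pci_resume().
 */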
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, priv, false);

	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
	pci_restore_state(pdev->dev.bsddev);
	pci_save_state(pdev->dev.bsddev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non-zero value which is different from 0xffffffff
 */
static void wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 count;
	u16 did;
	int i;

	/* Wait for firmware to be ready after reset */
	msleep(1000);
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			dev_warn(&pdev->dev, "failed reading config word\n");
			break;
		}
		if (did == pdev->device) {
			dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
			break;
		}
		msleep(50);
	}

	if (i == niter)
		dev_warn(&pdev->dev, "%s-%d: could not read a valid health counter\n", __func__, __LINE__);
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	wait_vital(pdev);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n",
			__func__, err);
	else
		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected	= mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};

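/*
 * Fast shutdown path: ask the firmware to force-teardown the HCA instead of
 * running the full unload sequence. This is only attempted when the firmware
 * advertises force_teardown and the device is not already in an internal
 * error state.
 */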
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	int err;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	err = mlx5_cmd_force_teardown_hca(dev);
	if (err) {
		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
		return err;
	}

	mlx5_enter_error_state(dev, true);

	return 0;
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 4121) },
	{ PCI_VDEVICE(MELLANOX, 4122) },
	{ PCI_VDEVICE(MELLANOX, 4123) },
	{ PCI_VDEVICE(MELLANOX, 4124) },
	{ PCI_VDEVICE(MELLANOX, 4125) },
	{ PCI_VDEVICE(MELLANOX, 4126) },
	{ PCI_VDEVICE(MELLANOX, 4127) },
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) },
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) },
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}

struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_core_pci_table,
	.shutdown	= shutdown_one,
	.probe		= init_one,
	.remove		= remove_one,
	.err_handler	= &mlx5_err_handler
};

static int __init init(void)
{
	int err;

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_fwdump_init();
	if (err)
		goto err_fwdump;

	return 0;

err_fwdump:
	pci_unregister_driver(&mlx5_core_driver);

err_debug:
	return err;
}

static void __exit cleanup(void)
{
	mlx5_fwdump_fini();
	pci_unregister_driver(&mlx5_core_driver);
}

module_init(init);
module_exit(cleanup);