/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include "mlx5_core.h"
#include "eswitch.h"

#include "opt_rss.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

enum {
        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x100,
        MLX5_NUM_CMD_EQE        = 32,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)       | \
                               (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE))

struct map_eq_in {
        u64     mask;
        u32     reserved;
        u32     unmap_eqn;
};

struct cre_des_eq {
        u8      reserved[15];
        u8      eqn;
};

/* Function prototypes */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe);

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

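/*
 * Return the next EQE owned by software, or NULL if hardware has not yet
 * produced one.  The ring holds eq->nent entries (a power of two); hardware
 * flips the owner bit of each entry on every pass over the ring, so an entry
 * belongs to software when its owner bit matches the "pass" bit of the
 * consumer index (cons_index & nent).
 */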
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
                return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
        case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
                return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
        case MLX5_EVENT_TYPE_FPGA_ERROR:
                return "MLX5_EVENT_TYPE_FPGA_ERROR";
        case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
                return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
        case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
                return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
        case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
                return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
        default:
                return "Unrecognized event";
        }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}

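/*
 * Map a DCBX change event subtype to the mlx5_dev_event reported to the
 * upper layers; returns -1 for unrecognized subtypes.
 */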
static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
        switch (subtype) {
        case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
                return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
                return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
        case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
                return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
        }
        return -1;
}

static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32) cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static void
mlx5_temp_warning_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{

        mlx5_core_warn(dev,
            "High temperature on sensors with bit set %#jx %#jx\n",
            (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb),
            (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb));
}

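/*
 * Drain all software-owned EQEs from the given EQ and dispatch them to the
 * appropriate handlers.  The consumer index is written back to hardware every
 * MLX5_NUM_SPARE_EQE entries and the EQ is re-armed on exit.  Returns non-zero
 * if at least one EQE was processed.
 */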
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int eqes_found = 0;
        int set_ci = 0;
        u32 cqn;
        u32 rsn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                atomic_thread_fence_acq();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                    eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        mlx5_cq_completion(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                            eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                            eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_srq_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                                mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector),
                                    MLX5_CMD_MODE_EVENTS);
                        }
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                if (dev->event)
                                        dev->event(dev, port_subtype_event(eqe->sub_type),
                                            (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                    port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
                        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
                        case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
                        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                                if (dev->event)
                                        dev->event(dev,
                                            dcbx_subevent(eqe->sub_type), 0);
                                break;
                        default:
                                mlx5_core_warn(dev,
                                    "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
                                    port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
                        mlx5_port_general_notification_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                            cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                                    func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
                        mlx5_port_module_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                        {
                                struct mlx5_eqe_vport_change *vc_eqe =
                                    &eqe->data.vport_change;
                                u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

                                if (dev->event)
                                        dev->event(dev,
                                            MLX5_DEV_EVENT_VPORT_CHANGE,
                                            (unsigned long)vport_num);
                        }
                        if (dev->priv.eswitch != NULL)
                                mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
                        break;

                case MLX5_EVENT_TYPE_FPGA_ERROR:
                case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
                        mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
                        break;

                case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
                        mlx5_temp_warning_event(dev, eqe);
                        break;

                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                            eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        return eqes_found;
}

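/*
 * MSI-X interrupt handler.  Every EQ is bound to its own vector and the EQ
 * pointer is passed as the interrupt argument, so the handler only processes
 * that EQ, and only while interrupt handling has not been administratively
 * disabled.
 */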
static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;

        /* check if IRQs are not disabled */
        if (likely(dev->priv.disable_irqs == 0))
                mlx5_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
    int nent, u64 mask)
{
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;

        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
            &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
            MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        MLX5_SET64(create_eq_in, in, event_bitmask, mask);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
            eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = vecidx;
        eq->dev = dev;
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
            "mlx5_core", eq);
        if (err)
                goto err_eq;
#ifdef RSS
        if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
                u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;

                err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
                    rss_getcpu(bucket % rss_getnumbuckets()));
                if (err)
                        goto err_irq;
        }
#else
        if (0)
                goto err_irq;
#endif

        /* EQs are created in ARMED state */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                    eq->eqn);
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

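/*
 * One-time EQ table setup/teardown.  Only the table lock is initialized here;
 * the EQs themselves are created later by mlx5_start_eqs().
 */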
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        int err;

        spin_lock_init(&dev->priv.eq_table.lock);

        err = 0;

        return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}

int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |= (1ull <<
                    MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

        if (MLX5_CAP_GEN(dev, nic_vport_change_event))
                async_event_mask |= (1ull <<
                    MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, dcbx))
                async_event_mask |= (1ull <<
                    MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_GEN(dev, general_notification_event)) {
                async_event_mask |= (1ull <<
                    MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT);
        }

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
            MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
            MLX5_NUM_ASYNC_EQE, async_event_mask);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq,
            MLX5_EQ_VEC_PAGES,
            /* TODO: sriov max_vf + */ 1,
            1 << MLX5_EVENT_TYPE_PAGE_REQUEST);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}

int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;

        mlx5_destroy_unmap_eq(dev, &table->async_eq);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_cmd_use_events(dev);

        return err;
}

int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

        memset(out, 0, outlen);
        MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

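/*
 * Translate a port module event error type into a human-readable string for
 * logging.
 */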
static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
        switch (error_type) {
        case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
                return "Power budget exceeded";
        case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
                return "Long Range for non MLNX cable";
        case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
                return "Bus stuck (I2C or data shorted)";
        case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
                return "No EEPROM/retry timeout";
        case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
                return "Enforce part number list";
        case MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE:
                return "Unknown identifier";
        case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
                return "High Temperature";
        case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
                return "Bad or shorted cable/module";
        case MLX5_MODULE_EVENT_ERROR_PMD_TYPE_NOT_ENABLED:
                return "PMD type is not enabled";
        case MLX5_MODULE_EVENT_ERROR_LASTER_TEC_FAILURE:
                return "Laster_TEC_failure";
        case MLX5_MODULE_EVENT_ERROR_HIGH_CURRENT:
                return "High_current";
        case MLX5_MODULE_EVENT_ERROR_HIGH_VOLTAGE:
                return "High_voltage";
        case MLX5_MODULE_EVENT_ERROR_PCIE_SYS_POWER_SLOT_EXCEEDED:
                return "pcie_system_power_slot_Exceeded";
        case MLX5_MODULE_EVENT_ERROR_HIGH_POWER:
                return "High_power";
        case MLX5_MODULE_EVENT_ERROR_MODULE_STATE_MACHINE_FAULT:
                return "Module_state_machine_fault";
        default:
                return "Unknown error type";
        }
}

unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
        if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
                return 0;               /* undefined */
        return dev->module_status[module_num];
}

static void mlx5_port_module_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe)
{
        unsigned int module_num;
        unsigned int module_status;
        unsigned int error_type;
        struct mlx5_eqe_port_module_event *module_event_eqe;

        module_event_eqe = &eqe->data.port_module_event;

        module_num = (unsigned int)module_event_eqe->module;
        module_status = (unsigned int)module_event_eqe->module_status &
            PORT_MODULE_EVENT_MODULE_STATUS_MASK;
        error_type = (unsigned int)module_event_eqe->error_type &
            PORT_MODULE_EVENT_ERROR_TYPE_MASK;

        if (module_status < MLX5_MODULE_STATUS_NUM)
                dev->priv.pme_stats.status_counters[module_status]++;

        switch (module_status) {
        case MLX5_MODULE_STATUS_PLUGGED_ENABLED:
                mlx5_core_info(dev,
                    "Module %u, status: plugged and enabled\n",
                    module_num);
                break;

        case MLX5_MODULE_STATUS_UNPLUGGED:
                mlx5_core_info(dev,
                    "Module %u, status: unplugged\n", module_num);
                break;

        case MLX5_MODULE_STATUS_ERROR:
                mlx5_core_err(dev,
                    "Module %u, status: error, %s (%d)\n",
                    module_num,
                    mlx5_port_module_event_error_type_to_string(error_type),
                    error_type);
                if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
                        dev->priv.pme_stats.error_counters[error_type]++;
                break;

        default:
                mlx5_core_info(dev,
                    "Module %u, unknown status %d\n", module_num, module_status);
        }

        /* store module status */
        if (module_num < MLX5_MAX_PORTS)
                dev->module_status[module_num] = module_status;
}

static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe)
{
        u8 port = (eqe->data.port.port >> 4) & 0xf;
        u32 rqn;
        struct mlx5_eqe_general_notification_event *general_event;

        switch (eqe->sub_type) {
        case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT:
                general_event = &eqe->data.general_notifications;
                rqn = be32_to_cpu(general_event->rq_user_index_delay_drop) &
                    0xffffff;
                break;
        case MLX5_GEN_EVENT_SUBTYPE_PCI_POWER_CHANGE_EVENT:
                mlx5_trigger_health_watchdog(dev);
                break;
        default:
                mlx5_core_warn(dev,
                    "general event with unrecognized subtype: port %d, sub_type %d\n",
                    port, eqe->sub_type);
                break;
        }
}

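/*
 * mlx5_disable_interrupts() masks every MSI-X vector owned by the device
 * (command, async, pages and all completion vectors).  mlx5_poll_interrupts()
 * processes the same EQs synchronously, and is a no-op while interrupt
 * processing is disabled via priv.disable_irqs.
 */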
void
mlx5_disable_interrupts(struct mlx5_core_dev *dev)
{
        int nvec = dev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
        int x;

        for (x = 0; x != nvec; x++)
                disable_irq(dev->priv.msix_arr[x].vector);
}

void
mlx5_poll_interrupts(struct mlx5_core_dev *dev)
{
        struct mlx5_eq *eq;

        if (unlikely(dev->priv.disable_irqs != 0))
                return;

        mlx5_eq_int(dev, &dev->priv.eq_table.cmd_eq);
        mlx5_eq_int(dev, &dev->priv.eq_table.async_eq);
        mlx5_eq_int(dev, &dev->priv.eq_table.pages_eq);

        list_for_each_entry(eq, &dev->priv.eq_table.comp_eqs_list, list)
                mlx5_eq_int(dev, eq);
}