1 /*- 2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD$ 26 */ 27 28 #include <linux/module.h> 29 #include <linux/errno.h> 30 #include <linux/pci.h> 31 #include <linux/dma-mapping.h> 32 #include <linux/slab.h> 33 #include <linux/delay.h> 34 #include <linux/random.h> 35 #include <linux/io-mapping.h> 36 #include <linux/hardirq.h> 37 #include <linux/ktime.h> 38 #include <dev/mlx5/driver.h> 39 40 #include "mlx5_core.h" 41 42 enum { 43 CMD_IF_REV = 5, 44 }; 45 46 enum { 47 CMD_MODE_POLLING, 48 CMD_MODE_EVENTS 49 }; 50 51 enum { 52 NUM_LONG_LISTS = 2, 53 NUM_MED_LISTS = 64, 54 LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + 55 MLX5_CMD_DATA_BLOCK_SIZE, 56 MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, 57 }; 58 59 enum { 60 MLX5_CMD_DELIVERY_STAT_OK = 0x0, 61 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, 62 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, 63 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, 64 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, 65 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, 66 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, 67 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, 68 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, 69 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, 70 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, 71 }; 72 73 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, 74 struct mlx5_cmd_msg *in, 75 struct mlx5_cmd_msg *out, 76 void *uout, int uout_size, 77 mlx5_cmd_cbk_t cbk, 78 void *context, int page_queue) 79 { 80 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; 81 struct mlx5_cmd_work_ent *ent; 82 83 ent = kzalloc(sizeof(*ent), alloc_flags); 84 if (!ent) 85 return ERR_PTR(-ENOMEM); 86 87 ent->in = in; 88 ent->out = out; 89 ent->uout = uout; 90 ent->uout_size = uout_size; 91 ent->callback = cbk; 92 ent->context = context; 93 ent->cmd = cmd; 94 ent->page_queue = page_queue; 95 96 return ent; 97 } 98 99 static u8 alloc_token(struct mlx5_cmd *cmd) 100 { 101 u8 token; 102 103 spin_lock(&cmd->token_lock); 104 cmd->token++; 105 if (cmd->token == 0) 106 cmd->token++; 107 token = cmd->token; 108 spin_unlock(&cmd->token_lock); 109 110 return token; 111 } 112 113 static int alloc_ent(struct mlx5_cmd *cmd) 114 { 115 unsigned long flags; 116 int ret; 117 118 spin_lock_irqsave(&cmd->alloc_lock, flags); 119 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); 120 if (ret < cmd->max_reg_cmds) 121 clear_bit(ret, &cmd->bitmask); 122 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 123 124 return ret < cmd->max_reg_cmds ? ret : -ENOMEM; 125 } 126 127 static void free_ent(struct mlx5_cmd *cmd, int idx) 128 { 129 unsigned long flags; 130 131 spin_lock_irqsave(&cmd->alloc_lock, flags); 132 set_bit(idx, &cmd->bitmask); 133 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 134 } 135 136 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) 137 { 138 return cmd->cmd_buf + (idx << cmd->log_stride); 139 } 140 141 static u8 xor8_buf(void *buf, int len) 142 { 143 u8 *ptr = buf; 144 u8 sum = 0; 145 int i; 146 147 for (i = 0; i < len; i++) 148 sum ^= ptr[i]; 149 150 return sum; 151 } 152 153 static int verify_block_sig(struct mlx5_cmd_prot_block *block) 154 { 155 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) 156 return -EINVAL; 157 158 if (xor8_buf(block, sizeof(*block)) != 0xff) 159 return -EINVAL; 160 161 return 0; 162 } 163 164 static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, 165 int csum) 166 { 167 block->token = token; 168 if (csum) { 169 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - 170 sizeof(block->data) - 2); 171 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 172 } 173 } 174 175 static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) 176 { 177 struct mlx5_cmd_mailbox *next = msg->next; 178 179 while (next) { 180 calc_block_sig(next->buf, token, csum); 181 next = next->next; 182 } 183 } 184 185 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 186 { 187 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 188 calc_chain_sig(ent->in, ent->token, csum); 189 calc_chain_sig(ent->out, ent->token, csum); 190 } 191 192 static void poll_timeout(struct mlx5_cmd_work_ent *ent) 193 { 194 int poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); 195 u8 own; 196 197 do { 198 own = ent->lay->status_own; 199 if (!(own & CMD_OWNER_HW)) { 200 ent->ret = 0; 201 return; 202 } 203 usleep_range(5000, 10000); 204 } while (time_before(jiffies, poll_end)); 205 206 ent->ret = -ETIMEDOUT; 207 } 208 209 static void free_cmd(struct mlx5_cmd_work_ent *ent) 210 { 211 kfree(ent); 212 } 213 214 215 static int verify_signature(struct mlx5_cmd_work_ent *ent) 216 { 217 struct mlx5_cmd_mailbox *next = ent->out->next; 218 int err; 219 u8 sig; 220 221 sig = xor8_buf(ent->lay, sizeof(*ent->lay)); 222 if (sig != 0xff) 223 return -EINVAL; 224 225 while (next) { 226 err = verify_block_sig(next->buf); 227 if (err) 228 return err; 229 230 next = next->next; 231 } 232 233 return 0; 234 } 235 236 static void dump_buf(void *buf, int size, int data_only, int offset) 237 { 238 __be32 *p = buf; 239 int i; 240 241 for (i = 0; i < size; i += 16) { 242 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), 243 be32_to_cpu(p[1]), be32_to_cpu(p[2]), 244 be32_to_cpu(p[3])); 245 p += 4; 246 offset += 16; 247 } 248 if (!data_only) 249 pr_debug("\n"); 250 } 251 252 const char *mlx5_command_str(int command) 253 { 254 switch (command) { 255 case MLX5_CMD_OP_QUERY_HCA_CAP: 256 return "QUERY_HCA_CAP"; 257 258 case MLX5_CMD_OP_SET_HCA_CAP: 259 return "SET_HCA_CAP"; 260 261 case MLX5_CMD_OP_QUERY_ADAPTER: 262 return "QUERY_ADAPTER"; 263 264 case MLX5_CMD_OP_INIT_HCA: 265 return "INIT_HCA"; 266 267 case MLX5_CMD_OP_TEARDOWN_HCA: 268 return "TEARDOWN_HCA"; 269 270 case MLX5_CMD_OP_ENABLE_HCA: 271 return "MLX5_CMD_OP_ENABLE_HCA"; 272 273 case MLX5_CMD_OP_DISABLE_HCA: 274 return "MLX5_CMD_OP_DISABLE_HCA"; 275 276 case MLX5_CMD_OP_QUERY_PAGES: 277 return "QUERY_PAGES"; 278 279 case MLX5_CMD_OP_MANAGE_PAGES: 280 return "MANAGE_PAGES"; 281 282 case MLX5_CMD_OP_QUERY_ISSI: 283 return "QUERY_ISSI"; 284 285 case MLX5_CMD_OP_SET_ISSI: 286 return "SET_ISSI"; 287 288 case MLX5_CMD_OP_CREATE_MKEY: 289 return "CREATE_MKEY"; 290 291 case MLX5_CMD_OP_QUERY_MKEY: 292 return "QUERY_MKEY"; 293 294 case MLX5_CMD_OP_DESTROY_MKEY: 295 return "DESTROY_MKEY"; 296 297 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: 298 return "QUERY_SPECIAL_CONTEXTS"; 299 300 case MLX5_CMD_OP_PAGE_FAULT_RESUME: 301 return "PAGE_FAULT_RESUME"; 302 303 case MLX5_CMD_OP_CREATE_EQ: 304 return "CREATE_EQ"; 305 306 case MLX5_CMD_OP_DESTROY_EQ: 307 return "DESTROY_EQ"; 308 309 case MLX5_CMD_OP_QUERY_EQ: 310 return "QUERY_EQ"; 311 312 case MLX5_CMD_OP_GEN_EQE: 313 return "GEN_EQE"; 314 315 case MLX5_CMD_OP_CREATE_CQ: 316 return "CREATE_CQ"; 317 318 case MLX5_CMD_OP_DESTROY_CQ: 319 return "DESTROY_CQ"; 320 321 case MLX5_CMD_OP_QUERY_CQ: 322 return "QUERY_CQ"; 323 324 case MLX5_CMD_OP_MODIFY_CQ: 325 return "MODIFY_CQ"; 326 327 case MLX5_CMD_OP_CREATE_QP: 328 return "CREATE_QP"; 329 330 case MLX5_CMD_OP_DESTROY_QP: 331 return "DESTROY_QP"; 332 333 case MLX5_CMD_OP_RST2INIT_QP: 334 return "RST2INIT_QP"; 335 336 case MLX5_CMD_OP_INIT2RTR_QP: 337 return "INIT2RTR_QP"; 338 339 case MLX5_CMD_OP_RTR2RTS_QP: 340 return "RTR2RTS_QP"; 341 342 case MLX5_CMD_OP_RTS2RTS_QP: 343 return "RTS2RTS_QP"; 344 345 case MLX5_CMD_OP_SQERR2RTS_QP: 346 return "SQERR2RTS_QP"; 347 348 case MLX5_CMD_OP_2ERR_QP: 349 return "2ERR_QP"; 350 351 case MLX5_CMD_OP_2RST_QP: 352 return "2RST_QP"; 353 354 case MLX5_CMD_OP_QUERY_QP: 355 return "QUERY_QP"; 356 357 case MLX5_CMD_OP_SQD_RTS_QP: 358 return "SQD_RTS_QP"; 359 360 case MLX5_CMD_OP_MAD_IFC: 361 return "MAD_IFC"; 362 363 case MLX5_CMD_OP_INIT2INIT_QP: 364 return "INIT2INIT_QP"; 365 366 case MLX5_CMD_OP_CREATE_PSV: 367 return "CREATE_PSV"; 368 369 case MLX5_CMD_OP_DESTROY_PSV: 370 return "DESTROY_PSV"; 371 372 case MLX5_CMD_OP_CREATE_SRQ: 373 return "CREATE_SRQ"; 374 375 case MLX5_CMD_OP_DESTROY_SRQ: 376 return "DESTROY_SRQ"; 377 378 case MLX5_CMD_OP_QUERY_SRQ: 379 return "QUERY_SRQ"; 380 381 case MLX5_CMD_OP_ARM_RQ: 382 return "ARM_RQ"; 383 384 case MLX5_CMD_OP_CREATE_XRC_SRQ: 385 return "CREATE_XRC_SRQ"; 386 387 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 388 return "DESTROY_XRC_SRQ"; 389 390 case MLX5_CMD_OP_QUERY_XRC_SRQ: 391 return "QUERY_XRC_SRQ"; 392 393 case MLX5_CMD_OP_ARM_XRC_SRQ: 394 return "ARM_XRC_SRQ"; 395 396 case MLX5_CMD_OP_CREATE_DCT: 397 return "CREATE_DCT"; 398 399 case MLX5_CMD_OP_DESTROY_DCT: 400 return "DESTROY_DCT"; 401 402 case MLX5_CMD_OP_DRAIN_DCT: 403 return "DRAIN_DCT"; 404 405 case MLX5_CMD_OP_QUERY_DCT: 406 return "QUERY_DCT"; 407 408 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 409 return "ARM_DCT_FOR_KEY_VIOLATION"; 410 411 case MLX5_CMD_OP_QUERY_VPORT_STATE: 412 return "QUERY_VPORT_STATE"; 413 414 case MLX5_CMD_OP_MODIFY_VPORT_STATE: 415 return "MODIFY_VPORT_STATE"; 416 417 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 418 return "QUERY_ESW_VPORT_CONTEXT"; 419 420 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 421 return "MODIFY_ESW_VPORT_CONTEXT"; 422 423 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 424 return "QUERY_NIC_VPORT_CONTEXT"; 425 426 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 427 return "MODIFY_NIC_VPORT_CONTEXT"; 428 429 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 430 return "QUERY_ROCE_ADDRESS"; 431 432 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 433 return "SET_ROCE_ADDRESS"; 434 435 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 436 return "QUERY_HCA_VPORT_CONTEXT"; 437 438 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: 439 return "MODIFY_HCA_VPORT_CONTEXT"; 440 441 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: 442 return "QUERY_HCA_VPORT_GID"; 443 444 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: 445 return "QUERY_HCA_VPORT_PKEY"; 446 447 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 448 return "QUERY_VPORT_COUNTER"; 449 450 case MLX5_CMD_OP_SET_WOL_ROL: 451 return "SET_WOL_ROL"; 452 453 case MLX5_CMD_OP_QUERY_WOL_ROL: 454 return "QUERY_WOL_ROL"; 455 456 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 457 return "ALLOC_Q_COUNTER"; 458 459 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 460 return "DEALLOC_Q_COUNTER"; 461 462 case MLX5_CMD_OP_QUERY_Q_COUNTER: 463 return "QUERY_Q_COUNTER"; 464 465 case MLX5_CMD_OP_ALLOC_PD: 466 return "ALLOC_PD"; 467 468 case MLX5_CMD_OP_DEALLOC_PD: 469 return "DEALLOC_PD"; 470 471 case MLX5_CMD_OP_ALLOC_UAR: 472 return "ALLOC_UAR"; 473 474 case MLX5_CMD_OP_DEALLOC_UAR: 475 return "DEALLOC_UAR"; 476 477 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 478 return "CONFIG_INT_MODERATION"; 479 480 case MLX5_CMD_OP_ATTACH_TO_MCG: 481 return "ATTACH_TO_MCG"; 482 483 case MLX5_CMD_OP_DETACH_FROM_MCG: 484 return "DETACH_FROM_MCG"; 485 486 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: 487 return "GET_DROPPED_PACKET_LOG"; 488 489 case MLX5_CMD_OP_QUERY_MAD_DEMUX: 490 return "QUERY_MAD_DEMUX"; 491 492 case MLX5_CMD_OP_SET_MAD_DEMUX: 493 return "SET_MAD_DEMUX"; 494 495 case MLX5_CMD_OP_NOP: 496 return "NOP"; 497 498 case MLX5_CMD_OP_ALLOC_XRCD: 499 return "ALLOC_XRCD"; 500 501 case MLX5_CMD_OP_DEALLOC_XRCD: 502 return "DEALLOC_XRCD"; 503 504 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: 505 return "ALLOC_TRANSPORT_DOMAIN"; 506 507 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: 508 return "DEALLOC_TRANSPORT_DOMAIN"; 509 510 case MLX5_CMD_OP_QUERY_CONG_STATUS: 511 return "QUERY_CONG_STATUS"; 512 513 case MLX5_CMD_OP_MODIFY_CONG_STATUS: 514 return "MODIFY_CONG_STATUS"; 515 516 case MLX5_CMD_OP_QUERY_CONG_PARAMS: 517 return "QUERY_CONG_PARAMS"; 518 519 case MLX5_CMD_OP_MODIFY_CONG_PARAMS: 520 return "MODIFY_CONG_PARAMS"; 521 522 case MLX5_CMD_OP_QUERY_CONG_STATISTICS: 523 return "QUERY_CONG_STATISTICS"; 524 525 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: 526 return "ADD_VXLAN_UDP_DPORT"; 527 528 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: 529 return "DELETE_VXLAN_UDP_DPORT"; 530 531 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: 532 return "SET_L2_TABLE_ENTRY"; 533 534 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: 535 return "QUERY_L2_TABLE_ENTRY"; 536 537 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: 538 return "DELETE_L2_TABLE_ENTRY"; 539 540 case MLX5_CMD_OP_CREATE_RMP: 541 return "CREATE_RMP"; 542 543 case MLX5_CMD_OP_MODIFY_RMP: 544 return "MODIFY_RMP"; 545 546 case MLX5_CMD_OP_DESTROY_RMP: 547 return "DESTROY_RMP"; 548 549 case MLX5_CMD_OP_QUERY_RMP: 550 return "QUERY_RMP"; 551 552 case MLX5_CMD_OP_CREATE_RQT: 553 return "CREATE_RQT"; 554 555 case MLX5_CMD_OP_MODIFY_RQT: 556 return "MODIFY_RQT"; 557 558 case MLX5_CMD_OP_DESTROY_RQT: 559 return "DESTROY_RQT"; 560 561 case MLX5_CMD_OP_QUERY_RQT: 562 return "QUERY_RQT"; 563 564 case MLX5_CMD_OP_ACCESS_REG: 565 return "MLX5_CMD_OP_ACCESS_REG"; 566 567 case MLX5_CMD_OP_CREATE_SQ: 568 return "CREATE_SQ"; 569 570 case MLX5_CMD_OP_MODIFY_SQ: 571 return "MODIFY_SQ"; 572 573 case MLX5_CMD_OP_DESTROY_SQ: 574 return "DESTROY_SQ"; 575 576 case MLX5_CMD_OP_QUERY_SQ: 577 return "QUERY_SQ"; 578 579 case MLX5_CMD_OP_CREATE_RQ: 580 return "CREATE_RQ"; 581 582 case MLX5_CMD_OP_MODIFY_RQ: 583 return "MODIFY_RQ"; 584 585 case MLX5_CMD_OP_DESTROY_RQ: 586 return "DESTROY_RQ"; 587 588 case MLX5_CMD_OP_QUERY_RQ: 589 return "QUERY_RQ"; 590 591 case MLX5_CMD_OP_CREATE_TIR: 592 return "CREATE_TIR"; 593 594 case MLX5_CMD_OP_MODIFY_TIR: 595 return "MODIFY_TIR"; 596 597 case MLX5_CMD_OP_DESTROY_TIR: 598 return "DESTROY_TIR"; 599 600 case MLX5_CMD_OP_QUERY_TIR: 601 return "QUERY_TIR"; 602 603 case MLX5_CMD_OP_CREATE_TIS: 604 return "CREATE_TIS"; 605 606 case MLX5_CMD_OP_MODIFY_TIS: 607 return "MODIFY_TIS"; 608 609 case MLX5_CMD_OP_DESTROY_TIS: 610 return "DESTROY_TIS"; 611 612 case MLX5_CMD_OP_QUERY_TIS: 613 return "QUERY_TIS"; 614 615 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 616 return "CREATE_FLOW_TABLE"; 617 618 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 619 return "DESTROY_FLOW_TABLE"; 620 621 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 622 return "QUERY_FLOW_TABLE"; 623 624 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 625 return "CREATE_FLOW_GROUP"; 626 627 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 628 return "DESTROY_FLOW_GROUP"; 629 630 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 631 return "QUERY_FLOW_GROUP"; 632 633 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 634 return "SET_FLOW_TABLE_ENTRY"; 635 636 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 637 return "QUERY_FLOW_TABLE_ENTRY"; 638 639 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 640 return "DELETE_FLOW_TABLE_ENTRY"; 641 642 default: return "unknown command opcode"; 643 } 644 } 645 646 static void dump_command(struct mlx5_core_dev *dev, 647 struct mlx5_cmd_work_ent *ent, int input) 648 { 649 u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); 650 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 651 struct mlx5_cmd_mailbox *next = msg->next; 652 int data_only; 653 u32 offset = 0; 654 int dump_len; 655 656 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); 657 658 if (data_only) 659 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, 660 "dump command data %s(0x%x) %s\n", 661 mlx5_command_str(op), op, 662 input ? "INPUT" : "OUTPUT"); 663 else 664 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", 665 mlx5_command_str(op), op, 666 input ? "INPUT" : "OUTPUT"); 667 668 if (data_only) { 669 if (input) { 670 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); 671 offset += sizeof(ent->lay->in); 672 } else { 673 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); 674 offset += sizeof(ent->lay->out); 675 } 676 } else { 677 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); 678 offset += sizeof(*ent->lay); 679 } 680 681 while (next && offset < msg->len) { 682 if (data_only) { 683 dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); 684 dump_buf(next->buf, dump_len, 1, offset); 685 offset += MLX5_CMD_DATA_BLOCK_SIZE; 686 } else { 687 mlx5_core_dbg(dev, "command block:\n"); 688 dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); 689 offset += sizeof(struct mlx5_cmd_prot_block); 690 } 691 next = next->next; 692 } 693 694 if (data_only) 695 pr_debug("\n"); 696 } 697 698 static void cmd_work_handler(struct work_struct *work) 699 { 700 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 701 struct mlx5_cmd *cmd = ent->cmd; 702 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); 703 struct mlx5_cmd_layout *lay; 704 struct semaphore *sem; 705 706 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 707 if (cmd->moving_to_polling) { 708 mlx5_core_warn(dev, "not expecting command execution, ignoring...\n"); 709 return; 710 } 711 712 down(sem); 713 if (!ent->page_queue) { 714 ent->idx = alloc_ent(cmd); 715 if (ent->idx < 0) { 716 mlx5_core_err(dev, "failed to allocate command entry\n"); 717 up(sem); 718 return; 719 } 720 } else { 721 ent->idx = cmd->max_reg_cmds; 722 } 723 724 ent->token = alloc_token(cmd); 725 cmd->ent_arr[ent->idx] = ent; 726 lay = get_inst(cmd, ent->idx); 727 ent->lay = lay; 728 memset(lay, 0, sizeof(*lay)); 729 memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); 730 ent->op = be32_to_cpu(lay->in[0]) >> 16; 731 if (ent->in->next) 732 lay->in_ptr = cpu_to_be64(ent->in->next->dma); 733 lay->inlen = cpu_to_be32(ent->in->len); 734 if (ent->out->next) 735 lay->out_ptr = cpu_to_be64(ent->out->next->dma); 736 lay->outlen = cpu_to_be32(ent->out->len); 737 lay->type = MLX5_PCI_CMD_XPORT; 738 lay->token = ent->token; 739 lay->status_own = CMD_OWNER_HW; 740 set_signature(ent, !cmd->checksum_disabled); 741 dump_command(dev, ent, 1); 742 ent->ts1 = ktime_get_ns(); 743 744 /* ring doorbell after the descriptor is valid */ 745 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 746 wmb(); 747 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 748 mmiowb(); 749 /* if not in polling don't use ent after this point*/ 750 if (cmd->mode == CMD_MODE_POLLING) { 751 poll_timeout(ent); 752 /* make sure we read the descriptor after ownership is SW */ 753 rmb(); 754 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 755 } 756 } 757 758 static const char *deliv_status_to_str(u8 status) 759 { 760 switch (status) { 761 case MLX5_CMD_DELIVERY_STAT_OK: 762 return "no errors"; 763 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 764 return "signature error"; 765 case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 766 return "token error"; 767 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 768 return "bad block number"; 769 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 770 return "output pointer not aligned to block size"; 771 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 772 return "input pointer not aligned to block size"; 773 case MLX5_CMD_DELIVERY_STAT_FW_ERR: 774 return "firmware internal error"; 775 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 776 return "command input length error"; 777 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 778 return "command ouput length error"; 779 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 780 return "reserved fields not cleared"; 781 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 782 return "bad command descriptor type"; 783 default: 784 return "unknown status code"; 785 } 786 } 787 788 static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 789 { 790 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); 791 792 return be16_to_cpu(hdr->opcode); 793 } 794 795 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 796 { 797 int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 798 struct mlx5_cmd *cmd = &dev->cmd; 799 int err; 800 801 if (cmd->mode == CMD_MODE_POLLING) { 802 wait_for_completion(&ent->done); 803 err = ent->ret; 804 } else { 805 if (!wait_for_completion_timeout(&ent->done, timeout)) 806 err = -ETIMEDOUT; 807 else 808 err = 0; 809 } 810 if (err == -ETIMEDOUT) { 811 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 812 mlx5_command_str(msg_to_opcode(ent->in)), 813 msg_to_opcode(ent->in)); 814 } 815 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 816 err, deliv_status_to_str(ent->status), ent->status); 817 818 return err; 819 } 820 821 /* Notes: 822 * 1. Callback functions may not sleep 823 * 2. page queue commands do not support asynchrous completion 824 */ 825 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 826 struct mlx5_cmd_msg *out, void *uout, int uout_size, 827 mlx5_cmd_cbk_t callback, 828 void *context, int page_queue, u8 *status) 829 { 830 struct mlx5_cmd *cmd = &dev->cmd; 831 struct mlx5_cmd_work_ent *ent; 832 struct mlx5_cmd_stats *stats; 833 int err = 0; 834 s64 ds; 835 u16 op; 836 837 if (callback && page_queue) 838 return -EINVAL; 839 840 ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, 841 page_queue); 842 if (IS_ERR(ent)) 843 return PTR_ERR(ent); 844 845 if (!callback) 846 init_completion(&ent->done); 847 848 INIT_WORK(&ent->work, cmd_work_handler); 849 if (page_queue) { 850 cmd_work_handler(&ent->work); 851 } else if (!queue_work(cmd->wq, &ent->work)) { 852 mlx5_core_warn(dev, "failed to queue work\n"); 853 err = -ENOMEM; 854 goto out_free; 855 } 856 857 if (!callback) { 858 err = wait_func(dev, ent); 859 if (err == -ETIMEDOUT) 860 goto out; 861 862 ds = ent->ts2 - ent->ts1; 863 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); 864 if (op < ARRAY_SIZE(cmd->stats)) { 865 stats = &cmd->stats[op]; 866 spin_lock_irq(&stats->lock); 867 stats->sum += ds; 868 ++stats->n; 869 spin_unlock_irq(&stats->lock); 870 } 871 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 872 "fw exec time for %s is %lld nsec\n", 873 mlx5_command_str(op), (long long)ds); 874 *status = ent->status; 875 free_cmd(ent); 876 } 877 878 return err; 879 880 out_free: 881 free_cmd(ent); 882 out: 883 return err; 884 } 885 886 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) 887 { 888 struct mlx5_cmd_prot_block *block; 889 struct mlx5_cmd_mailbox *next; 890 int copy; 891 892 if (!to || !from) 893 return -ENOMEM; 894 895 copy = min_t(int, size, sizeof(to->first.data)); 896 memcpy(to->first.data, from, copy); 897 size -= copy; 898 from += copy; 899 900 next = to->next; 901 while (size) { 902 if (!next) { 903 /* this is a BUG */ 904 return -ENOMEM; 905 } 906 907 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 908 block = next->buf; 909 memcpy(block->data, from, copy); 910 from += copy; 911 size -= copy; 912 next = next->next; 913 } 914 915 return 0; 916 } 917 918 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 919 { 920 struct mlx5_cmd_prot_block *block; 921 struct mlx5_cmd_mailbox *next; 922 int copy; 923 924 if (!to || !from) 925 return -ENOMEM; 926 927 copy = min_t(int, size, sizeof(from->first.data)); 928 memcpy(to, from->first.data, copy); 929 size -= copy; 930 to += copy; 931 932 next = from->next; 933 while (size) { 934 if (!next) { 935 /* this is a BUG */ 936 return -ENOMEM; 937 } 938 939 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 940 block = next->buf; 941 942 memcpy(to, block->data, copy); 943 to += copy; 944 size -= copy; 945 next = next->next; 946 } 947 948 return 0; 949 } 950 951 static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, 952 gfp_t flags) 953 { 954 struct mlx5_cmd_mailbox *mailbox; 955 956 mailbox = kmalloc(sizeof(*mailbox), flags); 957 if (!mailbox) 958 return ERR_PTR(-ENOMEM); 959 960 mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, 961 &mailbox->dma); 962 if (!mailbox->buf) { 963 mlx5_core_dbg(dev, "failed allocation\n"); 964 kfree(mailbox); 965 return ERR_PTR(-ENOMEM); 966 } 967 memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); 968 mailbox->next = NULL; 969 970 return mailbox; 971 } 972 973 static void free_cmd_box(struct mlx5_core_dev *dev, 974 struct mlx5_cmd_mailbox *mailbox) 975 { 976 pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); 977 kfree(mailbox); 978 } 979 980 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, 981 gfp_t flags, int size) 982 { 983 struct mlx5_cmd_mailbox *tmp, *head = NULL; 984 struct mlx5_cmd_prot_block *block; 985 struct mlx5_cmd_msg *msg; 986 int blen; 987 int err; 988 int n; 989 int i; 990 991 msg = kzalloc(sizeof(*msg), flags); 992 if (!msg) 993 return ERR_PTR(-ENOMEM); 994 995 blen = size - min_t(int, sizeof(msg->first.data), size); 996 n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; 997 998 for (i = 0; i < n; i++) { 999 tmp = alloc_cmd_box(dev, flags); 1000 if (IS_ERR(tmp)) { 1001 mlx5_core_warn(dev, "failed allocating block\n"); 1002 err = PTR_ERR(tmp); 1003 goto err_alloc; 1004 } 1005 1006 block = tmp->buf; 1007 tmp->next = head; 1008 block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); 1009 block->block_num = cpu_to_be32(n - i - 1); 1010 head = tmp; 1011 } 1012 msg->next = head; 1013 msg->len = size; 1014 return msg; 1015 1016 err_alloc: 1017 while (head) { 1018 tmp = head->next; 1019 free_cmd_box(dev, head); 1020 head = tmp; 1021 } 1022 kfree(msg); 1023 1024 return ERR_PTR(err); 1025 } 1026 1027 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 1028 struct mlx5_cmd_msg *msg) 1029 { 1030 struct mlx5_cmd_mailbox *head = msg->next; 1031 struct mlx5_cmd_mailbox *next; 1032 1033 while (head) { 1034 next = head->next; 1035 free_cmd_box(dev, head); 1036 head = next; 1037 } 1038 kfree(msg); 1039 } 1040 1041 static void set_wqname(struct mlx5_core_dev *dev) 1042 { 1043 struct mlx5_cmd *cmd = &dev->cmd; 1044 1045 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", 1046 dev_name(&dev->pdev->dev)); 1047 } 1048 1049 static void clean_debug_files(struct mlx5_core_dev *dev) 1050 { 1051 } 1052 1053 1054 void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1055 { 1056 struct mlx5_cmd *cmd = &dev->cmd; 1057 int i; 1058 1059 for (i = 0; i < cmd->max_reg_cmds; i++) 1060 down(&cmd->sem); 1061 1062 down(&cmd->pages_sem); 1063 1064 flush_workqueue(cmd->wq); 1065 1066 cmd->mode = CMD_MODE_EVENTS; 1067 1068 up(&cmd->pages_sem); 1069 for (i = 0; i < cmd->max_reg_cmds; i++) 1070 up(&cmd->sem); 1071 } 1072 1073 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1074 { 1075 struct mlx5_cmd *cmd = &dev->cmd; 1076 1077 synchronize_irq(dev->priv.eq_table.pages_eq.irqn); 1078 flush_workqueue(dev->priv.pg_wq); 1079 cmd->moving_to_polling = 1; 1080 flush_workqueue(cmd->wq); 1081 cmd->mode = CMD_MODE_POLLING; 1082 cmd->moving_to_polling = 0; 1083 } 1084 1085 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1086 { 1087 unsigned long flags; 1088 1089 if (msg->cache) { 1090 spin_lock_irqsave(&msg->cache->lock, flags); 1091 list_add_tail(&msg->list, &msg->cache->head); 1092 spin_unlock_irqrestore(&msg->cache->lock, flags); 1093 } else { 1094 mlx5_free_cmd_msg(dev, msg); 1095 } 1096 } 1097 1098 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) 1099 { 1100 struct mlx5_cmd *cmd = &dev->cmd; 1101 struct mlx5_cmd_work_ent *ent; 1102 mlx5_cmd_cbk_t callback; 1103 void *context; 1104 int err; 1105 int i; 1106 s64 ds; 1107 struct mlx5_cmd_stats *stats; 1108 unsigned long flags; 1109 1110 for (i = 0; i < (1 << cmd->log_sz); i++) { 1111 if (test_bit(i, &vector)) { 1112 struct semaphore *sem; 1113 1114 ent = cmd->ent_arr[i]; 1115 if (ent->page_queue) 1116 sem = &cmd->pages_sem; 1117 else 1118 sem = &cmd->sem; 1119 ent->ts2 = ktime_get_ns(); 1120 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); 1121 dump_command(dev, ent, 0); 1122 if (!ent->ret) { 1123 if (!cmd->checksum_disabled) 1124 ent->ret = verify_signature(ent); 1125 else 1126 ent->ret = 0; 1127 ent->status = ent->lay->status_own >> 1; 1128 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", 1129 ent->ret, deliv_status_to_str(ent->status), ent->status); 1130 } 1131 free_ent(cmd, ent->idx); 1132 if (ent->callback) { 1133 ds = ent->ts2 - ent->ts1; 1134 if (ent->op < ARRAY_SIZE(cmd->stats)) { 1135 stats = &cmd->stats[ent->op]; 1136 spin_lock_irqsave(&stats->lock, flags); 1137 stats->sum += ds; 1138 ++stats->n; 1139 spin_unlock_irqrestore(&stats->lock, flags); 1140 } 1141 1142 callback = ent->callback; 1143 context = ent->context; 1144 err = ent->ret; 1145 if (!err) 1146 err = mlx5_copy_from_msg(ent->uout, 1147 ent->out, 1148 ent->uout_size); 1149 1150 mlx5_free_cmd_msg(dev, ent->out); 1151 free_msg(dev, ent->in); 1152 1153 free_cmd(ent); 1154 callback(err, context); 1155 } else { 1156 complete(&ent->done); 1157 } 1158 up(sem); 1159 } 1160 } 1161 } 1162 EXPORT_SYMBOL(mlx5_cmd_comp_handler); 1163 1164 static int status_to_err(u8 status) 1165 { 1166 return status ? -1 : 0; /* TBD more meaningful codes */ 1167 } 1168 1169 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, 1170 gfp_t gfp) 1171 { 1172 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); 1173 struct mlx5_cmd *cmd = &dev->cmd; 1174 struct cache_ent *ent = NULL; 1175 1176 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) 1177 ent = &cmd->cache.large; 1178 else if (in_size > 16 && in_size <= MED_LIST_SIZE) 1179 ent = &cmd->cache.med; 1180 1181 if (ent) { 1182 spin_lock_irq(&ent->lock); 1183 if (!list_empty(&ent->head)) { 1184 msg = list_entry(ent->head.next, struct mlx5_cmd_msg, 1185 list); 1186 /* For cached lists, we must explicitly state what is 1187 * the real size 1188 */ 1189 msg->len = in_size; 1190 list_del(&msg->list); 1191 } 1192 spin_unlock_irq(&ent->lock); 1193 } 1194 1195 if (IS_ERR(msg)) 1196 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); 1197 1198 return msg; 1199 } 1200 1201 static int is_manage_pages(struct mlx5_inbox_hdr *in) 1202 { 1203 return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; 1204 } 1205 1206 static int cmd_exec_helper(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1207 int out_size, mlx5_cmd_cbk_t callback, void *context) 1208 { 1209 struct mlx5_cmd_msg *inb; 1210 struct mlx5_cmd_msg *outb; 1211 int pages_queue; 1212 gfp_t gfp; 1213 int err; 1214 u8 status = 0; 1215 1216 pages_queue = is_manage_pages(in); 1217 gfp = callback ? GFP_ATOMIC : GFP_KERNEL; 1218 1219 inb = alloc_msg(dev, in_size, gfp); 1220 if (IS_ERR(inb)) { 1221 err = PTR_ERR(inb); 1222 return err; 1223 } 1224 1225 err = mlx5_copy_to_msg(inb, in, in_size); 1226 if (err) { 1227 mlx5_core_warn(dev, "err %d\n", err); 1228 goto out_in; 1229 } 1230 1231 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); 1232 if (IS_ERR(outb)) { 1233 err = PTR_ERR(outb); 1234 goto out_in; 1235 } 1236 1237 err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, 1238 pages_queue, &status); 1239 if (err) 1240 goto out_out; 1241 1242 mlx5_core_dbg(dev, "err %d, status %d\n", err, status); 1243 if (status) { 1244 err = status_to_err(status); 1245 goto out_out; 1246 } 1247 1248 if (callback) 1249 return err; 1250 1251 err = mlx5_copy_from_msg(out, outb, out_size); 1252 1253 out_out: 1254 mlx5_free_cmd_msg(dev, outb); 1255 1256 out_in: 1257 free_msg(dev, inb); 1258 return err; 1259 } 1260 1261 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1262 int out_size) 1263 { 1264 return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL); 1265 } 1266 EXPORT_SYMBOL(mlx5_cmd_exec); 1267 1268 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 1269 void *out, int out_size, mlx5_cmd_cbk_t callback, 1270 void *context) 1271 { 1272 return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context); 1273 } 1274 EXPORT_SYMBOL(mlx5_cmd_exec_cb); 1275 1276 static void destroy_msg_cache(struct mlx5_core_dev *dev) 1277 { 1278 struct mlx5_cmd *cmd = &dev->cmd; 1279 struct mlx5_cmd_msg *msg; 1280 struct mlx5_cmd_msg *n; 1281 1282 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { 1283 list_del(&msg->list); 1284 mlx5_free_cmd_msg(dev, msg); 1285 } 1286 1287 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { 1288 list_del(&msg->list); 1289 mlx5_free_cmd_msg(dev, msg); 1290 } 1291 } 1292 1293 static int create_msg_cache(struct mlx5_core_dev *dev) 1294 { 1295 struct mlx5_cmd *cmd = &dev->cmd; 1296 struct mlx5_cmd_msg *msg; 1297 int err; 1298 int i; 1299 1300 spin_lock_init(&cmd->cache.large.lock); 1301 INIT_LIST_HEAD(&cmd->cache.large.head); 1302 spin_lock_init(&cmd->cache.med.lock); 1303 INIT_LIST_HEAD(&cmd->cache.med.head); 1304 1305 for (i = 0; i < NUM_LONG_LISTS; i++) { 1306 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); 1307 if (IS_ERR(msg)) { 1308 err = PTR_ERR(msg); 1309 goto ex_err; 1310 } 1311 msg->cache = &cmd->cache.large; 1312 list_add_tail(&msg->list, &cmd->cache.large.head); 1313 } 1314 1315 for (i = 0; i < NUM_MED_LISTS; i++) { 1316 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); 1317 if (IS_ERR(msg)) { 1318 err = PTR_ERR(msg); 1319 goto ex_err; 1320 } 1321 msg->cache = &cmd->cache.med; 1322 list_add_tail(&msg->list, &cmd->cache.med.head); 1323 } 1324 1325 return 0; 1326 1327 ex_err: 1328 destroy_msg_cache(dev); 1329 return err; 1330 } 1331 1332 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1333 { 1334 struct device *ddev = &dev->pdev->dev; 1335 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, 1336 &cmd->alloc_dma, GFP_KERNEL); 1337 if (!cmd->cmd_alloc_buf) 1338 return -ENOMEM; 1339 1340 /* make sure it is aligned to 4K */ 1341 if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) { 1342 cmd->cmd_buf = cmd->cmd_alloc_buf; 1343 cmd->dma = cmd->alloc_dma; 1344 cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE; 1345 return 0; 1346 } 1347 1348 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, cmd->alloc_dma); 1349 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 1350 &cmd->alloc_dma, GFP_KERNEL); 1351 if (!cmd->cmd_alloc_buf) 1352 return -ENOMEM; 1353 1354 cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE); 1355 cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE); 1356 cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1; 1357 return 0; 1358 } 1359 1360 static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1361 { 1362 struct device *ddev = &dev->pdev->dev; 1363 dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, cmd->alloc_dma); 1364 } 1365 1366 int mlx5_cmd_init(struct mlx5_core_dev *dev) 1367 { 1368 int size = sizeof(struct mlx5_cmd_prot_block); 1369 int align = roundup_pow_of_two(size); 1370 struct mlx5_cmd *cmd = &dev->cmd; 1371 u32 cmd_h, cmd_l; 1372 u16 cmd_if_rev; 1373 int err; 1374 int i; 1375 1376 cmd_if_rev = cmdif_rev_get(dev); 1377 if (cmd_if_rev != CMD_IF_REV) { 1378 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev); 1379 return -EINVAL; 1380 } 1381 1382 cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); 1383 if (!cmd->pool) 1384 return -ENOMEM; 1385 1386 err = alloc_cmd_page(dev, cmd); 1387 if (err) 1388 goto err_free_pool; 1389 1390 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 1391 cmd->log_sz = cmd_l >> 4 & 0xf; 1392 cmd->log_stride = cmd_l & 0xf; 1393 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { 1394 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz); 1395 err = -EINVAL; 1396 goto err_free_page; 1397 } 1398 1399 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 1400 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n"); 1401 err = -EINVAL; 1402 goto err_free_page; 1403 } 1404 1405 cmd->checksum_disabled = 1; 1406 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1407 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1408 1409 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 1410 if (cmd->cmdif_rev > CMD_IF_REV) { 1411 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev); 1412 err = -ENOTSUPP; 1413 goto err_free_page; 1414 } 1415 1416 spin_lock_init(&cmd->alloc_lock); 1417 spin_lock_init(&cmd->token_lock); 1418 for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) 1419 spin_lock_init(&cmd->stats[i].lock); 1420 1421 sema_init(&cmd->sem, cmd->max_reg_cmds); 1422 sema_init(&cmd->pages_sem, 1); 1423 1424 cmd_h = (u32)((u64)(cmd->dma) >> 32); 1425 cmd_l = (u32)(cmd->dma); 1426 if (cmd_l & 0xfff) { 1427 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n"); 1428 err = -ENOMEM; 1429 goto err_free_page; 1430 } 1431 1432 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); 1433 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); 1434 1435 /* Make sure firmware sees the complete address before we proceed */ 1436 wmb(); 1437 1438 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 1439 1440 cmd->mode = CMD_MODE_POLLING; 1441 1442 err = create_msg_cache(dev); 1443 if (err) { 1444 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n"); 1445 goto err_free_page; 1446 } 1447 1448 set_wqname(dev); 1449 cmd->wq = create_singlethread_workqueue(cmd->wq_name); 1450 if (!cmd->wq) { 1451 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n"); 1452 err = -ENOMEM; 1453 goto err_cache; 1454 } 1455 1456 return 0; 1457 1458 err_cache: 1459 destroy_msg_cache(dev); 1460 1461 err_free_page: 1462 free_cmd_page(dev, cmd); 1463 1464 err_free_pool: 1465 pci_pool_destroy(cmd->pool); 1466 1467 return err; 1468 } 1469 EXPORT_SYMBOL(mlx5_cmd_init); 1470 1471 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) 1472 { 1473 struct mlx5_cmd *cmd = &dev->cmd; 1474 1475 clean_debug_files(dev); 1476 destroy_workqueue(cmd->wq); 1477 destroy_msg_cache(dev); 1478 free_cmd_page(dev, cmd); 1479 pci_pool_destroy(cmd->pool); 1480 } 1481 EXPORT_SYMBOL(mlx5_cmd_cleanup); 1482 1483 static const char *cmd_status_str(u8 status) 1484 { 1485 switch (status) { 1486 case MLX5_CMD_STAT_OK: 1487 return "OK"; 1488 case MLX5_CMD_STAT_INT_ERR: 1489 return "internal error"; 1490 case MLX5_CMD_STAT_BAD_OP_ERR: 1491 return "bad operation"; 1492 case MLX5_CMD_STAT_BAD_PARAM_ERR: 1493 return "bad parameter"; 1494 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: 1495 return "bad system state"; 1496 case MLX5_CMD_STAT_BAD_RES_ERR: 1497 return "bad resource"; 1498 case MLX5_CMD_STAT_RES_BUSY: 1499 return "resource busy"; 1500 case MLX5_CMD_STAT_LIM_ERR: 1501 return "limits exceeded"; 1502 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: 1503 return "bad resource state"; 1504 case MLX5_CMD_STAT_IX_ERR: 1505 return "bad index"; 1506 case MLX5_CMD_STAT_NO_RES_ERR: 1507 return "no resources"; 1508 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: 1509 return "bad input length"; 1510 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: 1511 return "bad output length"; 1512 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: 1513 return "bad QP state"; 1514 case MLX5_CMD_STAT_BAD_PKT_ERR: 1515 return "bad packet (discarded)"; 1516 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: 1517 return "bad size too many outstanding CQEs"; 1518 default: 1519 return "unknown status"; 1520 } 1521 } 1522 1523 static int cmd_status_to_err_helper(u8 status) 1524 { 1525 switch (status) { 1526 case MLX5_CMD_STAT_OK: return 0; 1527 case MLX5_CMD_STAT_INT_ERR: return -EIO; 1528 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 1529 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; 1530 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 1531 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 1532 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 1533 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; 1534 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 1535 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 1536 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 1537 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; 1538 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; 1539 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; 1540 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; 1541 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 1542 default: return -EIO; 1543 } 1544 } 1545 1546 /* this will be available till all the commands use set/get macros */ 1547 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) 1548 { 1549 if (!hdr->status) 1550 return 0; 1551 1552 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome)); 1553 1554 return cmd_status_to_err_helper(hdr->status); 1555 } 1556 1557 int mlx5_cmd_status_to_err_v2(void *ptr) 1558 { 1559 u32 syndrome; 1560 u8 status; 1561 1562 status = be32_to_cpu(*(__be32 *)ptr) >> 24; 1563 if (!status) 1564 return 0; 1565 1566 syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); 1567 1568 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome); 1569 1570 return cmd_status_to_err_helper(status); 1571 } 1572