/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include "hinic_compat.h"
#include "hinic_csr.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_mgmt.h"

#define BUF_OUT_DEFAULT_SIZE		1

#define MAX_PF_MGMT_BUF_SIZE		2048UL

#define MGMT_MSG_SIZE_MIN		20
#define MGMT_MSG_SIZE_STEP		16
#define MGMT_MSG_RSVD_FOR_DEV		8

#define MGMT_MSG_TIMEOUT		5000	/* millisecond */

#define SYNC_MSG_ID_MASK		0x1FF
#define ASYNC_MSG_ID_MASK		0x1FF
#define ASYNC_MSG_FLAG			0x200

#define MSG_NO_RESP			0xFFFF

#define MAX_MSG_SZ			2016

#define MSG_SZ_IS_VALID(in_size)	((in_size) <= MAX_MSG_SZ)

#define SYNC_MSG_ID(pf_to_mgmt)		((pf_to_mgmt)->sync_msg_id)

#define SYNC_MSG_ID_INC(pf_to_mgmt)	(SYNC_MSG_ID(pf_to_mgmt) = \
			(SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)

#define ASYNC_MSG_ID(pf_to_mgmt)	((pf_to_mgmt)->async_msg_id)

#define ASYNC_MSG_ID_INC(pf_to_mgmt)	(ASYNC_MSG_ID(pf_to_mgmt) = \
			((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
			| ASYNC_MSG_FLAG)

#define HINIC_SEQ_ID_MAX_VAL		42
#define HINIC_MSG_SEG_LEN		48

#define GET_CURR_AEQ_ELEM(eq)		GET_AEQ_ELEM((eq), (eq)->cons_idx)

#define EQ_ELEM_DESC_TYPE_SHIFT		0
#define EQ_ELEM_DESC_SRC_SHIFT		7
#define EQ_ELEM_DESC_SIZE_SHIFT		8
#define EQ_ELEM_DESC_WRAPPED_SHIFT	31

#define EQ_ELEM_DESC_TYPE_MASK		0x7FU
#define EQ_ELEM_DESC_SRC_MASK		0x1U
#define EQ_ELEM_DESC_SIZE_MASK		0xFFU
#define EQ_ELEM_DESC_WRAPPED_MASK	0x1U

#define EQ_MSIX_RESEND_TIMER_CLEAR	1

#define EQ_ELEM_DESC_GET(val, member)		\
	(((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \
	EQ_ELEM_DESC_##member##_MASK)

#define HINIC_MGMT_CHANNEL_STATUS_SHIFT		0x0
#define HINIC_MGMT_CHANNEL_STATUS_MASK		0x1

#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member)	\
	(((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK)

#define HINIC_MSG_TO_MGMT_MAX_LEN		2016
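
/* Layout of a command buffer handed to the API cmd chain (see
 * prepare_mgmt_cmd() below): MGMT_MSG_RSVD_FOR_DEV bytes reserved for the
 * device, followed by the 64-bit message header, followed by the message
 * data. Messages received from the mgmt cpu arrive in segments of at most
 * HINIC_MSG_SEG_LEN bytes identified by their sequence id (see
 * recv_mgmt_msg_handler() below).
 */
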
/**
 * mgmt_msg_len - calculate the total message length
 * @msg_data_len: the length of the message data
 * Return: the total message length
 **/
static u16 mgmt_msg_len(u16 msg_data_len)
{
	/* u64 - the size of the header */
	u16 msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) +
			     msg_data_len);

	if (msg_size > MGMT_MSG_SIZE_MIN)
		msg_size = MGMT_MSG_SIZE_MIN +
			ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
			      MGMT_MSG_SIZE_STEP);
	else
		msg_size = MGMT_MSG_SIZE_MIN;

	return msg_size;
}

/**
 * prepare_header - prepare the header of the message
 * @pf_to_mgmt: PF to MGMT channel
 * @header: pointer of the header to prepare
 * @msg_len: the length of the message
 * @mod: module in the chip that will get the message
 * @ack_type: indicate whether the message needs an ack
 * @direction: the direction of the original message
 * @cmd: the command to do
 * @msg_id: message id
 **/
static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
			   u64 *header, int msg_len, enum hinic_mod_type mod,
			   enum hinic_msg_ack_type ack_type,
			   enum hinic_msg_direction_type direction,
			   u8 cmd, u32 msg_id)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif;

	*header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
		HINIC_MSG_HEADER_SET(mod, MODULE) |
		HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
		HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
		HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
		HINIC_MSG_HEADER_SET(0, SEQID) |
		HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
		HINIC_MSG_HEADER_SET(direction, DIRECTION) |
		HINIC_MSG_HEADER_SET(cmd, CMD) |
		HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
		HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
		HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
}

/**
 * prepare_mgmt_cmd - prepare the mgmt command
 * @mgmt_cmd: pointer to the command to prepare
 * @header: pointer of the header to prepare
 * @msg: the data of the message
 * @msg_len: the length of the message
 **/
static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg,
			     int msg_len)
{
	u32 cmd_buf_max = MAX_PF_MGMT_BUF_SIZE;

	memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);

	mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
	cmd_buf_max -= MGMT_MSG_RSVD_FOR_DEV;
	memcpy(mgmt_cmd, header, sizeof(*header));

	mgmt_cmd += sizeof(*header);
	cmd_buf_max -= sizeof(*header);
	memcpy(mgmt_cmd, msg, msg_len);
}

/**
 * alloc_recv_msg - allocate received message memory
 * @recv_msg: pointer that will hold the allocated data
 * Return: 0 - success, negative - failure
 **/
static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
{
	int err;

	recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
	if (!recv_msg->msg) {
		PMD_DRV_LOG(ERR, "Allocate recv msg buf failed");
		return -ENOMEM;
	}

	recv_msg->buf_out = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
	if (!recv_msg->buf_out) {
		PMD_DRV_LOG(ERR, "Allocate recv msg output buf failed");
		err = -ENOMEM;
		goto alloc_buf_out_err;
	}

	return 0;

alloc_buf_out_err:
	kfree(recv_msg->msg);
	return err;
}

/**
 * free_recv_msg - free received message memory
 * @recv_msg: pointer that holds the allocated data
 **/
static void free_recv_msg(struct hinic_recv_msg *recv_msg)
{
	kfree(recv_msg->buf_out);
	kfree(recv_msg->msg);
}

/**
 * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 * Return: 0 - success, negative - failure
 **/
static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
{
	int err;

	err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
	if (err) {
		PMD_DRV_LOG(ERR, "Allocate recv msg failed");
		return err;
	}

	err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
	if (err) {
		PMD_DRV_LOG(ERR, "Allocate resp recv msg failed");
		goto alloc_msg_for_resp_err;
	}

	pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
	if (!pf_to_mgmt->async_msg_buf) {
		PMD_DRV_LOG(ERR, "Allocate async msg buf failed");
		err = -ENOMEM;
		goto async_msg_buf_err;
	}

	pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
	if (!pf_to_mgmt->sync_msg_buf) {
		PMD_DRV_LOG(ERR, "Allocate sync msg buf failed");
		err = -ENOMEM;
		goto sync_msg_buf_err;
	}

	return 0;

sync_msg_buf_err:
	kfree(pf_to_mgmt->async_msg_buf);

async_msg_buf_err:
	free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);

alloc_msg_for_resp_err:
	free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);

	return err;
}

/**
 * free_msg_buf - free all the message buffers of PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 **/
static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
{
	kfree(pf_to_mgmt->sync_msg_buf);
	kfree(pf_to_mgmt->async_msg_buf);

	free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
	free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
}

/**
 * send_msg_to_mgmt_async - send async message
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @msg: the data of the message
 * @msg_len: the length of the message
 * @direction: the direction of the original message
 * @resp_msg_id: message id of response
 * Return: 0 - success, negative - failure
 **/
static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
				  enum hinic_mod_type mod, u8 cmd,
				  void *msg, u16 msg_len,
				  enum hinic_msg_direction_type direction,
				  u16 resp_msg_id)
{
	void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
	struct hinic_api_cmd_chain *chain;
	u64 header;
	u16 cmd_size = mgmt_msg_len(msg_len);

	if (direction == HINIC_MSG_RESPONSE)
		prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
			       direction, cmd, resp_msg_id);
	else
		prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
			       direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));

	prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);

	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];

	return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
				   cmd_size);
}

/**
 * send_msg_to_mgmt_sync - send sync message
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @msg: the msg data
 * @msg_len: the msg data length
 * @ack_type: indicate whether the mgmt command needs an ack
 * @direction: the direction of the original message
 * @resp_msg_id: msg id to response for
 * Return: 0 - success, negative - failure
 **/
static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
				 enum hinic_mod_type mod, u8 cmd,
				 void *msg, u16 msg_len,
				 enum hinic_msg_ack_type ack_type,
				 enum hinic_msg_direction_type direction,
				 __rte_unused u16 resp_msg_id)
{
	void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
	struct hinic_api_cmd_chain *chain;
	u64 header;
	u16 cmd_size = mgmt_msg_len(msg_len);

	if (direction == HINIC_MSG_RESPONSE)
		prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
			       direction, cmd, resp_msg_id);
	else
		prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
			       direction, cmd, SYNC_MSG_ID(pf_to_mgmt));

	prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);

	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT];

	return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST,
				   mgmt_cmd, cmd_size);
}

/**
 * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
 * @hwdev: the pointer to the private hardware device object
 * Return: 0 - success, negative - failure
 **/
static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
{
	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
	int err;

	pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
	if (!pf_to_mgmt) {
		PMD_DRV_LOG(ERR, "Allocate pf to mgmt mem failed");
		return -ENOMEM;
	}

	hwdev->pf_to_mgmt = pf_to_mgmt;
	pf_to_mgmt->hwdev = hwdev;

	err = hinic_mutex_init(&pf_to_mgmt->sync_msg_lock, NULL);
	if (err)
		goto mutex_init_err;

	err = alloc_msg_buf(pf_to_mgmt);
	if (err) {
		PMD_DRV_LOG(ERR, "Allocate msg buffers failed");
		goto alloc_msg_buf_err;
	}

	err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
	if (err) {
		PMD_DRV_LOG(ERR, "Init the api cmd chains failed");
		goto api_cmd_init_err;
	}

	return 0;

api_cmd_init_err:
	free_msg_buf(pf_to_mgmt);

alloc_msg_buf_err:
	hinic_mutex_destroy(&pf_to_mgmt->sync_msg_lock);

mutex_init_err:
	kfree(pf_to_mgmt);

	return err;
}

/**
 * hinic_pf_to_mgmt_free - free PF to MGMT channel
 * @hwdev: the pointer to the private hardware device object
 **/
static void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
{
	struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;

	hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
	free_msg_buf(pf_to_mgmt);
	hinic_mutex_destroy(&pf_to_mgmt->sync_msg_lock);
	kfree(pf_to_mgmt);
}
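
/**
 * hinic_pf_to_mgmt_sync - send a sync message and wait for the mgmt response
 * @hwdev: the pointer to the private hardware device object
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @buf_in: the input message data
 * @in_size: the input message data length
 * @buf_out: buffer for the response data, may be NULL
 * @out_size: response buffer size on input, response length on output
 * @timeout: response timeout in milliseconds, 0 means MGMT_MSG_TIMEOUT
 * Return: 0 - success, negative - failure
 **/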
static int
hinic_pf_to_mgmt_sync(struct hinic_hwdev *hwdev,
		      enum hinic_mod_type mod, u8 cmd, void *buf_in,
		      u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
	struct hinic_recv_msg *recv_msg;
	u32 timeo;
	int err, i;

	pthread_mutex_lock(&pf_to_mgmt->sync_msg_lock);

	SYNC_MSG_ID_INC(pf_to_mgmt);
	recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;

	err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
				    HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
				    MSG_NO_RESP);
	if (err) {
		PMD_DRV_LOG(ERR, "Send msg to mgmt failed");
		goto unlock_sync_msg;
	}

	timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
	for (i = 0; i < pf_to_mgmt->rx_aeq->poll_retry_nr; i++) {
		err = hinic_aeq_poll_msg(pf_to_mgmt->rx_aeq, timeo, NULL);
		if (err) {
			PMD_DRV_LOG(ERR, "Poll mgmt rsp timeout, mod=%d cmd=%d msg_id=%u rc=%d",
				    mod, cmd, pf_to_mgmt->sync_msg_id, err);
			err = -ETIMEDOUT;
			hinic_dump_aeq_info(hwdev);
			goto unlock_sync_msg;
		} else {
			if (mod == recv_msg->mod && cmd == recv_msg->cmd &&
			    recv_msg->msg_id == pf_to_mgmt->sync_msg_id) {
				/* the expected response polled */
				break;
			}
			PMD_DRV_LOG(ERR, "AEQ[%d] poll(mod=%d, cmd=%d, msg_id=%u) an "
				    "unexpected(mod=%d, cmd=%d, msg_id=%u) response",
				    pf_to_mgmt->rx_aeq->q_id, mod, cmd,
				    pf_to_mgmt->sync_msg_id, recv_msg->mod,
				    recv_msg->cmd, recv_msg->msg_id);
		}
	}

	if (i == pf_to_mgmt->rx_aeq->poll_retry_nr) {
		PMD_DRV_LOG(ERR, "Get %d unexpected mgmt rsp from AEQ[%d], poll mgmt rsp failed",
			    i, pf_to_mgmt->rx_aeq->q_id);
		err = -EBADMSG;
		goto unlock_sync_msg;
	}

	rte_smp_rmb();
	if (recv_msg->msg_len && buf_out && out_size) {
		if (recv_msg->msg_len <= *out_size) {
			memcpy(buf_out, recv_msg->msg,
			       recv_msg->msg_len);
			*out_size = recv_msg->msg_len;
		} else {
			PMD_DRV_LOG(ERR, "Mgmt rsp's msg len:%u overflow.",
				    recv_msg->msg_len);
			err = -ERANGE;
		}
	}

unlock_sync_msg:
	if (err && out_size)
		*out_size = 0;
	pthread_mutex_unlock(&pf_to_mgmt->sync_msg_lock);
	return err;
}

static int hinic_get_mgmt_channel_status(void *hwdev)
{
	struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
	u32 val;

	val = hinic_hwif_read_reg(hwif, HINIC_ICPL_RESERVD_ADDR);

	return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
}

int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
			   void *buf_in, u16 in_size,
			   void *buf_out, u16 *out_size, u32 timeout)
{
	int rc = HINIC_ERROR;

	if (!hwdev || in_size > HINIC_MSG_TO_MGMT_MAX_LEN)
		return -EINVAL;

	/* If status is hot upgrading, don't send message to mgmt */
	if (hinic_get_mgmt_channel_status(hwdev))
		return -EPERM;

	rc = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in,
				   in_size, buf_out, out_size,
				   timeout);

	return rc;
}

int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
			     void *buf_in, u16 in_size,
			     __rte_unused void *buf_out,
			     __rte_unused u16 *out_size)
{
	struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
				((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
	int err = -EINVAL;

	if (!MSG_SZ_IS_VALID(in_size)) {
		PMD_DRV_LOG(ERR, "Mgmt msg buffer size is invalid");
		return err;
	}

	pthread_mutex_lock(&pf_to_mgmt->sync_msg_lock);

	err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
				    HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
				    MSG_NO_RESP);

	pthread_mutex_unlock(&pf_to_mgmt->sync_msg_lock);

	return err;
}
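
/**
 * check_mgmt_seq_id_and_seg_len - validate a received message segment
 * @recv_msg: received message being reassembled
 * @seq_id: sequence id of the current segment
 * @seg_len: length of the current segment
 * Return: true - the segment is valid and in order, false - otherwise
 **/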
static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
					  u8 seq_id, u8 seg_len)
{
	if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN)
		return false;

	if (seq_id == 0) {
		recv_msg->sed_id = seq_id;
	} else {
		if (seq_id != recv_msg->sed_id + 1) {
			recv_msg->sed_id = 0;
			return false;
		}
		recv_msg->sed_id = seq_id;
	}

	return true;
}

/**
 * hinic_mgmt_recv_msg_handler - handler for a message from the mgmt cpu
 * @pf_to_mgmt: PF to MGMT channel
 * @recv_msg: received message details
 * @param: customized parameter
 **/
static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
					struct hinic_recv_msg *recv_msg,
					void *param)
{
	void *buf_out = recv_msg->buf_out;
	u16 out_size = 0;

	switch (recv_msg->mod) {
	case HINIC_MOD_COMM:
		hinic_comm_async_event_handle(pf_to_mgmt->hwdev,
					      recv_msg->cmd, recv_msg->msg,
					      recv_msg->msg_len,
					      buf_out, &out_size);
		break;
	case HINIC_MOD_L2NIC:
		hinic_l2nic_async_event_handle(pf_to_mgmt->hwdev, param,
					       recv_msg->cmd, recv_msg->msg,
					       recv_msg->msg_len,
					       buf_out, &out_size);
		break;
	case HINIC_MOD_HILINK:
		hinic_hilink_async_event_handle(pf_to_mgmt->hwdev,
						recv_msg->cmd, recv_msg->msg,
						recv_msg->msg_len,
						buf_out, &out_size);
		break;
	default:
		PMD_DRV_LOG(ERR, "No handler, mod = %d", recv_msg->mod);
		break;
	}

	if (!recv_msg->async_mgmt_to_pf) {
		if (!out_size)
			out_size = BUF_OUT_DEFAULT_SIZE;

		/* MGMT sent sync msg, send the response */
		(void)send_msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod,
					     recv_msg->cmd, buf_out, out_size,
					     HINIC_MSG_RESPONSE,
					     recv_msg->msg_id);
	}
}

/**
 * recv_mgmt_msg_handler - handle a message from the mgmt cpu
 * @pf_to_mgmt: PF to MGMT channel
 * @header: the header of the message
 * @recv_msg: received message details
 * @param: customized parameter
 * Return: HINIC_RECV_DONE when the aeqe carries a response message,
 * HINIC_RECV_NEXT_AEQE when the message is wrong, is not the last segment,
 * or was a direct-send message that has been dispatched
 **/
static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
				 u8 *header, struct hinic_recv_msg *recv_msg,
				 void *param)
{
	u64 msg_header = *((u64 *)header);
	void *msg_body = header + sizeof(msg_header);
	u8 *dest_msg;
	u8 seq_id, seq_len;
	u32 msg_buf_max = MAX_PF_MGMT_BUF_SIZE;

	seq_id = HINIC_MSG_HEADER_GET(msg_header, SEQID);
	seq_len = HINIC_MSG_HEADER_GET(msg_header, SEG_LEN);

	if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
		PMD_DRV_LOG(ERR,
			"Mgmt msg sequence and segment check fail, "
			"func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x",
			hinic_global_func_id(pf_to_mgmt->hwdev),
			recv_msg->sed_id, seq_id, seq_len);
		return HINIC_RECV_NEXT_AEQE;
	}

	dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN;
	msg_buf_max -= seq_id * HINIC_MSG_SEG_LEN;
	memcpy(dest_msg, msg_body, seq_len);

	if (!HINIC_MSG_HEADER_GET(msg_header, LAST))
		return HINIC_RECV_NEXT_AEQE;

	recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD);
	recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE);
	recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(msg_header,
							  ASYNC_MGMT_TO_PF);
	recv_msg->msg_len = HINIC_MSG_HEADER_GET(msg_header, MSG_LEN);
	recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID);

	if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE)
		return HINIC_RECV_DONE;

	hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param);

	return HINIC_RECV_NEXT_AEQE;
}

/**
 * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event
 * @hwdev: the pointer to the private hardware device object
 * @header: the header of the message
 * @size: unused
 * @param: customized parameter
 * Return: HINIC_RECV_DONE when the aeqe carries a response message,
 * HINIC_RECV_NEXT_AEQE otherwise
 **/
static int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header,
				       __rte_unused u8 size, void *param)
{
	struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
				((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
	struct hinic_recv_msg *recv_msg;

	recv_msg = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
		    HINIC_MSG_DIRECT_SEND) ?
		    &pf_to_mgmt->recv_msg_from_mgmt :
		    &pf_to_mgmt->recv_resp_msg_from_mgmt;

	return recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg, param);
}
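
/**
 * hinic_handle_aeqe - dispatch an aeq element to its event handler
 * @handle: the pointer to the private hardware device object
 * @event: type of the aeq event
 * @data: data of the aeq element
 * @size: size of the aeq element data
 * @param: customized parameter
 * Return: the result of the event handler, or HINIC_RECV_NEXT_AEQE for an
 * unknown event type
 **/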
static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event,
			     u8 *data, u8 size, void *param)
{
	int rc = 0;

	switch (event) {
	case HINIC_MSG_FROM_MGMT_CPU:
		rc = hinic_mgmt_msg_aeqe_handler(handle, data, size, param);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unknown event type: 0x%x, size: %d",
			    event, size);
		rc = HINIC_RECV_NEXT_AEQE;
		break;
	}

	return rc;
}

/**
 * hinic_aeq_poll_msg - poll one or more aeq elements and call the dedicated
 * handler for each of them
 * @eq: aeq of the chip
 * @timeout: 0 - poll all aeqe in the eq, used in interrupt mode,
 *	     > 0 - poll the aeq until an aeqe with the 'last' field set to 1
 *	     is handled, used in polling mode
 * @param: customized parameter
 * Return: 0 - success, -EIO - poll timeout, -ENODEV - sw event not supported
 **/
int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param)
{
	struct hinic_aeq_elem *aeqe_pos;
	enum hinic_aeq_type event;
	u32 aeqe_desc = 0;
	u16 i;
	u8 size;
	int done = HINIC_ERROR;
	int err = -EFAULT;
	unsigned long end;

	for (i = 0; ((timeout == 0) && (i < eq->eq_len)) ||
	     ((timeout > 0) && (done != HINIC_OK) && (i < eq->eq_len)); i++) {
		err = -EIO;
		end = jiffies + msecs_to_jiffies(timeout);
		do {
			aeqe_pos = GET_CURR_AEQ_ELEM(eq);
			rte_rmb();

			/* Data in HW is in Big endian Format */
			aeqe_desc = be32_to_cpu(aeqe_pos->desc);

			/* HW updates wrapped bit,
			 * when it adds eq element event
			 */
			if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED)
			    != eq->wrapped) {
				err = 0;
				break;
			}

			if (timeout != 0)
				usleep(1000);
		} while (time_before(jiffies, end));

		if (err != HINIC_OK) /* poll time out */
			break;

		event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
		if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
			PMD_DRV_LOG(ERR, "AEQ sw event not support %d",
				    event);
			return -ENODEV;

		} else {
			size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
			done = hinic_handle_aeqe(eq->hwdev, event,
						 aeqe_pos->aeqe_data,
						 size, param);
		}

		eq->cons_idx++;
		if (eq->cons_idx == eq->eq_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}
	}

	eq_update_ci(eq);

	return err;
}
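
/**
 * hinic_comm_pf_to_mgmt_init - initialize the PF to MGMT channel and bind
 * the aeq used for mgmt responses
 * @hwdev: the pointer to the private hardware device object
 * Return: 0 - success, negative - failure
 **/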
int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
{
	int rc;

	rc = hinic_pf_to_mgmt_init(hwdev);
	if (rc)
		return rc;

	hwdev->pf_to_mgmt->rx_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN];

	return 0;
}

void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
{
	hinic_pf_to_mgmt_free(hwdev);
}

void hinic_dev_handle_aeq_event(struct hinic_hwdev *hwdev, void *param)
{
	struct hinic_eq *aeq = &hwdev->aeqs->aeq[0];

	/* clear resend timer cnt register */
	hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx,
					 EQ_MSIX_RESEND_TIMER_CLEAR);
	(void)hinic_aeq_poll_msg(aeq, 0, param);
}