/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "nicvf_plat.h"

#define NICVF_MBOX_PF_RESPONSE_DELAY_US (1000)

static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
	[NIC_MBOX_MSG_INVALID] = "NIC_MBOX_MSG_INVALID",
	[NIC_MBOX_MSG_READY] = "NIC_MBOX_MSG_READY",
	[NIC_MBOX_MSG_ACK] = "NIC_MBOX_MSG_ACK",
	[NIC_MBOX_MSG_NACK] = "NIC_MBOX_MSG_NACK",
	[NIC_MBOX_MSG_QS_CFG] = "NIC_MBOX_MSG_QS_CFG",
	[NIC_MBOX_MSG_RQ_CFG] = "NIC_MBOX_MSG_RQ_CFG",
	[NIC_MBOX_MSG_SQ_CFG] = "NIC_MBOX_MSG_SQ_CFG",
	[NIC_MBOX_MSG_RQ_DROP_CFG] = "NIC_MBOX_MSG_RQ_DROP_CFG",
	[NIC_MBOX_MSG_SET_MAC] = "NIC_MBOX_MSG_SET_MAC",
	[NIC_MBOX_MSG_SET_MAX_FRS] = "NIC_MBOX_MSG_SET_MAX_FRS",
	[NIC_MBOX_MSG_CPI_CFG] = "NIC_MBOX_MSG_CPI_CFG",
	[NIC_MBOX_MSG_RSS_SIZE] = "NIC_MBOX_MSG_RSS_SIZE",
	[NIC_MBOX_MSG_RSS_CFG] = "NIC_MBOX_MSG_RSS_CFG",
	[NIC_MBOX_MSG_RSS_CFG_CONT] = "NIC_MBOX_MSG_RSS_CFG_CONT",
	[NIC_MBOX_MSG_RQ_BP_CFG] = "NIC_MBOX_MSG_RQ_BP_CFG",
	[NIC_MBOX_MSG_RQ_SW_SYNC] = "NIC_MBOX_MSG_RQ_SW_SYNC",
	[NIC_MBOX_MSG_BGX_LINK_CHANGE] = "NIC_MBOX_MSG_BGX_LINK_CHANGE",
	[NIC_MBOX_MSG_ALLOC_SQS] = "NIC_MBOX_MSG_ALLOC_SQS",
	[NIC_MBOX_MSG_LOOPBACK] = "NIC_MBOX_MSG_LOOPBACK",
	[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
	[NIC_MBOX_MSG_CFG_DONE] = "NIC_MBOX_MSG_CFG_DONE",
	[NIC_MBOX_MSG_SHUTDOWN] = "NIC_MBOX_MSG_SHUTDOWN",
};

static inline const char * __rte_unused
nicvf_mbox_msg_str(int msg)
{
	assert(msg >= 0 && msg < NIC_MBOX_MSG_MAX);
	/* Undefined messages map to NIC_MBOX_MSG_INVALID */
	if (mbox_message[msg] == NULL)
		msg = 0;
	return mbox_message[msg];
}

static inline void
nicvf_mbox_send_msg_to_pf_raw(struct nicvf *nic, struct nic_mbx *mbx)
{
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (uint64_t *)mbx;
	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		nicvf_reg_write(nic, mbx_addr, *mbx_data);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}
	nicvf_mbox_log("msg sent %s (VF%d)",
		       nicvf_mbox_msg_str(mbx->msg.msg), nic->vf_id);
}

static inline void
nicvf_mbox_send_async_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
{
	nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
	/* Messages without ack are racy! */
	nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
}

static inline int
nicvf_mbox_send_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
{
	long timeout;
	long sleep = 10;
	int i, retry = 5;

	for (i = 0; i < retry; i++) {
		nic->pf_acked = false;
		nic->pf_nacked = false;
		nicvf_smp_wmb();

		nicvf_mbox_send_msg_to_pf_raw(nic, mbx);

		/* Handle the case where the mailbox is used inside interrupt
		 * context, e.g. when a hotplug attach/detach request is
		 * initiated from the secondary process and the primary
		 * process handles it in interrupt context as part of the
		 * multi-process framework.
		 */
		if (rte_thread_is_intr()) {
			nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
			nicvf_reg_poll_interrupts(nic);
		}

		/* Give the PF some time to respond */
		nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
		timeout = NIC_MBOX_MSG_TIMEOUT;
		while (timeout > 0) {
			/* Periodic poll happens from nicvf_interrupt() */
			nicvf_smp_rmb();

			if (nic->pf_nacked)
				return -EINVAL;
			if (nic->pf_acked)
				return 0;

			nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
			timeout -= sleep;
		}
	}
	nicvf_log_error("PF didn't ack msg 0x%02x %s VF%d (%d/%d)",
			mbx->msg.msg, nicvf_mbox_msg_str(mbx->msg.msg),
			nic->vf_id, i, retry);
	return -EBUSY;
}
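
/*
 * Caller-side sketch of the synchronous handshake implemented above
 * (illustrative only, not part of this driver): the sender clears
 * pf_acked/pf_nacked, writes the message, and nicvf_handle_mbx_intr()
 * flips one of the flags when the PF replies.
 *
 *	struct nic_mbx mbx = { .msg = { .msg = NIC_MBOX_MSG_READY } };
 *
 *	switch (nicvf_mbox_send_msg_to_pf(nic, &mbx)) {
 *	case 0:		// PF replied with ACK (or a data-bearing reply)
 *		break;
 *	case -EINVAL:	// PF replied with NIC_MBOX_MSG_NACK
 *		break;
 *	case -EBUSY:	// no reply within the retry budget
 *		break;
 *	}
 */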

int
nicvf_handle_mbx_intr(struct nicvf *nic)
{
	struct nic_mbx mbx;
	uint64_t *mbx_data = (uint64_t *)&mbx;
	uint64_t mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	size_t i;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	/* Overwrite the message so we won't receive it again */
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, 0x0);

	nicvf_mbox_log("msg received id=0x%hhx %s (VF%d)", mbx.msg.msg,
		       nicvf_mbox_msg_str(mbx.msg.msg), nic->vf_id);

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		ether_addr_copy((struct ether_addr *)mbx.nic_cfg.mac_addr,
				(struct ether_addr *)nic->mac_addr);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		assert_primary(nic);
		if (mbx.sqs_alloc.qs_count != nic->sqs_count) {
			nicvf_log_error("Received %" PRIu8 "/%" PRIu8
					" secondary qsets",
					mbx.sqs_alloc.qs_count,
					nic->sqs_count);
			abort();
		}
		for (i = 0; i < mbx.sqs_alloc.qs_count; i++) {
			if (mbx.sqs_alloc.svf[i] != nic->snicvf[i]->vf_id) {
				nicvf_log_error("Received secondary qset[%zu] "
						"ID %" PRIu8 " expected %"
						PRIu8, i, mbx.sqs_alloc.svf[i],
						nic->snicvf[i]->vf_id);
				abort();
			}
		}
		nic->pf_acked = true;
		break;
	default:
		nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s",
				mbx.msg.msg, nicvf_mbox_msg_str(mbx.msg.msg));
		break;
	}
	nicvf_smp_wmb();

	return mbx.msg.msg;
}

/*
 * Checks whether the VF is able to communicate with the PF
 * and also gets the VNIC number this VF is associated with.
 */
int
nicvf_mbox_check_pf_ready(struct nicvf *nic)
{
	struct nic_mbx mbx = { .msg = {.msg = NIC_MBOX_MSG_READY} };

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}
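
/*
 * Probe-time sketch (illustrative; the surrounding init code is
 * hypothetical): a successful READY exchange lets the
 * NIC_MBOX_MSG_READY arm above populate the VF identity fields.
 *
 *	if (nicvf_mbox_check_pf_ready(nic))
 *		return -EBUSY;	// PF unreachable or not ready
 *	// nic->vf_id, nic->node, nic->sqs_mode and nic->mac_addr
 *	// are now valid.
 */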

int
nicvf_mbox_set_mac_addr(struct nicvf *nic,
			const uint8_t mac[NICVF_MAC_ADDR_SIZE])
{
	struct nic_mbx mbx = { };
	int i;

	mbx.msg.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	for (i = 0; i < NICVF_MAC_ADDR_SIZE; i++)
		mbx.mac.mac_addr[i] = mac[i];

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = qcnt;

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_get_rss_size(struct nicvf *nic)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;

	/* Result will be stored in nic->rss_info.rss_size */
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_config_rss(struct nicvf *nic)
{
	struct nic_mbx mbx = { };
	struct nicvf_rss_reta_info *rss = &nic->rss_info;
	size_t tot_len = rss->rss_size;
	size_t cur_len;
	size_t cur_idx = 0;
	size_t i;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	mbx.rss_cfg.tbl_len = 0;
	mbx.rss_cfg.tbl_offset = 0;

	/* The indirection table doesn't fit into a single mailbox message,
	 * so send it in RSS_IND_TBL_LEN_PER_MBX_MSG sized chunks: the first
	 * chunk as RSS_CFG, the remaining ones as RSS_CFG_CONT.
	 */
	while (cur_idx < tot_len) {
		cur_len = nicvf_min(tot_len - cur_idx,
				(size_t)RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.msg.msg = (cur_idx > 0) ?
			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
		mbx.rss_cfg.tbl_offset = cur_idx;
		mbx.rss_cfg.tbl_len = cur_len;
		for (i = 0; i < cur_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[cur_idx++];

		if (nicvf_mbox_send_msg_to_pf(nic, &mbx))
			return NICVF_ERR_RSS_TBL_UPDATE;
	}

	return 0;
}
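
/*
 * Worked example of the chunking in nicvf_mbox_config_rss() (numbers are
 * assumptions for illustration: say a 64-entry indirection table and
 * RSS_IND_TBL_LEN_PER_MBX_MSG == 8): the table would be sent as one
 * NIC_MBOX_MSG_RSS_CFG message with tbl_offset=0/tbl_len=8, followed by
 * seven NIC_MBOX_MSG_RSS_CFG_CONT messages at offsets 8, 16, ..., 56,
 * each acked individually; any NACK aborts the update with
 * NICVF_ERR_RSS_TBL_UPDATE.
 */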

int
nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
		     struct pf_rq_cfg *pf_rq_cfg)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = nic->vf_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = pf_rq_cfg->value;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = nic->vf_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (nic->vf_id << 3) | qidx;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
{
	struct nic_mbx mbx = { };

#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
	qs_cfg->be = 1;
#endif
	/* Send a mailbox msg to the PF to config the Qset */
	mbx.msg.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = nic->vf_id;
	mbx.qs.sqs_count = nic->sqs_count;
	mbx.qs.cfg = qs_cfg->value;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_request_sqs(struct nicvf *nic)
{
	struct nic_mbx mbx = { };
	size_t i;

	assert_primary(nic);
	assert(nic->sqs_count > 0);
	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.spec = 1;
	mbx.sqs_alloc.qs_count = nic->sqs_count;

	/* Provide the VF ids of the secondary Qsets being requested */
	for (i = 0; i < nic->sqs_count; i++)
		mbx.sqs_alloc.svf[i] = nic->snicvf[i]->vf_id;

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable)
{
	struct nic_mbx mbx = { };
	struct pf_rq_drop_cfg *drop_cfg;

	/* Enable CQ drop to reserve sufficient CQEs for all tx packets */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.qs_num = nic->vf_id;
	mbx.rq.rq_num = qidx;
	drop_cfg = (struct pf_rq_drop_cfg *)&mbx.rq.cfg;
	drop_cfg->value = 0;
	if (enable) {
		drop_cfg->cq_red = 1;
		drop_cfg->cq_drop = 2;
	}
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_rq_sync(struct nicvf *nic)
{
	struct nic_mbx mbx = { };

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	mbx.rq.cfg = 0;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.qs_num = nic->vf_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = 0;
	if (enable)
		mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (nic->vf_id << 0);
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_loopback_config(struct nicvf *nic, bool enable)
{
	struct nic_mbx mbx = { };

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = enable;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
			       uint8_t tx_stat_mask, uint16_t rq_stat_mask,
			       uint16_t sq_stat_mask)
{
	struct nic_mbx mbx = { };

	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = rx_stat_mask;
	mbx.reset_stat.tx_stat_mask = tx_stat_mask;
	mbx.reset_stat.rq_stat_mask = rq_stat_mask;
	mbx.reset_stat.sq_stat_mask = sq_stat_mask;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_set_link_up_down(struct nicvf *nic, bool enable)
{
	struct nic_mbx mbx = { };

	/* SET_LINK reuses the loopback message layout: vf_id + enable */
	mbx.lbk.msg = NIC_MBOX_MSG_SET_LINK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = enable;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_change_mode(struct nicvf *nic, struct change_link_mode *cfg)
{
	struct nic_mbx mbx = { };

	mbx.mode.msg = NIC_MBOX_MSG_CHANGE_MODE;
	mbx.mode.vf_id = nic->vf_id;
	mbx.mode.speed = cfg->speed;
	mbx.mode.duplex = cfg->duplex;
	mbx.mode.autoneg = cfg->autoneg;
	mbx.mode.qlm_mode = cfg->qlm_mode;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

void
nicvf_mbox_shutdown(struct nicvf *nic)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

void
nicvf_mbox_cfg_done(struct nicvf *nic)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
	nicvf_mbox_send_async_msg_to_pf(nic, &mbx);
}

void
nicvf_mbox_link_change(struct nicvf *nic)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
	nicvf_mbox_send_async_msg_to_pf(nic, &mbx);
}
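
/*
 * End-of-configuration sketch (illustrative, hypothetical caller and
 * qs_cfg variable): CFG_DONE and BGX_LINK_CHANGE go through the async
 * sender above, so no ACK is awaited; the fixed settle delay is the
 * only pacing.
 *
 *	nicvf_mbox_qset_config(nic, &qs_cfg);	// ACKed by the PF
 *	// ... per-queue RQ/SQ configuration ...
 *	nicvf_mbox_cfg_done(nic);		// fire-and-forget
 */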

void
nicvf_mbox_reset_xcast(struct nicvf *nic)
{
	struct nic_mbx mbx = { };

	mbx.msg.msg = NIC_MBOX_MSG_RESET_XCAST;
	nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t mode, uint64_t mac)
{
	struct nic_mbx mbx = { };

	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
	mbx.xcast.mode = mode;
	mbx.xcast.mac = mac;

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}
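
/*
 * Multicast/broadcast (xcast) update sketch (illustrative; `mode' and
 * `mac' are hypothetical values -- the real bit definitions live with
 * the mailbox structures shared with the PF driver):
 *
 *	nicvf_mbox_reset_xcast(nic);	// clear current filter state
 *	if (nicvf_mbox_set_xcast(nic, mode, mac))
 *		;			// PF rejected or did not ack
 */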