/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <ethdev_pci.h>
#include <rte_ether.h>
#include <rte_kvargs.h>

#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "cnxk_ep_vf.h"
#include "otx_ep_mbox.h"

/*
 * When a new command is implemented, the table below should be updated
 * with the new command and its version info.
 */
static uint32_t otx_ep_cmd_versions[OTX_EP_MBOX_CMD_MAX] = {
	[0 ... OTX_EP_MBOX_CMD_DEV_REMOVE] = OTX_EP_MBOX_VERSION_V1,
	[OTX_EP_MBOX_CMD_GET_FW_INFO ... OTX_EP_MBOX_NOTIF_LINK_STATUS] = OTX_EP_MBOX_VERSION_V2,
	[OTX_EP_MBOX_NOTIF_PF_FLR] = OTX_EP_MBOX_VERSION_V3
};

static int
__otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
		       union otx_ep_mbox_word cmd,
		       union otx_ep_mbox_word *rsp)
{
	volatile uint64_t reg_val = 0ull;
	int count = 0;

	reg_val = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
	if (reg_val == UINT64_MAX)
		return -ENODEV;

	cmd.s.type = OTX_EP_MBOX_TYPE_CMD;
	otx2_write64(cmd.u64, otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));

	/* No response for notification messages */
	if (!rsp)
		return 0;

	/* Poll the data register until the PF overwrites the command word
	 * with its response, or the timeout expires.
	 */
	for (count = 0; count < OTX_EP_MBOX_TIMEOUT_MS; count++) {
		rte_delay_ms(1);
		reg_val = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
		if (reg_val == UINT64_MAX)
			return -ENODEV;
		if (reg_val != cmd.u64) {
			rsp->u64 = reg_val;
			break;
		}
	}
	if (count == OTX_EP_MBOX_TIMEOUT_MS) {
		otx_ep_err("mbox send timeout, count:%d", count);
		return OTX_EP_MBOX_TIMEOUT_MS;
	}
	if (rsp->s.type != OTX_EP_MBOX_TYPE_RSP_ACK) {
		otx_ep_err("mbox received NACK from PF");
		return OTX_EP_MBOX_CMD_STATUS_NACK;
	}

	return 0;
}

static int
otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
		     union otx_ep_mbox_word cmd,
		     union otx_ep_mbox_word *rsp)
{
	int ret;

	rte_spinlock_lock(&otx_ep->mbox_lock);
	if (otx_ep_cmd_versions[cmd.s.opcode] > otx_ep->mbox_neg_ver) {
		otx_ep_dbg("CMD:%d not supported in version:%d", cmd.s.opcode,
			   otx_ep->mbox_neg_ver);
		rte_spinlock_unlock(&otx_ep->mbox_lock);
		return -EOPNOTSUPP;
	}
	ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, rsp);
	rte_spinlock_unlock(&otx_ep->mbox_lock);
	return ret;
}

static int
otx_ep_mbox_bulk_read(struct otx_ep_device *otx_ep,
		      enum otx_ep_mbox_opcode opcode,
		      uint8_t *data, int32_t *size)
{
	union otx_ep_mbox_word cmd;
	union otx_ep_mbox_word rsp;
	int read_cnt, i = 0, ret;
	int data_len = 0, tmp_len = 0;

	rte_spinlock_lock(&otx_ep->mbox_lock);
	cmd.u64 = 0;
	cmd.s_data.opcode = opcode;
	cmd.s_data.frag = 0;
	/* Send cmd to read data from PF */
	ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
	if (ret) {
		otx_ep_err("mbox bulk read data request failed");
		rte_spinlock_unlock(&otx_ep->mbox_lock);
		return ret;
	}
	/* PF sends the total data length of the requested CMD in its ACK */
	memcpy(&data_len, rsp.s_data.data, sizeof(data_len));
	tmp_len = data_len;
	cmd.u64 = 0;
	rsp.u64 = 0;
	cmd.s_data.opcode = opcode;
	cmd.s_data.frag = 1;
	while (data_len) {
		ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
		if (ret) {
			otx_ep_err("mbox bulk read fragment request failed");
			otx_ep->mbox_data_index = 0;
			memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
			rte_spinlock_unlock(&otx_ep->mbox_lock);
			return ret;
		}
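		/* Each response fragment carries at most
		 * OTX_EP_MBOX_MAX_DATA_SIZE bytes of payload; work out how
		 * much of this fragment to consume before requesting the next.
		 */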
		if (data_len > OTX_EP_MBOX_MAX_DATA_SIZE) {
			data_len -= OTX_EP_MBOX_MAX_DATA_SIZE;
			read_cnt = OTX_EP_MBOX_MAX_DATA_SIZE;
		} else {
			read_cnt = data_len;
			data_len = 0;
		}
		for (i = 0; i < read_cnt; i++) {
			otx_ep->mbox_data_buf[otx_ep->mbox_data_index] =
				rsp.s_data.data[i];
			otx_ep->mbox_data_index++;
		}
		cmd.u64 = 0;
		rsp.u64 = 0;
		cmd.s_data.opcode = opcode;
		cmd.s_data.frag = 1;
	}
	memcpy(data, otx_ep->mbox_data_buf, tmp_len);
	*size = tmp_len;
	otx_ep->mbox_data_index = 0;
	memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
	rte_spinlock_unlock(&otx_ep->mbox_lock);
	return 0;
}

int
otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct otx_ep_device *otx_ep =
		(struct otx_ep_device *)(eth_dev)->data->dev_private;
	union otx_ep_mbox_word cmd;
	union otx_ep_mbox_word rsp;
	int ret = 0;

	cmd.u64 = 0;
	cmd.s_set_mtu.opcode = OTX_EP_MBOX_CMD_SET_MTU;
	cmd.s_set_mtu.mtu = mtu;

	ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
	if (ret) {
		otx_ep_err("set MTU failed");
		return -EINVAL;
	}
	otx_ep_dbg("MTU set successfully, mtu %u", mtu);

	return 0;
}

int
otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
			 struct rte_ether_addr *mac_addr)
{
	struct otx_ep_device *otx_ep =
		(struct otx_ep_device *)(eth_dev)->data->dev_private;
	union otx_ep_mbox_word cmd;
	union otx_ep_mbox_word rsp;
	int i, ret;

	cmd.u64 = 0;
	cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_SET_MAC_ADDR;
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		cmd.s_set_mac.mac_addr[i] = mac_addr->addr_bytes[i];
	ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
	if (ret) {
		otx_ep_err("set MAC address failed");
		return -EINVAL;
	}
	otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT,
		   __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
	rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
	return 0;
}

int
otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
			 struct rte_ether_addr *mac_addr)
{
	struct otx_ep_device *otx_ep =
		(struct otx_ep_device *)(eth_dev)->data->dev_private;
	union otx_ep_mbox_word cmd;
	union otx_ep_mbox_word rsp;
	int i, ret;

	cmd.u64 = 0;
	cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_GET_MAC_ADDR;
	ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
	if (ret) {
		otx_ep_err("get MAC address failed");
		return -EINVAL;
	}
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr->addr_bytes[i] = rsp.s_set_mac.mac_addr[i];
	otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT,
		   __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
	return 0;
}

int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
				uint8_t *oper_up)
{
	struct otx_ep_device *otx_ep =
		(struct otx_ep_device *)(eth_dev)->data->dev_private;
	union otx_ep_mbox_word cmd;
	union otx_ep_mbox_word rsp;
	int ret;

	cmd.u64 = 0;
	cmd.s_link_status.opcode = OTX_EP_MBOX_CMD_GET_LINK_STATUS;
	ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
	if (ret) {
		otx_ep_err("Get link status failed");
		return -EINVAL;
	}
	*oper_up = rsp.s_link_status.status;
	return 0;
}

int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev,
			      struct rte_eth_link *link)
{
	int32_t ret, size;
	struct otx_ep_iface_link_info link_info;
	struct otx_ep_device *otx_ep =
		(struct otx_ep_device *)(eth_dev)->data->dev_private;
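
	/* Link info does not fit in a single mbox word; fetch it through
	 * the fragmented bulk-read path.
	 */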
	memset(&link_info, 0, sizeof(struct otx_ep_iface_link_info));
	ret = otx_ep_mbox_bulk_read(otx_ep, OTX_EP_MBOX_CMD_GET_LINK_INFO,
				    (uint8_t *)&link_info, &size);
	if (ret) {
		otx_ep_err("Get link info failed");
		return ret;
	}
	link->link_status = RTE_ETH_LINK_UP;
	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = (link_info.autoneg ==
			      OTX_EP_LINK_AUTONEG) ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
	link->link_speed = link_info.speed;
	return 0;
}

void
otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep)
{
	rte_write64(0x2, (uint8_t *)otx_ep->hw_addr +
		    CNXK_EP_R_MBOX_PF_VF_INT(0));
}

void
otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep)
{
	rte_write64(0x00, (uint8_t *)otx_ep->hw_addr +
		    CNXK_EP_R_MBOX_PF_VF_INT(0));
}

int
otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_ep =
		(struct otx_ep_device *)(eth_dev)->data->dev_private;
	union otx_ep_mbox_word cmd;
	union otx_ep_mbox_word rsp;
	int ret;

	rsp.u64 = 0;
	cmd.u64 = 0;
	cmd.s_get_mtu.opcode = OTX_EP_MBOX_CMD_GET_MTU;

	ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
	if (ret)
		return ret;
	return rsp.s_get_mtu.mtu;
}
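
/*
 * Negotiate the mbox version with the PF: advertise
 * OTX_EP_MBOX_VERSION_CURRENT and adopt the version the PF acknowledges.
 */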
static void
otx_ep_mbox_version_check(struct otx_ep_device *otx_ep)
{
	union otx_ep_mbox_word cmd;
	union otx_ep_mbox_word rsp;
	int ret;

	cmd.u64 = 0;
	cmd.s_version.opcode = OTX_EP_MBOX_CMD_VERSION;
	cmd.s_version.version = OTX_EP_MBOX_VERSION_CURRENT;
	ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);

	/*
	 * The VF receives a NACK, or version info of zero, only if the PF
	 * is running an old version of the mailbox. In that case the VF
	 * mailbox version falls back to the base mailbox version,
	 * OTX_EP_MBOX_VERSION_V1. The default VF mbox_neg_ver is set to
	 * OTX_EP_MBOX_VERSION_V1 during PMD initialization.
	 */
	if (ret == OTX_EP_MBOX_CMD_STATUS_NACK || rsp.s_version.version == 0) {
		otx_ep_dbg("VF mbox version fallback to base version from:%u",
			   (uint32_t)cmd.s_version.version);
		return;
	}
	otx_ep->mbox_neg_ver = (uint32_t)rsp.s_version.version;
	otx_ep_dbg("VF mbox version:%u, negotiated VF version with PF:%u",
		   (uint32_t)cmd.s_version.version,
		   (uint32_t)rsp.s_version.version);
}

static void
otx_ep_mbox_intr_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct otx_ep_device *otx_ep = (struct otx_ep_device *)eth_dev->data->dev_private;
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	union otx_ep_mbox_word mbox_cmd;

	if (otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_INT(0)) & CNXK_EP_MBOX_INTR) {
		mbox_cmd.u64 = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_DATA(0));
		/* Re-enable the mbox interrupt and clear its status */
		otx2_write64(CNXK_EP_MBOX_ENAB | CNXK_EP_MBOX_INTR,
			     otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_INT(0));
		if (mbox_cmd.s.opcode == OTX_EP_MBOX_NOTIF_PF_FLR) {
			/* ACK the FLR notification before raising the
			 * device-remove event.
			 */
			rte_spinlock_lock(&otx_ep->mbox_lock);
			mbox_cmd.s.type = OTX_EP_MBOX_TYPE_RSP_ACK;
			otx2_write64(mbox_cmd.u64, otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_DATA(0));
			rte_spinlock_unlock(&otx_ep->mbox_lock);
			rte_dev_event_callback_process(pdev->name, RTE_DEV_EVENT_REMOVE);
		} else {
			otx_ep_err("Invalid mbox opcode");
		}
	}
}

int
otx_ep_mbox_init(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_ep = (struct otx_ep_device *)eth_dev->data->dev_private;
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	uint64_t reg_val;

	otx_ep_mbox_version_check(otx_ep);

	rte_intr_callback_register(pdev->intr_handle, otx_ep_mbox_intr_handler, (void *)eth_dev);

	if (rte_intr_enable(pdev->intr_handle)) {
		otx_ep_err("rte_intr_enable failed");
		return -1;
	}

	reg_val = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_INT(0));
	if (reg_val == UINT64_MAX)
		return -ENODEV;

	/* Enable the PF-VF mbox interrupt and clear the status */
	otx2_write64(CNXK_EP_MBOX_ENAB | CNXK_EP_MBOX_INTR,
		     otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_INT(0));

	return 0;
}

void
otx_ep_mbox_uninit(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_ep = (struct otx_ep_device *)eth_dev->data->dev_private;
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);

	otx2_write64(0, otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_INT(0));

	rte_intr_disable(pdev->intr_handle);

	rte_intr_callback_unregister(pdev->intr_handle, otx_ep_mbox_intr_handler, (void *)eth_dev);
}

int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_ep =
		(struct otx_ep_device *)(eth_dev)->data->dev_private;
	union otx_ep_mbox_word cmd;
	int ret;

	cmd.u64 = 0;
	cmd.s_version.opcode = OTX_EP_MBOX_CMD_DEV_REMOVE;
	ret = otx_ep_send_mbox_cmd(otx_ep, cmd, NULL);
	return ret;
}