/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 Cisco Systems, Inc. All rights reserved.
 */

#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/if_ether.h>
#include <errno.h>
#include <sys/eventfd.h>

#include <rte_version.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>

#include "rte_eth_memif.h"
#include "memif_socket.h"

#define ETH_MEMIF_ID_ARG		"id"
#define ETH_MEMIF_ROLE_ARG		"role"
#define ETH_MEMIF_PKT_BUFFER_SIZE_ARG	"bsize"
#define ETH_MEMIF_RING_SIZE_ARG		"rsize"
#define ETH_MEMIF_SOCKET_ARG		"socket"
#define ETH_MEMIF_MAC_ARG		"mac"
#define ETH_MEMIF_ZC_ARG		"zero-copy"
#define ETH_MEMIF_SECRET_ARG		"secret"

static const char * const valid_arguments[] = {
	ETH_MEMIF_ID_ARG,
	ETH_MEMIF_ROLE_ARG,
	ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
	ETH_MEMIF_RING_SIZE_ARG,
	ETH_MEMIF_SOCKET_ARG,
	ETH_MEMIF_MAC_ARG,
	ETH_MEMIF_ZC_ARG,
	ETH_MEMIF_SECRET_ARG,
	NULL
};

#define MEMIF_MP_SEND_REGION		"memif_mp_send_region"

const char *
memif_version(void)
{
	return ("memif-" RTE_STR(MEMIF_VERSION_MAJOR) "." RTE_STR(MEMIF_VERSION_MINOR));
}

/* Message header to synchronize regions */
struct mp_region_msg {
	char port_name[RTE_DEV_NAME_MAX_LEN];
	memif_region_index_t idx;
	memif_region_size_t size;
};

static int
memif_mp_send_region(const struct rte_mp_msg *msg, const void *peer)
{
	struct rte_eth_dev *dev;
	struct pmd_process_private *proc_private;
	const struct mp_region_msg *msg_param = (const struct mp_region_msg *)msg->param;
	struct rte_mp_msg reply;
	struct mp_region_msg *reply_param = (struct mp_region_msg *)reply.param;
	uint16_t port_id;
	int ret;

	/* Get requested port */
	ret = rte_eth_dev_get_port_by_name(msg_param->port_name, &port_id);
	if (ret) {
		MIF_LOG(ERR, "Failed to get port id for %s",
			msg_param->port_name);
		return -1;
	}
	dev = &rte_eth_devices[port_id];
	proc_private = dev->process_private;

	memset(&reply, 0, sizeof(reply));
	strlcpy(reply.name, msg->name, sizeof(reply.name));
	reply_param->idx = msg_param->idx;
	if (proc_private->regions[msg_param->idx] != NULL) {
		reply_param->size = proc_private->regions[msg_param->idx]->region_size;
		reply.fds[0] = proc_private->regions[msg_param->idx]->fd;
		reply.num_fds = 1;
	}
	reply.len_param = sizeof(*reply_param);
	if (rte_mp_reply(&reply, peer) < 0) {
		MIF_LOG(ERR, "Failed to reply to an add region request");
		return -1;
	}

	return 0;
}
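/*
 * Illustrative note (not part of the original driver source): region
 * synchronization between processes rides on the EAL multi-process channel.
 * The secondary process issues one MEMIF_MP_SEND_REGION request per region
 * index; the primary answers with the region size and the backing memfd:
 *
 *   secondary                               primary
 *   ------------------------------------    ---------------------------
 *   rte_mp_request_sync(msg, ...)       ->  memif_mp_send_region()
 *   (msg.name = "memif_mp_send_region",
 *    msg_param->idx = i)
 *                                       <-  rte_mp_reply(size, fds[0])
 *   store fd, mmap() it later in memif_connect()
 */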
/*
 * Request regions
 * Called by the secondary process when the port's link status goes up.
 */
static int
memif_mp_request_regions(struct rte_eth_dev *dev)
{
	int ret, i;
	struct timespec timeout = {.tv_sec = 5, .tv_nsec = 0};
	struct rte_mp_msg msg, *reply;
	struct rte_mp_reply replies;
	struct mp_region_msg *msg_param = (struct mp_region_msg *)msg.param;
	struct mp_region_msg *reply_param;
	struct memif_region *r;
	struct pmd_process_private *proc_private = dev->process_private;

	MIF_LOG(DEBUG, "Requesting memory regions");

	for (i = 0; i < ETH_MEMIF_MAX_REGION_NUM; i++) {
		/* Prepare the message */
		memset(&msg, 0, sizeof(msg));
		strlcpy(msg.name, MEMIF_MP_SEND_REGION, sizeof(msg.name));
		strlcpy(msg_param->port_name, dev->data->name,
			sizeof(msg_param->port_name));
		msg_param->idx = i;
		msg.len_param = sizeof(*msg_param);

		/* Send message */
		ret = rte_mp_request_sync(&msg, &replies, &timeout);
		if (ret < 0 || replies.nb_received != 1) {
			MIF_LOG(ERR, "Failed to send mp msg: %d",
				rte_errno);
			return -1;
		}

		reply = &replies.msgs[0];
		reply_param = (struct mp_region_msg *)reply->param;

		if (reply_param->size > 0) {
			r = rte_zmalloc("region", sizeof(struct memif_region), 0);
			if (r == NULL) {
				MIF_LOG(ERR, "Failed to alloc memif region.");
				free(reply);
				return -ENOMEM;
			}
			r->region_size = reply_param->size;
			if (reply->num_fds < 1) {
				MIF_LOG(ERR, "Missing file descriptor.");
				free(reply);
				return -1;
			}
			r->fd = reply->fds[0];
			r->addr = NULL;

			proc_private->regions[reply_param->idx] = r;
			proc_private->regions_num++;
		}
		free(reply);
	}

	return memif_connect(dev);
}

static int
memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info)
{
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
	dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
	dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

static memif_ring_t *
memif_get_ring(struct pmd_internals *pmd, struct pmd_process_private *proc_private,
	       memif_ring_type_t type, uint16_t ring_num)
{
	/* rings only in region 0 */
	void *p = proc_private->regions[0]->addr;
	int ring_size = sizeof(memif_ring_t) + sizeof(memif_desc_t) *
	    (1 << pmd->run.log2_ring_size);

	p = (uint8_t *)p + (ring_num + type * pmd->run.num_s2m_rings) * ring_size;

	return (memif_ring_t *)p;
}

static memif_region_offset_t
memif_get_ring_offset(struct rte_eth_dev *dev, struct memif_queue *mq,
		      memif_ring_type_t type, uint16_t num)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct pmd_process_private *proc_private = dev->process_private;

	return ((uint8_t *)memif_get_ring(pmd, proc_private, type, num) -
		(uint8_t *)proc_private->regions[mq->region]->addr);
}

static memif_ring_t *
memif_get_ring_from_queue(struct pmd_process_private *proc_private,
			  struct memif_queue *mq)
{
	struct memif_region *r;

	r = proc_private->regions[mq->region];
	if (r == NULL)
		return NULL;

	return (memif_ring_t *)((uint8_t *)r->addr + mq->ring_offset);
}

static void *
memif_get_buffer(struct pmd_process_private *proc_private, memif_desc_t *d)
{
	return ((uint8_t *)proc_private->regions[d->region]->addr + d->offset);
}
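/*
 * Illustrative note (not part of the original driver source): the offset
 * arithmetic in memif_get_ring() and memif_region_init_shm() implies the
 * following layout of region 0, where n = num_s2m_rings, m = num_m2s_rings
 * and rsz = sizeof(memif_ring_t) +
 * (1 << log2_ring_size) * sizeof(memif_desc_t):
 *
 *   offset 0:               S2M ring 0 .. S2M ring n-1
 *   offset n * rsz:         M2S ring 0 .. M2S ring m-1
 *   offset (n + m) * rsz:   packet buffers (== pkt_buffer_offset)
 */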
static int
memif_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *cur_tail,
		    struct rte_mbuf *tail)
{
	/* Check for number-of-segments-overflow */
	if (unlikely(head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS))
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail->next = tail;

	/* accumulate number of segments and total length. */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);

	tail->pkt_len = tail->data_len;
	head->pkt_len += tail->pkt_len;

	return 0;
}

static uint16_t
eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct memif_queue *mq = queue;
	struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;
	struct pmd_process_private *proc_private =
		rte_eth_devices[mq->in_port].process_private;
	memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
	uint16_t cur_slot, last_slot, n_slots, ring_size, mask, s0;
	uint16_t n_rx_pkts = 0;
	uint16_t mbuf_size = rte_pktmbuf_data_room_size(mq->mempool) -
		RTE_PKTMBUF_HEADROOM;
	uint16_t src_len, src_off, dst_len, dst_off, cp_len;
	memif_ring_type_t type = mq->type;
	memif_desc_t *d0;
	struct rte_mbuf *mbuf, *mbuf_head, *mbuf_tail;
	uint64_t b;
	ssize_t size __rte_unused;
	uint16_t head;
	int ret;
	struct rte_eth_link link;

	if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))
		return 0;
	if (unlikely(ring == NULL)) {
		/* Secondary process will attempt to request regions. */
		ret = rte_eth_link_get(mq->in_port, &link);
		if (ret < 0)
			MIF_LOG(ERR, "Failed to get port %u link info: %s",
				mq->in_port, rte_strerror(-ret));
		return 0;
	}

	/* consume interrupt */
	if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0)
		size = read(mq->intr_handle.fd, &b, sizeof(b));

	ring_size = 1 << mq->log2_ring_size;
	mask = ring_size - 1;

	cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;
	last_slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
	if (cur_slot == last_slot)
		goto refill;
	n_slots = last_slot - cur_slot;

	while (n_slots && n_rx_pkts < nb_pkts) {
		mbuf_head = rte_pktmbuf_alloc(mq->mempool);
		if (unlikely(mbuf_head == NULL))
			goto no_free_bufs;
		mbuf = mbuf_head;
		mbuf->port = mq->in_port;

next_slot:
		s0 = cur_slot & mask;
		d0 = &ring->desc[s0];

		src_len = d0->length;
		dst_off = 0;
		src_off = 0;

		do {
			dst_len = mbuf_size - dst_off;
			if (dst_len == 0) {
				dst_off = 0;
				dst_len = mbuf_size;

				/* store pointer to tail */
				mbuf_tail = mbuf;
				mbuf = rte_pktmbuf_alloc(mq->mempool);
				if (unlikely(mbuf == NULL))
					goto no_free_bufs;
				mbuf->port = mq->in_port;
				ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf);
				if (unlikely(ret < 0)) {
					MIF_LOG(ERR, "number-of-segments-overflow");
					rte_pktmbuf_free(mbuf);
					goto no_free_bufs;
				}
			}
			cp_len = RTE_MIN(dst_len, src_len);

			rte_pktmbuf_data_len(mbuf) += cp_len;
			rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
			if (mbuf != mbuf_head)
				rte_pktmbuf_pkt_len(mbuf_head) += cp_len;

			memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, dst_off),
			       (uint8_t *)memif_get_buffer(proc_private, d0) +
			       src_off, cp_len);

			src_off += cp_len;
			dst_off += cp_len;
			src_len -= cp_len;
		} while (src_len);

		cur_slot++;
		n_slots--;

		if (d0->flags & MEMIF_DESC_FLAG_NEXT)
			goto next_slot;

		mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
		*bufs++ = mbuf_head;
		n_rx_pkts++;
	}

no_free_bufs:
	if (type == MEMIF_RING_S2M) {
		rte_mb();
		ring->tail = cur_slot;
		mq->last_head = cur_slot;
	} else {
		mq->last_tail = cur_slot;
	}

refill:
	if (type == MEMIF_RING_M2S) {
		head = ring->head;
		n_slots = ring_size - head + mq->last_tail;

		while (n_slots--) {
			s0 = head++ & mask;
			d0 = &ring->desc[s0];
			d0->length = pmd->run.pkt_buffer_size;
		}
		rte_mb();
		ring->head = head;
	}

	mq->n_pkts += n_rx_pkts;
	return n_rx_pkts;
}
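/*
 * Illustrative note (not part of the original driver source): head/tail
 * slot counters above are free-running uint16_t values and are only masked
 * with (ring_size - 1) when indexing descriptors, so the unsigned
 * subtraction "last_slot - cur_slot" stays correct across wrap-around.
 * E.g. with cur_slot = 65534 and last_slot = 2, uint16_t arithmetic yields
 * n_slots = 4.
 */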
static uint16_t
eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct memif_queue *mq = queue;
	struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;
	struct pmd_process_private *proc_private =
		rte_eth_devices[mq->in_port].process_private;
	memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
	uint16_t slot, saved_slot, n_free, ring_size, mask, n_tx_pkts = 0;
	uint16_t src_len, src_off, dst_len, dst_off, cp_len;
	memif_ring_type_t type = mq->type;
	memif_desc_t *d0;
	struct rte_mbuf *mbuf;
	struct rte_mbuf *mbuf_head;
	uint64_t a;
	ssize_t size;
	struct rte_eth_link link;

	if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))
		return 0;
	if (unlikely(ring == NULL)) {
		int ret;

		/* Secondary process will attempt to request regions. */
		ret = rte_eth_link_get(mq->in_port, &link);
		if (ret < 0)
			MIF_LOG(ERR, "Failed to get port %u link info: %s",
				mq->in_port, rte_strerror(-ret));
		return 0;
	}

	ring_size = 1 << mq->log2_ring_size;
	mask = ring_size - 1;

	n_free = ring->tail - mq->last_tail;
	mq->last_tail += n_free;
	slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;

	if (type == MEMIF_RING_S2M)
		n_free = ring_size - ring->head + mq->last_tail;
	else
		n_free = ring->head - ring->tail;

	while (n_tx_pkts < nb_pkts && n_free) {
		mbuf_head = *bufs++;
		mbuf = mbuf_head;

		saved_slot = slot;
		d0 = &ring->desc[slot & mask];
		dst_off = 0;
		dst_len = (type == MEMIF_RING_S2M) ?
			pmd->run.pkt_buffer_size : d0->length;

next_in_chain:
		src_off = 0;
		src_len = rte_pktmbuf_data_len(mbuf);

		while (src_len) {
			if (dst_len == 0) {
				if (n_free) {
					slot++;
					n_free--;
					d0->flags |= MEMIF_DESC_FLAG_NEXT;
					d0 = &ring->desc[slot & mask];
					dst_off = 0;
					dst_len = (type == MEMIF_RING_S2M) ?
					    pmd->run.pkt_buffer_size : d0->length;
					d0->flags = 0;
				} else {
					slot = saved_slot;
					goto no_free_slots;
				}
			}
			cp_len = RTE_MIN(dst_len, src_len);

			memcpy((uint8_t *)memif_get_buffer(proc_private, d0) + dst_off,
			       rte_pktmbuf_mtod_offset(mbuf, void *, src_off),
			       cp_len);

			mq->n_bytes += cp_len;
			src_off += cp_len;
			dst_off += cp_len;
			src_len -= cp_len;
			dst_len -= cp_len;

			d0->length = dst_off;
		}

		if (rte_pktmbuf_is_contiguous(mbuf) == 0) {
			mbuf = mbuf->next;
			goto next_in_chain;
		}

		n_tx_pkts++;
		slot++;
		n_free--;
		rte_pktmbuf_free(mbuf_head);
	}

no_free_slots:
	rte_mb();
	if (type == MEMIF_RING_S2M)
		ring->head = slot;
	else
		ring->tail = slot;

	if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
		a = 1;
		size = write(mq->intr_handle.fd, &a, sizeof(a));
		if (unlikely(size < 0)) {
			MIF_LOG(WARNING,
				"Failed to send interrupt. %s", strerror(errno));
		}
	}

	mq->n_pkts += n_tx_pkts;
	return n_tx_pkts;
}
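/*
 * Illustrative note (not part of the original driver source): queue
 * "interrupts" are plain eventfds created in memif_init_queues(). Unless
 * the peer masked them with MEMIF_RING_FLAG_MASK_INT, the producer kicks
 * the consumer by writing a 64-bit counter, which the rx path later drains:
 *
 *   uint64_t a = 1;
 *   write(mq->intr_handle.fd, &a, sizeof(a));   // tx side, above
 *   read(mq->intr_handle.fd, &b, sizeof(b));    // rx side, eth_memif_rx()
 */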
void
memif_free_regions(struct pmd_process_private *proc_private)
{
	int i;
	struct memif_region *r;

	MIF_LOG(DEBUG, "Free memory regions");
	/* regions are allocated contiguously, so it's
	 * enough to loop until 'proc_private->regions_num'
	 */
	for (i = 0; i < proc_private->regions_num; i++) {
		r = proc_private->regions[i];
		if (r != NULL) {
			if (r->addr != NULL) {
				munmap(r->addr, r->region_size);
				if (r->fd > 0) {
					close(r->fd);
					r->fd = -1;
				}
			}
			rte_free(r);
			proc_private->regions[i] = NULL;
		}
	}
	proc_private->regions_num = 0;
}

static int
memif_region_init_shm(struct rte_eth_dev *dev, uint8_t has_buffers)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct pmd_process_private *proc_private = dev->process_private;
	char shm_name[ETH_MEMIF_SHM_NAME_SIZE];
	int ret = 0;
	struct memif_region *r;

	if (proc_private->regions_num >= ETH_MEMIF_MAX_REGION_NUM) {
		MIF_LOG(ERR, "Too many regions.");
		return -1;
	}

	r = rte_zmalloc("region", sizeof(struct memif_region), 0);
	if (r == NULL) {
		MIF_LOG(ERR, "Failed to alloc memif region.");
		return -ENOMEM;
	}

	/* calculate buffer offset */
	r->pkt_buffer_offset = (pmd->run.num_s2m_rings + pmd->run.num_m2s_rings) *
	    (sizeof(memif_ring_t) + sizeof(memif_desc_t) *
	    (1 << pmd->run.log2_ring_size));

	r->region_size = r->pkt_buffer_offset;
	/* if region has buffers, add buffers size to region_size */
	if (has_buffers == 1)
		r->region_size += (uint32_t)(pmd->run.pkt_buffer_size *
			(1 << pmd->run.log2_ring_size) *
			(pmd->run.num_s2m_rings +
			 pmd->run.num_m2s_rings));
	memset(shm_name, 0, sizeof(char) * ETH_MEMIF_SHM_NAME_SIZE);
	snprintf(shm_name, ETH_MEMIF_SHM_NAME_SIZE, "memif_region_%d",
		 proc_private->regions_num);

	r->fd = memfd_create(shm_name, MFD_ALLOW_SEALING);
	if (r->fd < 0) {
		MIF_LOG(ERR, "Failed to create shm file: %s.", strerror(errno));
		ret = -1;
		goto error;
	}

	ret = fcntl(r->fd, F_ADD_SEALS, F_SEAL_SHRINK);
	if (ret < 0) {
		MIF_LOG(ERR, "Failed to add seals to shm file: %s.", strerror(errno));
		goto error;
	}

	ret = ftruncate(r->fd, r->region_size);
	if (ret < 0) {
		MIF_LOG(ERR, "Failed to truncate shm file: %s.", strerror(errno));
		goto error;
	}

	r->addr = mmap(NULL, r->region_size, PROT_READ |
		       PROT_WRITE, MAP_SHARED, r->fd, 0);
	if (r->addr == MAP_FAILED) {
		/* report the mmap() error, not the stale 'ret' value */
		MIF_LOG(ERR, "Failed to mmap shm region: %s.", strerror(errno));
		ret = -1;
		goto error;
	}

	proc_private->regions[proc_private->regions_num] = r;
	proc_private->regions_num++;

	return ret;

error:
	if (r->fd > 0)
		close(r->fd);
	r->fd = -1;

	return ret;
}

static int
memif_regions_init(struct rte_eth_dev *dev)
{
	int ret;

	/* create one buffer region */
	ret = memif_region_init_shm(dev, /* has buffer */ 1);
	if (ret < 0)
		return ret;

	return 0;
}

static void
memif_init_rings(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct pmd_process_private *proc_private = dev->process_private;
	memif_ring_t *ring;
	int i, j;
	uint16_t slot;

	for (i = 0; i < pmd->run.num_s2m_rings; i++) {
		ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2M, i);
		ring->head = 0;
		ring->tail = 0;
		ring->cookie = MEMIF_COOKIE;
		ring->flags = 0;
		for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) {
			slot = i * (1 << pmd->run.log2_ring_size) + j;
			ring->desc[j].region = 0;
			ring->desc[j].offset =
				proc_private->regions[0]->pkt_buffer_offset +
				(uint32_t)(slot * pmd->run.pkt_buffer_size);
			ring->desc[j].length = pmd->run.pkt_buffer_size;
		}
	}

	for (i = 0; i < pmd->run.num_m2s_rings; i++) {
		ring = memif_get_ring(pmd, proc_private, MEMIF_RING_M2S, i);
		ring->head = 0;
		ring->tail = 0;
		ring->cookie = MEMIF_COOKIE;
		ring->flags = 0;
		for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) {
			slot = (i + pmd->run.num_s2m_rings) *
			    (1 << pmd->run.log2_ring_size) + j;
			ring->desc[j].region = 0;
			ring->desc[j].offset =
				proc_private->regions[0]->pkt_buffer_offset +
				(uint32_t)(slot * pmd->run.pkt_buffer_size);
			ring->desc[j].length = pmd->run.pkt_buffer_size;
		}
	}
}

/* called only by slave */
static void
memif_init_queues(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct memif_queue *mq;
	int i;

	for (i = 0; i < pmd->run.num_s2m_rings; i++) {
		mq = dev->data->tx_queues[i];
		mq->log2_ring_size = pmd->run.log2_ring_size;
		/* queues located only in region 0 */
		mq->region = 0;
		mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2M, i);
		mq->last_head = 0;
		mq->last_tail = 0;
		mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
		if (mq->intr_handle.fd < 0) {
			MIF_LOG(WARNING,
				"Failed to create eventfd for tx queue %d: %s.", i,
				strerror(errno));
		}
	}

	for (i = 0; i < pmd->run.num_m2s_rings; i++) {
		mq = dev->data->rx_queues[i];
		mq->log2_ring_size = pmd->run.log2_ring_size;
		/* queues located only in region 0 */
		mq->region = 0;
		mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_M2S, i);
		mq->last_head = 0;
		mq->last_tail = 0;
		mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
		if (mq->intr_handle.fd < 0) {
			MIF_LOG(WARNING,
				"Failed to create eventfd for rx queue %d: %s.", i,
				strerror(errno));
		}
	}
}
int
memif_init_regions_and_queues(struct rte_eth_dev *dev)
{
	int ret;

	ret = memif_regions_init(dev);
	if (ret < 0)
		return ret;

	memif_init_rings(dev);

	memif_init_queues(dev);

	return 0;
}

int
memif_connect(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct pmd_process_private *proc_private = dev->process_private;
	struct memif_region *mr;
	struct memif_queue *mq;
	memif_ring_t *ring;
	int i;

	for (i = 0; i < proc_private->regions_num; i++) {
		mr = proc_private->regions[i];
		if (mr != NULL) {
			if (mr->addr == NULL) {
				if (mr->fd < 0)
					return -1;
				mr->addr = mmap(NULL, mr->region_size,
						PROT_READ | PROT_WRITE,
						MAP_SHARED, mr->fd, 0);
				/* mmap() signals failure with MAP_FAILED,
				 * not NULL
				 */
				if (mr->addr == MAP_FAILED) {
					MIF_LOG(ERR, "mmap failed: %s.",
						strerror(errno));
					mr->addr = NULL;
					return -1;
				}
			}
		}
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		for (i = 0; i < pmd->run.num_s2m_rings; i++) {
			mq = (pmd->role == MEMIF_ROLE_SLAVE) ?
			    dev->data->tx_queues[i] : dev->data->rx_queues[i];
			ring = memif_get_ring_from_queue(proc_private, mq);
			if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
				MIF_LOG(ERR, "Wrong ring");
				return -1;
			}
			ring->head = 0;
			ring->tail = 0;
			mq->last_head = 0;
			mq->last_tail = 0;
			/* enable polling mode */
			if (pmd->role == MEMIF_ROLE_MASTER)
				ring->flags = MEMIF_RING_FLAG_MASK_INT;
		}
		for (i = 0; i < pmd->run.num_m2s_rings; i++) {
			mq = (pmd->role == MEMIF_ROLE_SLAVE) ?
			    dev->data->rx_queues[i] : dev->data->tx_queues[i];
			ring = memif_get_ring_from_queue(proc_private, mq);
			if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
				MIF_LOG(ERR, "Wrong ring");
				return -1;
			}
			ring->head = 0;
			ring->tail = 0;
			mq->last_head = 0;
			mq->last_tail = 0;
			/* enable polling mode */
			if (pmd->role == MEMIF_ROLE_SLAVE)
				ring->flags = MEMIF_RING_FLAG_MASK_INT;
		}

		pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
		pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
		dev->data->dev_link.link_status = ETH_LINK_UP;
	}
	MIF_LOG(INFO, "Connected.");
	return 0;
}
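/*
 * Illustrative note (not part of the original driver source): in
 * memif_connect() each side masks interrupts on the rings it consumes (the
 * master on S2M rings, the slave on M2S rings), so a PMD polling its rx
 * rings never asks the peer to write the eventfd. Compare the
 * MEMIF_RING_FLAG_MASK_INT checks in eth_memif_rx()/eth_memif_tx().
 */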
static int
memif_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	int ret = 0;

	switch (pmd->role) {
	case MEMIF_ROLE_SLAVE:
		ret = memif_connect_slave(dev);
		break;
	case MEMIF_ROLE_MASTER:
		ret = memif_connect_master(dev);
		break;
	default:
		MIF_LOG(ERR, "%s: Unknown role: %d.",
			rte_vdev_device_name(pmd->vdev), pmd->role);
		ret = -1;
		break;
	}

	return ret;
}

static void
memif_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	int i;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memif_msg_enq_disconnect(pmd->cc, "Device closed", 0);
		memif_disconnect(dev);

		for (i = 0; i < dev->data->nb_rx_queues; i++)
			(*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			(*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);

		memif_socket_remove_device(dev);
	} else {
		memif_disconnect(dev);
	}

	rte_free(dev->process_private);
}

static int
memif_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	/*
	 * SLAVE - TXQ
	 * MASTER - RXQ
	 */
	pmd->cfg.num_s2m_rings = (pmd->role == MEMIF_ROLE_SLAVE) ?
				  dev->data->nb_tx_queues : dev->data->nb_rx_queues;

	/*
	 * SLAVE - RXQ
	 * MASTER - TXQ
	 */
	pmd->cfg.num_m2s_rings = (pmd->role == MEMIF_ROLE_SLAVE) ?
				  dev->data->nb_rx_queues : dev->data->nb_tx_queues;

	return 0;
}

static int
memif_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t qid,
		     uint16_t nb_tx_desc __rte_unused,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct memif_queue *mq;

	mq = rte_zmalloc("tx-queue", sizeof(struct memif_queue), 0);
	if (mq == NULL) {
		MIF_LOG(ERR, "%s: Failed to allocate tx queue id: %u",
			rte_vdev_device_name(pmd->vdev), qid);
		return -ENOMEM;
	}

	mq->type =
	    (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_S2M : MEMIF_RING_M2S;
	mq->n_pkts = 0;
	mq->n_bytes = 0;
	mq->intr_handle.fd = -1;
	mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
	dev->data->tx_queues[qid] = mq;

	return 0;
}
static int
memif_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t qid,
		     uint16_t nb_rx_desc __rte_unused,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf __rte_unused,
		     struct rte_mempool *mb_pool)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct memif_queue *mq;

	mq = rte_zmalloc("rx-queue", sizeof(struct memif_queue), 0);
	if (mq == NULL) {
		MIF_LOG(ERR, "%s: Failed to allocate rx queue id: %u",
			rte_vdev_device_name(pmd->vdev), qid);
		return -ENOMEM;
	}

	mq->type = (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_M2S : MEMIF_RING_S2M;
	mq->n_pkts = 0;
	mq->n_bytes = 0;
	mq->intr_handle.fd = -1;
	mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
	mq->mempool = mb_pool;
	mq->in_port = dev->data->port_id;
	dev->data->rx_queues[qid] = mq;

	return 0;
}

static void
memif_queue_release(void *queue)
{
	struct memif_queue *mq = (struct memif_queue *)queue;

	if (!mq)
		return;

	rte_free(mq);
}

static int
memif_link_update(struct rte_eth_dev *dev,
		  int wait_to_complete __rte_unused)
{
	struct pmd_process_private *proc_private;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		proc_private = dev->process_private;
		if (dev->data->dev_link.link_status == ETH_LINK_UP &&
		    proc_private->regions_num == 0) {
			memif_mp_request_regions(dev);
		} else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
			   proc_private->regions_num > 0) {
			memif_free_regions(proc_private);
		}
	}
	return 0;
}

static int
memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct memif_queue *mq;
	int i;
	uint8_t tmp, nq;

	stats->ipackets = 0;
	stats->ibytes = 0;
	stats->opackets = 0;
	stats->obytes = 0;

	tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_s2m_rings :
	    pmd->run.num_m2s_rings;
	nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
	    RTE_ETHDEV_QUEUE_STAT_CNTRS;

	/* RX stats */
	for (i = 0; i < nq; i++) {
		mq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = mq->n_pkts;
		stats->q_ibytes[i] = mq->n_bytes;
		stats->ipackets += mq->n_pkts;
		stats->ibytes += mq->n_bytes;
	}

	tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_m2s_rings :
	    pmd->run.num_s2m_rings;
	nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
	    RTE_ETHDEV_QUEUE_STAT_CNTRS;

	/* TX stats */
	for (i = 0; i < nq; i++) {
		mq = dev->data->tx_queues[i];
		stats->q_opackets[i] = mq->n_pkts;
		stats->q_obytes[i] = mq->n_bytes;
		stats->opackets += mq->n_pkts;
		stats->obytes += mq->n_bytes;
	}
	return 0;
}
static int
memif_stats_reset(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	int i;
	struct memif_queue *mq;

	for (i = 0; i < pmd->run.num_s2m_rings; i++) {
		mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->tx_queues[i] :
		    dev->data->rx_queues[i];
		mq->n_pkts = 0;
		mq->n_bytes = 0;
	}
	for (i = 0; i < pmd->run.num_m2s_rings; i++) {
		mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->rx_queues[i] :
		    dev->data->tx_queues[i];
		mq->n_pkts = 0;
		mq->n_bytes = 0;
	}

	return 0;
}

static int
memif_rx_queue_intr_enable(struct rte_eth_dev *dev __rte_unused,
			   uint16_t qid __rte_unused)
{
	MIF_LOG(WARNING, "Interrupt mode not supported.");

	return -1;
}

static int
memif_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t qid __rte_unused)
{
	struct pmd_internals *pmd __rte_unused = dev->data->dev_private;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = memif_dev_start,
	.dev_close = memif_dev_close,
	.dev_infos_get = memif_dev_info,
	.dev_configure = memif_dev_configure,
	.tx_queue_setup = memif_tx_queue_setup,
	.rx_queue_setup = memif_rx_queue_setup,
	.rx_queue_release = memif_queue_release,
	.tx_queue_release = memif_queue_release,
	.rx_queue_intr_enable = memif_rx_queue_intr_enable,
	.rx_queue_intr_disable = memif_rx_queue_intr_disable,
	.link_update = memif_link_update,
	.stats_get = memif_stats_get,
	.stats_reset = memif_stats_reset,
};

static int
memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
	     memif_interface_id_t id, uint32_t flags,
	     const char *socket_filename,
	     memif_log2_ring_size_t log2_ring_size,
	     uint16_t pkt_buffer_size, const char *secret,
	     struct rte_ether_addr *ether_addr)
{
	int ret = 0;
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct pmd_internals *pmd;
	struct pmd_process_private *process_private;
	const unsigned int numa_node = vdev->device.numa_node;
	const char *name = rte_vdev_device_name(vdev);

	if (flags & ETH_MEMIF_FLAG_ZERO_COPY) {
		MIF_LOG(ERR, "Zero-copy slave not supported.");
		return -1;
	}

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
	if (eth_dev == NULL) {
		MIF_LOG(ERR, "%s: Unable to allocate device struct.", name);
		return -1;
	}

	process_private = (struct pmd_process_private *)
		rte_zmalloc(name, sizeof(struct pmd_process_private),
			    RTE_CACHE_LINE_SIZE);

	if (process_private == NULL) {
		MIF_LOG(ERR, "Failed to alloc memory for process private");
		return -1;
	}
	eth_dev->process_private = process_private;

	pmd = eth_dev->data->dev_private;
	memset(pmd, 0, sizeof(*pmd));

	pmd->id = id;
	pmd->flags = flags;
	pmd->flags |= ETH_MEMIF_FLAG_DISABLED;
	pmd->role = role;

	ret = memif_socket_init(eth_dev, socket_filename);
	if (ret < 0)
		return ret;

	memset(pmd->secret, 0, sizeof(char) * ETH_MEMIF_SECRET_SIZE);
	if (secret != NULL)
		strlcpy(pmd->secret, secret, sizeof(pmd->secret));

	pmd->cfg.log2_ring_size = log2_ring_size;
	/* set in .dev_configure() */
	pmd->cfg.num_s2m_rings = 0;
	pmd->cfg.num_m2s_rings = 0;

	pmd->cfg.pkt_buffer_size = pkt_buffer_size;

	data = eth_dev->data;
	data->dev_private = pmd;
	data->numa_node = numa_node;
	data->mac_addrs = ether_addr;

	eth_dev->dev_ops = &ops;
	eth_dev->device = &vdev->device;
	eth_dev->rx_pkt_burst = eth_memif_rx;
	eth_dev->tx_pkt_burst = eth_memif_tx;

	/* set the flag; '&=' would clear every other dev_flags bit */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}
static int
memif_set_role(const char *key __rte_unused, const char *value,
	       void *extra_args)
{
	enum memif_role_t *role = (enum memif_role_t *)extra_args;

	if (strstr(value, "master") != NULL) {
		*role = MEMIF_ROLE_MASTER;
	} else if (strstr(value, "slave") != NULL) {
		*role = MEMIF_ROLE_SLAVE;
	} else {
		MIF_LOG(ERR, "Unknown role: %s.", value);
		return -EINVAL;
	}
	return 0;
}

static int
memif_set_zc(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint32_t *flags = (uint32_t *)extra_args;

	if (strstr(value, "yes") != NULL) {
		*flags |= ETH_MEMIF_FLAG_ZERO_COPY;
	} else if (strstr(value, "no") != NULL) {
		*flags &= ~ETH_MEMIF_FLAG_ZERO_COPY;
	} else {
		MIF_LOG(ERR, "Failed to parse zero-copy param: %s.", value);
		return -EINVAL;
	}
	return 0;
}

static int
memif_set_id(const char *key __rte_unused, const char *value, void *extra_args)
{
	memif_interface_id_t *id = (memif_interface_id_t *)extra_args;

	/* even if parsing fails, 0 is a valid id */
	*id = strtoul(value, NULL, 10);
	return 0;
}

static int
memif_set_bs(const char *key __rte_unused, const char *value, void *extra_args)
{
	unsigned long tmp;
	uint16_t *pkt_buffer_size = (uint16_t *)extra_args;

	tmp = strtoul(value, NULL, 10);
	if (tmp == 0 || tmp > 0xFFFF) {
		MIF_LOG(ERR, "Invalid buffer size: %s.", value);
		return -EINVAL;
	}
	*pkt_buffer_size = tmp;
	return 0;
}

static int
memif_set_rs(const char *key __rte_unused, const char *value, void *extra_args)
{
	unsigned long tmp;
	memif_log2_ring_size_t *log2_ring_size =
	    (memif_log2_ring_size_t *)extra_args;

	tmp = strtoul(value, NULL, 10);
	if (tmp == 0 || tmp > ETH_MEMIF_MAX_LOG2_RING_SIZE) {
		MIF_LOG(ERR, "Invalid ring size: %s (max %u).",
			value, ETH_MEMIF_MAX_LOG2_RING_SIZE);
		return -EINVAL;
	}
	*log2_ring_size = tmp;
	return 0;
}

/* check if directory exists and if we have permission to read/write */
static int
memif_check_socket_filename(const char *filename)
{
	char *dir = NULL, *tmp;
	uint32_t idx;
	int ret = 0;

	tmp = strrchr(filename, '/');
	if (tmp != NULL) {
		idx = tmp - filename;
		dir = rte_zmalloc("memif_tmp", sizeof(char) * (idx + 1), 0);
		if (dir == NULL) {
			MIF_LOG(ERR, "Failed to allocate memory.");
			return -1;
		}
		strlcpy(dir, filename, sizeof(char) * (idx + 1));
	}

	if (dir == NULL || (faccessat(-1, dir, F_OK | R_OK |
					W_OK, AT_EACCESS) < 0)) {
		MIF_LOG(ERR, "Invalid socket directory.");
		ret = -EINVAL;
	}

	if (dir != NULL)
		rte_free(dir);

	return ret;
}

static int
memif_set_socket_filename(const char *key __rte_unused, const char *value,
			  void *extra_args)
{
	const char **socket_filename = (const char **)extra_args;

	*socket_filename = value;
	return memif_check_socket_filename(*socket_filename);
}

static int
memif_set_mac(const char *key __rte_unused, const char *value, void *extra_args)
{
	struct rte_ether_addr *ether_addr = (struct rte_ether_addr *)extra_args;

	if (rte_ether_unformat_addr(value, ether_addr) < 0)
		MIF_LOG(WARNING, "Failed to parse mac '%s'.", value);
	return 0;
}
static int
memif_set_secret(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char **secret = (const char **)extra_args;

	*secret = value;
	return 0;
}
static int
rte_pmd_memif_probe(struct rte_vdev_device *vdev)
{
	RTE_BUILD_BUG_ON(sizeof(memif_msg_t) != 128);
	RTE_BUILD_BUG_ON(sizeof(memif_desc_t) != 16);
	int ret = 0;
	struct rte_kvargs *kvlist;
	const char *name = rte_vdev_device_name(vdev);
	enum memif_role_t role = MEMIF_ROLE_SLAVE;
	memif_interface_id_t id = 0;
	uint16_t pkt_buffer_size = ETH_MEMIF_DEFAULT_PKT_BUFFER_SIZE;
	memif_log2_ring_size_t log2_ring_size = ETH_MEMIF_DEFAULT_RING_SIZE;
	const char *socket_filename = ETH_MEMIF_DEFAULT_SOCKET_FILENAME;
	uint32_t flags = 0;
	const char *secret = NULL;
	struct rte_ether_addr *ether_addr = rte_zmalloc("",
		sizeof(struct rte_ether_addr), 0);
	struct rte_eth_dev *eth_dev;

	rte_eth_random_addr(ether_addr->addr_bytes);

	MIF_LOG(INFO, "Initialize MEMIF: %s.", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			MIF_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		eth_dev->dev_ops = &ops;
		eth_dev->device = &vdev->device;
		eth_dev->rx_pkt_burst = eth_memif_rx;
		eth_dev->tx_pkt_burst = eth_memif_tx;

		if (!rte_eal_primary_proc_alive(NULL)) {
			MIF_LOG(ERR, "Primary process is missing");
			return -1;
		}

		eth_dev->process_private = (struct pmd_process_private *)
			rte_zmalloc(name,
				sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);
		if (eth_dev->process_private == NULL) {
			MIF_LOG(ERR,
				"Failed to alloc memory for process private");
			return -1;
		}

		rte_eth_dev_probing_finish(eth_dev);

		return 0;
	}

	ret = rte_mp_action_register(MEMIF_MP_SEND_REGION, memif_mp_send_region);
	/*
	 * Primary process can continue probing, but secondary process won't
	 * be able to get memory regions information
	 */
	if (ret < 0 && rte_errno != EEXIST)
		MIF_LOG(WARNING, "Failed to register mp action callback: %s",
			strerror(rte_errno));

	kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_arguments);

	/* parse parameters */
	if (kvlist != NULL) {
		ret = rte_kvargs_process(kvlist, ETH_MEMIF_ROLE_ARG,
					 &memif_set_role, &role);
		if (ret < 0)
			goto exit;
		ret = rte_kvargs_process(kvlist, ETH_MEMIF_ID_ARG,
					 &memif_set_id, &id);
		if (ret < 0)
			goto exit;
		ret = rte_kvargs_process(kvlist, ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
					 &memif_set_bs, &pkt_buffer_size);
		if (ret < 0)
			goto exit;
		ret = rte_kvargs_process(kvlist, ETH_MEMIF_RING_SIZE_ARG,
					 &memif_set_rs, &log2_ring_size);
		if (ret < 0)
			goto exit;
		ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ARG,
					 &memif_set_socket_filename,
					 (void *)(&socket_filename));
		if (ret < 0)
			goto exit;
		ret = rte_kvargs_process(kvlist, ETH_MEMIF_MAC_ARG,
					 &memif_set_mac, ether_addr);
		if (ret < 0)
			goto exit;
		ret = rte_kvargs_process(kvlist, ETH_MEMIF_ZC_ARG,
					 &memif_set_zc, &flags);
		if (ret < 0)
			goto exit;
		ret = rte_kvargs_process(kvlist, ETH_MEMIF_SECRET_ARG,
					 &memif_set_secret, (void *)(&secret));
		if (ret < 0)
			goto exit;
	}

	/* create interface */
	ret = memif_create(vdev, role, id, flags, socket_filename,
			   log2_ring_size, pkt_buffer_size, secret, ether_addr);

exit:
	if (kvlist != NULL)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_memif_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (eth_dev == NULL)
		return 0;

	rte_eth_dev_close(eth_dev->data->port_id);

	return 0;
}

static struct rte_vdev_driver pmd_memif_drv = {
	.probe = rte_pmd_memif_probe,
	.remove = rte_pmd_memif_remove,
};

RTE_PMD_REGISTER_VDEV(net_memif, pmd_memif_drv);

RTE_PMD_REGISTER_PARAM_STRING(net_memif,
			      ETH_MEMIF_ID_ARG "=<int>"
			      ETH_MEMIF_ROLE_ARG "=master|slave"
			      ETH_MEMIF_PKT_BUFFER_SIZE_ARG "=<int>"
			      ETH_MEMIF_RING_SIZE_ARG "=<int>"
			      ETH_MEMIF_SOCKET_ARG "=<string>"
			      ETH_MEMIF_MAC_ARG "=xx:xx:xx:xx:xx:xx"
			      ETH_MEMIF_ZC_ARG "=yes|no"
			      ETH_MEMIF_SECRET_ARG "=<string>");

int memif_logtype;

RTE_INIT(memif_init_log)
{
	memif_logtype = rte_log_register("pmd.net.memif");
	if (memif_logtype >= 0)
		rte_log_set_level(memif_logtype, RTE_LOG_NOTICE);
}
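/*
 * Illustrative usage (not part of the original driver source): the devargs
 * registered above can be exercised from any DPDK application; e.g. two
 * testpmd instances talking over the default socket (core lists and file
 * prefixes are example values):
 *
 *   testpmd -l 0-1 --proc-type=primary --file-prefix=pmd1 \
 *       --vdev=net_memif,role=master -- -i
 *   testpmd -l 2-3 --proc-type=primary --file-prefix=pmd2 \
 *       --vdev=net_memif,role=slave -- -i
 */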