/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 * All rights reserved.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <strings.h>
#include <malloc.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_net.h>
#include <rte_bus_vmbus.h>
#include <rte_spinlock.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"

#define HN_NVS_SEND_MSG_SIZE \
	(sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))

#define HN_TXD_CACHE_SIZE	32 /* per cpu tx_descriptor pool cache */
#define HN_TXCOPY_THRESHOLD	512

#define HN_RXCOPY_THRESHOLD	256
#define HN_RXQ_EVENT_DEFAULT	2048

struct hn_rxinfo {
	uint32_t	vlan_info;
	uint32_t	csum_info;
	uint32_t	hash_info;
	uint32_t	hash_value;
};

#define HN_RXINFO_VLAN			0x0001
#define HN_RXINFO_CSUM			0x0002
#define HN_RXINFO_HASHINF		0x0004
#define HN_RXINFO_HASHVAL		0x0008
#define HN_RXINFO_ALL			\
	(HN_RXINFO_VLAN |		\
	 HN_RXINFO_CSUM |		\
	 HN_RXINFO_HASHINF |		\
	 HN_RXINFO_HASHVAL)

#define HN_NDIS_VLAN_INFO_INVALID	0xffffffff
#define HN_NDIS_RXCSUM_INFO_INVALID	0
#define HN_NDIS_HASH_INFO_INVALID	0

/*
 * Per-transmit bookkeeping.
 * A slot in the transmit ring (chim_index) is reserved for each transmit.
 *
 * There are two types of transmit:
 *   - buffered transmit where the chimney buffer is used and the RNDIS
 *     header is placed in the buffer. mbuf == NULL for this case.
 *
 *   - direct transmit where the RNDIS header is in the pre-allocated
 *     rndis_pkt; the mbuf is freed after the transmit completes.
 *
 * Descriptors come from a per-port pool which is used
 * to limit the number of outstanding requests per device.
 */
struct hn_txdesc {
	struct rte_mbuf *m;

	uint16_t	queue_id;
	uint16_t	chim_index;
	uint32_t	chim_size;
	uint32_t	data_size;
	uint32_t	packets;

	struct rndis_packet_msg *rndis_pkt;
};

#define HN_RNDIS_PKT_LEN				\
	(sizeof(struct rndis_packet_msg) +		\
	 RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))

/* Minimum space required for a packet */
#define HN_PKTSIZE_MIN(align) \
	RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)

#define DEFAULT_TX_FREE_THRESH 32U

static void
hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
{
	uint32_t s = m->pkt_len;
	const struct ether_addr *ea;

	if (s == 64) {
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		uint32_t bin;

		/* count zeros, and offset into correct bin */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
	} else {
		if (s < 64)
			stats->size_bins[0]++;
		else if (s < 1519)
			stats->size_bins[6]++;
		else
			stats->size_bins[7]++;
	}

	ea = rte_pktmbuf_mtod(m, const struct ether_addr *);
	if (is_multicast_ether_addr(ea)) {
		if (is_broadcast_ether_addr(ea))
			stats->broadcast++;
		else
			stats->multicast++;
	}
}

static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
{
	return pkt->pktinfooffset + pkt->pktinfolen;
}

static inline uint32_t
hn_rndis_pktmsg_offset(uint32_t ofs)
{
	return ofs - offsetof(struct rndis_packet_msg, dataoffset);
}

static void hn_txd_init(struct rte_mempool *mp __rte_unused,
			void *opaque, void *obj, unsigned int idx)
{
	struct hn_txdesc *txd = obj;
	struct rte_eth_dev *dev = opaque;
	struct rndis_packet_msg *pkt;

	memset(txd, 0, sizeof(*txd));
	txd->chim_index = idx;

	pkt = rte_malloc_socket("RNDIS_TX", HN_RNDIS_PKT_LEN,
				rte_align32pow2(HN_RNDIS_PKT_LEN),
				dev->device->numa_node);
	if (!pkt)
		rte_exit(EXIT_FAILURE, "can not allocate RNDIS header");

	txd->rndis_pkt = pkt;
}

/*
 * Unlike Linux and FreeBSD, this driver uses a mempool
 * to limit outstanding transmits and reserve buffers
 */
int
hn_tx_pool_init(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	char name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(name, sizeof(name),
		 "hn_txd_%u", dev->data->port_id);

	PMD_INIT_LOG(DEBUG, "create a TX send pool %s n=%u size=%zu socket=%d",
		     name, hv->chim_cnt, sizeof(struct hn_txdesc),
		     dev->device->numa_node);

	mp = rte_mempool_create(name, hv->chim_cnt, sizeof(struct hn_txdesc),
				HN_TXD_CACHE_SIZE, 0,
				NULL, NULL,
				hn_txd_init, dev,
				dev->device->numa_node, 0);
	if (!mp) {
		PMD_DRV_LOG(ERR,
			    "mempool %s create failed: %d", name, rte_errno);
		return -rte_errno;
	}

	hv->tx_pool = mp;
	return 0;
}

void
hn_tx_pool_uninit(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	if (hv->tx_pool) {
		rte_mempool_free(hv->tx_pool);
		hv->tx_pool = NULL;
	}
}

static void hn_reset_txagg(struct hn_tx_queue *txq)
{
	txq->agg_szleft = txq->agg_szmax;
	txq->agg_pktleft = txq->agg_pktmax;
	txq->agg_txd = NULL;
	txq->agg_prevpkt = NULL;
}

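/*
 * Set up a transmit queue.
 * Each queue maps onto one VMBus channel. The free threshold controls how
 * often transmit completions are polled, and the aggregation limits come
 * from the negotiated chimney buffer and RNDIS parameters.
 */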
int
hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
		      uint16_t queue_idx, uint16_t nb_desc __rte_unused,
		      unsigned int socket_id,
		      const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct hn_tx_queue *txq;
	uint32_t tx_free_thresh;
	int err;

	PMD_INIT_FUNC_TRACE();

	txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!txq)
		return -ENOMEM;

	txq->hv = hv;
	txq->chan = hv->channels[queue_idx];
	txq->port_id = dev->data->port_id;
	txq->queue_id = queue_idx;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh = RTE_MIN(hv->chim_cnt / 4,
					 DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= hv->chim_cnt - 3)
		tx_free_thresh = hv->chim_cnt - 3;

	txq->free_thresh = tx_free_thresh;

	txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
	txq->agg_pktmax = hv->rndis_agg_pkts;
	txq->agg_align = hv->rndis_agg_align;

	hn_reset_txagg(txq);

	err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc,
				   socket_id, tx_conf);
	if (err) {
		rte_free(txq);
		return err;
	}

	dev->data->tx_queues[queue_idx] = txq;
	return 0;
}

void
hn_dev_tx_queue_release(void *arg)
{
	struct hn_tx_queue *txq = arg;
	struct hn_txdesc *txd;

	PMD_INIT_FUNC_TRACE();

	if (!txq)
		return;

	/* If any pending data is still present just drop it */
	txd = txq->agg_txd;
	if (txd)
		rte_mempool_put(txq->hv->tx_pool, txd);

	rte_free(txq);
}

static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
		      unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
{
	struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
	struct hn_tx_queue *txq;

	/* Control packets are sent with xactid == 0 */
	if (!txd)
		return;

	txq = dev->data->tx_queues[queue_id];
	if (likely(ack->status == NVS_STATUS_OK)) {
		PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
			   txq->port_id, txq->queue_id, txd->chim_index,
			   txd->packets, txd->data_size);
		txq->stats.bytes += txd->data_size;
		txq->stats.packets += txd->packets;
	} else {
		PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
			   txq->port_id, txq->queue_id, txd->chim_index,
			   ack->status);
		++txq->stats.errors;
	}

	rte_pktmbuf_free(txd->m);

	rte_mempool_put(txq->hv->tx_pool, txd);
}

/* Handle transmit completion events */
static void
hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
		   const struct vmbus_chanpkt_hdr *pkt,
		   const void *data)
{
	const struct hn_nvs_hdr *hdr = data;

	switch (hdr->type) {
	case NVS_TYPE_RNDIS_ACK:
		hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
		break;

	default:
		PMD_TX_LOG(NOTICE,
			   "unexpected send completion type %u",
			   hdr->type);
	}
}

/* Parse per-packet info (metadata) */
static int
hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
		struct hn_rxinfo *info)
{
	const struct rndis_pktinfo *pi = info_data;
	uint32_t mask = 0;

	while (info_dlen != 0) {
		const void *data;
		uint32_t dlen;

		if (unlikely(info_dlen < sizeof(*pi)))
			return -EINVAL;

		if (unlikely(info_dlen < pi->size))
			return -EINVAL;
		info_dlen -= pi->size;

		if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
			return -EINVAL;
		if (unlikely(pi->size < pi->offset))
			return -EINVAL;

		dlen = pi->size - pi->offset;
		data = pi->data;

		switch (pi->type) {
		case NDIS_PKTINFO_TYPE_VLAN:
			if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
				return -EINVAL;
			info->vlan_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_VLAN;
			break;

		case NDIS_PKTINFO_TYPE_CSUM:
			if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
				return -EINVAL;
			info->csum_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_CSUM;
			break;

		case NDIS_PKTINFO_TYPE_HASHVAL:
			if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
				return -EINVAL;
			info->hash_value = *((const uint32_t *)data);
			mask |= HN_RXINFO_HASHVAL;
			break;

		case NDIS_PKTINFO_TYPE_HASHINF:
			if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
				return -EINVAL;
			info->hash_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_HASHINF;
			break;

		default:
			goto next;
		}

		if (mask == HN_RXINFO_ALL)
			break; /* All found; done */
next:
		pi = (const struct rndis_pktinfo *)
			((const uint8_t *)pi + pi->size);
	}

	/*
	 * Final fixup.
	 * - If there is no hash value, invalidate the hash info.
	 */
	if (!(mask & HN_RXINFO_HASHVAL))
		info->hash_info = HN_NDIS_HASH_INFO_INVALID;
	return 0;
}

/*
 * Ack the consumed RXBUF associated w/ this channel packet,
 * so that this RXBUF can be recycled by the hypervisor.
 */
static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
{
	struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
	struct hn_data *hv = rxb->hv;

	if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
		--hv->rxbuf_outstanding;
	}
}

static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
{
	hn_rx_buf_release(opaque);
}

static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
					    const struct vmbus_chanpkt_rxbuf *pkt)
{
	struct hn_rx_bufinfo *rxb;

	rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
	rxb->chan = rxq->chan;
	rxb->xactid = pkt->hdr.xactid;
	rxb->hv = rxq->hv;

	rxb->shinfo.free_cb = hn_rx_buf_free_cb;
	rxb->shinfo.fcb_opaque = rxb;
	rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);
	return rxb;
}

static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
		     uint8_t *data, unsigned int headroom, unsigned int dlen,
		     const struct hn_rxinfo *info)
{
	struct hn_data *hv = rxq->hv;
	struct rte_mbuf *m;

	m = rte_pktmbuf_alloc(rxq->mb_pool);
	if (unlikely(!m)) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[rxq->port_id];

		dev->data->rx_mbuf_alloc_failed++;
		return;
	}

	/*
	 * For large packets, avoid copy if possible but need to keep
	 * some space available in receive area for later packets.
	 */
	if (dlen >= HN_RXCOPY_THRESHOLD &&
	    hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
		struct rte_mbuf_ext_shared_info *shinfo;
		const void *rxbuf;
		rte_iova_t iova;

		/*
		 * Build an external mbuf that points to the receive area.
		 * Use refcount to handle multiple packets in the same
		 * receive buffer section.
		 */
		rxbuf = hv->rxbuf_res->addr;
		iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
		shinfo = &rxb->shinfo;

		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
			++hv->rxbuf_outstanding;

		rte_pktmbuf_attach_extbuf(m, data, iova,
					  dlen + headroom, shinfo);
		m->data_off = headroom;
	} else {
		/* Mbufs in pool must be large enough to hold small packets */
		if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
			rte_pktmbuf_free_seg(m);
			++rxq->stats.errors;
			return;
		}
		rte_memcpy(rte_pktmbuf_mtod(m, void *),
			   data + headroom, dlen);
	}

	m->port = rxq->port_id;
	m->pkt_len = dlen;
	m->data_len = dlen;
	m->packet_type = rte_net_get_ptype(m, NULL,
					   RTE_PTYPE_L2_MASK |
					   RTE_PTYPE_L3_MASK |
					   RTE_PTYPE_L4_MASK);

	if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
		m->vlan_tci = info->vlan_info;
		m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;

		/* NDIS always strips tag, put it back if necessary */
		if (!hv->vlan_strip && rte_vlan_insert(&m)) {
			PMD_DRV_LOG(DEBUG, "vlan insert failed");
			++rxq->stats.errors;
			rte_pktmbuf_free(m);
			return;
		}
	}

	if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
		if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
			m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

		if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
				       | NDIS_RXCSUM_INFO_TCPCS_OK))
			m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
					    | NDIS_RXCSUM_INFO_UDPCS_FAILED))
			m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
		m->ol_flags |= PKT_RX_RSS_HASH;
		m->hash.rss = info->hash_value;
	}

	PMD_RX_LOG(DEBUG,
		   "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
		   rxq->port_id, rxq->queue_id, rxb->xactid,
		   m->pkt_len, m->packet_type, m->ol_flags);

	++rxq->stats.packets;
	rxq->stats.bytes += m->pkt_len;
	hn_update_packet_stats(&rxq->stats, m);

	if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
		++rxq->stats.ring_full;
		rte_pktmbuf_free(m);
	}
}

static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
			     struct hn_rx_bufinfo *rxb,
			     void *data, uint32_t dlen)
{
	unsigned int data_off, data_len, pktinfo_off, pktinfo_len;
	const struct rndis_packet_msg *pkt = data;
	struct hn_rxinfo info = {
		.vlan_info = HN_NDIS_VLAN_INFO_INVALID,
		.csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
		.hash_info = HN_NDIS_HASH_INFO_INVALID,
	};
	int err;

	hn_rndis_dump(pkt);

	if (unlikely(dlen < sizeof(*pkt)))
		goto error;

	if (unlikely(dlen < pkt->len))
		goto error; /* truncated RNDIS from host */

	if (unlikely(pkt->len < pkt->datalen
		     + pkt->oobdatalen + pkt->pktinfolen))
		goto error;

	if (unlikely(pkt->datalen == 0))
		goto error;

	/* Check offsets. */
	if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
		goto error;

	if (likely(pkt->pktinfooffset > 0) &&
	    unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
		     (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
		goto error;

	data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
	data_len = pkt->datalen;
	pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
	pktinfo_len = pkt->pktinfolen;

	if (likely(pktinfo_len > 0)) {
		err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
				      pktinfo_len, &info);
		if (err)
			goto error;
	}

	if (unlikely(data_off + data_len > pkt->len))
		goto error;

	if (unlikely(data_len < ETHER_HDR_LEN))
		goto error;

	hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
	return;
error:
	++rxq->stats.errors;
}

static void
hn_rndis_receive(struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
		 struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
{
	const struct rndis_msghdr *hdr = buf;

	switch (hdr->type) {
	case RNDIS_PACKET_MSG:
		if (dev->data->dev_started)
			hn_rndis_rx_data(rxq, rxb, buf, len);
		break;

	case RNDIS_INDICATE_STATUS_MSG:
		hn_rndis_link_status(dev, buf);
		break;

	case RNDIS_INITIALIZE_CMPLT:
	case RNDIS_QUERY_CMPLT:
	case RNDIS_SET_CMPLT:
		hn_rndis_receive_response(rxq->hv, buf, len);
		break;

	default:
		PMD_DRV_LOG(NOTICE,
			    "unexpected RNDIS message (type %#x len %u)",
			    hdr->type, len);
		break;
	}
}

static void
hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
		    struct hn_data *hv,
		    struct hn_rx_queue *rxq,
		    const struct vmbus_chanpkt_hdr *hdr,
		    const void *buf)
{
	const struct vmbus_chanpkt_rxbuf *pkt;
	const struct hn_nvs_hdr *nvs_hdr = buf;
	uint32_t rxbuf_sz = hv->rxbuf_res->len;
	char *rxbuf = hv->rxbuf_res->addr;
	unsigned int i, hlen, count;
	struct hn_rx_bufinfo *rxb;

	/* At a minimum we need the type header */
	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
		PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
		return;
	}

	/* Make sure that this is a RNDIS message. */
	if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
		PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
			   nvs_hdr->type);
		return;
	}

	hlen = vmbus_chanpkt_getlen(hdr->hlen);
	if (unlikely(hlen < sizeof(*pkt))) {
		PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
		return;
	}

	pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
	if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
		PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
			   pkt->rxbuf_id);
		return;
	}

	count = pkt->rxbuf_cnt;
	if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
				     rxbuf[count]))) {
		PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
		return;
	}

	if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
		PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
			   pkt->hdr.xactid);
		return;
	}

	/* Setup receive buffer info to allow for callback */
	rxb = hn_rx_buf_init(rxq, pkt);

	/* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
	for (i = 0; i < count; ++i) {
		unsigned int ofs, len;

		ofs = pkt->rxbuf[i].ofs;
		len = pkt->rxbuf[i].len;

		if (unlikely(ofs + len > rxbuf_sz)) {
			PMD_RX_LOG(ERR,
				   "%uth RNDIS msg overflow ofs %u, len %u",
				   i, ofs, len);
			continue;
		}

		if (unlikely(len == 0)) {
			PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
			continue;
		}

		hn_rndis_receive(dev, rxq, rxb,
				 rxbuf + ofs, len);
	}

	/* Send ACK now if external mbuf not used */
	hn_rx_buf_release(rxb);
}

/*
 * Called when NVS inband events are received.
 * Send up a two part message with port_id and the NVS message
 * to the pipe to the netvsc-vf-event control thread.
 */
static void hn_nvs_handle_notify(struct rte_eth_dev *dev,
				 const struct vmbus_chanpkt_hdr *pkt,
				 const void *data)
{
	const struct hn_nvs_hdr *hdr = data;

	switch (hdr->type) {
	case NVS_TYPE_TXTBL_NOTE:
		/* Transmit indirection table has locking problems
		 * in DPDK and therefore not implemented
		 */
		PMD_DRV_LOG(DEBUG, "host notify of transmit indirection table");
		break;

	case NVS_TYPE_VFASSOC_NOTE:
		hn_nvs_handle_vfassoc(dev, pkt, data);
		break;

	default:
		PMD_DRV_LOG(INFO,
			    "got notify, nvs type %u", hdr->type);
	}
}

struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id)
{
	struct hn_rx_queue *rxq;

	rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq)
		return NULL;

	rxq->hv = hv;
	rxq->chan = hv->channels[queue_id];
	rte_spinlock_init(&rxq->ring_lock);
	rxq->port_id = hv->port_id;
	rxq->queue_id = queue_id;
	rxq->event_sz = HN_RXQ_EVENT_DEFAULT;
	rxq->event_buf = rte_malloc_socket("HN_EVENTS", HN_RXQ_EVENT_DEFAULT,
					   RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->event_buf) {
		rte_free(rxq);
		return NULL;
	}

	return rxq;
}

int
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
		      uint16_t queue_idx, uint16_t nb_desc,
		      unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf,
		      struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	char ring_name[RTE_RING_NAMESIZE];
	struct hn_rx_queue *rxq;
	unsigned int count;
	int error = -ENOMEM;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx == 0) {
		rxq = hv->primary;
	} else {
		rxq = hn_rx_queue_alloc(hv, queue_idx,
					socket_id);
		if (!rxq)
			return -ENOMEM;
	}

	rxq->mb_pool = mp;
	count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues;
	if (nb_desc == 0 || nb_desc > count)
		nb_desc = count;

	/*
	 * Staging ring from receive event logic to rx_pkts.
	 * rx_pkts assumes the caller handles any multi-thread issues;
	 * the event logic has its own locking.
	 */
	snprintf(ring_name, sizeof(ring_name),
		 "hn_rx_%u_%u", dev->data->port_id, queue_idx);
	rxq->rx_ring = rte_ring_create(ring_name,
				       rte_align32pow2(nb_desc),
				       socket_id, 0);
	if (!rxq->rx_ring)
		goto fail;

	error = hn_vf_rx_queue_setup(dev, queue_idx, nb_desc,
				     socket_id, rx_conf, mp);
	if (error)
		goto fail;

	dev->data->rx_queues[queue_idx] = rxq;
	return 0;

fail:
	rte_ring_free(rxq->rx_ring);
	rte_free(rxq->event_buf);
	rte_free(rxq);
	return error;
}

void
hn_dev_rx_queue_release(void *arg)
{
	struct hn_rx_queue *rxq = arg;

	PMD_INIT_FUNC_TRACE();

	if (!rxq)
		return;

	rte_ring_free(rxq->rx_ring);
	rxq->rx_ring = NULL;
	rxq->mb_pool = NULL;

	hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);

	/* Keep primary queue to allow for control operations */
	if (rxq != rxq->hv->primary) {
		rte_free(rxq->event_buf);
		rte_free(rxq);
	}
}

int
hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
{
	struct hn_tx_queue *txq = arg;

	return hn_process_events(txq->hv, txq->queue_id, free_cnt);
}

/*
 * Process pending events on the channel.
 * Called from both Rx queue poll and Tx cleanup
 */
uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
			   uint32_t tx_limit)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
	struct hn_rx_queue *rxq;
	uint32_t bytes_read = 0;
	uint32_t tx_done = 0;
	int ret = 0;

	rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];

	/* If no pending data then nothing to do */
	if (rte_vmbus_chan_rx_empty(rxq->chan))
		return 0;

	/*
	 * Since the channel is shared between the Rx and Tx queues, a lock is
	 * needed because DPDK does not force the same CPU to be used for Rx and Tx.
	 */
	if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
		return 0;

	for (;;) {
		const struct vmbus_chanpkt_hdr *pkt;
		uint32_t len = rxq->event_sz;
		const void *data;

retry:
		ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
		if (ret == -EAGAIN)
			break;	/* ring is empty */

		if (unlikely(ret == -ENOBUFS)) {
			/* event buffer not large enough to read ring */

			PMD_DRV_LOG(DEBUG,
				    "event buffer expansion (need %u)", len);
			rxq->event_sz = len + len / 4;
			rxq->event_buf = rte_realloc(rxq->event_buf, rxq->event_sz,
						     RTE_CACHE_LINE_SIZE);
			if (rxq->event_buf)
				goto retry;
			/* out of memory, no more events now */
			rxq->event_sz = 0;
			break;
		}

		if (unlikely(ret <= 0)) {
			/* This indicates a failure to communicate (or worse) */
			rte_exit(EXIT_FAILURE,
				 "vmbus ring buffer error: %d", ret);
		}

		bytes_read += ret;
		pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
		data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);

		switch (pkt->type) {
		case VMBUS_CHANPKT_TYPE_COMP:
			++tx_done;
			hn_nvs_handle_comp(dev, queue_id, pkt, data);
			break;

		case VMBUS_CHANPKT_TYPE_RXBUF:
			hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
			break;

		case VMBUS_CHANPKT_TYPE_INBAND:
			hn_nvs_handle_notify(dev, pkt, data);
			break;

		default:
			PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
			break;
		}

		if (tx_limit && tx_done >= tx_limit)
			break;

		if (rxq->rx_ring && rte_ring_full(rxq->rx_ring))
			break;
	}

	if (bytes_read > 0)
		rte_vmbus_chan_signal_read(rxq->chan, bytes_read);

	rte_spinlock_unlock(&rxq->ring_lock);

	return tx_done;
}

static void hn_append_to_chim(struct hn_tx_queue *txq,
			      struct rndis_packet_msg *pkt,
			      const struct rte_mbuf *m)
{
	struct hn_txdesc *txd = txq->agg_txd;
	uint8_t *buf = (uint8_t *)pkt;
	unsigned int data_offs;

	hn_rndis_dump(pkt);

	data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
	txd->chim_size += pkt->len;
	txd->data_size += m->pkt_len;
	++txd->packets;
	hn_update_packet_stats(&txq->stats, m);

	for (; m; m = m->next) {
		uint16_t len = rte_pktmbuf_data_len(m);

		rte_memcpy(buf + data_offs,
			   rte_pktmbuf_mtod(m, const char *), len);
		data_offs += len;
	}
}

/*
 * Send pending aggregated data in the chimney buffer (if any).
 * Returns an error if the send failed because the channel ring buffer
 * was full.
 */
static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
{
	struct hn_txdesc *txd = txq->agg_txd;
	struct hn_nvs_rndis rndis;
	int ret;

	if (!txd)
		return 0;

	rndis = (struct hn_nvs_rndis) {
		.type = NVS_TYPE_RNDIS,
		.rndis_mtype = NVS_RNDIS_MTYPE_DATA,
		.chim_idx = txd->chim_index,
		.chim_sz = txd->chim_size,
	};

	PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
		   txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);

	ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
			  &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);

	if (likely(ret == 0))
		hn_reset_txagg(txq);
	else
		PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d",
			   txq->port_id, txq->queue_id, ret);

	return ret;
}

static struct hn_txdesc *hn_new_txd(struct hn_data *hv,
				    struct hn_tx_queue *txq)
{
	struct hn_txdesc *txd;

	if (rte_mempool_get(hv->tx_pool, (void **)&txd)) {
		++txq->stats.ring_full;
		PMD_TX_LOG(DEBUG, "tx pool exhausted!");
		return NULL;
	}

	txd->m = NULL;
	txd->queue_id = txq->queue_id;
	txd->packets = 0;
	txd->data_size = 0;
	txd->chim_size = 0;

	return txd;
}

static void *
hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize)
{
	struct hn_txdesc *agg_txd = txq->agg_txd;
	struct rndis_packet_msg *pkt;
	void *chim;

	if (agg_txd) {
		unsigned int padding, olen;

		/*
		 * Update the previous RNDIS packet's total length;
		 * it can be increased due to the mandatory alignment
		 * padding for this RNDIS packet. And update the
		 * aggregating txdesc's chimney sending buffer size
		 * accordingly.
		 *
		 * Zero-out the padding, as required by the RNDIS spec.
		 */
		pkt = txq->agg_prevpkt;
		olen = pkt->len;
		padding = RTE_ALIGN(olen, txq->agg_align) - olen;
		if (padding > 0) {
			agg_txd->chim_size += padding;
			pkt->len += padding;
			memset((uint8_t *)pkt + olen, 0, padding);
		}

		chim = (uint8_t *)pkt + pkt->len;

		txq->agg_pktleft--;
		txq->agg_szleft -= pktsize;
		if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
			/*
			 * Probably can't aggregate more packets,
			 * flush this aggregating txdesc proactively.
			 */
			txq->agg_pktleft = 0;
		}
	} else {
		agg_txd = hn_new_txd(hv, txq);
		if (!agg_txd)
			return NULL;

		chim = (uint8_t *)hv->chim_res->addr
			+ agg_txd->chim_index * hv->chim_szmax;

		txq->agg_txd = agg_txd;
		txq->agg_pktleft = txq->agg_pktmax - 1;
		txq->agg_szleft = txq->agg_szmax - pktsize;
	}
	txq->agg_prevpkt = chim;

	return chim;
}

static inline void *
hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
			uint32_t pi_dlen, uint32_t pi_type)
{
	const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
	struct rndis_pktinfo *pi;

	/*
	 * Per-packet-info does not move; it only grows.
	 *
	 * NOTE:
	 * pktinfooffset in this phase counts from the beginning
	 * of rndis_packet_msg.
	 */
	pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));

	pkt->pktinfolen += pi_size;

	pi->size = pi_size;
	pi->type = pi_type;
	pi->offset = RNDIS_PKTINFO_OFFSET;

	return pi->data;
}

/* Put RNDIS header and packet info on packet */
static void hn_encap(struct rndis_packet_msg *pkt,
		     uint16_t queue_id,
		     const struct rte_mbuf *m)
{
	unsigned int hlen = m->l2_len + m->l3_len;
	uint32_t *pi_data;
	uint32_t pkt_hlen;

	pkt->type = RNDIS_PACKET_MSG;
	pkt->len = m->pkt_len;
	pkt->dataoffset = 0;
	pkt->datalen = m->pkt_len;
	pkt->oobdataoffset = 0;
	pkt->oobdatalen = 0;
	pkt->oobdataelements = 0;
	pkt->pktinfooffset = sizeof(*pkt);
	pkt->pktinfolen = 0;
	pkt->vchandle = 0;
	pkt->reserved = 0;

	/*
	 * Set the hash value for this packet to the queue_id so that the
	 * TX done event for this packet arrives on the right channel.
	 */
	pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
					  NDIS_PKTINFO_TYPE_HASHVAL);
	*pi_data = queue_id;

	if (m->ol_flags & PKT_TX_VLAN_PKT) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_VLAN);
		*pi_data = m->vlan_tci;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_LSO);

		if (m->ol_flags & PKT_TX_IPV6) {
			*pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
							   m->tso_segsz);
		} else {
			*pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
							   m->tso_segsz);
		}
	} else if (m->ol_flags &
		   (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_CSUM);
		*pi_data = 0;

		if (m->ol_flags & PKT_TX_IPV6)
			*pi_data |= NDIS_TXCSUM_INFO_IPV6;
		if (m->ol_flags & PKT_TX_IPV4) {
			*pi_data |= NDIS_TXCSUM_INFO_IPV4;

			if (m->ol_flags & PKT_TX_IP_CKSUM)
				*pi_data |= NDIS_TXCSUM_INFO_IPCS;
		}

		if (m->ol_flags & PKT_TX_TCP_CKSUM)
			*pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
		else if (m->ol_flags & PKT_TX_UDP_CKSUM)
			*pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
	}

	pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
	/* Fixup RNDIS packet message total length */
	pkt->len += pkt_hlen;

	/* Convert RNDIS packet message offsets */
	pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
	pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
}

/* How many scatter-gather list elements are needed */
static unsigned int hn_get_slots(const struct rte_mbuf *m)
{
	unsigned int slots = 1; /* for RNDIS header */

	while (m) {
		unsigned int size = rte_pktmbuf_data_len(m);
		unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;

		slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
		m = m->next;
	}

	return slots;
}

/* Build scatter-gather list from chained mbuf */
static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
			       const struct rte_mbuf *m)
{
	unsigned int segs = 0;

	while (m) {
		rte_iova_t addr = rte_mbuf_data_iova(m);
		unsigned int page = addr / PAGE_SIZE;
		unsigned int offset = addr & PAGE_MASK;
		unsigned int len = rte_pktmbuf_data_len(m);

		while (len > 0) {
			unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);

			sg[segs].page = page;
			sg[segs].ofs = offset;
			sg[segs].len = bytes;
			segs++;

			++page;
			offset = 0;
			len -= bytes;
		}
		m = m->next;
	}

	return segs;
}

/* Transmit directly from mbuf */
static int hn_xmit_sg(struct hn_tx_queue *txq,
		      const struct hn_txdesc *txd, const struct rte_mbuf *m,
		      bool *need_sig)
{
	struct vmbus_gpa sg[hn_get_slots(m)];
	struct hn_nvs_rndis nvs_rndis = {
		.type = NVS_TYPE_RNDIS,
		.rndis_mtype = NVS_RNDIS_MTYPE_DATA,
		.chim_sz = txd->chim_size,
	};
	rte_iova_t addr;
	unsigned int segs;

	/* attach aggregation data if present */
	if (txd->chim_size > 0)
		nvs_rndis.chim_idx = txd->chim_index;
	else
		nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;

	hn_rndis_dump(txd->rndis_pkt);

	/* pass IOVA of rndis header in first segment */
	addr = rte_malloc_virt2iova(txd->rndis_pkt);
	if (unlikely(addr == RTE_BAD_IOVA)) {
		PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
		return -EINVAL;
	}

	sg[0].page = addr / PAGE_SIZE;
	sg[0].ofs = addr & PAGE_MASK;
	sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
	segs = 1;

	hn_update_packet_stats(&txq->stats, m);

	segs += hn_fill_sg(sg + 1, m);

	PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
		   txq->port_id, txq->queue_id, txd->chim_index,
		   segs, nvs_rndis.chim_sz);

	return hn_nvs_send_sglist(txq->chan, sg, segs,
				  &nvs_rndis, sizeof(nvs_rndis),
				  (uintptr_t)txd, need_sig);
}

uint16_t
hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct hn_tx_queue *txq = ptxq;
	uint16_t queue_id = txq->queue_id;
	struct hn_data *hv = txq->hv;
	struct rte_eth_dev *vf_dev;
	bool need_sig = false;
	uint16_t nb_tx;
	int ret;

	if (unlikely(hv->closed))
		return 0;

	/* Transmit over VF if present and up */
	vf_dev = hn_get_vf_dev(hv);

	if (vf_dev && vf_dev->data->dev_started) {
		void *sub_q = vf_dev->data->tx_queues[queue_id];

		return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
	}

	if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
		hn_process_events(hv, txq->queue_id, 0);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];
		uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
		struct rndis_packet_msg *pkt;

		/* For small packets aggregate them in chimney buffer */
		if (m->pkt_len < HN_TXCOPY_THRESHOLD &&
		    pkt_size <= txq->agg_szmax) {
			/* If this packet will not fit, then flush */
			if (txq->agg_pktleft == 0 ||
			    RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
				if (hn_flush_txagg(txq, &need_sig))
					goto fail;
			}

			pkt = hn_try_txagg(hv, txq, pkt_size);
			if (unlikely(!pkt))
				break;

			hn_encap(pkt, queue_id, m);
			hn_append_to_chim(txq, pkt, m);

			rte_pktmbuf_free(m);

			/* if buffer is full, flush */
			if (txq->agg_pktleft == 0 &&
			    hn_flush_txagg(txq, &need_sig))
				goto fail;
		} else {
			struct hn_txdesc *txd;

			/* can send chimney data and large packet at once */
			txd = txq->agg_txd;
			if (txd) {
				hn_reset_txagg(txq);
			} else {
				txd = hn_new_txd(hv, txq);
				if (unlikely(!txd))
					break;
			}

			pkt = txd->rndis_pkt;
			txd->m = m;
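			/* The mbuf is kept until the host acknowledges the
			 * send; it is freed in hn_nvs_send_completed().
			 */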
			txd->data_size += m->pkt_len;
			++txd->packets;

			hn_encap(pkt, queue_id, m);

			ret = hn_xmit_sg(txq, txd, m, &need_sig);
			if (unlikely(ret != 0)) {
				PMD_TX_LOG(NOTICE, "sg send failed: %d", ret);
				++txq->stats.errors;
				rte_mempool_put(hv->tx_pool, txd);
				goto fail;
			}
		}
	}

	/* If a partial buffer is left, then try to send it.
	 * If that fails, then reuse it on the next send.
	 */
	hn_flush_txagg(txq, &need_sig);

fail:
	if (need_sig)
		rte_vmbus_chan_signal_tx(txq->chan);

	return nb_tx;
}

uint16_t
hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hn_rx_queue *rxq = prxq;
	struct hn_data *hv = rxq->hv;
	struct rte_eth_dev *vf_dev;
	uint16_t nb_rcv;

	if (unlikely(hv->closed))
		return 0;

	/* Receive from VF if present and up */
	vf_dev = hn_get_vf_dev(hv);

	if (vf_dev && vf_dev->data->dev_started) {
		/* Normally, with SR-IOV the ring buffer will be empty */
		hn_process_events(hv, rxq->queue_id, 0);

		/* Get some mbufs off of the staging ring */
		nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
						   (void **)rx_pkts,
						   nb_pkts / 2, NULL);
		/* And the rest off of the VF */
		nb_rcv += rte_eth_rx_burst(vf_dev->data->port_id,
					   rxq->queue_id,
					   rx_pkts + nb_rcv, nb_pkts - nb_rcv);
	} else {
		/* If receive ring is not full then get more */
		if (rte_ring_count(rxq->rx_ring) < nb_pkts)
			hn_process_events(hv, rxq->queue_id, 0);

		nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
						   (void **)rx_pkts,
						   nb_pkts, NULL);
	}

	return nb_rcv;
}