/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 * All rights reserved.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <strings.h>
#include <malloc.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_net.h>
#include <rte_bus_vmbus.h>
#include <rte_spinlock.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"

#define HN_NVS_SEND_MSG_SIZE \
	(sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))

#define HN_TXD_CACHE_SIZE	32 /* per-cpu tx descriptor pool cache */
#define HN_TXCOPY_THRESHOLD	512

#define HN_RXCOPY_THRESHOLD	256
#define HN_RXQ_EVENT_DEFAULT	2048

struct hn_rxinfo {
	uint32_t vlan_info;
	uint32_t csum_info;
	uint32_t hash_info;
	uint32_t hash_value;
};

#define HN_RXINFO_VLAN		0x0001
#define HN_RXINFO_CSUM		0x0002
#define HN_RXINFO_HASHINF	0x0004
#define HN_RXINFO_HASHVAL	0x0008
#define HN_RXINFO_ALL		\
	(HN_RXINFO_VLAN |	\
	 HN_RXINFO_CSUM |	\
	 HN_RXINFO_HASHINF |	\
	 HN_RXINFO_HASHVAL)

#define HN_NDIS_VLAN_INFO_INVALID	0xffffffff
#define HN_NDIS_RXCSUM_INFO_INVALID	0
#define HN_NDIS_HASH_INFO_INVALID	0

/*
 * Per-transmit bookkeeping.
 * A slot in the transmit ring (chim_index) is reserved for each transmit.
 *
 * There are two types of transmit:
 *   - buffered transmit where the chimney buffer is used and the RNDIS
 *     header is placed in the buffer. mbuf == NULL for this case.
 *
 *   - direct transmit where the RNDIS header is in rndis_pkt and the
 *     mbuf is freed after transmit.
 *
 * Descriptors come from a per-port pool which is used
 * to limit the number of outstanding requests per device.
 */
struct hn_txdesc {
	struct rte_mbuf *m;

	uint16_t queue_id;
	uint16_t chim_index;
	uint32_t chim_size;
	uint32_t data_size;
	uint32_t packets;

	struct rndis_packet_msg *rndis_pkt;
};

#define HN_RNDIS_PKT_LEN				\
	(sizeof(struct rndis_packet_msg) +		\
	 RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))

/* Minimum space required for a packet */
#define HN_PKTSIZE_MIN(align) \
	RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)

#define DEFAULT_TX_FREE_THRESH 32U

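/* Update extended per-queue statistics (packet size bins and
 * broadcast/multicast counts) for one transmitted or received mbuf.
 */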
static void
hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
{
	uint32_t s = m->pkt_len;
	const struct ether_addr *ea;

	if (s == 64) {
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		uint32_t bin;

		/* count zeros, and offset into correct bin */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
	} else {
		if (s < 64)
			stats->size_bins[0]++;
		else if (s < 1519)
			stats->size_bins[6]++;
		else if (s >= 1519)
			stats->size_bins[7]++;
	}

	ea = rte_pktmbuf_mtod(m, const struct ether_addr *);
	if (is_multicast_ether_addr(ea)) {
		if (is_broadcast_ether_addr(ea))
			stats->broadcast++;
		else
			stats->multicast++;
	}
}

static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
{
	return pkt->pktinfooffset + pkt->pktinfolen;
}

static inline uint32_t
hn_rndis_pktmsg_offset(uint32_t ofs)
{
	return ofs - offsetof(struct rndis_packet_msg, dataoffset);
}

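/* Mempool constructor callback: give each transmit descriptor its chimney
 * slot index and a pre-allocated buffer for the RNDIS header.
 */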
static void hn_txd_init(struct rte_mempool *mp __rte_unused,
			void *opaque, void *obj, unsigned int idx)
{
	struct hn_txdesc *txd = obj;
	struct rte_eth_dev *dev = opaque;
	struct rndis_packet_msg *pkt;

	memset(txd, 0, sizeof(*txd));
	txd->chim_index = idx;

	pkt = rte_malloc_socket("RNDIS_TX", HN_RNDIS_PKT_LEN,
				rte_align32pow2(HN_RNDIS_PKT_LEN),
				dev->device->numa_node);
	if (!pkt)
		rte_exit(EXIT_FAILURE, "cannot allocate RNDIS header");

	txd->rndis_pkt = pkt;
}

/*
 * Unlike Linux and FreeBSD, this driver uses a mempool
 * to limit outstanding transmits and reserve buffers.
 */
int
hn_tx_pool_init(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	char name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(name, sizeof(name),
		 "hn_txd_%u", dev->data->port_id);

	PMD_INIT_LOG(DEBUG, "create a TX send pool %s n=%u size=%zu socket=%d",
		     name, hv->chim_cnt, sizeof(struct hn_txdesc),
		     dev->device->numa_node);

	mp = rte_mempool_create(name, hv->chim_cnt, sizeof(struct hn_txdesc),
				HN_TXD_CACHE_SIZE, 0,
				NULL, NULL,
				hn_txd_init, dev,
				dev->device->numa_node, 0);
	if (!mp) {
		PMD_DRV_LOG(ERR,
			    "mempool %s create failed: %d", name, rte_errno);
		return -rte_errno;
	}

	hv->tx_pool = mp;
	return 0;
}

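/* Reset the transmit aggregation (chimney) state for a queue */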
static void hn_reset_txagg(struct hn_tx_queue *txq)
{
	txq->agg_szleft = txq->agg_szmax;
	txq->agg_pktleft = txq->agg_pktmax;
	txq->agg_txd = NULL;
	txq->agg_prevpkt = NULL;
}

int
hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
		      uint16_t queue_idx, uint16_t nb_desc __rte_unused,
		      unsigned int socket_id,
		      const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct hn_tx_queue *txq;
	uint32_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!txq)
		return -ENOMEM;

	txq->hv = hv;
	txq->chan = hv->channels[queue_idx];
	txq->port_id = dev->data->port_id;
	txq->queue_id = queue_idx;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh = RTE_MIN(hv->chim_cnt / 4,
					 DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= hv->chim_cnt - 3)
		tx_free_thresh = hv->chim_cnt - 3;

	txq->free_thresh = tx_free_thresh;

	txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
	txq->agg_pktmax = hv->rndis_agg_pkts;
	txq->agg_align = hv->rndis_agg_align;

	hn_reset_txagg(txq);

	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}

void
hn_dev_tx_queue_release(void *arg)
{
	struct hn_tx_queue *txq = arg;
	struct hn_txdesc *txd;

	PMD_INIT_FUNC_TRACE();

	if (!txq)
		return;

	/* If any pending data is still present just drop it */
	txd = txq->agg_txd;
	if (txd)
		rte_mempool_put(txq->hv->tx_pool, txd);

	rte_free(txq);
}

void
hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
		     struct rte_eth_txq_info *qinfo)
{
	struct hn_data *hv = dev->data->dev_private;
	struct hn_tx_queue *txq = dev->data->tx_queues[queue_idx];

	qinfo->conf.tx_free_thresh = txq->free_thresh;
	qinfo->nb_desc = hv->tx_pool->size;
}

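/* Handle a host acknowledgment for a previously sent RNDIS data message:
 * update the queue statistics and release the transmit descriptor.
 */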
static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
		      unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
{
	struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
	struct hn_tx_queue *txq;

	/* Control packets are sent with xactid == 0 */
	if (!txd)
		return;

	txq = dev->data->tx_queues[queue_id];
	if (likely(ack->status == NVS_STATUS_OK)) {
		PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
			   txq->port_id, txq->queue_id, txd->chim_index,
			   txd->packets, txd->data_size);
		txq->stats.bytes += txd->data_size;
		txq->stats.packets += txd->packets;
	} else {
		PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
			   txq->port_id, txq->queue_id, txd->chim_index, ack->status);
		++txq->stats.errors;
	}

	rte_pktmbuf_free(txd->m);

	rte_mempool_put(txq->hv->tx_pool, txd);
}

/* Handle transmit completion events */
static void
hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
		   const struct vmbus_chanpkt_hdr *pkt,
		   const void *data)
{
	const struct hn_nvs_hdr *hdr = data;

	switch (hdr->type) {
	case NVS_TYPE_RNDIS_ACK:
		hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
		break;

	default:
		PMD_TX_LOG(NOTICE,
			   "unexpected send completion type %u",
			   hdr->type);
	}
}

/* Parse per-packet info (metadata) */
static int
hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
		struct hn_rxinfo *info)
{
	const struct rndis_pktinfo *pi = info_data;
	uint32_t mask = 0;

	while (info_dlen != 0) {
		const void *data;
		uint32_t dlen;

		if (unlikely(info_dlen < sizeof(*pi)))
			return -EINVAL;

		if (unlikely(info_dlen < pi->size))
			return -EINVAL;
		info_dlen -= pi->size;

		if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
			return -EINVAL;
		if (unlikely(pi->size < pi->offset))
			return -EINVAL;

		dlen = pi->size - pi->offset;
		data = pi->data;

		switch (pi->type) {
		case NDIS_PKTINFO_TYPE_VLAN:
			if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
				return -EINVAL;
			info->vlan_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_VLAN;
			break;

		case NDIS_PKTINFO_TYPE_CSUM:
			if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
				return -EINVAL;
			info->csum_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_CSUM;
			break;

		case NDIS_PKTINFO_TYPE_HASHVAL:
			if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
				return -EINVAL;
			info->hash_value = *((const uint32_t *)data);
			mask |= HN_RXINFO_HASHVAL;
			break;

		case NDIS_PKTINFO_TYPE_HASHINF:
			if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
				return -EINVAL;
			info->hash_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_HASHINF;
			break;

		default:
			goto next;
		}

		if (mask == HN_RXINFO_ALL)
			break; /* All found; done */
next:
		pi = (const struct rndis_pktinfo *)
			((const uint8_t *)pi + pi->size);
	}

	/*
	 * Final fixup.
	 * - If there is no hash value, invalidate the hash info.
	 */
	if (!(mask & HN_RXINFO_HASHVAL))
		info->hash_info = HN_NDIS_HASH_INFO_INVALID;
	return 0;
}

/*
 * Ack the consumed RXBUF associated with this channel packet,
 * so that this RXBUF can be recycled by the hypervisor.
 */
static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
{
	struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
	struct hn_data *hv = rxb->hv;

	if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
		--hv->rxbuf_outstanding;
	}
}

static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
{
	hn_rx_buf_release(opaque);
}

/* Set up reference counting state for the receive buffer section referenced
 * by this channel packet, so it can back external mbufs.
 */
static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
					    const struct vmbus_chanpkt_rxbuf *pkt)
{
	struct hn_rx_bufinfo *rxb;

	rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
	rxb->chan = rxq->chan;
	rxb->xactid = pkt->hdr.xactid;
	rxb->hv = rxq->hv;

	rxb->shinfo.free_cb = hn_rx_buf_free_cb;
	rxb->shinfo.fcb_opaque = rxb;
	rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);
	return rxb;
}

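/* Deliver one received packet: either attach the shared receive area as an
 * external mbuf (for large packets) or copy the data into a new mbuf, then
 * fill in offload fields and stage the mbuf on the queue's rx_ring.
 */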
static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
		     uint8_t *data, unsigned int headroom, unsigned int dlen,
		     const struct hn_rxinfo *info)
{
	struct hn_data *hv = rxq->hv;
	struct rte_mbuf *m;

	m = rte_pktmbuf_alloc(rxq->mb_pool);
	if (unlikely(!m)) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[rxq->port_id];

		dev->data->rx_mbuf_alloc_failed++;
		return;
	}

	/*
	 * For large packets, avoid copy if possible but need to keep
	 * some space available in receive area for later packets.
	 */
	if (dlen >= HN_RXCOPY_THRESHOLD &&
	    hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
		struct rte_mbuf_ext_shared_info *shinfo;
		const void *rxbuf;
		rte_iova_t iova;

		/*
		 * Build an external mbuf that points to the receive area.
		 * Use refcount to handle multiple packets in the same
		 * receive buffer section.
		 */
		rxbuf = hv->rxbuf_res->addr;
		iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
		shinfo = &rxb->shinfo;

		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
			++hv->rxbuf_outstanding;

		rte_pktmbuf_attach_extbuf(m, data, iova,
					  dlen + headroom, shinfo);
		m->data_off = headroom;
	} else {
		/* Mbufs in the pool must be large enough to hold small packets */
		if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
			rte_pktmbuf_free_seg(m);
			++rxq->stats.errors;
			return;
		}
		rte_memcpy(rte_pktmbuf_mtod(m, void *),
			   data + headroom, dlen);
	}

	m->port = rxq->port_id;
	m->pkt_len = dlen;
	m->data_len = dlen;
	m->packet_type = rte_net_get_ptype(m, NULL,
					   RTE_PTYPE_L2_MASK |
					   RTE_PTYPE_L3_MASK |
					   RTE_PTYPE_L4_MASK);

	if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
		m->vlan_tci = info->vlan_info;
		m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
	}

	if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
		if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
			m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

		if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
				       | NDIS_RXCSUM_INFO_TCPCS_OK))
			m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
					    | NDIS_RXCSUM_INFO_UDPCS_FAILED))
			m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
		m->ol_flags |= PKT_RX_RSS_HASH;
		m->hash.rss = info->hash_value;
	}

	PMD_RX_LOG(DEBUG,
		   "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
		   rxq->port_id, rxq->queue_id, rxb->xactid,
		   m->pkt_len, m->packet_type, m->ol_flags);

	++rxq->stats.packets;
	rxq->stats.bytes += m->pkt_len;
	hn_update_packet_stats(&rxq->stats, m);

	if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
		++rxq->ring_full;
		rte_pktmbuf_free(m);
	}
}

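/* Validate the layout of an RNDIS data message (lengths and offsets must be
 * consistent and within bounds), parse any per-packet info, and hand the
 * Ethernet frame to hn_rxpkt().
 */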
static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
			     struct hn_rx_bufinfo *rxb,
			     void *data, uint32_t dlen)
{
	unsigned int data_off, data_len, pktinfo_off, pktinfo_len;
	const struct rndis_packet_msg *pkt = data;
	struct hn_rxinfo info = {
		.vlan_info = HN_NDIS_VLAN_INFO_INVALID,
		.csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
		.hash_info = HN_NDIS_HASH_INFO_INVALID,
	};
	int err;

	hn_rndis_dump(pkt);

	if (unlikely(dlen < sizeof(*pkt)))
		goto error;

	if (unlikely(dlen < pkt->len))
		goto error; /* truncated RNDIS from host */

	if (unlikely(pkt->len < pkt->datalen
		     + pkt->oobdatalen + pkt->pktinfolen))
		goto error;

	if (unlikely(pkt->datalen == 0))
		goto error;

	/* Check offsets. */
	if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
		goto error;

	if (likely(pkt->pktinfooffset > 0) &&
	    unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
		     (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
		goto error;

	data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
	data_len = pkt->datalen;
	pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
	pktinfo_len = pkt->pktinfolen;

	if (likely(pktinfo_len > 0)) {
		err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
				      pktinfo_len, &info);
		if (err)
			goto error;
	}

	if (unlikely(data_off + data_len > pkt->len))
		goto error;

	if (unlikely(data_len < ETHER_HDR_LEN))
		goto error;

	hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
	return;
error:
	++rxq->stats.errors;
}

static void
hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
		 struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
{
	const struct rndis_msghdr *hdr = buf;

	switch (hdr->type) {
	case RNDIS_PACKET_MSG:
		if (dev->data->dev_started)
			hn_rndis_rx_data(rxq, rxb, buf, len);
		break;

	case RNDIS_INDICATE_STATUS_MSG:
		hn_rndis_link_status(rxq->hv, buf);
		break;

	case RNDIS_INITIALIZE_CMPLT:
	case RNDIS_QUERY_CMPLT:
	case RNDIS_SET_CMPLT:
		hn_rndis_receive_response(rxq->hv, buf, len);
		break;

	default:
		PMD_DRV_LOG(NOTICE,
			    "unexpected RNDIS message (type %#x len %u)",
			    hdr->type, len);
		break;
	}
}

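/* Process one NVS RXBUF channel packet. After validating the NVS and channel
 * packet headers, each entry of the range array is handed to hn_rndis_receive()
 * as an individual RNDIS message inside the shared receive buffer.
 */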
static void
hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
		    struct hn_data *hv,
		    struct hn_rx_queue *rxq,
		    const struct vmbus_chanpkt_hdr *hdr,
		    const void *buf)
{
	const struct vmbus_chanpkt_rxbuf *pkt;
	const struct hn_nvs_hdr *nvs_hdr = buf;
	uint32_t rxbuf_sz = hv->rxbuf_res->len;
	char *rxbuf = hv->rxbuf_res->addr;
	unsigned int i, hlen, count;
	struct hn_rx_bufinfo *rxb;

	/* At minimum we need type header */
	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
		PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
		return;
	}

	/* Make sure that this is a RNDIS message. */
	if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
		PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
			   nvs_hdr->type);
		return;
	}

	hlen = vmbus_chanpkt_getlen(hdr->hlen);
	if (unlikely(hlen < sizeof(*pkt))) {
		PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
		return;
	}

	pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
	if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
		PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
			   pkt->rxbuf_id);
		return;
	}

	count = pkt->rxbuf_cnt;
	if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
				     rxbuf[count]))) {
		PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
		return;
	}

	if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
		PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
			   pkt->hdr.xactid);
		return;
	}

	/* Setup receive buffer info to allow for callback */
	rxb = hn_rx_buf_init(rxq, pkt);

	/* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
	for (i = 0; i < count; ++i) {
		unsigned int ofs, len;

		ofs = pkt->rxbuf[i].ofs;
		len = pkt->rxbuf[i].len;

		if (unlikely(ofs + len > rxbuf_sz)) {
			PMD_RX_LOG(ERR,
				   "%uth RNDIS msg overflow ofs %u, len %u",
				   i, ofs, len);
			continue;
		}

		if (unlikely(len == 0)) {
			PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
			continue;
		}

		hn_rndis_receive(dev, rxq, rxb,
				 rxbuf + ofs, len);
	}

	/* Send ACK now if external mbuf not used */
	hn_rx_buf_release(rxb);
}

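/* Allocate a receive queue structure and its channel event buffer */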
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id)
{
	struct hn_rx_queue *rxq;

	rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq)
		return NULL;

	rxq->hv = hv;
	rxq->chan = hv->channels[queue_id];
	rte_spinlock_init(&rxq->ring_lock);
	rxq->port_id = hv->port_id;
	rxq->queue_id = queue_id;
	rxq->event_sz = HN_RXQ_EVENT_DEFAULT;
	rxq->event_buf = rte_malloc_socket("HN_EVENTS", HN_RXQ_EVENT_DEFAULT,
					   RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->event_buf) {
		rte_free(rxq);
		return NULL;
	}

	return rxq;
}

int
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
		      uint16_t queue_idx, uint16_t nb_desc,
		      unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf __rte_unused,
		      struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	char ring_name[RTE_RING_NAMESIZE];
	struct hn_rx_queue *rxq;
	unsigned int count;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx == 0) {
		rxq = hv->primary;
	} else {
		rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
		if (!rxq)
			return -ENOMEM;
	}

	rxq->mb_pool = mp;
	count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues;
	if (nb_desc == 0 || nb_desc > count)
		nb_desc = count;

	/*
	 * Staging ring from the receive event logic to rx_pkts.
	 * rx_pkts assumes the caller handles multi-thread safety;
	 * the event logic has its own locking.
	 */
	snprintf(ring_name, sizeof(ring_name),
		 "hn_rx_%u_%u", dev->data->port_id, queue_idx);
	rxq->rx_ring = rte_ring_create(ring_name,
				       rte_align32pow2(nb_desc),
				       socket_id, 0);
	if (!rxq->rx_ring)
		goto fail;

	dev->data->rx_queues[queue_idx] = rxq;
	return 0;

fail:
	rte_ring_free(rxq->rx_ring);
	rte_free(rxq->event_buf);
	rte_free(rxq);
	return -ENOMEM;
}

void
hn_dev_rx_queue_release(void *arg)
{
	struct hn_rx_queue *rxq = arg;

	PMD_INIT_FUNC_TRACE();

	if (!rxq)
		return;

	rte_ring_free(rxq->rx_ring);
	rxq->rx_ring = NULL;
	rxq->mb_pool = NULL;

	if (rxq != rxq->hv->primary) {
		rte_free(rxq->event_buf);
		rte_free(rxq);
	}
}

int
hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
{
	struct hn_tx_queue *txq = arg;

	return hn_process_events(txq->hv, txq->queue_id, free_cnt);
}

void
hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
		     struct rte_eth_rxq_info *qinfo)
{
	struct hn_rx_queue *rxq = dev->data->rx_queues[queue_idx];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = 1;
	qinfo->nb_desc = rte_ring_get_capacity(rxq->rx_ring);
}

static void
hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
		     const void *data)
{
	const struct hn_nvs_hdr *hdr = data;

	if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) {
		PMD_DRV_LOG(ERR, "invalid nvs notify");
		return;
	}

	PMD_DRV_LOG(INFO,
		    "got notify, nvs type %u", hdr->type);
}

/*
 * Process pending events on the channel.
 * Called from both Rx queue poll and Tx cleanup.
 */
uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
			   uint32_t tx_limit)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
	struct hn_rx_queue *rxq;
	uint32_t bytes_read = 0;
	uint32_t tx_done = 0;
	int ret = 0;

	rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];

	/* If no pending data then nothing to do */
	if (rte_vmbus_chan_rx_empty(rxq->chan))
		return 0;

	/*
	 * The channel is shared between the Rx and Tx queues, so a lock is
	 * needed because DPDK does not guarantee that Rx and Tx run on the
	 * same CPU.
	 */
	if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
		return 0;

	for (;;) {
		const struct vmbus_chanpkt_hdr *pkt;
		uint32_t len = rxq->event_sz;
		const void *data;

retry:
		ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
		if (ret == -EAGAIN)
			break;	/* ring is empty */

		if (unlikely(ret == -ENOBUFS)) {
			/* event buffer not large enough to read ring */

			PMD_DRV_LOG(DEBUG,
				    "event buffer expansion (need %u)", len);
			rxq->event_sz = len + len / 4;
			rxq->event_buf = rte_realloc(rxq->event_buf, rxq->event_sz,
						     RTE_CACHE_LINE_SIZE);
			if (rxq->event_buf)
				goto retry;
			/* out of memory, no more events now */
			rxq->event_sz = 0;
			break;
		}

		if (unlikely(ret <= 0)) {
			/* This indicates a failure to communicate (or worse) */
			rte_exit(EXIT_FAILURE,
				 "vmbus ring buffer error: %d", ret);
		}

		bytes_read += ret;
		pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
		data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);

		switch (pkt->type) {
		case VMBUS_CHANPKT_TYPE_COMP:
			++tx_done;
			hn_nvs_handle_comp(dev, queue_id, pkt, data);
			break;

		case VMBUS_CHANPKT_TYPE_RXBUF:
			hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
			break;

		case VMBUS_CHANPKT_TYPE_INBAND:
			hn_nvs_handle_notify(pkt, data);
			break;

		default:
			PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
			break;
		}

		if (tx_limit && tx_done >= tx_limit)
			break;

		if (rxq->rx_ring && rte_ring_full(rxq->rx_ring))
			break;
	}

	if (bytes_read > 0)
		rte_vmbus_chan_signal_read(rxq->chan, bytes_read);

	rte_spinlock_unlock(&rxq->ring_lock);

	return tx_done;
}

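/* Copy a packet into the chimney (copy) send buffer right after its RNDIS
 * header, and update the aggregating descriptor's accounting.
 */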
static void hn_append_to_chim(struct hn_tx_queue *txq,
			      struct rndis_packet_msg *pkt,
			      const struct rte_mbuf *m)
{
	struct hn_txdesc *txd = txq->agg_txd;
	uint8_t *buf = (uint8_t *)pkt;
	unsigned int data_offs;

	hn_rndis_dump(pkt);

	data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
	txd->chim_size += pkt->len;
	txd->data_size += m->pkt_len;
	++txd->packets;
	hn_update_packet_stats(&txq->stats, m);

	for (; m; m = m->next) {
		uint16_t len = rte_pktmbuf_data_len(m);

		rte_memcpy(buf + data_offs,
			   rte_pktmbuf_mtod(m, const char *), len);
		data_offs += len;
	}
}

/*
 * Send pending aggregated data in the chimney buffer (if any).
 * Returns an error if the send was unsuccessful because the channel
 * ring buffer was full.
 */
static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
{
	struct hn_txdesc *txd = txq->agg_txd;
	struct hn_nvs_rndis rndis;
	int ret;

	if (!txd)
		return 0;

	rndis = (struct hn_nvs_rndis) {
		.type = NVS_TYPE_RNDIS,
		.rndis_mtype = NVS_RNDIS_MTYPE_DATA,
		.chim_idx = txd->chim_index,
		.chim_sz = txd->chim_size,
	};

	PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
		   txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);

	ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
			  &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);

	if (likely(ret == 0))
		hn_reset_txagg(txq);
	else
		PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d",
			   txq->port_id, txq->queue_id, ret);

	return ret;
}

/* Get a new transmit descriptor from the per-port pool */
static struct hn_txdesc *hn_new_txd(struct hn_data *hv,
				    struct hn_tx_queue *txq)
{
	struct hn_txdesc *txd;

	if (rte_mempool_get(hv->tx_pool, (void **)&txd)) {
		++txq->stats.nomemory;
		PMD_TX_LOG(DEBUG, "tx pool exhausted!");
		return NULL;
	}

	txd->m = NULL;
	txd->queue_id = txq->queue_id;
	txd->packets = 0;
	txd->data_size = 0;
	txd->chim_size = 0;

	return txd;
}

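/* Reserve room in the chimney buffer for the next packet.
 * If a descriptor is already aggregating, pad the previous RNDIS message to
 * the required alignment (zeroing the padding) and return the next write
 * position; otherwise start a new descriptor. Returns NULL if no transmit
 * descriptor is available.
 */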
static void *
hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize)
{
	struct hn_txdesc *agg_txd = txq->agg_txd;
	struct rndis_packet_msg *pkt;
	void *chim;

	if (agg_txd) {
		unsigned int padding, olen;

		/*
		 * Update the previous RNDIS packet's total length,
		 * it can be increased due to the mandatory alignment
		 * padding for this RNDIS packet. And update the
		 * aggregating txdesc's chimney sending buffer size
		 * accordingly.
		 *
		 * Zero-out the padding, as required by the RNDIS spec.
		 */
		pkt = txq->agg_prevpkt;
		olen = pkt->len;
		padding = RTE_ALIGN(olen, txq->agg_align) - olen;
		if (padding > 0) {
			agg_txd->chim_size += padding;
			pkt->len += padding;
			memset((uint8_t *)pkt + olen, 0, padding);
		}

		chim = (uint8_t *)pkt + pkt->len;

		txq->agg_pktleft--;
		txq->agg_szleft -= pktsize;
		if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
			/*
			 * Probably can't aggregate more packets,
			 * flush this aggregating txdesc proactively.
			 */
			txq->agg_pktleft = 0;
		}
	} else {
		agg_txd = hn_new_txd(hv, txq);
		if (!agg_txd)
			return NULL;

		chim = (uint8_t *)hv->chim_res->addr
			+ agg_txd->chim_index * hv->chim_szmax;

		txq->agg_txd = agg_txd;
		txq->agg_pktleft = txq->agg_pktmax - 1;
		txq->agg_szleft = txq->agg_szmax - pktsize;
	}
	txq->agg_prevpkt = chim;

	return chim;
}

static inline void *
hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
			uint32_t pi_dlen, uint32_t pi_type)
{
	const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
	struct rndis_pktinfo *pi;

	/*
	 * Per-packet-info does not move; it only grows.
	 *
	 * NOTE:
	 * pktinfooffset in this phase counts from the beginning
	 * of rndis_packet_msg.
	 */
	pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));

	pkt->pktinfolen += pi_size;

	pi->size = pi_size;
	pi->type = pi_type;
	pi->offset = RNDIS_PKTINFO_OFFSET;

	return pi->data;
}

/* Put RNDIS header and packet info on packet */
static void hn_encap(struct rndis_packet_msg *pkt,
		     uint16_t queue_id,
		     const struct rte_mbuf *m)
{
	unsigned int hlen = m->l2_len + m->l3_len;
	uint32_t *pi_data;
	uint32_t pkt_hlen;

	pkt->type = RNDIS_PACKET_MSG;
	pkt->len = m->pkt_len;
	pkt->dataoffset = 0;
	pkt->datalen = m->pkt_len;
	pkt->oobdataoffset = 0;
	pkt->oobdatalen = 0;
	pkt->oobdataelements = 0;
	pkt->pktinfooffset = sizeof(*pkt);
	pkt->pktinfolen = 0;
	pkt->vchandle = 0;
	pkt->reserved = 0;

	/*
	 * Set the hash value for this packet to the queue_id to cause
	 * the TX done event for this packet on the right channel.
	 */
	pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
					  NDIS_PKTINFO_TYPE_HASHVAL);
	*pi_data = queue_id;

	if (m->ol_flags & PKT_TX_VLAN_PKT) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_VLAN);
		*pi_data = m->vlan_tci;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_LSO);

		if (m->ol_flags & PKT_TX_IPV6) {
			*pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
							   m->tso_segsz);
		} else {
			*pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
							   m->tso_segsz);
		}
	} else if (m->ol_flags &
		   (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_CSUM);
		*pi_data = 0;

		if (m->ol_flags & PKT_TX_IPV6)
			*pi_data |= NDIS_TXCSUM_INFO_IPV6;
		if (m->ol_flags & PKT_TX_IPV4) {
			*pi_data |= NDIS_TXCSUM_INFO_IPV4;

			if (m->ol_flags & PKT_TX_IP_CKSUM)
				*pi_data |= NDIS_TXCSUM_INFO_IPCS;
		}

		if (m->ol_flags & PKT_TX_TCP_CKSUM)
			*pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
		else if (m->ol_flags & PKT_TX_UDP_CKSUM)
			*pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
	}

	pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
	/* Fixup RNDIS packet message total length */
	pkt->len += pkt_hlen;

	/* Convert RNDIS packet message offsets */
	pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
	pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
}

/* How many scatter-gather list elements are needed */
static unsigned int hn_get_slots(const struct rte_mbuf *m)
{
	unsigned int slots = 1; /* for RNDIS header */

	while (m) {
		unsigned int size = rte_pktmbuf_data_len(m);
		unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;

		slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
		m = m->next;
	}

	return slots;
}

/* Build scatter gather list from chained mbuf */
static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
			       const struct rte_mbuf *m)
{
	unsigned int segs = 0;

	while (m) {
		rte_iova_t addr = rte_mbuf_data_iova(m);
		unsigned int page = addr / PAGE_SIZE;
		unsigned int offset = addr & PAGE_MASK;
		unsigned int len = rte_pktmbuf_data_len(m);

		while (len > 0) {
			unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);

			sg[segs].page = page;
			sg[segs].ofs = offset;
			sg[segs].len = bytes;
			segs++;

			++page;
			offset = 0;
			len -= bytes;
		}
		m = m->next;
	}

	return segs;
}

/* Transmit directly from mbuf */
static int hn_xmit_sg(struct hn_tx_queue *txq,
		      const struct hn_txdesc *txd, const struct rte_mbuf *m,
		      bool *need_sig)
{
	struct vmbus_gpa sg[hn_get_slots(m)];
	struct hn_nvs_rndis nvs_rndis = {
		.type = NVS_TYPE_RNDIS,
		.rndis_mtype = NVS_RNDIS_MTYPE_DATA,
		.chim_sz = txd->chim_size,
	};
	rte_iova_t addr;
	unsigned int segs;

	/* attach aggregation data if present */
	if (txd->chim_size > 0)
		nvs_rndis.chim_idx = txd->chim_index;
	else
		nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;

	hn_rndis_dump(txd->rndis_pkt);

	/* pass IOVA of RNDIS header in first segment */
	addr = rte_malloc_virt2iova(txd->rndis_pkt);
	if (unlikely(addr == RTE_BAD_IOVA)) {
		PMD_DRV_LOG(ERR, "RNDIS transmit cannot get iova");
		return -EINVAL;
	}

	sg[0].page = addr / PAGE_SIZE;
	sg[0].ofs = addr & PAGE_MASK;
	sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
	segs = 1;

	hn_update_packet_stats(&txq->stats, m);

	segs += hn_fill_sg(sg + 1, m);

	PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
		   txq->port_id, txq->queue_id, txd->chim_index,
		   segs, nvs_rndis.chim_sz);

	return hn_nvs_send_sglist(txq->chan, sg, segs,
				  &nvs_rndis, sizeof(nvs_rndis),
				  (uintptr_t)txd, need_sig);
}

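/* Transmit burst: small packets are copied and aggregated into the chimney
 * send buffer; larger packets are sent directly using a scatter-gather list.
 */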
uint16_t
hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct hn_tx_queue *txq = ptxq;
	struct hn_data *hv = txq->hv;
	bool need_sig = false;
	uint16_t nb_tx;
	int ret;

	if (unlikely(hv->closed))
		return 0;

	if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
		hn_process_events(hv, txq->queue_id, 0);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];
		uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
		struct rndis_packet_msg *pkt;

		/* For small packets aggregate them in chimney buffer */
		if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
			/* If this packet will not fit, then flush */
			if (txq->agg_pktleft == 0 ||
			    RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
				if (hn_flush_txagg(txq, &need_sig))
					goto fail;
			}

			pkt = hn_try_txagg(hv, txq, pkt_size);
			if (unlikely(!pkt))
				break;

			hn_encap(pkt, txq->queue_id, m);
			hn_append_to_chim(txq, pkt, m);

			rte_pktmbuf_free(m);

			/* if buffer is full, flush */
			if (txq->agg_pktleft == 0 &&
			    hn_flush_txagg(txq, &need_sig))
				goto fail;
		} else {
			struct hn_txdesc *txd;

			/* can send chimney data and large packet at once */
			txd = txq->agg_txd;
			if (txd) {
				hn_reset_txagg(txq);
			} else {
				txd = hn_new_txd(hv, txq);
				if (unlikely(!txd))
					break;
			}

			pkt = txd->rndis_pkt;
			txd->m = m;
			txd->data_size += m->pkt_len;
			++txd->packets;

			hn_encap(pkt, txq->queue_id, m);

			ret = hn_xmit_sg(txq, txd, m, &need_sig);
			if (unlikely(ret != 0)) {
				PMD_TX_LOG(NOTICE, "sg send failed: %d", ret);
				++txq->stats.errors;
				rte_mempool_put(hv->tx_pool, txd);
				goto fail;
			}
		}
	}

	/* If a partially filled buffer is left, try to send it now;
	 * if that fails, it will be reused on the next send.
	 */
	hn_flush_txagg(txq, &need_sig);

fail:
	if (need_sig)
		rte_vmbus_chan_signal_tx(txq->chan);

	return nb_tx;
}

uint16_t
hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hn_rx_queue *rxq = prxq;
	struct hn_data *hv = rxq->hv;

	if (unlikely(hv->closed))
		return 0;

	/* If the staging ring is short of packets, process more channel events */
	if (rte_ring_count(rxq->rx_ring) < nb_pkts)
		hn_process_events(hv, rxq->queue_id, 0);

	/* Get mbufs off staging ring */
	return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,
					 nb_pkts, NULL);
}