Lines matching defs:mbuf_pool
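(Each match is prefixed with its source line number. The function names involved, virtio_dev_tx_split/packed and their async variants, suggest these matches come from the dequeue path of DPDK's vhost library, lib/vhost/virtio_net.c; a usage sketch follows the listing.)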

2891 struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
2995 cur = rte_pktmbuf_alloc(mbuf_pool);
3099 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
3125 if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) {
3161 buf_len, mbuf_pool->name);
3168 mbuf_pool, legacy_ol_flags, 0, false);
3195 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
3200 return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
3206 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
3211 return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
3407 struct rte_mempool *mbuf_pool,
3437 buf_len, mbuf_pool->name);
3444 mbuf_pool, legacy_ol_flags, 0, false);
3459 struct rte_mempool *mbuf_pool,
3469 ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
3519 struct rte_mempool *mbuf_pool,
3532 if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) {
3549 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
3572 struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3577 return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
3583 struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3588 return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
3593 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
3660 pkts[nb_rx] = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
3670 nb_rx += virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool,
3673 nb_rx += virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool,
3677 nb_rx += virtio_dev_tx_split_legacy(dev, vq, mbuf_pool,
3680 nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
3746 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
3780 if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count)) {
3819 __func__, buf_len, mbuf_pool->name);
3828 err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkt, mbuf_pool,
3898 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
3904 return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
3911 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
3917 return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
3942 struct rte_mempool *mbuf_pool,
3966 buf_len, mbuf_pool->name);
3973 err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts, mbuf_pool,
4055 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
4074 if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count)) {
4099 if (unlikely(virtio_dev_tx_async_single_packed(dev, vq, mbuf_pool, pkt,
4169 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
4174 return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
4181 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
4186 return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
4192 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
4278 pkts[nb_rx] = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
4288 nb_rx += virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
4291 nb_rx += virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
4295 nb_rx += virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
4298 nb_rx += virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
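
For context, here is a minimal sketch of where this mbuf_pool comes from, assuming the matches above are the dequeue (guest TX) path of DPDK's vhost library: the application creates the pool once and hands it to the public rte_vhost_dequeue_burst() API, and the internal split/packed paths then draw mbufs from it, e.g. via rte_pktmbuf_alloc_bulk() (lines 3125, 3532, 3780 and 4074 above). The pool parameters, burst size and helper names below are illustrative, not taken from the source.

	#include <stdint.h>
	#include <rte_lcore.h>
	#include <rte_mbuf.h>
	#include <rte_mempool.h>
	#include <rte_vhost.h>

	#define BURST_SIZE 32	/* illustrative burst size */

	/* Created once at init time; 8192 mbufs / 256 cache are
	 * illustrative sizes, not values from the source above. */
	static struct rte_mempool *
	create_pool(void)
	{
		return rte_pktmbuf_pool_create("vhost_mbuf_pool", 8192, 256,
				0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	}

	/* Hypothetical helper: drain one vhost queue. The vhost library
	 * allocates the receive mbufs from mbuf_pool internally, as the
	 * rte_pktmbuf_alloc_bulk() matches above show. */
	static uint16_t
	drain_vhost_queue(int vid, uint16_t queue_id,
			struct rte_mempool *mbuf_pool)
	{
		struct rte_mbuf *pkts[BURST_SIZE];
		uint16_t i, nb_rx;

		nb_rx = rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool,
				pkts, BURST_SIZE);
		for (i = 0; i < nb_rx; i++) {
			/* ... process pkts[i] ... */
			rte_pktmbuf_free(pkts[i]);
		}
		return nb_rx;
	}

If the pool cannot supply an mbuf of the required size, the internal paths log an error naming the pool, which appears to be what the mbuf_pool->name references at lines 3161, 3437, 3819 and 3966 above are for.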