Lines Matching defs:rq (symbol search over the FreeBSD mlx5en receive path; each match below is prefixed with its source line number)

34 mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
44 if (rq->mbuf[ix].mbuf != NULL)
55 for (i = 1; i < rq->nsegs; i++) {
56 if (mb_head->m_pkthdr.len >= rq->wqe_sz)
73 err = mlx5_accel_ipsec_rx_tag_add(rq->ifp, &rq->mbuf[ix]);
76 err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
81 bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
92 for (; i < rq->nsegs; i++) {
97 rq->mbuf[ix].mbuf = mb;
98 rq->mbuf[ix].data = mb->m_data;
100 bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
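
Taken together, the mlx5e_alloc_rx_wqe matches (lines 34-100) cover the refill path: skip slots that already hold an mbuf, allocate the receive buffer, DMA-load it, fill the WQE scatter list, and record the mbuf in rq->mbuf[ix]. The real function chains up to rq->nsegs mbufs (lines 55-56) and adds an IPsec RX tag (line 73); the sketch below maps a single buffer to keep the shape visible, and the segment bound MLX5E_MAX_RX_SEGS is an assumed name.

/*
 * Sketch only: single-buffer variant of the refill path, using the
 * FreeBSD busdma API and the mlx5e_rq fields seen in the matches above.
 */
static int
rx_wqe_refill_sketch(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
        bus_dma_segment_t segs[MLX5E_MAX_RX_SEGS];      /* assumed bound */
        struct mbuf *mb;
        int nsegs, err, i;

        if (rq->mbuf[ix].mbuf != NULL)
                return (0);             /* slot still filled (line 44) */

        mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
        if (mb == NULL)
                return (-ENOMEM);
        mb->m_pkthdr.len = mb->m_len = rq->wqe_sz;

        /* Map the mbuf for DMA; mirrors line 76. */
        err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
            mb, segs, &nsegs, BUS_DMA_NOWAIT);
        if (err != 0) {
                m_freem(mb);
                return (err);
        }

        /* Fill the WQE scatter list; zero the unused entries (line 92). */
        for (i = 0; i != nsegs; i++) {
                wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
                wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
        }
        for (; i < rq->nsegs; i++)
                wqe->data[i].byte_count = 0;

        rq->mbuf[ix].mbuf = mb;
        rq->mbuf[ix].data = mb->m_data;         /* lines 97-98 */
        bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
            BUS_DMASYNC_PREREAD);               /* line 100 */
        return (0);
}
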
110 mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
112 if (unlikely(rq->enabled == 0))
115 while (!mlx5_wq_ll_is_full(&rq->wq)) {
116 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);
118 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
119 callout_reset_curcpu(&rq->watchdog, 1, (void *)&mlx5e_post_rx_wqes, rq);
122 mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
128 mlx5_wq_ll_update_db_record(&rq->wq);
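
mlx5e_post_rx_wqes (lines 110-128) is the producer side: while the linked-list work queue is not full it allocates and pushes fresh WQEs, falls back to a one-tick watchdog callout when buffer allocation fails (line 119), and finally publishes the new head through the doorbell record. A compressed sketch of that control flow; the wmb() before the doorbell update is an assumption based on the usual ordering requirement:

/* Sketch: refill loop with watchdog fallback, per lines 110-128. */
static void
post_rx_wqes_sketch(struct mlx5e_rq *rq)
{
        if (unlikely(rq->enabled == 0))
                return;

        while (!mlx5_wq_ll_is_full(&rq->wq)) {
                struct mlx5e_rx_wqe *wqe =
                    mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);

                if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
                        /* Out of buffers: retry from a callout one tick later. */
                        callout_reset_curcpu(&rq->watchdog, 1,
                            (void *)&mlx5e_post_rx_wqes, rq);
                        break;
                }
                mlx5_wq_ll_push(&rq->wq,
                    be16_to_cpu(wqe->next.next_wqe_index));
        }

        /* Make the WQE writes visible before the doorbell record update. */
        wmb();
        mlx5_wq_ll_update_db_record(&rq->wq);
}
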
280 mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
283 if_t ifp = rq->ifp;
292 rq->stats.lro_packets++;
293 rq->stats.lro_bytes += cqe_bcnt;
352 mb->m_pkthdr.flowid = rq->ix;
377 rq->stats.csum_none++;
388 rq->stats.csum_none++;
396 c = container_of(rq, struct mlx5e_channel, rq);
416 rq->stats.decrypted_ok_packets++;
419 rq->stats.decrypted_error_packets++;
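
The matches inside mlx5e_build_rx_mbuf (lines 280-419) are all per-packet metadata and accounting: LRO counters (lines 292-293), the RSS flowid taken from the ring index (line 352), csum_none fallbacks when hardware checksum validation failed (lines 377, 388), and the inline-decryption counters for offloaded traffic (lines 416-419). A sketch of the flowid/checksum portion; the CQE_L3_OK/CQE_L4_OK test is a simplification of the driver's actual condition:

/* Sketch: per-packet metadata fill, loosely following lines 280-419. */
static void
build_rx_mbuf_sketch(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
    struct mbuf *mb, u32 cqe_bcnt)
{
        if_t ifp = rq->ifp;

        mb->m_pkthdr.len = cqe_bcnt;
        mb->m_pkthdr.rcvif = ifp;
        mb->m_pkthdr.flowid = rq->ix;           /* RSS bucket = ring index */
        M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);

        /* Simplified hardware-checksum test (assumed condition). */
        if ((cqe->hds_ip_ext & (CQE_L3_OK | CQE_L4_OK)) ==
            (CQE_L3_OK | CQE_L4_OK)) {
                mb->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                mb->m_pkthdr.csum_data = htons(0xffff);
        } else {
                rq->stats.csum_none++;          /* lines 377/388 */
        }
}
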
502 mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
507 CURVNET_SET_QUIET(if_getvnet(rq->ifp));
508 pfil = rq->channel->priv->pfil;
517 cqe = mlx5e_get_cqe(&rq->cq);
522 mlx5e_decompress_cqes(&rq->cq);
524 mlx5_cqwq_pop(&rq->cq.wq);
528 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
531 bus_dmamap_sync(rq->dma_tag,
532 rq->mbuf[wqe_counter].dma_map,
536 mlx5e_dump_err_cqe(&rq->cq, rq->rqn, (const void *)cqe);
537 rq->stats.wqe_err++;
542 rv = pfil_mem_in(rq->channel->priv->pfil,
543 rq->mbuf[wqe_counter].data, seglen, rq->ifp, &mb);
553 rq->stats.packets++;
572 to rq->mbuf */ &&
580 bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
583 mb = rq->mbuf[wqe_counter].mbuf;
584 rq->mbuf[wqe_counter].mbuf = NULL; /* safety clear */
586 bus_dmamap_unload(rq->dma_tag,
587 rq->mbuf[wqe_counter].dma_map);
590 mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter],
592 rq->stats.bytes += byte_cnt;
593 rq->stats.packets++;
595 mb->m_pkthdr.numa_domain = if_getnumadomain(rq->ifp);
599 tcp_lro_queue_mbuf(&rq->lro, mb);
602 (if_getcapenable(rq->ifp) & IFCAP_LRO) == 0 ||
603 rq->lro.lro_cnt == 0 ||
604 tcp_lro_rx(&rq->lro, mb, 0) != 0) {
605 if_input(rq->ifp, mb);
609 mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
614 mlx5_cqwq_update_db_record(&rq->cq.wq);
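
mlx5e_poll_rx_cq (lines 502-614) is the consumer loop. For each completion it syncs the DMA map, dumps and counts error CQEs (lines 536-537), optionally runs the pfil hook directly on the receive buffer before any mbuf work (lines 542-543), then either copies a small frame into a fresh mbuf (line 580) or detaches the ring mbuf outright, clearing the slot and unloading its DMA map (lines 583-587); the packet goes to LRO or straight to if_input(), the WQE is recycled with mlx5_wq_ll_pop(), and the CQ doorbell record is updated once at the end. A skeleton of that loop under the same budget discipline, with the decompression, error, pfil, and small-copy branches elided:

/* Sketch: bounded CQ polling loop, per lines 502-614. */
static int
poll_rx_cq_sketch(struct mlx5e_rq *rq, int budget)
{
        int i;

        for (i = 0; i < budget; i++) {
                struct mlx5_cqe64 *cqe = mlx5e_get_cqe(&rq->cq);
                struct mlx5e_rx_wqe *wqe;
                struct mbuf *mb;
                u16 wqe_counter;
                u32 byte_cnt;

                if (cqe == NULL)
                        break;          /* no more completions */
                mlx5_cqwq_pop(&rq->cq.wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);
                byte_cnt = be32_to_cpu(cqe->byte_cnt);
                wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);

                bus_dmamap_sync(rq->dma_tag, rq->mbuf[wqe_counter].dma_map,
                    BUS_DMASYNC_POSTREAD);

                /* Detach the ring mbuf and clear the slot (lines 583-587). */
                mb = rq->mbuf[wqe_counter].mbuf;
                rq->mbuf[wqe_counter].mbuf = NULL;
                bus_dmamap_unload(rq->dma_tag,
                    rq->mbuf[wqe_counter].dma_map);

                mb->m_pkthdr.len = mb->m_len = byte_cnt;
                mb->m_pkthdr.rcvif = rq->ifp;   /* done by mlx5e_build_rx_mbuf */
                rq->stats.bytes += byte_cnt;
                rq->stats.packets++;

                /* Prefer LRO; fall back to direct input (lines 602-605). */
                if ((if_getcapenable(rq->ifp) & IFCAP_LRO) == 0 ||
                    tcp_lro_rx(&rq->lro, mb, 0) != 0)
                        if_input(rq->ifp, mb);

                mlx5_wq_ll_pop(&rq->wq, cqe->wqe_counter,
                    &wqe->next.next_wqe_index);
        }
        mlx5_cqwq_update_db_record(&rq->cq.wq);
        return (i);
}
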
624 struct mlx5e_channel *c = container_of(mcq, struct mlx5e_channel, rq.cq.mcq);
625 struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
638 mb->m_data[14] = rq->ix;
639 mb->m_pkthdr.rcvif = rq->ifp;
640 mb->m_pkthdr.leaf_rcvif = rq->ifp;
641 if_input(rq->ifp, mb);
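
Lines 638-641 sit in a debug-only branch of the completion handler: on each CQ event it injects a tiny synthetic frame whose last byte carries the ring index, so a packet capture shows which RQ fired. A sketch of that injection; the 15-byte layout follows lines 638-641, but the guarding #ifdef and the filler value are assumptions:

/* Sketch: debug marker frame identifying the RQ, per lines 638-641. */
static void
inject_cq_event_marker_sketch(struct mlx5e_rq *rq)
{
        struct mbuf *mb = m_gethdr(M_NOWAIT, MT_DATA);

        if (mb == NULL)
                return;
        mb->m_pkthdr.len = mb->m_len = 15;
        memset(mb->m_data, 255, 14);    /* filler bytes (assumed value) */
        mb->m_data[14] = rq->ix;        /* ring index marker (line 638) */
        mb->m_pkthdr.rcvif = rq->ifp;
        mb->m_pkthdr.leaf_rcvif = rq->ifp;
        if_input(rq->ifp, mb);
}
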
654 mtx_lock(&rq->mtx);
661 if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
667 mlx5e_post_rx_wqes(rq);
669 mlx5e_post_rx_wqes(rq);
671 if (rq->dim.mode != NET_DIM_CQ_PERIOD_MODE_DISABLED)
672 net_dim(&rq->dim, rq->stats.packets, rq->stats.bytes);
673 mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
674 tcp_lro_flush_all(&rq->lro);
675 mtx_unlock(&rq->mtx);
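
The closing matches (lines 624-675) are the completion handler proper: it recovers the channel and rq from the CQ pointer via container_of() (lines 624-625), then under rq->mtx alternates polling and reposting until a poll comes back short of MLX5E_RX_BUDGET_MAX, reposts once more, feeds the packet/byte counters to net_dim when dynamic interrupt moderation is enabled, re-arms the CQ under the doorbell lock, and flushes LRO. A sketch of that tail; the outer loop bound MLX5E_BUDGET_MAX is an assumed name:

/* Sketch: completion-handler tail, per lines 654-675. */
static void
rx_cq_comp_tail_sketch(struct mlx5e_rq *rq)
{
        int i;

        mtx_lock(&rq->mtx);

        /* Poll and repost until a poll no longer exhausts the budget. */
        for (i = 0; i < MLX5E_BUDGET_MAX; i++) {        /* assumed bound */
                if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
                    MLX5E_RX_BUDGET_MAX)
                        break;
                mlx5e_post_rx_wqes(rq);
        }
        mlx5e_post_rx_wqes(rq);

        /* Dynamic interrupt moderation, then re-arm the CQ (lines 671-673). */
        if (rq->dim.mode != NET_DIM_CQ_PERIOD_MODE_DISABLED)
                net_dim(&rq->dim, rq->stats.packets, rq->stats.bytes);
        mlx5e_cq_arm(&rq->cq,
            MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));

        tcp_lro_flush_all(&rq->lro);
        mtx_unlock(&rq->mtx);
}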