Lines matching "rx"
38 struct gve_rx_ring *rx = &priv->rx[i];
40 if (rx->page_info != NULL) {
41 free(rx->page_info, M_GVE);
42 rx->page_info = NULL;
45 if (rx->data_ring != NULL) {
46 gve_dma_free_coherent(&rx->data_ring_mem);
47 rx->data_ring = NULL;
50 if (rx->desc_ring != NULL) {
51 gve_dma_free_coherent(&rx->desc_ring_mem);
52 rx->desc_ring = NULL;
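The matches above come from the ring-teardown path: each resource (page_info array, data ring, descriptor ring) is released only if it was allocated and the pointer is then cleared, so a repeated teardown is harmless. A minimal userspace sketch of that idiom; plain malloc/free stand in for the kernel allocator and gve_dma_free_coherent(), and the struct is illustrative, not the driver's.

#include <stdlib.h>

struct rx_ring_sketch {
    void *page_info;
    void *data_ring;
    void *desc_ring;
};

/* Free each resource only if it was allocated, then NULL the pointer
 * so a second teardown is harmless. */
static void
rx_ring_teardown(struct rx_ring_sketch *rx)
{
    if (rx->page_info != NULL) {
        free(rx->page_info);
        rx->page_info = NULL;
    }
    if (rx->data_ring != NULL) {
        free(rx->data_ring);
        rx->data_ring = NULL;
    }
    if (rx->desc_ring != NULL) {
        free(rx->desc_ring);
        rx->desc_ring = NULL;
    }
}

int
main(void)
{
    struct rx_ring_sketch rx = {
        .page_info = malloc(64),
        .data_ring = malloc(64),
        .desc_ring = malloc(64),
    };

    rx_ring_teardown(&rx);
    rx_ring_teardown(&rx);  /* second call is a no-op */
    return (0);
}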
59 struct gve_rx_ring *rx = &priv->rx[i];
60 struct gve_ring_com *com = &rx->com;
63 gve_free_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
77 gve_prefill_rx_slots(struct gve_rx_ring *rx)
79 struct gve_ring_com *com = &rx->com;
84 rx->data_ring[i].qpl_offset = htobe64(PAGE_SIZE * i);
85 rx->page_info[i].page_offset = 0;
86 rx->page_info[i].page_address = com->qpl->dmas[i].cpu_addr;
87 rx->page_info[i].page = com->qpl->pages[i];
93 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map,
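In the prefill path above, each data-ring slot i is pointed at byte offset PAGE_SIZE * i of the queue page list, stored big-endian, while page_info[i] mirrors the page and its mapped address on the host side; the ring is then synced toward the device. A small standalone sketch of just the offset arithmetic; the struct name and the 4 KiB page size are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <sys/endian.h>   /* htobe64()/be64toh(); <endian.h> on glibc */

#define PAGE_SIZE_SKETCH 4096   /* illustrative page size */

struct data_slot_sketch {
    uint64_t qpl_offset;    /* big-endian byte offset into the QPL */
};

int
main(void)
{
    struct data_slot_sketch ring[4];
    int i;

    /* Slot i advertises page i of the queue page list to the device. */
    for (i = 0; i < 4; i++)
        ring[i].qpl_offset = htobe64((uint64_t)PAGE_SIZE_SKETCH * i);

    for (i = 0; i < 4; i++)
        printf("slot %d -> offset %llu\n", i,
            (unsigned long long)be64toh(ring[i].qpl_offset));
    return (0);
}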
100 struct gve_rx_ring *rx = &priv->rx[i];
101 struct gve_ring_com *com = &rx->com;
106 CACHE_LINE_SIZE, &rx->desc_ring_mem);
109 "Failed to alloc desc ring for rx ring %d", i);
113 rx->mask = priv->rx_pages_per_qpl - 1;
114 rx->desc_ring = rx->desc_ring_mem.cpu_addr;
118 device_printf(priv->dev, "No QPL left for rx ring %d", i);
122 rx->page_info = malloc(priv->rx_desc_cnt * sizeof(*rx->page_info),
127 CACHE_LINE_SIZE, &rx->data_ring_mem);
130 "Failed to alloc data ring for rx ring %d", i);
133 rx->data_ring = rx->data_ring_mem.cpu_addr;
135 gve_prefill_rx_slots(rx);
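The allocation path above derives the index mask as count minus one (rx->mask = priv->rx_pages_per_qpl - 1), which only works because the counts are powers of two: a free-running counter is reduced to a ring index with a single AND. A standalone sketch of that indexing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t ring_size = 1024;      /* must be a power of two */
    uint32_t mask = ring_size - 1;
    uint32_t cnt;

    assert((ring_size & (ring_size - 1)) == 0);

    /* The counter never wraps explicitly; the AND does the wrapping. */
    for (cnt = 1020; cnt < 1028; cnt++)
        printf("cnt %u -> idx %u\n", cnt, cnt & mask);
    return (0);
}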
146 struct gve_rx_ring *rx = &priv->rx[i];
147 struct gve_ring_com *com = &rx->com;
153 gve_alloc_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
159 "Failed to alloc queue resources for rx ring %d", i);
184 priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.num_queues,
198 free(priv->rx, M_GVE);
210 free(priv->rx, M_GVE);
214 gve_rx_clear_data_ring(struct gve_rx_ring *rx)
216 struct gve_priv *priv = rx->com.priv;
220 * The Rx data ring has this invariant: "the networking stack is not
229 rx->data_ring[i].qpl_offset = htobe64(PAGE_SIZE * i +
230 rx->page_info[i].page_offset);
231 rx->fill_cnt++;
234 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map,
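gve_rx_clear_data_ring() re-arms every slot: the offset published to the device is the page's base offset in the QPL plus whatever page_offset the driver currently owns (the flip granularity itself is not visible in this listing), and fill_cnt counts the slots made available before the ring is synced. A sketch of that re-arm arithmetic, with illustrative types and example offsets:

#include <stdint.h>
#include <sys/endian.h>   /* htobe64(); <endian.h> on glibc */

#define PAGE_SIZE_SKETCH 4096

struct slot_sketch  { uint64_t qpl_offset; };
struct pinfo_sketch { uint32_t page_offset; };  /* driver-owned offset in the page */

/* Point slot i back at its page plus the offset the driver currently
 * owns, and count how many buffers were made available to the device. */
static uint32_t
rearm_slots(struct slot_sketch *ring, struct pinfo_sketch *info, int n)
{
    uint32_t fill_cnt = 0;
    int i;

    for (i = 0; i < n; i++) {
        ring[i].qpl_offset = htobe64((uint64_t)PAGE_SIZE_SKETCH * i +
            info[i].page_offset);
        fill_cnt++;
    }
    return (fill_cnt);
}

int
main(void)
{
    struct slot_sketch ring[2];
    struct pinfo_sketch info[2] = { { 0 }, { 2048 } };  /* example offsets */

    return (rearm_slots(ring, info, 2) == 2 ? 0 : 1);
}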
239 gve_rx_clear_desc_ring(struct gve_rx_ring *rx)
241 struct gve_priv *priv = rx->com.priv;
245 rx->desc_ring[i] = (struct gve_rx_desc){};
247 bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
254 struct gve_rx_ring *rx = &priv->rx[i];
261 rx->seq_no = 1;
262 rx->cnt = 0;
263 rx->fill_cnt = 0;
264 rx->mask = priv->rx_desc_cnt - 1;
266 gve_rx_clear_desc_ring(rx);
267 gve_rx_clear_data_ring(rx);
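The reset path above puts the ring back into its initial state: consumer and fill counters to zero, the expected sequence number to 1, and the mask rebuilt from the descriptor count, before the descriptor and data rings are cleared. A sketch, with an illustrative ring struct:

#include <stdint.h>
#include <string.h>

struct rx_ring_sketch {
    uint32_t cnt;       /* descriptors consumed so far */
    uint32_t fill_cnt;  /* buffers posted to the device */
    uint32_t mask;      /* desc_cnt - 1 */
    uint8_t  seq_no;    /* next sequence number we expect */
};

static void
rx_ring_reset(struct rx_ring_sketch *rx, uint32_t desc_cnt)
{
    memset(rx, 0, sizeof(*rx));
    rx->mask = desc_cnt - 1;    /* desc_cnt assumed a power of two */
    rx->seq_no = 1;             /* expected sequence restarts at 1 */
}

int
main(void)
{
    struct rx_ring_sketch rx;

    rx_ring_reset(&rx, 1024);
    return (rx.mask == 1023 && rx.seq_no == 1 ? 0 : 1);
}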
273 struct gve_rx_ring *rx = &priv->rx[i];
274 struct gve_ring_com *com = &rx->com;
277 if (tcp_lro_init(&rx->lro) != 0)
278 device_printf(priv->dev, "Failed to init lro for rx ring %d", i);
279 rx->lro.ifp = priv->ifp;
283 NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq, rx);
285 NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq_dqo, rx);
286 com->cleanup_tq = taskqueue_create_fast("gve rx", M_WAITOK,
293 /* GQ RX bufs are prefilled at ring alloc time */
294 gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt);
296 gve_rx_prefill_buffers_dqo(rx);
303 struct gve_rx_ring *rx;
321 rx = &priv->rx[i];
322 com = &rx->com;
341 struct gve_rx_ring *rx = &priv->rx[i];
342 struct gve_ring_com *com = &rx->com;
350 tcp_lro_free(&rx->lro);
351 rx->ctx = (struct gve_rx_ctx){};
376 struct gve_rx_ring *rx = arg;
377 struct gve_priv *priv = rx->com.priv;
378 struct gve_ring_com *com = &rx->com;
384 taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
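The interrupt path above does no descriptor processing itself; it only enqueues the ring's cleanup task (NET_TASK_INIT, taskqueue_create_fast and taskqueue_enqueue are the FreeBSD primitives visible earlier in the listing). The standalone sketch below imitates that hand-off with a toy task type; it is not taskqueue(9).

#include <stdbool.h>
#include <stdio.h>

struct task_sketch {
    void (*fn)(void *arg);
    void *arg;
    bool  pending;
};

static void
taskqueue_enqueue_sketch(struct task_sketch *t)
{
    t->pending = true;  /* a real taskqueue wakes a worker thread */
}

/* The "interrupt handler": schedule the cleanup task and return. */
static void
rx_intr_sketch(struct task_sketch *cleanup_task)
{
    taskqueue_enqueue_sketch(cleanup_task);
}

static void
rx_cleanup_fn(void *arg)
{
    (void)arg;
    printf("rx cleanup running in task context\n");
}

int
main(void)
{
    struct task_sketch t = { rx_cleanup_fn, NULL, false };

    rx_intr_sketch(&t);
    if (t.pending)      /* the worker thread's job, done inline here */
        t.fn(t.arg);
    return (0);
}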
421 gve_rx_create_mbuf(struct gve_priv *priv, struct gve_rx_ring *rx,
425 struct gve_rx_ctx *ctx = &rx->ctx;
440 counter_u64_add_protected(rx->stats.rx_copybreak_cnt, 1);
446 KASSERT(len <= MCLBYTES, ("gve rx fragment bigger than cluster mbuf"));
491 counter_u64_add_protected(rx->stats.rx_frag_flip_cnt, 1);
504 counter_u64_add_protected(rx->stats.rx_frag_copy_cnt, 1);
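The counters touched in gve_rx_create_mbuf() (rx_copybreak_cnt, rx_frag_flip_cnt, rx_frag_copy_cnt) suggest three outcomes when a received fragment becomes an mbuf: small frames are copied out (copybreak), larger ones either donate the receive page to the stack ("flip") or fall back to a copy, and the KASSERT bounds a fragment to MCLBYTES. A sketch of that decision; the threshold and enum are assumptions, not the driver's values.

#include <stdbool.h>
#include <stdint.h>

#define COPYBREAK_SKETCH 256    /* bytes; assumed threshold, the real one is a tunable */

enum rx_path { RX_COPYBREAK, RX_FLIP, RX_COPY };

/* Small frames are copied so the receive page can be reused at once;
 * larger frames give the page away when possible and copy otherwise. */
static enum rx_path
choose_rx_path(uint32_t len, bool page_can_flip)
{
    if (len <= COPYBREAK_SKETCH)
        return (RX_COPYBREAK);
    if (page_can_flip)
        return (RX_FLIP);
    return (RX_COPY);
}

int
main(void)
{
    return (choose_rx_path(128, false) == RX_COPYBREAK &&
        choose_rx_path(1500, true) == RX_FLIP &&
        choose_rx_path(1500, false) == RX_COPY ? 0 : 1);
}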
526 gve_rx(struct gve_priv *priv, struct gve_rx_ring *rx, struct gve_rx_desc *desc,
532 struct gve_rx_ctx *ctx = &rx->ctx;
548 counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1);
549 counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
555 page_info = &rx->page_info[idx];
556 data_slot = &rx->data_ring[idx];
557 page_dma_handle = &(rx->com.qpl->dmas[idx]);
565 mbuf = gve_rx_create_mbuf(priv, rx, page_info, len, data_slot,
570 counter_u64_add_protected(rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1);
571 counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
603 (rx->lro.lro_cnt != 0) && /* LRO resources exist */
604 (tcp_lro_rx(&rx->lro, mbuf, 0) == 0))
611 counter_u64_add_protected(rx->stats.rbytes, ctx->total_size);
612 counter_u64_add_protected(rx->stats.rpackets, 1);
619 rx->ctx = (struct gve_rx_ctx){};
623 gve_rx_work_pending(struct gve_rx_ring *rx)
629 next_idx = rx->cnt & rx->mask;
630 desc = rx->desc_ring + next_idx;
634 return (GVE_SEQNO(flags_seq) == rx->seq_no);
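gve_rx_work_pending() looks at the descriptor the consumer counter currently points to (cnt & mask) and reports work only if its sequence field equals the sequence number the driver expects next. A standalone sketch; SEQNO_SKETCH() and the one-byte descriptor are stand-ins, not the device's real format.

#include <stdbool.h>
#include <stdint.h>

#define SEQNO_SKETCH(flags_seq) ((flags_seq) & 0x7)   /* assumed field width */

struct rx_desc_sketch { uint8_t flags_seq; };

static bool
rx_work_pending(const struct rx_desc_sketch *ring, uint32_t cnt,
    uint32_t mask, uint8_t expected_seq)
{
    const struct rx_desc_sketch *desc = &ring[cnt & mask];

    return (SEQNO_SKETCH(desc->flags_seq) == expected_seq);
}

int
main(void)
{
    struct rx_desc_sketch ring[4] = { { 0 }, { 0 }, { 1 }, { 0 } };

    /* cnt = 2 -> idx 2; its sequence (1) matches, so work is pending. */
    return (rx_work_pending(ring, 2, 3, 1) ? 0 : 1);
}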
644 gve_rx_cleanup(struct gve_priv *priv, struct gve_rx_ring *rx, int budget)
646 uint32_t idx = rx->cnt & rx->mask;
648 struct gve_rx_ctx *ctx = &rx->ctx;
653 bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
655 desc = &rx->desc_ring[idx];
658 (GVE_SEQNO(desc->flags_seq) == rx->seq_no)) {
660 gve_rx(priv, rx, desc, idx);
662 rx->cnt++;
663 idx = rx->cnt & rx->mask;
664 desc = &rx->desc_ring[idx];
665 rx->seq_no = gve_next_seqno(rx->seq_no);
672 rx->ctx = (struct gve_rx_ctx){};
675 GVE_SEQNO(desc->flags_seq), rx->seq_no);
680 tcp_lro_flush_all(&rx->lro);
682 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map,
686 rx->fill_cnt += work_done;
687 gve_db_bar_write_4(priv, rx->com.db_offset, rx->fill_cnt);
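gve_rx_cleanup() syncs the descriptor ring, consumes descriptors while the budget lasts and the sequence numbers keep matching, advancing cnt, idx and seq_no for each one, then flushes LRO, syncs the data ring, folds the work done into fill_cnt and writes that to the doorbell. The sketch below reproduces that loop shape; next_seqno() is a guess at the wrap behaviour (1..7), not the driver's gve_next_seqno(), and process_desc() is a placeholder.

#include <stdint.h>

#define SEQNO_SKETCH(flags_seq) ((flags_seq) & 0x7)   /* assumed field width */

struct rx_desc_sketch { uint8_t flags_seq; };

struct rx_ring_sketch {
    struct rx_desc_sketch *desc_ring;
    uint32_t cnt, fill_cnt, mask;
    uint8_t  seq_no;
};

static uint8_t
next_seqno(uint8_t seq)
{
    return (seq == 7 ? 1 : seq + 1);    /* assumed 1..7 wrap */
}

static void
process_desc(struct rx_ring_sketch *rx, uint32_t idx)
{
    (void)rx;       /* a real driver builds and delivers an mbuf here */
    (void)idx;
}

static uint32_t
rx_cleanup_sketch(struct rx_ring_sketch *rx, int budget)
{
    uint32_t idx = rx->cnt & rx->mask;
    struct rx_desc_sketch *desc = &rx->desc_ring[idx];
    uint32_t work_done = 0;

    while (work_done < (uint32_t)budget &&
        SEQNO_SKETCH(desc->flags_seq) == rx->seq_no) {
        process_desc(rx, idx);
        rx->cnt++;
        idx = rx->cnt & rx->mask;
        desc = &rx->desc_ring[idx];
        rx->seq_no = next_seqno(rx->seq_no);
        work_done++;
    }

    /* Every consumed descriptor frees a buffer slot to repost. */
    rx->fill_cnt += work_done;
    return (rx->fill_cnt);  /* the caller writes this to the doorbell */
}

int
main(void)
{
    struct rx_desc_sketch descs[4] = { { 1 }, { 2 }, { 0 }, { 0 } };
    struct rx_ring_sketch rx = { .desc_ring = descs, .mask = 3, .seq_no = 1 };

    /* Two descriptors carry the expected sequence numbers 1 and 2. */
    return (rx_cleanup_sketch(&rx, 128) == 2 ? 0 : 1);
}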
693 struct gve_rx_ring *rx = arg;
694 struct gve_priv *priv = rx->com.priv;
699 gve_rx_cleanup(priv, rx, /*budget=*/128);
701 gve_db_bar_write_4(priv, rx->com.irq_db_offset,
711 if (gve_rx_work_pending(rx)) {
712 gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
713 taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
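The taskqueue handler runs a bounded cleanup pass (budget 128), re-enables the interrupt, and then re-checks for work: a completion that arrived just before the unmask would otherwise be stranded, so the handler masks again and reschedules itself. A standalone sketch of that re-arm check, with hypothetical stand-ins for the driver's helpers:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's helpers. */
static void rx_process_batch(void *rx, int budget) { (void)rx; (void)budget; }
static void irq_unmask(void *rx) { (void)rx; }
static void irq_mask(void *rx) { (void)rx; }
static bool rx_work_arrived(void *rx) { (void)rx; return (false); }
static void reschedule_cleanup(void *rx) { (void)rx; printf("re-armed\n"); }

static void
rx_cleanup_task_sketch(void *rx)
{
    rx_process_batch(rx, 128);  /* bounded batch, as in the listing */
    irq_unmask(rx);

    /* A completion may have raced the unmask; if so, mask again and
     * reschedule so it is not left stranded until the next interrupt. */
    if (rx_work_arrived(rx)) {
        irq_mask(rx);
        reschedule_cleanup(rx);
    }
}

int
main(void)
{
    rx_cleanup_task_sketch(NULL);
    return (0);
}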