Lines Matching defs:rxq (each hit is the source line number followed by the matching line)
410 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
419 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
427 mlen = rxq->datasize;
446 counter_u64_add(rxq->stats.dma_mapping_err, 1);
467 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
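
The hits at source lines 410-467 are the RX buffer load/unload helpers: mana_load_rx_mbuf() allocates a cluster mbuf of rxq->datasize bytes with m_getjcl(), records the length, and DMA-maps it, while rxq->stats.dma_mapping_err counts mapping failures; mana_unload_rx_mbuf() is the inverse path, and the boolean argument seen in its callers appears to select whether the mbuf itself is also freed. Below is a minimal user-space sketch of that control flow only; the types and map_buf() are stand-ins for the driver's mbuf and bus_dma plumbing, not its actual code.

    /*
     * Model of the two failure points in mana_load_rx_mbuf(): buffer
     * allocation (m_getjcl() of rxq->datasize bytes) and DMA mapping;
     * only the latter is what rxq->stats.dma_mapping_err counts.
     */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct rx_oob_model {
            void   *buf;            /* stands in for the mbuf cluster */
            size_t  mlen;           /* mlen = rxq->datasize in the driver */
    };

    static uint64_t dma_mapping_err;

    static bool map_buf(void *buf) { return (buf != NULL); }  /* bus_dma stub */

    static int
    load_rx_buf(struct rx_oob_model *oob, size_t datasize)
    {
            void *buf = malloc(datasize);   /* m_getjcl(M_NOWAIT, ...) stand-in */

            if (buf == NULL)
                    return (ENOMEM);        /* no cluster available */

            if (!map_buf(buf)) {            /* mapping failed */
                    dma_mapping_err++;      /* driver: counter_u64_add(...) */
                    free(buf);
                    return (EFAULT);
            }
            oob->buf = buf;
            oob->mlen = datasize;
            return (0);
    }
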
1340 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1346 init_completion(&rxq->fence_event);
1350 req.wq_obj_handle = rxq->rxobj;
1356 rxq->rxq_idx, err);
1363 rxq->rxq_idx, err, resp.hdr.status);
1370 if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
1372 rxq->rxq_idx);
1383 struct mana_rxq *rxq;
1387 rxq = apc->rxqs[rxq_idx];
1388 err = mana_fence_rq(apc, rxq);
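
Source lines 1340-1388 are the RQ fence path: mana_fence_rq() arms rxq->fence_event with init_completion(), sends a request carrying rxq->rxobj, and waits up to 10 * hz ticks; the event is signalled from the RX completion path (complete() at line 1747) when the fence completion arrives, and mana_fence_rqs() simply runs this for each queue in apc->rxqs. The user-space model below shows that arm/send/wait handshake with a pthread-based stand-in for the kernel completion primitive (build with -pthread); it is not the driver's implementation, and the driver's check at line 1370 appears to treat a nonzero return as a timeout, which the stand-in mirrors.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Pthread stand-in for the completion used as rxq->fence_event. */
    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t  cv;
            bool            done;
    };

    static void
    init_completion(struct completion *c)
    {
            pthread_mutex_init(&c->lock, NULL);
            pthread_cond_init(&c->cv, NULL);
            c->done = false;
    }

    static void
    complete(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = true;
            pthread_cond_signal(&c->cv);
            pthread_mutex_unlock(&c->lock);
    }

    /* 0 when the event fired in time, nonzero on timeout. */
    static int
    wait_for_completion_timeout(struct completion *c, int seconds)
    {
            struct timespec ts;
            int rc = 0;

            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += seconds;

            pthread_mutex_lock(&c->lock);
            while (!c->done && rc == 0)
                    rc = pthread_cond_timedwait(&c->cv, &c->lock, &ts);
            bool done = c->done;
            pthread_mutex_unlock(&c->lock);
            return (done ? 0 : rc);
    }

    static struct completion fence_event;

    /* Simulates the RX CQE handler completing the fence after a delay. */
    static void *
    rx_cqe_handler(void *arg)
    {
            (void)arg;
            sleep(1);
            complete(&fence_event);
            return (NULL);
    }

    int
    main(void)
    {
            pthread_t t;

            init_completion(&fence_event);
            /* ... the driver sends the fence request for rxq->rxobj here ... */
            pthread_create(&t, NULL, rx_cqe_handler, NULL);

            if (wait_for_completion_timeout(&fence_event, 10))
                    fprintf(stderr, "fence timed out\n");
            else
                    printf("RQ fenced\n");
            pthread_join(t, NULL);
            return (0);
    }
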
1589 mana_post_pkt_rxq(struct mana_rxq *rxq)
1595 curr_index = rxq->buf_index++;
1596 if (rxq->buf_index == rxq->num_rx_buf)
1597 rxq->buf_index = 0;
1599 recv_buf_oob = &rxq->rx_oobs[curr_index];
1601 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1604 mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1605 rxq->rxq_idx, err);
1610 mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1611 rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
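
Source lines 1589-1611 are mana_post_pkt_rxq(), which hands one receive buffer back to the hardware: it takes the slot at rxq->buf_index, advances the index with a wrap at num_rx_buf, and posts that slot's pre-built work request on gdma_rq, warning if the post fails or the reported WQE size is unexpected. A small standalone sketch of the ring-index handling follows; post_work_request() is a stub for mana_gd_post_work_request(), and NUM_RX_BUF is an assumed ring size, not necessarily the driver's RX_BUFFERS_PER_QUEUE.

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RX_BUF      512     /* assumed stand-in for RX_BUFFERS_PER_QUEUE */

    struct rxq_model {
            uint32_t buf_index;     /* next slot to hand back to hardware */
            uint32_t num_rx_buf;
    };

    /* Stub for mana_gd_post_work_request(rxq->gdma_rq, &oob->wqe_req, ...). */
    static int
    post_work_request(uint32_t slot)
    {
            (void)slot;
            return (0);
    }

    static void
    post_pkt_rxq(struct rxq_model *rxq)
    {
            uint32_t curr_index = rxq->buf_index++;

            if (rxq->buf_index == rxq->num_rx_buf)
                    rxq->buf_index = 0;     /* the OOB array is a ring */

            if (post_work_request(curr_index) != 0)
                    fprintf(stderr, "WARNING: post pkt err at slot %u\n",
                        curr_index);
    }

    int
    main(void)
    {
            struct rxq_model rxq = {
                    .buf_index = NUM_RX_BUF - 1,
                    .num_rx_buf = NUM_RX_BUF,
            };

            post_pkt_rxq(&rxq);     /* consumes the last slot */
            printf("next slot after wrap: %u\n", rxq.buf_index);    /* prints 0 */
            return (0);
    }
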
1617 struct mana_rxq *rxq)
1619 struct mana_stats *rx_stats = &rxq->stats;
1620 if_t ndev = rxq->ndev;
1622 uint16_t rxq_idx = rxq->rxq_idx;
1628 rxq->rx_cq.work_done++;
1700 rxq->lro_tried++;
1701 if (rxq->lro.lro_cnt != 0 &&
1702 tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1705 rxq->lro_failed++;
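
Source lines 1617-1705 are from mana_rx_mbuf(), the per-packet delivery routine: it bumps the queue statistics and cq->work_done, then tries to merge the mbuf into an LRO session via tcp_lro_rx() when rxq->lro.lro_cnt is nonzero, counting lro_failed and falling back to normal input when the merge is refused. A sketch of that decision is below; try_lro() and input_pkt() are stubs for tcp_lro_rx() and if_input(), and the driver gates LRO on more conditions than are visible in these hits.

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t lro_tried, lro_failed;  /* rxq->lro_tried / rxq->lro_failed */

    static int  try_lro(void *pkt)   { (void)pkt; return (-1); }  /* refuse */
    static void input_pkt(void *pkt) { (void)pkt; }

    static void
    deliver_rx_pkt(void *pkt, bool lro_enabled)
    {
            if (lro_enabled) {
                    lro_tried++;
                    if (try_lro(pkt) == 0)
                            return;         /* merged into an LRO session */
                    lro_failed++;
            }
            input_pkt(pkt);                 /* ordinary per-packet delivery */
    }

The split mirrors tcp_lro_rx()'s contract: a zero return means LRO consumed the mbuf, while a nonzero return leaves the mbuf with the caller, which must then deliver it itself.
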
1720 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1725 if_t ndev = rxq->ndev;
1738 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1747 complete(&rxq->fence_event);
1764 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1768 curr = rxq->buf_index;
1769 rxbuf_oob = &rxq->rx_oobs[curr];
1781 mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1784 err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1789 counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1796 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1799 mana_rx_mbuf(old_mbuf, oob, rxq);
1802 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1804 mana_post_pkt_rxq(rxq);
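
Source lines 1720-1804 are mana_process_rx_cqe(). For a normal packet it takes the OOB at rxq->buf_index, unmaps the filled mbuf, and tries to load a fresh one into the slot; if that allocation fails it counts mbuf_alloc_fail, maps the old mbuf back into the slot (the false argument at line 1796), and drops the packet so the ring never loses a buffer. Otherwise the old mbuf goes up through mana_rx_mbuf(), the WQ tail is advanced, and the slot is reposted with mana_post_pkt_rxq(). A user-space model of that swap-or-reuse pattern, with malloc()/free() standing in for the mbuf and bus_dma work:

    #include <stdint.h>
    #include <stdlib.h>

    struct rx_slot {
            void *buf;      /* buffer currently owned by this ring slot */
    };

    static uint64_t mbuf_alloc_fail;        /* rxq->stats.mbuf_alloc_fail */

    static void deliver(void *buf) { free(buf); }   /* mana_rx_mbuf() stand-in */

    static void
    process_rx_slot(struct rx_slot *slot, size_t datasize)
    {
            void *old_buf = slot->buf;
            void *new_buf = malloc(datasize);   /* mana_load_rx_mbuf() stand-in */

            if (new_buf == NULL) {
                    /* No replacement: keep the old buffer in the slot and
                     * drop this packet instead of shrinking the ring. */
                    mbuf_alloc_fail++;
                    return;
            }

            slot->buf = new_buf;    /* slot now owns the fresh buffer */
            deliver(old_buf);       /* old buffer carries the packet upstream */

            /* The driver then calls mana_move_wq_tail() and
             * mana_post_pkt_rxq() to give the refreshed slot back to HW. */
    }
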
1825 /* verify recv cqe references the right rxq */
1826 if (comp[i].wq_num != cq->rxq->gdma_id) {
1830 comp[i].wq_num, cq->rxq->gdma_id);
1834 mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1839 cq->rxq->gdma_rq->gdma_dev->gdma_context;
1841 mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq);
1844 tcp_lro_flush_all(&cq->rxq->lro);
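
Source lines 1825-1844 are the completion-polling loop: each receive CQE is checked against the RX queue it should belong to (comp[i].wq_num versus cq->rxq->gdma_id), then processed, and only after the whole batch does the driver ring the receive queue doorbell and call tcp_lro_flush_all() to push any held-back LRO sessions. A sketch of that batching structure is below; the types and helpers are stand-ins, and the exact conditions around the doorbell are not visible in these hits.

    #include <stdint.h>
    #include <stdio.h>

    struct cqe_model { uint32_t wq_num; };

    static void process_rx_cqe(const struct cqe_model *c) { (void)c; }
    static void ring_rq_doorbell(void) { }   /* mana_gd_wq_ring_doorbell() stub */
    static void lro_flush_all(void) { }      /* tcp_lro_flush_all() stub */

    static void
    poll_rx_cq(uint32_t rxq_id, const struct cqe_model *comp, int ncqe)
    {
            for (int i = 0; i < ncqe; i++) {
                    /* Verify the CQE references the right rxq. */
                    if (comp[i].wq_num != rxq_id) {
                            fprintf(stderr, "CQE wq %u != rxq %u\n",
                                comp[i].wq_num, rxq_id);
                            return;
                    }
                    process_rx_cqe(&comp[i]);
            }

            /* One doorbell for the whole batch of reposted buffers, then
             * flush whatever LRO has been accumulating. */
            ring_rq_doorbell();
            lro_flush_all();
    }
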
2192 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2199 if (!rxq)
2209 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2211 mana_deinit_cq(apc, &rxq->rx_cq);
2213 mana_free_counters((counter_u64_t *)&rxq->stats,
2214 sizeof(rxq->stats));
2217 tcp_lro_free(&rxq->lro);
2219 for (i = 0; i < rxq->num_rx_buf; i++) {
2220 rx_oob = &rxq->rx_oobs[i];
2223 mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2228 if (rxq->gdma_rq)
2229 mana_gd_destroy_queue(gc, rxq->gdma_rq);
2231 free(rxq, M_DEVBUF);
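
Source lines 2192-2231 are mana_destroy_rxq(); the hits show the teardown order: bail out on a NULL queue, destroy the hardware RQ object (rxq->rxobj), deinitialize the paired CQ, free the per-queue counters, free the LRO state, unload every rx_oobs[] mbuf, destroy the underlying GDMA queue if it exists, and finally free the rxq itself. The skeleton below keeps only that ordering and the guards; every helper is a stub, not a driver call.

    #include <stdlib.h>

    struct rxq_teardown_model {
            int      num_rx_buf;
            void   **bufs;          /* rx_oobs[] buffers, model only */
            void    *gdma_rq;       /* underlying GDMA queue, may be NULL */
    };

    static void destroy_wq_obj(struct rxq_teardown_model *r)  { (void)r; }
    static void deinit_cq(struct rxq_teardown_model *r)       { (void)r; }
    static void free_counters(struct rxq_teardown_model *r)   { (void)r; }
    static void free_lro(struct rxq_teardown_model *r)        { (void)r; }
    static void destroy_gdma_queue(void *q)                   { (void)q; }

    static void
    destroy_rxq_model(struct rxq_teardown_model *rxq)
    {
            if (rxq == NULL)                /* line 2199: nothing to do */
                    return;

            destroy_wq_obj(rxq);            /* hardware RQ object (rxobj) */
            deinit_cq(rxq);                 /* paired completion queue */
            free_counters(rxq);             /* per-queue statistics */
            free_lro(rxq);                  /* tcp_lro_free() */

            for (int i = 0; i < rxq->num_rx_buf; i++)   /* unmap/free buffers */
                    free(rxq->bufs[i]);

            if (rxq->gdma_rq != NULL)       /* line 2228 */
                    destroy_gdma_queue(rxq->gdma_rq);
            free(rxq);                      /* free(rxq, M_DEVBUF) in the driver */
    }
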
2239 struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2245 if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2247 "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2253 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2254 rx_oob = &rxq->rx_oobs[buf_idx];
2266 err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2291 mana_push_wqe(struct mana_rxq *rxq)
2297 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2298 rx_oob = &rxq->rx_oobs[buf_idx];
2300 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2320 struct mana_rxq *rxq;
2325 rxq = malloc(sizeof(*rxq) +
2328 rxq->ndev = ndev;
2329 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2330 rxq->rxq_idx = rxq_idx;
2335 rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2336 if (rxq->datasize > MAX_FRAME_SIZE)
2337 rxq->datasize = MAX_FRAME_SIZE;
2339 mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2340 rxq_idx, rxq->datasize);
2342 rxq->rxobj = INVALID_MANA_HANDLE;
2344 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2350 err = tcp_lro_init(&rxq->lro);
2352 if_printf(ndev, "Failed to create LRO for rxq %d\n",
2355 rxq->lro.ifp = ndev;
2359 mana_alloc_counters((counter_u64_t *)&rxq->stats,
2360 sizeof(rxq->stats));
2370 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2375 cq = &rxq->rx_cq;
2377 cq->rxq = rxq;
2392 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2393 wq_spec.queue_size = rxq->gdma_rq->queue_size;
2401 &wq_spec, &cq_spec, &rxq->rxobj);
2405 rxq->gdma_rq->id = wq_spec.queue_index;
2408 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2411 rxq->gdma_id = rxq->gdma_rq->id;
2414 err = mana_push_wqe(rxq);
2445 apc->port_idx, rxq->rxq_idx, cq->cpu);
2449 apc->port_idx, rxq->rxq_idx);
2455 return rxq;
2459 mana_destroy_rxq(apc, rxq, false);
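
Source lines 2320-2459 are mana_create_rxq(): it allocates the rxq (with space for its receive OOBs), rounds the port's frame size up to a whole mbuf cluster and caps it to form datasize, builds the receive WQEs, sets up LRO and the statistics counters, creates the GDMA WQ/CQ pair and the hardware WQ object (rxq->rxobj), and finally pushes the initial WQEs. The datasize arithmetic at lines 2335-2337 is easy to check in isolation; the standalone program below assumes ALIGN() rounds up to a multiple of its power-of-two second argument, uses FreeBSD's MCLBYTES of 2048, and uses a placeholder MAX_FRAME_SIZE rather than the driver's actual cap.

    #include <stdio.h>

    #define MCLBYTES        2048U   /* FreeBSD mbuf cluster size */
    #define MAX_FRAME_SIZE  16384U  /* placeholder cap, not the driver's value */

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    int
    main(void)
    {
            unsigned frame_sizes[] = { 1518, 4096, 9022 };

            for (size_t i = 0; i < sizeof(frame_sizes) / sizeof(frame_sizes[0]); i++) {
                    unsigned datasize = ALIGN(frame_sizes[i], MCLBYTES);

                    if (datasize > MAX_FRAME_SIZE)
                            datasize = MAX_FRAME_SIZE;
                    printf("frame_size %u -> rxq datasize %u\n",
                        frame_sizes[i], datasize);
            }
            return (0);
    }

For example, a standard 1518-byte frame gets a 2048-byte receive buffer, and a 9022-byte jumbo frame rounds up to 10240 bytes (five clusters) before the cap is applied.
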
2471 struct mana_rxq *rxq;
2476 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2477 if (!rxq) {
2482 apc->rxqs[i] = rxq;
2493 struct mana_rxq *rxq;
2497 rxq = apc->rxqs[rxq_idx];
2498 if (!rxq)
2501 mana_destroy_rxq(apc, rxq, true);