Lines Matching defs:rx
1010 struct mvneta_rx_ring *rx;
1016 rx = MVNETA_RX_RING(sc, q);
1018 rx->queue_status = MVNETA_QUEUE_DISABLED;
1203 struct mvneta_rx_ring *rx;
1211 rx = MVNETA_RX_RING(sc, q);
1212 mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
1215 (void**)&(rx->desc),
1217 &rx->desc_map);
1218 if (error != 0 || rx->desc == NULL)
1220 error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
1221 rx->desc,
1223 mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
1234 rxbuf = &rx->rxbuf[i];
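
The group above (the RX ring allocation path, source lines 1203-1234) creates the per-queue mutex, allocates the descriptor ring as DMA-able memory, and recovers its bus address through the load callback (mvneta_dmamap_cb). A minimal sketch of that bus_dma(9) pattern follows; the names ring_cb/ring_alloc and the flag choices are illustrative assumptions, not the driver's own code.

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <machine/bus.h>

    /* Load callback: a single-segment load hands back the ring's bus address. */
    static void
    ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
        if (error == 0 && nseg == 1)
            *(bus_addr_t *)arg = segs[0].ds_addr;
    }

    static int
    ring_alloc(bus_dma_tag_t dtag, bus_size_t size, void **descp,
        bus_dmamap_t *mapp, bus_addr_t *pap)
    {
        int error;

        /* Zeroed, cache-coherent descriptor memory mapped into the kernel. */
        error = bus_dmamem_alloc(dtag, descp,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, mapp);
        if (error != 0 || *descp == NULL)
            return (error != 0 ? error : ENOMEM);

        /* Resolve the bus address that the NIC's ring-base register takes. */
        error = bus_dmamap_load(dtag, *mapp, *descp, size,
            ring_cb, pap, BUS_DMA_NOWAIT);
        return (error);
    }
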
1346 struct mvneta_rx_ring *rx;
1353 rx = MVNETA_RX_RING(sc, q);
1355 if (rx->desc_pa != 0)
1356 bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
1358 kva = (void *)rx->desc;
1360 bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
1362 lro = &rx->lro;
1365 if (mtx_name(&rx->ring_mtx) != NULL)
1366 mtx_destroy(&rx->ring_mtx);
1368 memset(rx, 0, sizeof(*rx));
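
Deallocation (source lines 1346-1368) undoes that setup in reverse: unload the bus mapping, free the descriptor memory together with its map, release the LRO state, destroy the ring mutex, and clear the structure. A condensed sketch, assuming the same ring fields; mtx_initialized() stands in for the driver's mtx_name() != NULL check.

    if (rx->desc_pa != 0)
        bus_dmamap_unload(sc->rx_dtag, rx->desc_map);         /* drop the bus mapping */
    if (rx->desc != NULL)
        bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map); /* memory and map */
    tcp_lro_free(&rx->lro);                                   /* LRO bookkeeping */
    if (mtx_initialized(&rx->ring_mtx))
        mtx_destroy(&rx->ring_mtx);
    memset(rx, 0, sizeof(*rx));                               /* mark the ring unused */
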
1374 struct mvneta_rx_ring *rx;
1381 rx = MVNETA_RX_RING(sc, q);
1382 rx->dma = rx->cpu = 0;
1383 rx->queue_th_received = MVNETA_RXTH_COUNT;
1384 rx->queue_th_time = (sc->clk_freq / 1000) / 10; /* 0.1 [ms] */
1387 rx->lro_enabled = FALSE;
1389 lro = &rx->lro;
1394 rx->lro_enabled = TRUE;
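
Ring initialization (source lines 1374-1394) resets both ring indices, sets the default coalescing thresholds, and brings up software LRO. The default queue_th_time of (sc->clk_freq / 1000) / 10 is the number of core-clock ticks in 0.1 ms, e.g. 25,000 ticks at an assumed 250 MHz clock. A sketch of the LRO bring-up; the capability test and the ifp back-pointer assignment are assumptions about the surrounding code.

    rx->lro_enabled = FALSE;
    if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {  /* only when LRO is enabled */
        if (tcp_lro_init(&rx->lro) == 0) {
            rx->lro_enabled = TRUE;
            rx->lro.ifp = ifp;      /* back-pointer consumed by tcp_lro_rx() */
        }
    }
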
1470 struct mvneta_rx_ring *rx;
1474 rx = MVNETA_RX_RING(sc, q);
1479 rxbuf = &rx->rxbuf[i];
1482 rx->dma = rx->cpu = 0;
1492 struct mvneta_rx_ring *rx;
1497 rx = MVNETA_RX_RING(sc, q);
1498 DASSERT(rx->desc_pa != 0);
1501 MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
1520 DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
1552 struct mvneta_rx_ring *rx;
1556 rx = MVNETA_RX_RING(sc, q);
1560 reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
1563 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
1576 rx->queue_status = MVNETA_QUEUE_WORKING;
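
Queue bring-up (source lines 1492-1576) programs the ring's bus address into MVNETA_PRXDQA(q) and then the two interrupt-coalescing thresholds: the packet-count threshold (PRXDQTH_ODT) and the time threshold in clock ticks (PRXITTH_RITT), before marking the queue MVNETA_QUEUE_WORKING. A compressed sketch of that sequence; the two threshold register names are assumptions inferred from the field macros above.

    MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);   /* descriptor ring base */
    reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);   /* packets per interrupt */
    MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);          /* register name assumed */
    reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);      /* ticks per interrupt */
    MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);          /* register name assumed */
    rx->queue_status = MVNETA_QUEUE_WORKING;
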
1753 struct mvneta_rx_ring *rx;
1783 * We were unable to refill the rx queue and left the rx func, leaving
1787 rx = MVNETA_RX_RING(sc, q);
1788 if (rx->needs_refill == TRUE) {
2044 struct mvneta_rx_ring *rx;
2100 rx = MVNETA_RX_RING(sc, q);
2101 rx->lro_enabled = !rx->lro_enabled;
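
Source lines 2044-2101 flip per-queue LRO when the interface capability is toggled (the SIOCSIFCAP path of the ioctl handler). A hedged sketch of that toggle; rxq_count and the locking shown are assumptions about the surrounding code.

    if_togglecapenable(ifp, IFCAP_LRO);     /* drvapi helper; legacy code XORs
                                               ifp->if_capenable directly */
    for (q = 0; q < rxq_count; q++) {       /* rxq_count: assumed queue count */
        rx = MVNETA_RX_RING(sc, q);
        mtx_lock(&rx->ring_mtx);
        rx->lro_enabled = !rx->lro_enabled;
        mtx_unlock(&rx->ring_mtx);
    }
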
2999 struct mvneta_rx_ring *rx;
3009 rx = MVNETA_RX_RING(sc, q);
3012 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3015 bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
3020 ndma = rx_counter_adv(rx->dma, 1);
3021 mvneta_prefetch(&rx->desc[ndma]);
3022 mvneta_prefetch(&rx->rxbuf[ndma]);
3025 r = &rx->desc[rx->dma];
3026 rxbuf = &rx->rxbuf[rx->dma];
3050 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
3063 rx->dma = ndma;
3065 if (__predict_false(rx->lro_enabled &&
3071 if (rx->lro.lro_cnt != 0) {
3072 if (tcp_lro_rx(&rx->lro, m, 0) == 0)
3084 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3096 rx->dma = ndma;
3116 tcp_lro_flush_all(&rx->lro);
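
The receive path (source lines 2999-3116) walks the ring with two indices: rx->dma is the next descriptor to consume, rx->cpu the next slot to refill, and rx_counter_adv() advances an index with wrap-around. Each pass prefetches the next descriptor and buffer entry, consumes the current one, and hands the mbuf either to tcp_lro_rx() or to the stack; tcp_lro_flush_all() releases anything LRO held back. A skeleton of that loop; the descriptor/buffer field names and rx_input_stub() (a stand-in for the driver's actual if_input delivery) are assumptions.

    static void
    rx_drain_sketch(struct mvneta_softc *sc, struct mvneta_rx_ring *rx,
        if_t ifp, int npkt)
    {
        struct mvneta_rx_desc *r;
        struct mvneta_buf *rxbuf;
        struct mbuf *m;
        int ndma;

        bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        while (npkt-- > 0 && rx->queue_status != MVNETA_QUEUE_DISABLED) {
            ndma = rx_counter_adv(rx->dma, 1);       /* next slot, with wrap */
            mvneta_prefetch(&rx->desc[ndma]);        /* warm the next descriptor */
            mvneta_prefetch(&rx->rxbuf[ndma]);       /* ...and its bookkeeping */

            r = &rx->desc[rx->dma];                  /* current descriptor */
            rxbuf = &rx->rxbuf[rx->dma];
            m = rxbuf->m;                            /* field name: assumption */
            m->m_pkthdr.len = m->m_len = r->bytecnt; /* field name: assumption */
            rx->dma = ndma;                          /* slot is consumed */

            /* Prefer LRO when enabled and the frame qualifies. */
            if (rx->lro_enabled && rx->lro.lro_cnt != 0 &&
                tcp_lro_rx(&rx->lro, m, 0) == 0)
                continue;                            /* held for aggregation */
            rx_input_stub(ifp, m);                   /* hypothetical stack input */
        }
        tcp_lro_flush_all(&rx->lro);                 /* push out aggregated frames */
    }
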
3131 struct mvneta_rx_ring *rx;
3141 rx = MVNETA_RX_RING(sc, q);
3153 rxbuf = &rx->rxbuf[rx->cpu];
3171 r = &rx->desc[rx->cpu];
3173 rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
3175 rx->cpu = rx_counter_adv(rx->cpu, 1);
3179 rx->needs_refill = TRUE;
3183 rx->needs_refill = FALSE;
3184 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
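
Refilling (source lines 3131-3184) runs the other index: for every free slot at rx->cpu it allocates a fresh cluster mbuf, DMA-maps it, writes the buffer's bus address into the descriptor, records the KVA in rxbuf_virt_addr[], and advances rx->cpu; on allocation failure it leaves needs_refill set so the tick path (line 1788 above) can retry. A sketch of refilling a single slot; the buffer tag, map, and descriptor field names are assumptions.

    static void
    rx_refill_one_sketch(struct mvneta_softc *sc, struct mvneta_rx_ring *rx)
    {
        struct mvneta_rx_desc *r;
        struct mvneta_buf *rxbuf;
        struct mbuf *m;
        bus_dma_segment_t seg;
        int error, nsegs;

        m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);  /* cluster mbuf */
        if (m == NULL) {
            rx->needs_refill = TRUE;          /* tick handler retries later */
            return;
        }
        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

        rxbuf = &rx->rxbuf[rx->cpu];
        error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap, m,
            &seg, &nsegs, BUS_DMA_NOWAIT);    /* tag/map names: assumptions */
        if (error != 0 || nsegs != 1) {
            m_freem(m);
            rx->needs_refill = TRUE;
            return;
        }

        r = &rx->desc[rx->cpu];
        r->bufptr_pa = seg.ds_addr;           /* descriptor field: assumption */
        rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
        rxbuf->m = m;                         /* field name: assumption */
        rx->cpu = rx_counter_adv(rx->cpu, 1); /* slot handed back to hardware */
        rx->needs_refill = FALSE;
        bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    }
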
3359 struct mvneta_rx_ring *rx;
3364 rx = NULL;
3380 rx = MVNETA_RX_RING(sc, arg->queue);
3381 time_mvtclk = rx->queue_th_time;
3400 rx->queue_th_time = time_mvtclk;
3401 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
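
The sysctl handler (source lines 3359-3401) keeps the mitigation timer in core-clock ticks (queue_th_time) but exposes it in microseconds, so it converts in both directions. The arithmetic, with 64-bit intermediates to avoid overflow (time_us is an assumed local name):

    time_mvtclk = ((uint64_t)sc->clk_freq * time_us) / 1000000ULL;  /* us -> ticks */
    time_us = (time_mvtclk * 1000000ULL) / sc->clk_freq;            /* ticks -> us */

At an assumed 250 MHz clock, 100 us maps to 25,000 ticks, matching the 0.1 ms default set at line 1384.
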
3443 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
3489 /* hw.mvneta.mvneta[unit].rx.[queue] */
3495 /* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */
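
Source lines 3443-3495 hang these knobs off the device's sysctl tree, giving OIDs of the form hw.mvneta.mvneta<unit>.rx.<queue>.threshold_timer_us. A sketch of registering one per-queue node and its handler; the arg pointer and sysctl_handler_stub are placeholders, not the driver's symbols.

    struct sysctl_oid *rxnode, *qnode;
    char qname[8];

    rxnode = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NETA RX queues");
    snprintf(qname, sizeof(qname), "%d", q);
    qnode = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(rxnode), OID_AUTO, qname,
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX queue");
    SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(qnode), OID_AUTO, "threshold_timer_us",
        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
        arg, 0, sysctl_handler_stub, "IU",      /* arg + handler: placeholders */
        "interrupt coalescing timer, in microseconds");
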