Lines Matching defs:rxr (occurrences of the receive-ring pointer rxr in the igc(4) Ethernet driver; each entry is prefixed with its line number in the driver source)

298 igc_rxdesc_sync(struct rx_ring *rxr, int id, int ops)
301 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
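The helper matched at lines 298/301 syncs a single RX descriptor. A minimal reconstruction, assuming the usual one-descriptor offset/length arithmetic (only the bus_dmamap_sync() call is visible in the matches):

    static void
    igc_rxdesc_sync(struct rx_ring *rxr, int id, int ops)
    {
        /* Sync just the one advanced RX descriptor in slot `id`. */
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
            id * sizeof(union igc_adv_rx_desc),
            sizeof(union igc_adv_rx_desc), ops);
    }

Every access to rxr->rx_base[id] elsewhere in this listing is bracketed by POST*/PRE* calls to this helper.
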
684 struct rx_ring *rxr = &sc->rx_rings[iq];
689 rxr->sc = sc;
690 rxr->rxr_igcq = &sc->queues[iq];
691 rxr->me = iq;
693 timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);
695 if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
700 rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
701 memset(rxr->rx_base, 0, rsize);
718 q->rxr = &sc->rx_rings[iq];
724 for (struct rx_ring *rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
725 igc_dma_free(sc, &rxr->rxdma);
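Lines 684-725 come from the per-queue ring allocation path and its error unwind. A sketch of that shape; sc_nqueues, rxconf, sc_dev, and the err_rx_desc label are assumed names, the rest echoes the matched lines:

    /* Allocate and initialize one rx_ring per queue. */
    for (iq = 0, rxconf = 0; iq < sc->sc_nqueues; iq++, rxconf++) {
        struct rx_ring *rxr = &sc->rx_rings[iq];
        struct igc_queue *q = &sc->queues[iq];

        rxr->sc = sc;
        rxr->rxr_igcq = q;
        rxr->me = iq;
        timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);

        if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
            aprint_error_dev(sc->sc_dev,
                "unable to allocate RX descriptor memory\n");
            goto err_rx_desc;
        }
        rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
        memset(rxr->rx_base, 0, rsize);

        q->rxr = rxr;
    }
    return 0;

    err_rx_desc:
    /* Unwind only the rings whose DMA memory was actually allocated. */
    for (struct rx_ring *rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
        igc_dma_free(sc, &rxr->rxdma);
    /* ... remaining cleanup and error return elided ... */

The later matches at 770-772 free the same rxdma areas on the teardown side.
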
770 struct rx_ring *rxr = &sc->rx_rings[iq];
772 igc_dma_free(sc, &rxr->rxdma);
1565 struct rx_ring *rxr = &sc->rx_rings[iq];
1567 mutex_enter(&rxr->rxr_lock);
1568 igc_rxfill(rxr);
1569 mutex_exit(&rxr->rxr_lock);
1921 struct rx_ring *rxr = &sc->rx_rings[iq];
1923 igc_clear_receive_status(rxr);
2015 struct rx_ring *rxr = &sc->rx_rings[iq];
2019 ifr[iq].ifr_info = rxr->rx_ring;
2031 igc_rxfill(struct rx_ring *rxr)
2033 struct igc_softc *sc = rxr->sc;
2037 if (igc_get_buf(rxr, id, false)) {
2038 panic("%s: msix=%d i=%d\n", __func__, rxr->me, id);
2043 rxr->last_desc_filled = id;
2044 IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
2045 rxr->next_to_check = 0;
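Lines 2031-2045 are igc_rxfill(), which posts a buffer in every slot and then publishes the ring tail. A sketch; the iteration scheme and sc->num_rx_desc are assumptions, the tail update mirrors the matches:

    static void
    igc_rxfill(struct rx_ring *rxr)
    {
        struct igc_softc *sc = rxr->sc;
        int i, id = 0;

        /* Post a fresh mbuf in every descriptor slot (non-strict mode,
         * see igc_get_buf() below). */
        for (i = 0; i < sc->num_rx_desc; i++) {
            id = (rxr->last_desc_filled + i + 1) % sc->num_rx_desc;
            if (igc_get_buf(rxr, id, false)) {
                panic("%s: msix=%d i=%d\n", __func__, rxr->me, id);
            }
        }

        /* Tell the NIC about the last filled slot and restart the scan. */
        rxr->last_desc_filled = id;
        IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
        rxr->next_to_check = 0;
    }
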
2049 igc_rxrefill(struct rx_ring *rxr, int end)
2051 struct igc_softc *sc = rxr->sc;
2054 for (id = rxr->next_to_check; id != end; id = igc_rxdesc_incr(sc, id)) {
2055 if (igc_get_buf(rxr, id, true)) {
2057 panic("%s: msix=%d id=%d\n", __func__, rxr->me, id);
2063 rxr->last_desc_filled == id ? "same" : "diff",
2064 rxr->last_desc_filled, id);
2065 rxr->last_desc_filled = id;
2066 IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
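Lines 2049-2066 are igc_rxrefill(), the strict-mode counterpart used after the completion loop. A sketch; the DPRINTF format and anything on lines 2058-2062 are guessed:

    static void
    igc_rxrefill(struct rx_ring *rxr, int end)
    {
        struct igc_softc *sc = rxr->sc;
        int id;

        /* Re-arm only the slots igc_rxeof() consumed, from next_to_check
         * up to (but not including) `end`. */
        for (id = rxr->next_to_check; id != end;
            id = igc_rxdesc_incr(sc, id)) {
            if (igc_get_buf(rxr, id, true)) {
                /* strict mode: an already-loaded slot is a bug */
                panic("%s: msix=%d id=%d\n", __func__, rxr->me, id);
            }
        }

        /* Lines 2058-2062 are not in the matches; the comparison logged
         * at 2063 suggests `id` may be adjusted before the tail write. */
        DPRINTF(RX, "%s: tail %s last_desc_filled=%d id=%d\n", __func__,
            rxr->last_desc_filled == id ? "same" : "diff",
            rxr->last_desc_filled, id);
        rxr->last_desc_filled = id;
        IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
    }
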
2077 igc_rxeof(struct rx_ring *rxr, u_int limit)
2079 struct igc_softc *sc = rxr->sc;
2080 struct igc_queue *q = rxr->rxr_igcq;
2085 id = rxr->next_to_check;
2087 union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
2091 igc_rxdesc_sync(rxr, id,
2097 igc_rxdesc_sync(rxr, id,
2103 igc_rxdesc_sync(rxr, id,
2114 rxbuf = &rxr->rx_buffers[id];
2116 bus_dmamap_sync(rxr->rxdma.dma_tag, map,
2118 bus_dmamap_unload(rxr->rxdma.dma_tag, map);
2139 igc_rxdesc_sync(rxr, id,
2160 id, rxr->last_desc_filled);
2169 nxbuf = &rxr->rx_buffers[nextp];
2221 DPRINTF(RX, "fill queue[%d]\n", rxr->me);
2222 igc_rxrefill(rxr, id);
2225 rxr->next_to_check == id ? "same" : "diff",
2226 rxr->next_to_check, id);
2227 rxr->next_to_check = id;
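Lines 2077-2227 trace igc_rxeof(), the per-ring receive completion loop that runs under rxr_lock from the interrupt paths below. The sketch compresses it heavily: multi-descriptor chaining, error checks, and checksum/VLAN offload handling are reduced to comments, and sc_ec.ec_if plus rxbuf->buf are assumed field names:

    static bool
    igc_rxeof(struct rx_ring *rxr, u_int limit)
    {
        struct igc_softc *sc = rxr->sc;
        struct ifnet *ifp = &sc->sc_ec.ec_if;   /* field name assumed */
        int id = rxr->next_to_check;
        bool more = false;

        for (;;) {
            if (limit-- == 0) {
                more = true;    /* budget exhausted; work may remain */
                break;
            }

            union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];

            /* Read the write-back area without racing device DMA. */
            igc_rxdesc_sync(rxr, id,
                BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
            uint32_t staterr = le32toh(rxdesc->wb.upper.status_error);
            if ((staterr & IGC_RXD_STAT_DD) == 0) {
                /* Descriptor not done yet; hand it back to the device. */
                igc_rxdesc_sync(rxr, id,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                break;
            }

            /* Detach the filled mbuf from its slot. */
            struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
            bus_dmamap_t map = rxbuf->map;
            bus_dmamap_sync(rxr->rxdma.dma_tag, map,
                0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(rxr->rxdma.dma_tag, map);

            struct mbuf *m = rxbuf->buf;    /* field name assumed */
            rxbuf->buf = NULL;
            m->m_len = le16toh(rxdesc->wb.upper.length);

            if ((staterr & IGC_RXD_STAT_EOP) == 0) {
                /* Frame spans descriptors: lines 2160/2169 pick the next
                 * slot and continue the mbuf chain there (elided here). */
                int nextp = igc_rxdesc_incr(sc, id);
                struct igc_rx_buf *nxbuf = &rxr->rx_buffers[nextp];
                (void)nxbuf;
            } else {
                /* Complete frame: offload flag handling elided. */
                m->m_pkthdr.len = m->m_len;
                m_set_rcvif(m, ifp);
                if_percpuq_enqueue(ifp->if_percpuq, m);
            }

            id = igc_rxdesc_incr(sc, id);
        }

        /* Replenish the slots just consumed and publish the new tail
         * (lines 2221-2222), then remember where to resume. */
        DPRINTF(RX, "fill queue[%d]\n", rxr->me);
        igc_rxrefill(rxr, id);
        rxr->next_to_check = id;

        return more;
    }
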
2576 igc_get_buf(struct rx_ring *rxr, int id, bool strict)
2578 struct igc_softc *sc = rxr->sc;
2579 struct igc_queue *q = rxr->rxr_igcq;
2580 struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
2609 error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, map, m,
2616 bus_dmamap_sync(rxr->rxdma.dma_tag, map, 0,
2620 union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
2621 igc_rxdesc_sync(rxr, id, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2623 igc_rxdesc_sync(rxr, id, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
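Lines 2576-2623 are igc_get_buf(), which loads one mbuf cluster into a slot and rewrites its descriptor inside the sync bracket seen at 2620-2623. A sketch; the occupied-slot policy, the mbuf allocation macros, and rxbuf->buf are assumptions:

    static int
    igc_get_buf(struct rx_ring *rxr, int id, bool strict)
    {
        struct igc_softc *sc = rxr->sc;
        struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
        bus_dmamap_t map = rxbuf->map;
        struct mbuf *m;
        int error;

        /* Strict mode (refill path) demands an empty slot; the initial
         * fill tolerates one that is already loaded. */
        if (rxbuf->buf != NULL) {
            if (strict)
                return EINVAL;
            return 0;   /* assumption: keep the existing buffer */
        }

        /* Grab a cluster mbuf for a full-size receive buffer. */
        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
            return ENOBUFS;
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            m_freem(m);
            return ENOBUFS;
        }
        m->m_len = m->m_pkthdr.len = MCLBYTES;

        error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, map, m,
            BUS_DMA_NOWAIT);
        if (error) {
            m_freem(m);
            return error;
        }

        bus_dmamap_sync(rxr->rxdma.dma_tag, map, 0,
            map->dm_mapsize, BUS_DMASYNC_PREREAD);
        rxbuf->buf = m;

        /* Point the hardware descriptor at the new buffer, bracketing
         * the update with descriptor syncs (lines 2620-2623). */
        union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
        igc_rxdesc_sync(rxr, id,
            BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
        rxdesc->read.pkt_addr = htole64(map->dm_segs[0].ds_addr);
        rxdesc->read.hdr_addr = 0;
        igc_rxdesc_sync(rxr, id,
            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

        return 0;
    }
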
2760 struct rx_ring *rxr = iq->rxr;
2774 mutex_enter(&rxr->rxr_lock);
2775 rxmore = igc_rxeof(rxr, rxlimit);
2776 mutex_exit(&rxr->rxr_lock);
2794 struct rx_ring *rxr = iq->rxr;
2836 mutex_enter(&rxr->rxr_lock);
2837 rxmore = igc_rxeof(rxr, rxlimit);
2838 mutex_exit(&rxr->rxr_lock);
2866 struct rx_ring *rxr = iq->rxr;
2883 mutex_enter(&rxr->rxr_lock);
2884 rxmore = igc_rxeof(rxr, rxlimit);
2885 mutex_exit(&rxr->rxr_lock);
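The three groups at 2760-2776, 2794-2838, and 2866-2885 are separate interrupt and deferred-processing entry points, but each drains the queue's RX ring the same way. The common shape, with a hypothetical function name and assumed softc/limit field names:

    static int
    igc_intr_queue(void *arg)   /* hypothetical name for the common shape */
    {
        struct igc_queue *iq = arg;
        struct igc_softc *sc = iq->sc;                  /* field name assumed */
        struct rx_ring *rxr = iq->rxr;
        u_int rxlimit = sc->sc_rx_intr_process_limit;   /* name assumed */
        bool rxmore;

        /* RX completion always runs under the per-ring lock. */
        mutex_enter(&rxr->rxr_lock);
        rxmore = igc_rxeof(rxr, rxlimit);
        mutex_exit(&rxr->rxr_lock);

        /* ... TX completion handled the same way; then either re-enable
         * the queue interrupt or reschedule deferred processing when
         * rxmore is still true ... */
        return 1;
    }
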
3465 igc_allocate_receive_buffers(struct rx_ring *rxr)
3467 struct igc_softc *sc = rxr->sc;
3470 rxr->rx_buffers =
3474 struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
3476 error = bus_dmamap_create(rxr->rxdma.dma_tag, MCLBYTES, 1,
3484 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
3485 rxr->rxdma.dma_map->dm_mapsize,
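Lines 3465-3485 cover igc_allocate_receive_buffers(): one igc_rx_buf with its own DMA map per descriptor slot, then a PREREAD|PREWRITE sync of the descriptor ring itself. A sketch, assuming sc->num_rx_desc, kmem_zalloc(), and caller-side error unwinding:

    static int
    igc_allocate_receive_buffers(struct rx_ring *rxr)
    {
        struct igc_softc *sc = rxr->sc;
        int id, error;

        /* One igc_rx_buf (mbuf pointer + DMA map) per descriptor slot. */
        rxr->rx_buffers = kmem_zalloc(
            sc->num_rx_desc * sizeof(struct igc_rx_buf), KM_SLEEP);

        for (id = 0; id < sc->num_rx_desc; id++) {
            struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];

            /* Single-segment map, one cluster per packet buffer. */
            error = bus_dmamap_create(rxr->rxdma.dma_tag, MCLBYTES, 1,
                MCLBYTES, 0, BUS_DMA_NOWAIT, &rxbuf->map);
            if (error)
                return error;   /* caller unwinds; simplification */
        }

        /* Prepare the descriptor ring itself for device access. */
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
            rxr->rxdma.dma_map->dm_mapsize,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return 0;
    }
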
3503 struct rx_ring *rxr = &sc->rx_rings[iq];
3505 if (igc_setup_receive_ring(rxr))
3521 igc_setup_receive_ring(struct rx_ring *rxr)
3523 struct igc_softc *sc = rxr->sc;
3528 memset(rxr->rx_base, 0, rsize);
3530 if (igc_allocate_receive_buffers(rxr))
3534 rxr->next_to_check = 0;
3535 rxr->last_desc_filled = 0;
3537 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
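Lines 3503-3537 show the setup path walking every ring (3503-3505) and igc_setup_receive_ring() resetting one of them. A sketch of the per-ring half; the rsize expression and IGC_DBA_ALIGN are assumptions:

    static int
    igc_setup_receive_ring(struct rx_ring *rxr)
    {
        struct igc_softc *sc = rxr->sc;
        /* Same ring size used at allocation time; exact expression assumed. */
        const int rsize = roundup2(
            sc->num_rx_desc * sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);

        /* Start from a clean descriptor ring. */
        memset(rxr->rx_base, 0, rsize);

        if (igc_allocate_receive_buffers(rxr))
            return ENOMEM;

        /* Reset the software indices and take a fresh per-ring lock. */
        rxr->next_to_check = 0;
        rxr->last_desc_filled = 0;
        mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);

        return 0;
    }
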
3629 struct rx_ring *rxr = &sc->rx_rings[iq];
3631 rxr->rxdma.dma_map->dm_segs[0].ds_addr;
3645 IGC_WRITE_REG(hw, IGC_RDT(iq), 0 /* XXX rxr->last_desc_filled */);
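Lines 3629-3645 come from receive-unit initialization: each queue's descriptor ring address is read from its DMA map and programmed into the chip, with the tail deliberately left at 0 until igc_rxfill() posts buffers (note the XXX at 3645). A sketch; the base/length/head register macros and the hw local are assumptions, only IGC_RDT appears in the matches:

    struct igc_hw *hw = &sc->hw;

    /* Per-queue receive unit setup (sketch of lines 3629-3645). */
    for (iq = 0; iq < sc->sc_nqueues; iq++) {
        struct rx_ring *rxr = &sc->rx_rings[iq];
        const uint64_t bus_addr =
            rxr->rxdma.dma_map->dm_segs[0].ds_addr;

        /* Tell the NIC where the descriptor ring lives and how long
         * it is (register macro names assumed from igc_regs.h). */
        IGC_WRITE_REG(hw, IGC_RDLEN(iq),
            sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
        IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
        IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);

        /* Head starts at 0; tail is bumped later by igc_rxfill(). */
        IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
        IGC_WRITE_REG(hw, IGC_RDT(iq), 0 /* XXX rxr->last_desc_filled */);
    }
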
3674 struct rx_ring *rxr = &sc->rx_rings[iq];
3676 igc_free_receive_buffers(rxr);
3686 igc_free_receive_buffers(struct rx_ring *rxr)
3688 struct igc_softc *sc = rxr->sc;
3690 if (rxr->rx_buffers != NULL) {
3692 struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
3696 bus_dmamap_sync(rxr->rxdma.dma_tag, map,
3698 bus_dmamap_unload(rxr->rxdma.dma_tag, map);
3702 bus_dmamap_destroy(rxr->rxdma.dma_tag, map);
3705 kmem_free(rxr->rx_buffers,
3707 rxr->rx_buffers = NULL;
3710 mutex_destroy(&rxr->rxr_lock);
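Lines 3686-3710 are igc_free_receive_buffers(), the inverse of the allocation and fill paths above. A sketch, assuming sc->num_rx_desc and the rxbuf->buf/rxbuf->map field names:

    static void
    igc_free_receive_buffers(struct rx_ring *rxr)
    {
        struct igc_softc *sc = rxr->sc;
        int id;

        if (rxr->rx_buffers != NULL) {
            for (id = 0; id < sc->num_rx_desc; id++) {
                struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
                bus_dmamap_t map = rxbuf->map;

                if (rxbuf->buf != NULL) {
                    /* Undo the PREREAD sync and mapping done in
                     * igc_get_buf(), then release the mbuf. */
                    bus_dmamap_sync(rxr->rxdma.dma_tag, map,
                        0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
                    bus_dmamap_unload(rxr->rxdma.dma_tag, map);
                    m_freem(rxbuf->buf);
                    rxbuf->buf = NULL;
                }
                bus_dmamap_destroy(rxr->rxdma.dma_tag, map);
                rxbuf->map = NULL;
            }
            kmem_free(rxr->rx_buffers,
                sc->num_rx_desc * sizeof(struct igc_rx_buf));
            rxr->rx_buffers = NULL;
        }

        mutex_destroy(&rxr->rxr_lock);
    }
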
3719 igc_clear_receive_status(struct rx_ring *rxr)
3721 struct igc_softc *sc = rxr->sc;
3723 mutex_enter(&rxr->rxr_lock);
3726 union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
3728 igc_rxdesc_sync(rxr, id,
3731 igc_rxdesc_sync(rxr, id,
3735 mutex_exit(&rxr->rxr_lock);
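Lines 3719-3735 are igc_clear_receive_status(), called from the path matched at 1921-1923 to wipe stale write-back status before (re)starting the ring. A sketch; clearing status_error specifically is an assumption, the sync bracket mirrors the matches:

    static void
    igc_clear_receive_status(struct rx_ring *rxr)
    {
        struct igc_softc *sc = rxr->sc;
        int id;

        mutex_enter(&rxr->rxr_lock);

        for (id = 0; id < sc->num_rx_desc; id++) {
            union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];

            /* Clear the write-back status so a stale DD bit from a
             * previous run is not mistaken for a completed frame. */
            igc_rxdesc_sync(rxr, id,
                BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
            rxdesc->wb.upper.status_error = 0;
            igc_rxdesc_sync(rxr, id,
                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        }

        mutex_exit(&rxr->rxr_lock);
    }
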