Lines Matching defs:que

647 struct em_queue *que = sc->queues; /* Use only first queue. */
655 head = que->tx.sc_tx_desc_head;
656 free = que->tx.sc_tx_desc_tail;
662 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
663 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
678 used = em_encap(que, m);
703 E1000_WRITE_REG(&sc->hw, TDT(que->me),
704 que->tx.sc_tx_desc_head);
713 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
714 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
722 E1000_WRITE_REG(&sc->hw, TDT(que->me),
723 que->tx.sc_tx_desc_head);
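
The transmit-start hunk above reads the producer index (sc_tx_desc_head) and the consumer index (sc_tx_desc_tail) before handing work to em_encap(). Below is a minimal standalone sketch of the free-slot arithmetic this implies, with wraparound over a ring of sc_tx_slots entries; tx_slots_free() is an illustrative helper, not a function in the driver.

    #include <stdio.h>

    /* Free descriptors between producer (head) and consumer (tail)
     * in a ring of 'slots' entries, allowing for wraparound. */
    static unsigned int
    tx_slots_free(unsigned int head, unsigned int tail, unsigned int slots)
    {
        unsigned int free = tail;

        if (free <= head)
            free += slots;
        return free - head;
    }

    int
    main(void)
    {
        /* 256-entry ring, producer at 250, consumer at 10 -> 16 free */
        printf("%u\n", tx_slots_free(250, 10, 256));
        return 0;
    }
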
825 struct em_queue *que = sc->queues; /* Use only first queue. */
837 que->tx.sc_tx_desc_head, que->tx.sc_tx_desc_tail,
838 E1000_READ_REG(&sc->hw, TDH(que->me)),
839 E1000_READ_REG(&sc->hw, TDT(que->me)));
1012 struct em_queue *que = sc->queues; /* single queue */
1023 em_txeof(que);
1024 if (em_rxeof(que))
1025 em_rxrefill_locked(que);
1193 em_encap(struct em_queue *que, struct mbuf *m)
1195 struct em_softc *sc = que->sc;
1208 head = que->tx.sc_tx_desc_head;
1209 pkt = &que->tx.sc_tx_pkts_ring[head];
1232 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
1233 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
1239 used += em_tso_setup(que, m, head, &txd_upper,
1244 used += em_tx_ctx_setup(que, m, head, &txd_upper,
1248 used += em_transmit_checksum_setup(que, m, head,
1269 desc = &que->tx.sc_tx_desc_ring[head];
1274 (que->tx.sc_txd_cmd | txd_lower |
1285 desc = &que->tx.sc_tx_desc_ring[head];
1288 desc->lower.data = htole32(que->tx.sc_txd_cmd |
1316 que->tx.sc_tx_desc_head = head;
1326 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
1327 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
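
em_encap() walks the descriptor ring one slot at a time before publishing the new sc_tx_desc_head. A standalone sketch of the index advance with wraparound (the same pattern appears later in em_flush_tx_ring()); ring_next() is an illustrative name.

    #include <stdio.h>

    /* Advance a descriptor-ring index by one, wrapping at the ring size. */
    static unsigned int
    ring_next(unsigned int idx, unsigned int slots)
    {
        return (idx + 1 == slots) ? 0 : idx + 1;
    }

    int
    main(void)
    {
        printf("%u %u\n", ring_next(254, 256), ring_next(255, 256)); /* 255 0 */
        return 0;
    }
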
1350 struct em_queue *que = sc->queues; /* single queue chip */
1352 hw_tdt = E1000_READ_REG(&sc->hw, TDT(que->me));
1353 sw_tdt = que->tx.sc_tx_desc_head;
1356 tx_desc = &que->tx.sc_tx_desc_ring[hw_tdt];
1368 E1000_WRITE_REG(&sc->hw, TDT(que->me), hw_tdt);
1422 struct em_queue *que = sc->queues; /* single queue chip */
1424 if ((E1000_READ_REG(&sc->hw, TDT(que->me)) ==
1425 E1000_READ_REG(&sc->hw, TDH(que->me))) &&
1607 struct em_queue *que = sc->queues; /* Use only first queue. */
1615 timeout_del(&que->rx_refill);
1705 struct em_queue *que = NULL;
1779 que = malloc(sizeof(struct em_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
1780 if (que == NULL) {
1784 que->me = 0;
1785 que->sc = sc;
1786 timeout_set(&que->rx_refill, em_rxrefill, que);
1788 sc->queues = que;
1823 struct em_queue *que = NULL;
1843 FOREACH_QUEUE(sc, que) {
1844 if (que->rx.sc_rx_desc_ring != NULL) {
1845 que->rx.sc_rx_desc_ring = NULL;
1846 em_dma_free(sc, &que->rx.sc_rx_dma);
1848 if (que->tx.sc_tx_desc_ring != NULL) {
1849 que->tx.sc_tx_desc_ring = NULL;
1850 em_dma_free(sc, &que->tx.sc_tx_dma);
1852 if (que->tag)
1853 pci_intr_disestablish(pc, que->tag);
1854 que->tag = NULL;
1855 que->eims = 0;
1856 que->me = 0;
1857 que->sc = NULL;
2211 struct em_queue *que;
2213 FOREACH_QUEUE(sc, que) {
2214 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
2215 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
2218 que->tx.sc_tx_pkts_ring = mallocarray(sc->sc_tx_slots,
2219 sizeof(*que->tx.sc_tx_pkts_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2220 if (que->tx.sc_tx_pkts_ring == NULL) {
2238 struct em_queue *que;
2245 FOREACH_QUEUE(sc, que) {
2246 bzero((void *) que->tx.sc_tx_desc_ring,
2250 pkt = &que->tx.sc_tx_pkts_ring[i];
2261 que->tx.sc_tx_desc_head = 0;
2262 que->tx.sc_tx_desc_tail = 0;
2265 que->tx.active_checksum_context = OFFLOAD_NONE;
2285 struct em_queue *que;
2289 FOREACH_QUEUE(sc, que) {
2291 bus_addr = que->tx.sc_tx_dma.dma_map->dm_segs[0].ds_addr;
2292 E1000_WRITE_REG(&sc->hw, TDLEN(que->me),
2295 E1000_WRITE_REG(&sc->hw, TDBAH(que->me), (u_int32_t)(bus_addr >> 32));
2296 E1000_WRITE_REG(&sc->hw, TDBAL(que->me), (u_int32_t)bus_addr);
2299 E1000_WRITE_REG(&sc->hw, TDT(que->me), 0);
2300 E1000_WRITE_REG(&sc->hw, TDH(que->me), 0);
2303 E1000_READ_REG(&sc->hw, TDBAL(que->me)),
2304 E1000_READ_REG(&sc->hw, TDLEN(que->me)));
2335 que->tx.sc_txd_cmd = E1000_TXD_CMD_IFCS;
2341 reg_tctl = E1000_READ_REG(&sc->hw, TXDCTL(que->me));
2343 E1000_WRITE_REG(&sc->hw, TXDCTL(que->me), reg_tctl);
2345 que->tx.sc_txd_cmd |= E1000_TXD_CMD_IDE;
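
The TDBAH/TDBAL writes above load the 64-bit DMA address of the descriptor ring in two 32-bit halves. A standalone model of that split; program_ring_base() and its output parameters are stand-ins, not driver code.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Split a 64-bit ring base address into the high/low 32-bit values
     * written to TDBAH(me)/TDBAL(me) above. */
    static void
    program_ring_base(uint64_t bus_addr, uint32_t *bah, uint32_t *bal)
    {
        *bah = (uint32_t)(bus_addr >> 32);  /* high half -> TDBAH */
        *bal = (uint32_t)bus_addr;          /* low half  -> TDBAL */
    }

    int
    main(void)
    {
        uint32_t hi, lo;

        program_ring_base(0x12345678abcdef00ULL, &hi, &lo);
        printf("TDBAH=0x%08" PRIx32 " TDBAL=0x%08" PRIx32 "\n", hi, lo);
        return 0;
    }
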
2385 struct em_queue *que;
2391 FOREACH_QUEUE(sc, que) {
2392 if (que->tx.sc_tx_pkts_ring != NULL) {
2394 pkt = &que->tx.sc_tx_pkts_ring[i];
2414 free(que->tx.sc_tx_pkts_ring, M_DEVBUF,
2415 sc->sc_tx_slots * sizeof(*que->tx.sc_tx_pkts_ring));
2416 que->tx.sc_tx_pkts_ring = NULL;
2419 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
2420 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
2426 em_tso_setup(struct em_queue *que, struct mbuf *mp, u_int head,
2435 TD = (struct e1000_adv_tx_context_desc *)&que->tx.sc_tx_desc_ring[head];
2474 if (que->sc->hw.mac_type == em_82575)
2475 mss_l4len_idx |= (que->me & 0xff) << 4;
2493 em_tx_ctx_setup(struct em_queue *que, struct mbuf *mp, u_int head,
2503 TD = (struct e1000_adv_tx_context_desc *)&que->tx.sc_tx_desc_ring[head];
2554 if (que->sc->hw.mac_type == em_82575)
2555 mss_l4len_idx |= (que->me & 0xff) << 4;
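
Both em_tso_setup() and em_tx_ctx_setup() OR (que->me & 0xff) << 4 into mss_l4len_idx on em_82575, i.e. the queue index rides along in the advanced context descriptor. A one-function model of that packing; the wider layout of mss_l4len_idx is not visible in the listing and is left untouched here.

    #include <stdint.h>

    /* Fold a queue index into the context descriptor's idx bits,
     * mirroring the "(que->me & 0xff) << 4" seen above for em_82575. */
    uint32_t
    ctx_set_queue_idx(uint32_t mss_l4len_idx, unsigned int queue_id)
    {
        return mss_l4len_idx | ((uint32_t)(queue_id & 0xff) << 4);
    }
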
2573 em_transmit_checksum_setup(struct em_queue *que, struct mbuf *mp, u_int head,
2581 if (que->tx.active_checksum_context == OFFLOAD_TCP_IP)
2584 que->tx.active_checksum_context = OFFLOAD_TCP_IP;
2588 if (que->tx.active_checksum_context == OFFLOAD_UDP_IP)
2591 que->tx.active_checksum_context = OFFLOAD_UDP_IP;
2601 TXD = (struct em_context_desc *)&que->tx.sc_tx_desc_ring[head];
2613 if (que->tx.active_checksum_context == OFFLOAD_TCP_IP) {
2617 } else if (que->tx.active_checksum_context == OFFLOAD_UDP_IP) {
2624 TXD->cmd_and_length = htole32(que->tx.sc_txd_cmd | E1000_TXD_CMD_DEXT);
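
The comparisons against tx.active_checksum_context suggest the driver caches the last offload context it programmed and only spends a context descriptor when the requested offload type changes. A standalone model of that decision; the enum names mirror the listing, checksum_ctx_needed() is hypothetical.

    #include <stdio.h>

    enum offload_ctx { OFFLOAD_NONE, OFFLOAD_TCP_IP, OFFLOAD_UDP_IP };

    /* Return how many descriptors a checksum setup would consume:
     * 0 if the wanted context is already active, 1 if a fresh context
     * descriptor must be written (and the cache updated). */
    static int
    checksum_ctx_needed(enum offload_ctx *active, enum offload_ctx wanted)
    {
        if (*active == wanted)
            return 0;
        *active = wanted;
        return 1;
    }

    int
    main(void)
    {
        enum offload_ctx active = OFFLOAD_NONE;
        int a = checksum_ctx_needed(&active, OFFLOAD_TCP_IP);
        int b = checksum_ctx_needed(&active, OFFLOAD_TCP_IP);
        int c = checksum_ctx_needed(&active, OFFLOAD_UDP_IP);

        printf("%d %d %d\n", a, b, c);  /* 1 0 1 */
        return 0;
    }
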
2637 em_txeof(struct em_queue *que)
2639 struct em_softc *sc = que->sc;
2646 head = que->tx.sc_tx_desc_head;
2647 tail = que->tx.sc_tx_desc_tail;
2652 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
2653 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
2657 pkt = &que->tx.sc_tx_pkts_ring[tail];
2658 desc = &que->tx.sc_tx_desc_ring[pkt->pkt_eop];
2681 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
2682 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
2688 que->tx.sc_tx_desc_tail = tail;
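
A reduced model of the reclaim loop in em_txeof(): walk from the consumer index (sc_tx_desc_tail) toward the producer (sc_tx_desc_head) and stop at the first slot whose end-of-packet descriptor the hardware has not yet marked complete. The per-slot 'done' flag stands in for the descriptor status check; DMA map unloading and m_freem() are omitted, and reclaim_tx() is an illustrative name.

    /* Reclaim completed TX slots; returns the new tail index. */
    static unsigned int
    reclaim_tx(const int *eop_done, unsigned int tail, unsigned int head,
        unsigned int slots)
    {
        while (tail != head) {
            if (!eop_done[tail])    /* hardware not finished yet */
                break;
            /* real driver: unload DMA map, free the mbuf, clear the slot */
            if (++tail == slots)
                tail = 0;
        }
        return tail;
    }

    int
    main(void)
    {
        int done[4] = { 1, 1, 0, 0 };

        return (int)reclaim_tx(done, 0, 3, 4);  /* exits with 2 */
    }
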
2702 em_get_buf(struct em_queue *que, int i)
2704 struct em_softc *sc = que->sc;
2710 pkt = &que->rx.sc_rx_pkts_ring[i];
2711 desc = &que->rx.sc_rx_desc_ring[i];
2752 struct em_queue *que;
2757 FOREACH_QUEUE(sc, que) {
2758 que->rx.sc_rx_pkts_ring = mallocarray(sc->sc_rx_slots,
2759 sizeof(*que->rx.sc_rx_pkts_ring),
2761 if (que->rx.sc_rx_pkts_ring == NULL) {
2767 bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
2768 0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
2772 pkt = &que->rx.sc_rx_pkts_ring[i];
2803 struct em_queue *que;
2809 FOREACH_QUEUE(sc, que) {
2810 memset(que->rx.sc_rx_desc_ring, 0,
2811 sc->sc_rx_slots * sizeof(*que->rx.sc_rx_desc_ring));
2814 que->rx.sc_rx_desc_tail = 0;
2815 que->rx.sc_rx_desc_head = sc->sc_rx_slots - 1;
2818 if_rxr_init(&que->rx.sc_rx_ring, lwm, sc->sc_rx_slots);
2820 if (em_rxfill(que) == 0) {
2838 struct em_queue *que;
2911 FOREACH_QUEUE(sc, que) {
2917 reg_srrctl = E1000_READ_REG(&sc->hw, SRRCTL(que->me));
2919 E1000_WRITE_REG(&sc->hw, SRRCTL(que->me), reg_srrctl);
2923 bus_addr = que->rx.sc_rx_dma.dma_map->dm_segs[0].ds_addr;
2924 E1000_WRITE_REG(&sc->hw, RDLEN(que->me),
2925 sc->sc_rx_slots * sizeof(*que->rx.sc_rx_desc_ring));
2926 E1000_WRITE_REG(&sc->hw, RDBAH(que->me), (u_int32_t)(bus_addr >> 32));
2927 E1000_WRITE_REG(&sc->hw, RDBAL(que->me), (u_int32_t)bus_addr);
2934 reg = E1000_READ_REG(&sc->hw, RXDCTL(que->me));
2936 E1000_WRITE_REG(&sc->hw, RXDCTL(que->me), reg);
2944 FOREACH_QUEUE(sc, que) {
2945 E1000_WRITE_REG(&sc->hw, RDH(que->me), 0);
2946 E1000_WRITE_REG(&sc->hw, RDT(que->me), que->rx.sc_rx_desc_head);
2958 struct em_queue *que;
2964 FOREACH_QUEUE(sc, que) {
2965 if_rxr_init(&que->rx.sc_rx_ring, 0, 0);
2967 bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
2968 0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
2971 if (que->rx.sc_rx_pkts_ring != NULL) {
2973 pkt = &que->rx.sc_rx_pkts_ring[i];
2987 free(que->rx.sc_rx_pkts_ring, M_DEVBUF,
2988 sc->sc_rx_slots * sizeof(*que->rx.sc_rx_pkts_ring));
2989 que->rx.sc_rx_pkts_ring = NULL;
2992 if (que->rx.fmp != NULL) {
2993 m_freem(que->rx.fmp);
2994 que->rx.fmp = NULL;
2995 que->rx.lmp = NULL;
3001 em_rxfill(struct em_queue *que)
3003 struct em_softc *sc = que->sc;
3008 i = que->rx.sc_rx_desc_head;
3010 bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
3011 0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
3014 for (slots = if_rxr_get(&que->rx.sc_rx_ring, sc->sc_rx_slots);
3019 if (em_get_buf(que, i) != 0)
3022 que->rx.sc_rx_desc_head = i;
3026 if_rxr_put(&que->rx.sc_rx_ring, slots);
3028 bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
3029 0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
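
A standalone model of the em_rxfill() producer loop: the index is advanced (with wraparound) before a buffer is posted, which is why the receive setup above starts sc_rx_desc_head at sc_rx_slots - 1 so that the first buffer lands in slot 0. The if_rxr accounting is reduced to a plain budget and buffer allocation to a callback; rx_fill() is an illustrative name.

    /* Fill up to 'budget' RX slots after *head; returns how many were
     * filled and leaves *head at the last posted slot (the RDT value). */
    static unsigned int
    rx_fill(unsigned int *head, unsigned int slots, unsigned int budget,
        int (*get_buf)(unsigned int slot))
    {
        unsigned int i = *head, filled = 0;

        while (budget-- > 0) {
            if (++i == slots)       /* advance, then fill */
                i = 0;
            if (get_buf(i) != 0)    /* allocation failed, stop */
                break;
            *head = i;
            filled++;
        }
        return filled;
    }
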
3038 struct em_queue *que = arg;
3042 em_rxrefill_locked(que);
3047 em_rxrefill_locked(struct em_queue *que)
3049 struct em_softc *sc = que->sc;
3051 if (em_rxfill(que))
3052 E1000_WRITE_REG(&sc->hw, RDT(que->me), que->rx.sc_rx_desc_head);
3053 else if (if_rxr_needrefill(&que->rx.sc_rx_ring))
3054 timeout_add(&que->rx_refill, 1);
3065 em_rxeof(struct em_queue *que)
3067 struct em_softc *sc = que->sc;
3081 if (if_rxr_inuse(&que->rx.sc_rx_ring) == 0)
3084 i = que->rx.sc_rx_desc_tail;
3086 bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
3087 0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
3093 pkt = &que->rx.sc_rx_pkts_ring[i];
3094 desc = &que->rx.sc_rx_desc_ring[i];
3110 if_rxr_put(&que->rx.sc_rx_ring, 1);
3136 if (que->rx.fmp != NULL)
3137 pkt_len += que->rx.fmp->m_pkthdr.len;
3156 if (que->rx.fmp == NULL) {
3158 que->rx.fmp = m; /* Store the first mbuf */
3159 que->rx.lmp = m;
3169 que->rx.lmp->m_len -= prev_len_adj;
3170 que->rx.fmp->m_pkthdr.len -= prev_len_adj;
3172 que->rx.lmp->m_next = m;
3173 que->rx.lmp = m;
3174 que->rx.fmp->m_pkthdr.len += m->m_len;
3178 m = que->rx.fmp;
3190 que->rx.fmp = NULL;
3191 que->rx.lmp = NULL;
3194 que->rx.dropped_pkts++;
3196 if (que->rx.fmp != NULL) {
3197 m_freem(que->rx.fmp);
3198 que->rx.fmp = NULL;
3199 que->rx.lmp = NULL;
3208 } while (if_rxr_inuse(&que->rx.sc_rx_ring) > 0);
3210 bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
3211 0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
3214 que->rx.sc_rx_desc_tail = i;
3217 if_rxr_livelocked(&que->rx.sc_rx_ring);
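
The fmp/lmp pair above carries multi-descriptor reassembly: fmp points at the first mbuf of the packet being built, lmp at the fragment most recently appended, and the packet-header length is kept current as fragments arrive. A minimal model of that bookkeeping with a toy fragment type in place of mbufs; reasm_add() is an illustrative name.

    #include <stddef.h>

    struct frag {
        struct frag *next;
        int          len;
    };

    struct reasm {
        struct frag *fmp;       /* first fragment of the packet */
        struct frag *lmp;       /* last fragment appended */
        int          pkt_len;   /* running packet length */
    };

    /* Append one received fragment; returns the completed chain on the
     * end-of-packet fragment, NULL while assembly is still in progress. */
    static struct frag *
    reasm_add(struct reasm *r, struct frag *f, int eop)
    {
        if (r->fmp == NULL) {
            r->fmp = r->lmp = f;    /* store the first fragment */
            r->pkt_len = f->len;
        } else {
            r->lmp->next = f;       /* chain onto the last one */
            r->lmp = f;
            r->pkt_len += f->len;
        }
        if (!eop)
            return NULL;
        f = r->fmp;
        r->fmp = r->lmp = NULL;     /* ready for the next packet */
        return f;
    }
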
3472 em_flush_tx_ring(struct em_queue *que)
3474 struct em_softc *sc = que->sc;
3479 KASSERT(que->tx.sc_tx_desc_ring != NULL);
3484 KASSERT(EM_READ_REG(&sc->hw, E1000_TDT(que->me)) == que->tx.sc_tx_desc_head);
3486 txd = &que->tx.sc_tx_desc_ring[que->tx.sc_tx_desc_head];
3487 txd->buffer_addr = que->tx.sc_tx_dma.dma_map->dm_segs[0].ds_addr;
3495 if (++que->tx.sc_tx_desc_head == sc->sc_tx_slots)
3496 que->tx.sc_tx_desc_head = 0;
3498 EM_WRITE_REG(&sc->hw, E1000_TDT(que->me), que->tx.sc_tx_desc_head);
3510 em_flush_rx_ring(struct em_queue *que)
3513 struct em_softc *sc = que->sc;
3520 rxdctl = EM_READ_REG(&sc->hw, E1000_RXDCTL(que->me));
3528 EM_WRITE_REG(&sc->hw, E1000_RXDCTL(que->me), rxdctl);
3550 struct em_queue *que = sc->queues; /* Use only first queue. */
3561 tdlen = EM_READ_REG(&sc->hw, E1000_TDLEN(que->me));
3565 em_flush_tx_ring(que);
3570 em_flush_rx_ring(que);
3970 struct em_queue *que = sc->queues; /* Use only first queue. */
3991 que->me = vec;
3992 que->eims = 1 << vec;
3993 snprintf(que->name, sizeof(que->name), "%s:%d", DEVNAME(sc), vec);
3996 que->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
3997 em_queue_intr_msix, que, que->name);
3998 if (que->tag == NULL) {
4038 struct em_queue *que = vque;
4039 struct em_softc *sc = que->sc;
4043 em_txeof(que);
4044 if (em_rxeof(que))
4045 em_rxrefill_locked(que);
4048 em_enable_queue_intr_msix(que);
4084 struct em_queue *que;
4102 * The funky calculation of offsets and checking if que->me is
4106 FOREACH_QUEUE(sc, que) {
4107 index = que->me >> 1;
4109 if (que->me & 1) {
4111 ivar |= (que->me | E1000_IVAR_VALID) << 16;
4114 ivar |= que->me | E1000_IVAR_VALID;
4120 FOREACH_QUEUE(sc, que) {
4121 index = que->me >> 1;
4123 if (que->me & 1) {
4125 ivar |= (que->me | E1000_IVAR_VALID) << 24;
4128 ivar |= (que->me | E1000_IVAR_VALID) << 8;
4131 sc->msix_queuesmask |= que->eims;
4141 FOREACH_QUEUE(sc, que) {
4142 index = que->me & 0x7; /* Each IVAR has two entries */
4144 if (que->me < 8) {
4146 ivar |= que->me | E1000_IVAR_VALID;
4149 ivar |= (que->me | E1000_IVAR_VALID) << 16;
4152 sc->msix_queuesmask |= que->eims;
4155 FOREACH_QUEUE(sc, que) {
4156 index = que->me & 0x7; /* Each IVAR has two entries */
4158 if (que->me < 8) {
4160 ivar |= (que->me | E1000_IVAR_VALID) << 8;
4163 ivar |= (que->me | E1000_IVAR_VALID) << 24;
4166 sc->msix_queuesmask |= que->eims;
4187 FOREACH_QUEUE(sc, que)
4188 E1000_WRITE_REG(&sc->hw, EITR(que->me), newitr);
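
The two IVAR-programming styles above differ only in how a queue number is mapped to an IVAR register and a byte lane within it; which lane carries RX and which TX is not visible in the listing, so the model below just reports the two shifts each style produces. ivar_style_a()/ivar_style_b() are illustrative names for the first and second FOREACH_QUEUE pairs.

    #include <stdio.h>

    /* First style above: IVAR register = me >> 1; odd queues land in the
     * high half (shifts 16 and 24), even queues in the low half (0 and 8). */
    static void
    ivar_style_a(unsigned int me, unsigned int *reg, unsigned int *s1,
        unsigned int *s2)
    {
        *reg = me >> 1;
        *s1 = (me & 1) ? 16 : 0;
        *s2 = (me & 1) ? 24 : 8;
    }

    /* Second style above: IVAR register = me & 0x7; queues 0-7 use the
     * low half (shifts 0 and 8), queues 8 and up the high half (16 and 24). */
    static void
    ivar_style_b(unsigned int me, unsigned int *reg, unsigned int *s1,
        unsigned int *s2)
    {
        *reg = me & 0x7;
        *s1 = (me < 8) ? 0 : 16;
        *s2 = (me < 8) ? 8 : 24;
    }

    int
    main(void)
    {
        unsigned int reg, a, b;

        ivar_style_a(3, &reg, &a, &b);
        printf("style a, queue 3: IVAR%u shifts %u/%u\n", reg, a, b);
        ivar_style_b(9, &reg, &a, &b);
        printf("style b, queue 9: IVAR%u shifts %u/%u\n", reg, a, b);
        return 0;
    }
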
4194 em_enable_queue_intr_msix(struct em_queue *que)
4196 E1000_WRITE_REG(&que->sc->hw, EIMS, que->eims);
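
que->eims is a single bit, 1 << vector (set in the MSI-X attach hunk above), so enabling a queue's interrupt is one EIMS write and sc->msix_queuesmask is just the OR of every queue's bit. A trivial standalone model; both helper names are illustrative.

    #include <stdint.h>

    /* One EIMS bit per MSI-X vector. */
    static uint32_t
    queue_eims(unsigned int vector)
    {
        return (uint32_t)1 << vector;
    }

    /* Mask covering a set of queues, as msix_queuesmask accumulates above. */
    uint32_t
    queues_mask(const unsigned int *vectors, int nqueues)
    {
        uint32_t mask = 0;
        int i;

        for (i = 0; i < nqueues; i++)
            mask |= queue_eims(vectors[i]);
        return mask;
    }
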
4203 struct em_queue *que;
4205 FOREACH_QUEUE(sc, que) {
4208 &que->tx.sc_tx_dma) != 0) {
4213 que->tx.sc_tx_desc_ring =
4214 (struct em_tx_desc *)que->tx.sc_tx_dma.dma_vaddr;
4218 &que->rx.sc_rx_dma) != 0) {
4223 que->rx.sc_rx_desc_ring =
4224 (struct em_rx_desc *)que->rx.sc_rx_dma.dma_vaddr;