Lines Matching defs:tx_ring

399 		txr = &adapter->tx_ring[i];
413 que->tx_ring = txr;
431 txr = &adapter->tx_ring[i];
474 struct ena_ring *txr = &adapter->tx_ring[qid];
569 validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc)
571 struct ena_adapter *adapter = tx_ring->adapter;
579 req_id, tx_ring->qid);
583 req_id, tx_ring->qid);
584 counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
595 ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
597 struct ena_adapter *adapter = tx_ring->adapter;
606 for (i = 0; i < tx_ring->ring_size; ++i) {
607 tx_info = &tx_ring->tx_buffer_info[i];
640 struct ena_ring *tx_ring = que->tx_ring;
650 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
652 tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
653 if (unlikely(tx_ring->tx_buffer_info == NULL))
656 size = sizeof(uint16_t) * tx_ring->ring_size;
657 tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
658 if (unlikely(tx_ring->free_tx_ids == NULL))
661 size = tx_ring->tx_max_header_size;
662 tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
664 if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
668 for (i = 0; i < tx_ring->ring_size; i++)
669 tx_ring->free_tx_ids[i] = i;
672 ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
673 sizeof(tx_ring->tx_stats));
675 tx_ring->next_to_use = 0;
676 tx_ring->next_to_clean = 0;
677 tx_ring->acum_pkts = 0;
680 ENA_RING_MTX_LOCK(tx_ring);
681 drbr_flush(adapter->ifp, tx_ring->br);
682 ENA_RING_MTX_UNLOCK(tx_ring);
685 for (i = 0; i < tx_ring->ring_size; i++) {
687 &tx_ring->tx_buffer_info[i].dmamap);
696 map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
712 TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
713 tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
714 taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
715 if (unlikely(tx_ring->enqueue_tq == NULL)) {
718 i = tx_ring->ring_size;
722 tx_ring->running = true;
732 taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
738 ena_release_all_tx_dmamap(tx_ring);
740 free(tx_ring->free_tx_ids, M_DEVBUF);
741 tx_ring->free_tx_ids = NULL;
743 free(tx_ring->tx_buffer_info, M_DEVBUF);
744 tx_ring->tx_buffer_info = NULL;
759 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
765 while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
766 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
768 taskqueue_free(tx_ring->enqueue_tq);
770 ENA_RING_MTX_LOCK(tx_ring);
772 drbr_flush(adapter->ifp, tx_ring->br);
775 for (int i = 0; i < tx_ring->ring_size; i++) {
777 tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
779 tx_ring->tx_buffer_info[i].dmamap);
781 tx_ring->tx_buffer_info[i].dmamap);
785 nm_info = &tx_ring->tx_buffer_info[i].nm_info;
801 m_freem(tx_ring->tx_buffer_info[i].mbuf);
802 tx_ring->tx_buffer_info[i].mbuf = NULL;
804 ENA_RING_MTX_UNLOCK(tx_ring);
807 free(tx_ring->tx_buffer_info, M_DEVBUF);
808 tx_ring->tx_buffer_info = NULL;
810 free(tx_ring->free_tx_ids, M_DEVBUF);
811 tx_ring->free_tx_ids = NULL;
813 free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
814 tx_ring->push_buf_intermediate_buf = NULL;
1504 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1506 ENA_RING_MTX_LOCK(tx_ring);
1507 for (int i = 0; i < tx_ring->ring_size; i++) {
1508 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1531 ENA_RING_MTX_UNLOCK(tx_ring);
1611 ring = &adapter->tx_ring[i];
2101 struct ena_ring *tx_ring;
2110 tx_ring = &adapter->tx_ring[i];
2111 counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
2147 adapter->tx_ring[i].ring_size = new_tx_size;
2206 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
3155 struct ena_ring *tx_ring)
3161 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id);
3166 tx_ring->qid);
3173 tx_ring->qid);
3180 struct ena_ring *tx_ring)
3195 for (i = 0; i < tx_ring->ring_size; i++) {
3196 tx_buf = &tx_ring->tx_buffer_info[i];
3205 if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) &&
3214 tx_ring->qid);
3225 tx_ring->tx_last_cleanup_ticks);
3231 tx_ring->qid, i, time_since_last_cleanup,
3254 cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending));
3255 cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running)));
3257 reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring);
3263 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx);
3277 struct ena_ring *tx_ring;
3296 tx_ring = &adapter->tx_ring[i];
3299 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
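
Read in order, the matches trace the TX ring lifecycle in ena.c: per-ring resource setup (the 640-744 block), teardown in ena_free_tx_resources (759-814), per-queue flush at 1504, and the missing-completion watchdog from 3155 onward. The following is a condensed sketch of the setup path only, reconstructed from the fragments above; the dmamap creation and taskqueue setup are omitted, and the error labels and return values are assumptions, not the driver's exact code.

/*
 * Condensed sketch of ena_setup_tx_resources() as visible in the matches
 * above.  Field and helper names (tx_buffer_info, free_tx_ids,
 * push_buf_intermediate_buf, ena_reset_counters, drbr_flush, the ring mutex
 * macros) come from the fragments; error labels are assumptions.
 */
static int
ena_setup_tx_resources_sketch(struct ena_ring *tx_ring)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	size_t size;
	int i;

	/* One ena_tx_buffer per descriptor slot. */
	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->tx_buffer_info == NULL))
		return (ENOMEM);

	/* Pool of free TX request ids, initially 0 .. ring_size - 1. */
	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->free_tx_ids == NULL))
		goto err_buf_info_free;

	/* Intermediate buffer for headers pushed to the device (LLQ mode). */
	tx_ring->push_buf_intermediate_buf = malloc(tx_ring->tx_max_header_size,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
		goto err_tx_ids_free;

	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset software state and per-ring statistics. */
	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->acum_pkts = 0;

	/* Drop anything still queued in the buf_ring from a previous run. */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	return (0);

err_tx_ids_free:
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
err_buf_info_free:
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;
	return (ENOMEM);
}

The teardown matches (759-814) mirror this in reverse: cancel and drain the enqueue taskqueue, flush the buf_ring under the ring mutex, unload and destroy the per-buffer dmamaps, free any leftover mbufs, then free the same three allocations and NULL the pointers.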