Lines Matching refs:tx_ring

49 static inline int ena_get_tx_req_id(struct ena_ring *tx_ring,
58 static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
73 struct ena_ring *tx_ring;
80 tx_ring = que->tx_ring;
86 atomic_store_8(&tx_ring->cleanup_running, 1);
98 atomic_store_8(&tx_ring->first_interrupt, 1);
103 txc = ena_tx_cleanup(tx_ring);
116 counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
118 atomic_store_8(&tx_ring->cleanup_running, 0);
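
The matches at source lines 73-118 come from the per-queue TX cleanup handler: it marks cleanup as in progress, records that the queue has seen its first interrupt (used by the watchdog), runs the completion pass, unmasks the interrupt (bumping unmask_interrupt_num), and finally clears the in-progress flag. A minimal user-space model of that ordering, using C11 atomics in place of the driver's atomic_store_8(); tx_cleanup_pass() is a placeholder here, not the driver's ena_tx_cleanup():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cleanup_model {
	atomic_bool cleanup_running;	/* models tx_ring->cleanup_running */
	atomic_bool first_interrupt;	/* models tx_ring->first_interrupt */
};

/* Placeholder completion pass; returns how many packets were reaped. */
static int
tx_cleanup_pass(struct cleanup_model *r)
{
	(void)r;
	return (0);
}

/* Models the ordering of the handler: flag, clean, unmask, unflag. */
static void
cleanup_handler(struct cleanup_model *r)
{
	atomic_store(&r->cleanup_running, true);	/* cleanup in progress */
	atomic_store(&r->first_interrupt, true);	/* watchdog: IRQs arriving */

	printf("reaped %d packets\n", tx_cleanup_pass(r));

	/* The driver unmasks the IRQ at this point, then drops the flag. */
	atomic_store(&r->cleanup_running, false);
}

int
main(void)
{
	struct cleanup_model r = { false, false };

	cleanup_handler(&r);
	return (0);
}
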
124 struct ena_ring *tx_ring = (struct ena_ring *)arg;
125 if_t ifp = tx_ring->adapter->ifp;
127 while (!drbr_empty(ifp, tx_ring->br) && tx_ring->running &&
129 ENA_RING_MTX_LOCK(tx_ring);
130 ena_start_xmit(tx_ring);
131 ENA_RING_MTX_UNLOCK(tx_ring);
139 struct ena_ring *tx_ring;
166 tx_ring = &adapter->tx_ring[i];
169 is_drbr_empty = drbr_empty(ifp, tx_ring->br);
170 ret = drbr_enqueue(ifp, tx_ring->br, m);
172 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
176 if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) {
177 ena_start_xmit(tx_ring);
178 ENA_RING_MTX_UNLOCK(tx_ring);
180 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
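
The ena_mq_start() matches (source lines 139-180) show the usual FreeBSD multiqueue transmit pattern: the mbuf is enqueued on the per-ring buf_ring, then either transmitted inline, if the ring was empty beforehand and the ring mutex can be taken without blocking, or handed to the per-ring enqueue taskqueue. A rough stand-alone model of that decision; pthread_mutex_trylock() (which returns 0 on success, unlike mtx_trylock(9)) stands in for ENA_RING_MTX_TRYLOCK, and start_xmit()/defer_to_task() are stubs for ena_start_xmit() and taskqueue_enqueue():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct txq_model {
	pthread_mutex_t mtx;	/* stands in for the per-ring mutex */
	int queued;		/* stands in for buf_ring occupancy */
};

static void
start_xmit(struct txq_model *q)
{
	printf("inline xmit, %d queued\n", q->queued);
}

static void
defer_to_task(struct txq_model *q)
{
	printf("deferred to taskqueue, %d queued\n", q->queued);
}

/* Models ena_mq_start(): enqueue, then pick inline vs. deferred draining. */
static int
mq_start(struct txq_model *q)
{
	bool was_empty = (q->queued == 0);	/* drbr_empty() before enqueue */

	q->queued++;				/* drbr_enqueue() */

	if (was_empty && pthread_mutex_trylock(&q->mtx) == 0) {
		/* Fast path: nobody else is draining this ring, do it here. */
		start_xmit(q);
		pthread_mutex_unlock(&q->mtx);
	} else {
		/* Slow path: let the per-ring taskqueue drain it. */
		defer_to_task(q);
	}
	return (0);
}

int
main(void)
{
	struct txq_model q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	mq_start(&q);	/* ring was empty: transmits inline */
	mq_start(&q);	/* ring no longer empty: defers */
	return (0);
}
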
190 struct ena_ring *tx_ring = adapter->tx_ring;
193 for (i = 0; i < adapter->num_io_queues; ++i, ++tx_ring)
194 if (!drbr_empty(ifp, tx_ring->br)) {
195 ENA_RING_MTX_LOCK(tx_ring);
196 drbr_flush(ifp, tx_ring->br);
197 ENA_RING_MTX_UNLOCK(tx_ring);
208 ena_get_tx_req_id(struct ena_ring *tx_ring, struct ena_com_io_cq *io_cq,
211 struct ena_adapter *adapter = tx_ring->adapter;
217 rc = validate_tx_req_id(tx_ring, *req_id, rc);
219 if (unlikely(tx_ring->tx_buffer_info[*req_id].mbuf == NULL)) {
222 *req_id, tx_ring->qid);
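
ena_get_tx_req_id() (source lines 208-222) reads the next completed request id from the completion queue and sanity-checks it: the id has to fall inside the ring, and the slot it names must still hold an mbuf, otherwise the completion is treated as invalid (the driver logs the req_id and qid, as the match at line 222 shows). The check itself is just a bounds test plus a NULL test; a compact stand-alone version with illustrative names, not the driver's:

#include <stddef.h>
#include <stdint.h>

struct tx_slot {
	void *mbuf;	/* stand-in for struct ena_tx_buffer's mbuf pointer */
};

/* Returns 0 when req_id names a live, in-range TX slot; -1 otherwise. */
static int
validate_req_id(const struct tx_slot *slots, uint16_t ring_size,
    uint16_t req_id)
{
	if (req_id >= ring_size)
		return (-1);	/* completion points outside the ring */
	if (slots[req_id].mbuf == NULL)
		return (-1);	/* slot was never posted or was already freed */
	return (0);
}
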
232 * @tx_ring: ring for which we want to clean packets
242 ena_tx_cleanup(struct ena_ring *tx_ring)
256 adapter = tx_ring->que->adapter;
257 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
259 next_to_clean = tx_ring->next_to_clean;
262 if (netmap_tx_irq(adapter->ifp, tx_ring->qid) != NM_IRQ_PASS)
270 rc = ena_get_tx_req_id(tx_ring, io_cq, &req_id);
274 tx_info = &tx_ring->tx_buffer_info[req_id];
286 tx_ring->qid, mbuf);
292 tx_ring->free_tx_ids[next_to_clean] = req_id;
294 tx_ring->ring_size);
299 tx_ring->next_to_clean = next_to_clean;
310 tx_ring->qid, work_done);
314 tx_ring->next_to_clean = next_to_clean;
321 * ena_xmit_mbuf() before checking for tx_ring->running.
325 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
327 if (unlikely(!tx_ring->running && above_thresh)) {
328 ENA_RING_MTX_LOCK(tx_ring);
330 tx_ring->ena_com_io_sq, ENA_TX_RESUME_THRESH);
331 if (!tx_ring->running && above_thresh) {
332 tx_ring->running = true;
333 counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
334 taskqueue_enqueue(tx_ring->enqueue_tq,
335 &tx_ring->enqueue_task);
337 ENA_RING_MTX_UNLOCK(tx_ring);
340 tx_ring->tx_last_cleanup_ticks = ticks;
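
Inside ena_tx_cleanup() (source lines 232-340), each completed request id is recycled into free_tx_ids and next_to_clean advances modulo the ring size; once the send queue has regained enough space, a stopped ring is restarted, with the space check repeated under the ring lock to avoid racing the transmit path. The index recycling is the piece worth isolating. The sketch below assumes a power-of-two ring size, which is what the mask-style wrap of ENA_TX_RING_IDX_NEXT suggests:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u	/* must be a power of two for the mask below */

struct clean_model {
	uint16_t free_ids[RING_SIZE];	/* models free_tx_ids */
	uint16_t next_to_clean;		/* models tx_ring->next_to_clean */
};

/* Return a completed request id to the free list and advance the index. */
static void
recycle_req_id(struct clean_model *r, uint16_t req_id)
{
	r->free_ids[r->next_to_clean] = req_id;
	r->next_to_clean = (r->next_to_clean + 1) & (RING_SIZE - 1);
}

int
main(void)
{
	struct clean_model r = { { 0 }, 0 };

	recycle_req_id(&r, 3);
	recycle_req_id(&r, 5);
	printf("next_to_clean=%u\n", (unsigned)r.next_to_clean);	/* 2 */
	return (0);
}
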
820 ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
826 adapter = tx_ring->adapter;
834 ((*mbuf)->m_pkthdr.len < tx_ring->tx_max_header_size))
837 counter_u64_add(tx_ring->tx_stats.collapse, 1);
842 counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
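
ena_check_and_collapse_mbuf() (source lines 820-842) guards against mbuf chains with more fragments than the device can describe: unless the packet is small enough to be copied into the push header anyway, an over-long chain is coalesced, with the collapse/collapse_err counters recording the outcome. Counting the fragments is the heart of the check; a tiny generic version over a hypothetical linked buffer type, not the driver's mbuf:

#include <stdbool.h>
#include <stddef.h>

struct frag {
	struct frag *next;	/* stand-in for an mbuf in a chain */
};

/*
 * Returns true when the chain has more fragments than the device supports
 * and therefore needs to be coalesced (the driver would use m_collapse()).
 */
static bool
needs_collapse(const struct frag *chain, int max_segs)
{
	int n = 0;

	for (const struct frag *f = chain; f != NULL; f = f->next) {
		if (++n > max_segs)
			return (true);
	}
	return (false);
}
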
853 ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
856 struct ena_adapter *adapter = tx_ring->adapter;
880 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
892 tx_ring->tx_max_header_size);
903 tx_ring->push_buf_intermediate_buf);
904 *push_hdr = tx_ring->push_buf_intermediate_buf;
906 counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
914 if (mbuf->m_pkthdr.len <= tx_ring->tx_max_header_size) {
917 offset = tx_ring->tx_max_header_size;
962 counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
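
ena_tx_map_mbuf() (source lines 853-962) handles the LLQ placement policy (ENA_ADMIN_PLACEMENT_POLICY_DEV) by splitting each packet: up to tx_max_header_size bytes end up in push_buf_intermediate_buf and are written to the device inline (counted by llq_buffer_copy), while the remainder is DMA-mapped as usual. The length computation is a simple clamp; a small stand-alone sketch of the split, using memcpy where the driver would use m_copydata() and an illustrative MAX_HEADER_SIZE constant:

#include <stdint.h>
#include <string.h>

#define MAX_HEADER_SIZE 96	/* stands in for tx_ring->tx_max_header_size */

/*
 * Copy the pushed part of the packet into the intermediate buffer and
 * report how many bytes the device receives inline; the remaining
 * pkt_len - header_len bytes would be DMA-mapped separately.
 */
static uint16_t
split_push_header(const uint8_t *pkt, uint16_t pkt_len, uint8_t *push_buf)
{
	uint16_t header_len =
	    (pkt_len <= MAX_HEADER_SIZE) ? pkt_len : MAX_HEADER_SIZE;

	memcpy(push_buf, pkt, header_len);
	return (header_len);
}
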
968 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
984 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
985 adapter = tx_ring->que->adapter;
990 rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
999 next_to_use = tx_ring->next_to_use;
1000 req_id = tx_ring->free_tx_ids[next_to_use];
1001 tx_info = &tx_ring->tx_buffer_info[req_id];
1007 rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len);
1022 if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
1023 ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) {
1026 tx_ring->que->id);
1027 ena_ring_tx_doorbell(tx_ring);
1035 tx_ring->que->id);
1041 counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
1046 counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
1047 counter_u64_add_protected(tx_ring->tx_stats.bytes,
1059 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
1060 tx_ring->ring_size);
1066 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1068 ena_log_io(pdev, DBG, "Stop queue %d\n", tx_ring->que->id);
1070 tx_ring->running = false;
1071 counter_u64_add(tx_ring->tx_stats.queue_stop, 1);
1082 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1084 tx_ring->running = true;
1085 counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
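
ena_xmit_mbuf() (source lines 968-1085) batches doorbell writes: packets accumulate in acum_pkts until ENA_DB_THRESHOLD is hit or ena_com_is_doorbell_needed() asks for a flush, and after each submission the queue is stopped whenever the send queue drops below a safety margin, to be woken again by the cleanup path once space returns. A user-space model of that accounting; DB_THRESHOLD and STOP_THRESHOLD are illustrative values, not the driver's constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DB_THRESHOLD	16	/* illustrative stand-in for ENA_DB_THRESHOLD */
#define STOP_THRESHOLD	4	/* free descriptors required to keep running */

struct sq_model {
	uint32_t acum_pkts;	/* packets queued since the last doorbell */
	int32_t free_descs;	/* descriptors left in the send queue */
	bool running;		/* models tx_ring->running */
};

static void
ring_doorbell(struct sq_model *sq)
{
	printf("doorbell after %u packets\n", sq->acum_pkts);
	sq->acum_pkts = 0;
}

/* Models the per-packet bookkeeping on the transmit path. */
static void
xmit_one(struct sq_model *sq, int descs_needed)
{
	if (sq->acum_pkts == DB_THRESHOLD)
		ring_doorbell(sq);	/* flush the batch before it grows */

	sq->free_descs -= descs_needed;	/* descriptors consumed by this packet */
	sq->acum_pkts++;

	if (sq->free_descs < STOP_THRESHOLD)
		sq->running = false;	/* stop; cleanup wakes the queue later */
}

int
main(void)
{
	struct sq_model sq = { 0, 64, true };

	for (int i = 0; i < 40 && sq.running; i++)
		xmit_one(&sq, 2);
	printf("running=%d free=%d\n", sq.running, (int)sq.free_descs);
	return (0);
}
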
1102 ena_start_xmit(struct ena_ring *tx_ring)
1105 struct ena_adapter *adapter = tx_ring->adapter;
1108 ENA_RING_MTX_ASSERT(tx_ring);
1116 while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
1121 if (unlikely(!tx_ring->running)) {
1122 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1126 if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) {
1128 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1130 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1133 drbr_advance(adapter->ifp, tx_ring->br);
1139 drbr_advance(adapter->ifp, tx_ring->br);
1144 tx_ring->acum_pkts++;
1149 if (likely(tx_ring->acum_pkts != 0)) {
1151 ena_ring_tx_doorbell(tx_ring);
1154 if (unlikely(!tx_ring->running))
1155 taskqueue_enqueue(tx_ring->que->cleanup_tq,
1156 &tx_ring->que->cleanup_task);
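
Finally, ena_start_xmit() (source lines 1102-1156) drains the buf_ring under the ring mutex: peek at the head, put the mbuf back and stop if the ring has been stopped or the transmit fails, otherwise advance and count the packet toward the pending doorbell, which is rung once after the loop. A simplified model of that peek/putback/advance discipline over a plain array queue; xmit_one() is a stub where the driver calls ena_xmit_mbuf():

#include <stdbool.h>
#include <stdio.h>

#define QLEN 8

struct drain_model {
	int pkts[QLEN];		/* stand-in for the per-ring buf_ring */
	int head, tail;
	bool running;		/* models tx_ring->running */
	int acum_pkts;		/* packets awaiting a doorbell */
};

static bool q_empty(struct drain_model *d) { return (d->head == d->tail); }
static int q_peek(struct drain_model *d) { return (d->pkts[d->head % QLEN]); }
static void q_advance(struct drain_model *d) { d->head++; }

/* Stub transmit: returns 0 on success, nonzero to force a "putback". */
static int
xmit_one(int pkt)
{
	printf("xmit %d\n", pkt);
	return (0);
}

/* Models the drain loop: peek, try to send, advance on success only. */
static void
start_xmit(struct drain_model *d)
{
	while (!q_empty(d)) {
		int pkt = q_peek(d);

		if (!d->running)
			break;		/* queue stopped: leave the packet queued */
		if (xmit_one(pkt) != 0)
			break;		/* failed: "putback" by not advancing */

		q_advance(d);
		d->acum_pkts++;
	}

	if (d->acum_pkts != 0) {
		printf("doorbell for %d packets\n", d->acum_pkts);	/* final flush */
		d->acum_pkts = 0;
	}
}

int
main(void)
{
	struct drain_model d = {
		.pkts = { 1, 2, 3 }, .head = 0, .tail = 3, .running = true
	};

	start_xmit(&d);
	return (0);
}
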