Lines Matching defs:rx_ring

400 rxr = &adapter->rx_ring[i];
414 que->rx_ring = rxr;
432 rxr = &adapter->rx_ring[i];
475 struct ena_ring *rxr = &adapter->rx_ring[qid];
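
The matches around lines 400-475 show each I/O queue being wired to its slot in the adapter's rx_ring array. Below is a minimal, self-contained sketch of that layout; the struct and field names are placeholders for illustration, not the driver's actual definitions.

    #define NUM_QUEUES 8                /* illustrative queue count */

    struct ena_ring_model {             /* stand-in for struct ena_ring */
        int qid;
        int ring_size;
    };

    struct ena_que_model {              /* stand-in for struct ena_que */
        struct ena_ring_model *rx_ring;
    };

    struct ena_adapter_model {
        struct ena_ring_model rx_ring[NUM_QUEUES];
        struct ena_que_model que[NUM_QUEUES];
    };

    /* Mirror of the pattern in the matches: point each queue at its ring. */
    static void
    wire_queues(struct ena_adapter_model *adapter)
    {
        for (int i = 0; i < NUM_QUEUES; i++) {
            struct ena_ring_model *rxr = &adapter->rx_ring[i];

            rxr->qid = i;
            adapter->que[i].rx_ring = rxr;
        }
    }
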
873 struct ena_ring *rx_ring = que->rx_ring;
876 size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
880 rx_ring->initialized = false;
889 rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
891 size = sizeof(uint16_t) * rx_ring->ring_size;
892 rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
894 for (i = 0; i < rx_ring->ring_size; i++)
895 rx_ring->free_rx_ids[i] = i;
898 ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
899 sizeof(rx_ring->rx_stats));
901 rx_ring->next_to_clean = 0;
902 rx_ring->next_to_use = 0;
905 for (i = 0; i < rx_ring->ring_size; i++) {
907 &(rx_ring->rx_buffer_info[i].map));
917 int err = tcp_lro_init(&rx_ring->lro);
924 rx_ring->lro.ifp = adapter->ifp;
933 rx_ring->rx_buffer_info[i].map);
936 free(rx_ring->free_rx_ids, M_DEVBUF);
937 rx_ring->free_rx_ids = NULL;
938 free(rx_ring->rx_buffer_info, M_DEVBUF);
939 rx_ring->rx_buffer_info = NULL;
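
Lines 873-939 come from the RX resource setup path: the ring allocates a buffer-info array and a free_rx_ids index array, seeds the free list with the identity mapping, zeroes next_to_use/next_to_clean, and unwinds both allocations if a later per-buffer step (DMA map creation in the driver) fails. A simplified user-space model of that pattern follows; malloc/free stand in for the kernel allocator and the hypothetical may_fail() callback stands in for the per-buffer setup that can fail.

    #include <stdint.h>
    #include <stdlib.h>

    struct rx_buffer { void *mbuf; };      /* stand-in for struct ena_rx_buffer */

    struct rx_ring_model {
        int ring_size;
        struct rx_buffer *rx_buffer_info;
        uint16_t *free_rx_ids;
        int next_to_use, next_to_clean;
    };

    /* Returns 0 on success, -1 on failure with everything released again. */
    static int
    setup_rx_resources(struct rx_ring_model *rx, int ring_size,
        int (*may_fail)(int idx))
    {
        rx->ring_size = ring_size;

        rx->rx_buffer_info = calloc(ring_size, sizeof(*rx->rx_buffer_info));
        rx->free_rx_ids = malloc(ring_size * sizeof(*rx->free_rx_ids));
        if (rx->rx_buffer_info == NULL || rx->free_rx_ids == NULL)
            goto err;

        /* Initially every descriptor id is free, in order. */
        for (int i = 0; i < ring_size; i++)
            rx->free_rx_ids[i] = (uint16_t)i;

        rx->next_to_use = 0;
        rx->next_to_clean = 0;

        /* Per-buffer setup; in the driver this is the DMA map creation. */
        for (int i = 0; i < ring_size; i++) {
            if (may_fail != NULL && may_fail(i) != 0)
                goto err;
        }
        return (0);

    err:
        free(rx->free_rx_ids);
        rx->free_rx_ids = NULL;
        free(rx->rx_buffer_info);
        rx->rx_buffer_info = NULL;
        return (-1);
    }
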
953 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
956 for (int i = 0; i < rx_ring->ring_size; i++) {
958 rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
959 m_freem(rx_ring->rx_buffer_info[i].mbuf);
960 rx_ring->rx_buffer_info[i].mbuf = NULL;
962 rx_ring->rx_buffer_info[i].map);
964 rx_ring->rx_buffer_info[i].map);
968 tcp_lro_free(&rx_ring->lro);
971 free(rx_ring->rx_buffer_info, M_DEVBUF);
972 rx_ring->rx_buffer_info = NULL;
974 free(rx_ring->free_rx_ids, M_DEVBUF);
975 rx_ring->free_rx_ids = NULL;
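
Lines 953-975 are the matching teardown: every populated slot has its mbuf freed and its DMA map synced and released before the two arrays are freed (the truncated calls at 962 and 964 are most likely the map unload and destroy steps). A compact model of that per-slot release order, with no-op stand-ins for the bus_dma and mbuf primitives:

    #include <stdint.h>
    #include <stdlib.h>

    struct rx_buf_model { void *mbuf; void *map; };

    /* Stand-ins for the kernel primitives named in the matches. */
    static void sync_postread(void *map) { (void)map; }   /* bus_dmamap_sync(POSTREAD) */
    static void unload_map(void *map)    { (void)map; }   /* bus_dmamap_unload() */
    static void destroy_map(void *map)   { (void)map; }   /* bus_dmamap_destroy() */
    static void free_mbuf(void *m)       { free(m); }     /* m_freem() */

    static void
    free_rx_resources(struct rx_buf_model *info, uint16_t *free_ids,
        int ring_size)
    {
        for (int i = 0; i < ring_size; i++) {
            if (info[i].mbuf != NULL) {
                sync_postread(info[i].map);  /* CPU must see device writes */
                free_mbuf(info[i].mbuf);
                info[i].mbuf = NULL;
                unload_map(info[i].map);
            }
            destroy_map(info[i].map);
        }
        free(info);
        free(free_ids);
    }
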
1022 ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1037 rx_ring->rx_mbuf_sz);
1040 counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
1043 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1048 mlen = rx_ring->rx_mbuf_sz;
1062 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
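
Lines 1022-1062 are from the RX mbuf allocation helper. The two distinct failure counters suggest the usual two-step pattern: try a jumbo-sized cluster of rx_mbuf_sz bytes first, fall back to a standard cluster if that fails, and count a hard failure only when the fallback also fails; a DMA mapping error gets its own counter. A simplified model of that flow, where malloc and the dma_map() callback are stand-ins for the mbuf and bus_dma APIs, and STD_CLUSTER_SIZE is an illustrative constant:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct rx_stats_model {
        uint64_t mjum_alloc_fail;
        uint64_t mbuf_alloc_fail;
        uint64_t dma_mapping_err;
    };

    #define STD_CLUSTER_SIZE 2048         /* stand-in for a standard cluster */

    /* Returns 0 and fills *buf/*len, or an errno-style value on failure. */
    static int
    alloc_rx_buf(struct rx_stats_model *st, size_t rx_mbuf_sz,
        void **buf, size_t *len, int (*dma_map)(void *))
    {
        *buf = malloc(rx_mbuf_sz);          /* jumbo-sized attempt */
        *len = rx_mbuf_sz;
        if (*buf == NULL) {
            st->mjum_alloc_fail++;
            *buf = malloc(STD_CLUSTER_SIZE); /* standard-cluster fallback */
            *len = STD_CLUSTER_SIZE;
            if (*buf == NULL) {
                st->mbuf_alloc_fail++;
                return (ENOMEM);
            }
        }
        if (dma_map(*buf) != 0) {           /* DMA mapping step in the driver */
            st->dma_mapping_err++;
            free(*buf);
            *buf = NULL;
            return (EFAULT);
        }
        return (0);
    }
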
1085 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1103 * @rx_ring: the ring which we want to feed with free descriptors
1108 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1110 struct ena_adapter *adapter = rx_ring->adapter;
1116 ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid);
1118 next_to_use = rx_ring->next_to_use;
1126 req_id = rx_ring->free_rx_ids[next_to_use];
1127 rx_info = &rx_ring->rx_buffer_info[req_id];
1129 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1130 rc = ena_netmap_alloc_rx_slot(adapter, rx_ring,
1134 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1138 rx_ring->qid);
1141 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1146 rx_ring->qid);
1150 rx_ring->ring_size);
1154 counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1157 rx_ring->qid, i, num);
1161 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1163 rx_ring->next_to_use = next_to_use;
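
Lines 1103-1163 are the heart of the refill path: for each of the num requested descriptors, the loop pulls the next free request id from free_rx_ids[next_to_use], allocates a buffer for that slot (or a netmap slot), posts a single RX descriptor to the submission queue, and advances next_to_use with wrap-around; if allocation or posting fails part-way it bumps the partial-refill counter and stops, and the doorbell is written once at the end. A self-contained model of that loop, assuming a power-of-two ring size and reducing the descriptor-post and doorbell steps to hypothetical callbacks:

    #include <stdint.h>

    struct refill_ring {
        uint16_t *free_rx_ids;     /* req_id recycling list, ring_size entries */
        uint32_t ring_size;        /* assumed power of two, as in the ring macros */
        uint32_t next_to_use;
        uint64_t refill_partial;   /* stat bumped on an early stop */
    };

    /* Both callbacks return 0 on success; they stand in for the buffer
     * allocation and the single-descriptor post to the submission queue. */
    static uint32_t
    refill_rx_bufs(struct refill_ring *r, uint32_t num,
        int (*alloc_slot)(uint16_t req_id),
        int (*post_desc)(uint16_t req_id),
        void (*write_doorbell)(void))
    {
        uint32_t next_to_use = r->next_to_use;
        uint32_t i;

        for (i = 0; i < num; i++) {
            uint16_t req_id = r->free_rx_ids[next_to_use];

            if (alloc_slot(req_id) != 0 || post_desc(req_id) != 0) {
                r->refill_partial++;
                break;
            }
            /* Advance with wrap-around (mask works because size is 2^n). */
            next_to_use = (next_to_use + 1) & (r->ring_size - 1);
        }

        if (i > 0)
            write_doorbell();      /* one doorbell for the whole batch */

        r->next_to_use = next_to_use;
        return (i);                /* descriptors actually posted */
    }
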
1441 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1444 for (i = 0; i < rx_ring->ring_size; i++) {
1445 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1448 ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1453 ena_netmap_free_rx_slot(adapter, rx_ring,
1468 struct ena_ring *rx_ring;
1472 rx_ring = &adapter->rx_ring[i];
1473 bufs_num = rx_ring->ring_size - 1;
1474 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1481 rx_ring->initialized = true;
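
Lines 1441-1481 cover emptying and refilling whole rings. The refill-all path asks for ring_size - 1 buffers, the usual one-slot-empty convention that keeps a completely full ring distinguishable from a completely empty one, and only after the refill does it mark the ring initialized. A small sketch of that per-queue loop, with a refill callback standing in for the driver's own refill routine:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rxq_model {
        uint32_t ring_size;
        bool initialized;
    };

    static void
    refill_all_rx_bufs(struct rxq_model *rings, int num_queues,
        uint32_t (*refill)(struct rxq_model *rq, uint32_t num))
    {
        for (int i = 0; i < num_queues; i++) {
            struct rxq_model *rq = &rings[i];
            /* Leave one descriptor unused so full != empty. */
            uint32_t bufs_num = rq->ring_size - 1;
            uint32_t posted = refill(rq, bufs_num);

            if (posted != bufs_num)
                fprintf(stderr, "queue %d: refilled only %u of %u bufs\n",
                    i, posted, bufs_num);
            rq->initialized = true;
        }
    }
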
1647 ring = &adapter->rx_ring[i];
2148 adapter->rx_ring[i].ring_size = new_rx_size;
2207 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
3131 struct ena_ring *rx_ring)
3133 if (likely(atomic_load_8(&rx_ring->first_interrupt)))
3136 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3139 rx_ring->no_interrupt_event_cnt++;
3141 if (rx_ring->no_interrupt_event_cnt ==
3145 rx_ring->qid);
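
Lines 3131-3145 (driven per queue by the loop at 3278-3303) implement a missing-interrupt watchdog: once any interrupt has been seen on the ring the check is skipped, an empty completion queue also passes, but a non-empty queue with no interrupt ever delivered bumps no_interrupt_event_cnt, and hitting the limit logs the stuck queue id and feeds the driver's recovery handling. A minimal model of that check; the threshold name and the boolean "needs reset" result are placeholders:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_NO_INTERRUPT_ITERATIONS 3  /* illustrative threshold */

    struct rx_irq_watch {
        bool first_interrupt;              /* set from the ISR (atomically) */
        uint32_t no_interrupt_event_cnt;
    };

    /* Returns true when the queue looks stuck and recovery is warranted. */
    static bool
    check_for_rx_interrupt_queue(struct rx_irq_watch *w, bool cq_empty)
    {
        if (w->first_interrupt)            /* interrupts are clearly arriving */
            return (false);

        if (cq_empty)                      /* nothing pending, nothing missed */
            return (false);

        w->no_interrupt_event_cnt++;
        if (w->no_interrupt_event_cnt == MAX_NO_INTERRUPT_ITERATIONS)
            return (true);                 /* driver logs the queue id here */

        return (false);
    }
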
3278 struct ena_ring *rx_ring;
3297 rx_ring = &adapter->rx_ring[i];
3303 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3332 struct ena_ring *rx_ring;
3342 rx_ring = &adapter->rx_ring[i];
3345 rx_ring->ena_com_io_sq);
3346 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3347 rx_ring->empty_rx_queue++;
3349 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3350 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3357 taskqueue_enqueue(rx_ring->que->cleanup_tq,
3358 &rx_ring->que->cleanup_task);
3359 rx_ring->empty_rx_queue = 0;
3362 rx_ring->empty_rx_queue = 0;
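
Lines 3332-3362 are the empty-RX-ring check: when the submission queue reports ring_size - 1 free descriptors (no buffers posted at all) on consecutive passes, the counter eventually reaches EMPTY_RX_REFILL, the empty_rx_ring stat is bumped, and the queue's cleanup task is enqueued to force a refill; any non-empty observation resets the counter. A compact model of that state machine, with the task enqueue reduced to a callback:

    #include <stdint.h>

    #define EMPTY_RX_REFILL 2              /* passes before forcing a refill */

    struct empty_rx_watch {
        uint32_t ring_size;
        uint32_t empty_rx_queue;           /* consecutive empty observations */
        uint64_t empty_rx_ring;            /* stat: times a refill was forced */
    };

    static void
    check_for_empty_rx_ring(struct empty_rx_watch *w, uint32_t free_descs,
        void (*kick_cleanup)(void))
    {
        /* ring_size - 1 free descriptors means nothing is posted at all. */
        if (free_descs == w->ring_size - 1) {
            w->empty_rx_queue++;
            if (w->empty_rx_queue >= EMPTY_RX_REFILL) {
                w->empty_rx_ring++;
                kick_cleanup();            /* taskqueue_enqueue() in the driver */
                w->empty_rx_queue = 0;
            }
        } else {
            w->empty_rx_queue = 0;
        }
    }
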