/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#include <rte_common.h>
#include <rte_service_component.h>

#include "efx.h"
#include "efx_regs_counters_pkt_format.h"

#include "sfc_ev.h"
#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_service.h"

/**
 * Approximate maximum number of counters per packet.
 * The actual maximum depends on the per-counter data offset, which is
 * specified in the counter packet header.
 */
#define SFC_MAE_COUNTERS_PER_PACKET_MAX \
	((SFC_MAE_COUNTER_STREAM_PACKET_SIZE - \
	  ER_RX_SL_PACKETISER_HEADER_WORD_SIZE) / \
	 ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE)

/**
 * Minimum number of Rx buffers in the counters-only Rx queue.
 */
#define SFC_MAE_COUNTER_RXQ_BUFS_MIN \
	(SFC_COUNTER_RXQ_RX_DESC_COUNT - SFC_COUNTER_RXQ_REFILL_LEVEL)

/**
 * Approximate number of counter updates that fit in the counters-only
 * Rx queue. The number is inaccurate since SFC_MAE_COUNTERS_PER_PACKET_MAX
 * is inaccurate (see above). However, it gives a rough estimate of the
 * number of counter updates which can fit in an Rx queue after an empty
 * poll.
 *
 * The define is not actually used, but it documents the calculation details.
 */
#define SFC_MAE_COUNTERS_RXQ_SPACE \
	(SFC_MAE_COUNTER_RXQ_BUFS_MIN * SFC_MAE_COUNTERS_PER_PACKET_MAX)
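
/*
 * Illustration only: the exact values live in sfc_mae_counter.h and the
 * packetiser register definitions, so the numbers below are assumptions.
 * With on the order of a thousand available Rx buffers and roughly a
 * hundred counter updates per packet, the queue would absorb more than
 * 100K counter updates, which is the figure relied upon by the polling
 * thread comment further below.
 */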

static uint32_t
sfc_mae_counter_get_service_lcore(struct sfc_adapter *sa)
{
	uint32_t cid;

	cid = sfc_get_service_lcore(sa->socket_id);
	if (cid != RTE_MAX_LCORE)
		return cid;

	if (sa->socket_id != SOCKET_ID_ANY)
		cid = sfc_get_service_lcore(SOCKET_ID_ANY);

	if (cid == RTE_MAX_LCORE) {
		sfc_warn(sa, "failed to get service lcore for counter service");
	} else if (sa->socket_id != SOCKET_ID_ANY) {
		sfc_warn(sa,
			"failed to get service lcore for counter service at socket %d, but got at socket %u",
			sa->socket_id, rte_lcore_to_socket_id(cid));
	}
	return cid;
}

bool
sfc_mae_counter_rxq_required(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	if (encp->enc_mae_supported == B_FALSE)
		return false;

	return true;
}

int
sfc_mae_counter_fw_rsrc_enable(struct sfc_adapter *sa,
			       struct sfc_mae_counter *counterp)
{
	struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
	struct sfc_mae_counter_records *counters;
	struct sfc_mae_counter_record *p;
	efx_counter_t mae_counter;
	uint32_t generation_count;
	uint32_t unused;
	int rc;

	switch (counterp->type) {
	case EFX_COUNTER_TYPE_ACTION:
		counters = &reg->action_counters;
		break;
	case EFX_COUNTER_TYPE_CONNTRACK:
		counters = &reg->conntrack_counters;
		break;
	default:
		rc = EINVAL;
		goto fail_counter_type_check;
	}

	/*
	 * The actual count of counters allocated is ignored since a failure
	 * to allocate a single counter is indicated by a non-zero return code.
	 */
	rc = efx_mae_counters_alloc_type(sa->nic, counterp->type, 1, &unused,
					 &mae_counter, &generation_count);
	if (rc != 0) {
		sfc_err(sa, "failed to alloc MAE counter: %s",
			rte_strerror(rc));
		goto fail_mae_counter_alloc;
	}

	if (mae_counter.id >= counters->n_mae_counters) {
		/*
		 * The counter ID is expected to be in the range from 0 to
		 * the maximum count of counters so that it always fits into
		 * the array pre-allocated for the maximum counter ID.
		 */
		sfc_err(sa, "MAE counter ID is out of expected range");
		rc = EFAULT;
		goto fail_counter_id_range;
	}

	counterp->fw_rsrc.counter_id.id = mae_counter.id;

	p = &counters->mae_counters[mae_counter.id];

	/*
	 * Ordering is relaxed since it is the only operation on the counter
	 * value and it does not depend on other stores/loads in different
	 * threads. Paired with relaxed ordering in the counter increment.
	 */
	__atomic_store(&p->reset.pkts_bytes.int128,
		       &p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
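	/*
	 * Remember the generation count reported by the allocation so that
	 * stale stream updates carrying this counter ID from before a
	 * reallocation can be detected and dropped in the increment path.
	 */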
	p->generation_count = generation_count;

	p->ft_switch_hit_counter = counterp->ft_switch_hit_counter;

	/*
	 * The flag is set at the very end of the add operation and reset
	 * at the beginning of the delete operation. Release ordering is
	 * paired with acquire ordering on the load in the counter
	 * increment operation.
	 */
	__atomic_store_n(&p->inuse, true, __ATOMIC_RELEASE);

	sfc_info(sa, "enabled MAE counter 0x%x-#%u with reset pkts=%" PRIu64
		 " bytes=%" PRIu64, counterp->type, mae_counter.id,
		 p->reset.pkts, p->reset.bytes);

	return 0;

fail_counter_id_range:
	(void)efx_mae_counters_free_type(sa->nic, counterp->type, 1, &unused,
					 &mae_counter, NULL);

fail_mae_counter_alloc:
fail_counter_type_check:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}

int
sfc_mae_counter_fw_rsrc_disable(struct sfc_adapter *sa,
				struct sfc_mae_counter *counter)
{
	struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
	efx_counter_t *mae_counter = &counter->fw_rsrc.counter_id;
	struct sfc_mae_counter_records *counters;
	struct sfc_mae_counter_record *p;
	uint32_t unused;
	int rc;

	switch (counter->type) {
	case EFX_COUNTER_TYPE_ACTION:
		counters = &reg->action_counters;
		break;
	case EFX_COUNTER_TYPE_CONNTRACK:
		counters = &reg->conntrack_counters;
		break;
	default:
		return EINVAL;
	}

	SFC_ASSERT(mae_counter->id < counters->n_mae_counters);
	/*
	 * The flag is set at the very end of the add operation and reset
	 * at the beginning of the delete operation. Release ordering is
	 * paired with acquire ordering on the load in the counter
	 * increment operation.
	 */
	p = &counters->mae_counters[mae_counter->id];
	__atomic_store_n(&p->inuse, false, __ATOMIC_RELEASE);

	rc = efx_mae_counters_free_type(sa->nic, counter->type, 1, &unused,
					mae_counter, NULL);
	if (rc != 0)
		sfc_err(sa, "failed to free MAE counter 0x%x-#%u: %s",
			counter->type, mae_counter->id, rte_strerror(rc));

	sfc_info(sa, "disabled MAE counter 0x%x-#%u with reset pkts=%" PRIu64
		 " bytes=%" PRIu64, counter->type, mae_counter->id,
		 p->reset.pkts, p->reset.bytes);

	/*
	 * Do this regardless of the efx_mae_counters_free() return value.
	 * If there is an error, the resulting resource leakage is bad, but
	 * nothing sensible can be done in this case.
	 */
	mae_counter->id = EFX_MAE_RSRC_ID_INVALID;

	return rc;
}

static void
sfc_mae_counter_increment(struct sfc_adapter *sa,
			  struct sfc_mae_counter_records *counters,
			  uint32_t mae_counter_id,
			  uint32_t generation_count,
			  uint64_t pkts, uint64_t bytes)
{
	struct sfc_mae_counter_record *p =
		&counters->mae_counters[mae_counter_id];
	struct sfc_mae_counters_xstats *xstats = &counters->xstats;
	union sfc_pkts_bytes cnt_val;
	bool inuse;

	/*
	 * Acquire ordering is paired with release ordering in counter add
	 * and delete operations.
	 */
	__atomic_load(&p->inuse, &inuse, __ATOMIC_ACQUIRE);
	if (!inuse) {
		/*
		 * There are two possible cases:
		 * 1) The counter has just been allocated; a too early
		 *    counter update cannot be processed properly.
		 * 2) A stale update of a counter that was freed and not
		 *    reallocated; there is no point in processing it.
		 */
		xstats->not_inuse_update++;
		return;
	}

	if (unlikely(generation_count < p->generation_count)) {
		/*
		 * It is a stale update for the reallocated counter
		 * (i.e., freed and the same ID allocated again).
		 */
		xstats->realloc_update++;
		return;
	}

	cnt_val.pkts = p->value.pkts + pkts;
	cnt_val.bytes = p->value.bytes + bytes;

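	/*
	 * The read-modify-write above is not atomic as a whole, but it is
	 * safe: counter values are only ever updated from a single polling
	 * context (the counter service or the dedicated polling thread),
	 * so there are no concurrent writers.
	 */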
	/*
	 * Ordering is relaxed since it is the only operation on the counter
	 * value and it does not depend on other stores/loads in different
	 * threads. Paired with relaxed ordering on counter reset.
	 */
	__atomic_store(&p->value.pkts_bytes,
		       &cnt_val.pkts_bytes, __ATOMIC_RELAXED);

	if (p->ft_switch_hit_counter != NULL) {
		uint64_t ft_switch_hit_counter;

		ft_switch_hit_counter = *p->ft_switch_hit_counter + pkts;
		__atomic_store_n(p->ft_switch_hit_counter, ft_switch_hit_counter,
				 __ATOMIC_RELAXED);
	}

	sfc_info(sa, "update MAE counter 0x%x-#%u: pkts+%" PRIu64 "=%" PRIu64
		 ", bytes+%" PRIu64 "=%" PRIu64, counters->type, mae_counter_id,
		 pkts, cnt_val.pkts, bytes, cnt_val.bytes);
}

static void
sfc_mae_parse_counter_packet(struct sfc_adapter *sa,
			     struct sfc_mae_counter_registry *counter_registry,
			     const struct rte_mbuf *m)
{
	struct sfc_mae_counter_records *counters;
	uint32_t generation_count;
	const efx_xword_t *hdr;
	const efx_oword_t *counters_data;
	unsigned int version;
	unsigned int id;
	unsigned int header_offset;
	unsigned int payload_offset;
	unsigned int counter_count;
	unsigned int required_len;
	unsigned int i;

	if (unlikely(m->nb_segs != 1)) {
		sfc_err(sa, "unexpectedly scattered MAE counters packet (%u segments)",
			m->nb_segs);
		return;
	}

	if (unlikely(m->data_len < ER_RX_SL_PACKETISER_HEADER_WORD_SIZE)) {
		sfc_err(sa, "too short MAE counters packet (%u bytes)",
			m->data_len);
		return;
	}

	/*
	 * The generation count is located in the Rx prefix in the USER_MARK
	 * field which is written into hash.fdir.hi field of an mbuf. See
	 * SF-123581-TC SmartNIC Datapath Offloads section 4.7.5 Counters.
	 */
	generation_count = m->hash.fdir.hi;

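	/*
	 * A rough sketch of the packet layout parsed below (field names and
	 * widths are defined in efx_regs_counters_pkt_format.h; this comment
	 * is illustrative only): a header word carrying the version, the
	 * source identifier (AR for action rules, CT for conntrack), the
	 * header and payload offsets and the counter count, followed by an
	 * array of per-counter payload words, each carrying a counter index
	 * and split LO/HI packet and byte counts.
	 */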
	hdr = rte_pktmbuf_mtod(m, const efx_xword_t *);

	version = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_VERSION);
	if (unlikely(version != ERF_SC_PACKETISER_HEADER_VERSION_2)) {
		sfc_err(sa, "unexpected MAE counters packet version %u",
			version);
		return;
	}

	id = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_IDENTIFIER);

	switch (id) {
	case ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR:
		counters = &counter_registry->action_counters;
		break;
	case ERF_SC_PACKETISER_HEADER_IDENTIFIER_CT:
		counters = &counter_registry->conntrack_counters;
		break;
	default:
		sfc_err(sa, "unexpected MAE counters source identifier %u", id);
		return;
	}

	/* Packet layout definitions in fact assume a fixed header offset */
	header_offset =
		EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_HEADER_OFFSET);
	if (unlikely(header_offset !=
		     ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT)) {
		sfc_err(sa, "unexpected MAE counters packet header offset %u",
			header_offset);
		return;
	}

	payload_offset =
		EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET);

	counter_count = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_COUNT);

	required_len = payload_offset +
		       counter_count * sizeof(counters_data[0]);
	if (unlikely(required_len > m->data_len)) {
		sfc_err(sa, "truncated MAE counters packet: %u counters, packet length is %u vs %u required",
			counter_count, m->data_len, required_len);
		/*
		 * In theory it is possible to process the available counters
		 * data, but such a condition is really unexpected and it is
		 * better to treat the entire packet as corrupted.
		 */
		return;
	}

	/* Ensure that counters data is 32-bit aligned */
	if (unlikely(payload_offset % sizeof(uint32_t) != 0)) {
		sfc_err(sa, "unsupported MAE counters payload offset %u, must be 32-bit aligned",
			payload_offset);
		return;
	}
	RTE_BUILD_BUG_ON(sizeof(counters_data[0]) !=
			 ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE);

	counters_data =
		rte_pktmbuf_mtod_offset(m, const efx_oword_t *, payload_offset);

	sfc_info(sa, "update %u MAE counters with gc=%u",
		 counter_count, generation_count);

	for (i = 0; i < counter_count; ++i) {
		uint32_t packet_count_lo;
		uint32_t packet_count_hi;
		uint32_t byte_count_lo;
		uint32_t byte_count_hi;

		/*
		 * Use 32-bit field accessors below since counters data
		 * is not 64-bit aligned.
		 * 32-bit alignment is checked above taking into account
		 * that start of packet data is 32-bit aligned
		 * (cache-line size aligned in fact).
		 */
		packet_count_lo =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO);
		packet_count_hi =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_HI);
		byte_count_lo =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO);
		byte_count_hi =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_HI);

		if (id == ERF_SC_PACKETISER_HEADER_IDENTIFIER_CT) {
			/*
			 * FIXME:
			 *
			 * CT counters are 1-bit saturating counters.
			 * There is no way to express this in DPDK
			 * currently, so increment the hit count
			 * by one to let the application know
			 * that the flow is still effective.
			 */
			packet_count_lo = 1;
			packet_count_hi = 0;
			byte_count_lo = 0;
			byte_count_hi = 0;
		}

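		/*
		 * Recombine the split LO/HI fields into full counter
		 * values; the LO and HI field widths sum to the 48-bit
		 * counter width mentioned in the polling thread comment
		 * below.
		 */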
		sfc_mae_counter_increment(sa,
			counters,
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX),
			generation_count,
			(uint64_t)packet_count_lo |
			((uint64_t)packet_count_hi <<
			 ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO_WIDTH),
			(uint64_t)byte_count_lo |
			((uint64_t)byte_count_hi <<
			 ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO_WIDTH));
	}
}

static int32_t
sfc_mae_counter_poll_packets(struct sfc_adapter *sa)
{
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	struct rte_mbuf *mbufs[SFC_MAE_COUNTER_RX_BURST];
	unsigned int pushed_diff;
	unsigned int pushed;
	unsigned int i;
	uint16_t n;
	int rc;

	n = counter_registry->rx_pkt_burst(counter_registry->rx_dp, mbufs,
					   SFC_MAE_COUNTER_RX_BURST);

	for (i = 0; i < n; i++)
		sfc_mae_parse_counter_packet(sa, counter_registry, mbufs[i]);

	rte_pktmbuf_free_bulk(mbufs, n);

	if (!counter_registry->use_credits)
		return n;

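	/*
	 * The counter stream uses credit-based flow control: the firmware
	 * only sends more counter update packets once the host reports
	 * refilled Rx buffers back as credits.
	 */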
	pushed = sfc_rx_get_pushed(sa, counter_registry->rx_dp);
	pushed_diff = pushed - counter_registry->pushed_n_buffers;

	if (pushed_diff >= SFC_COUNTER_RXQ_REFILL_LEVEL) {
		rc = efx_mae_counters_stream_give_credits(sa->nic, pushed_diff);
		if (rc == 0) {
			counter_registry->pushed_n_buffers = pushed;
		} else {
			/*
			 * FIXME: counters might be important for the
			 * application. Handle the error in order to recover
			 * from the failure
			 */
			SFC_GENERIC_LOG(DEBUG, "Give credits failed: %s",
					rte_strerror(rc));
		}
	}

	return n;
}

static int32_t
sfc_mae_counter_service_routine(void *arg)
{
	struct sfc_adapter *sa = arg;

	/*
	 * We cannot propagate any errors and we don't need to know
	 * the number of packets we've received.
	 */
	(void)sfc_mae_counter_poll_packets(sa);

	return 0;
}

static uint32_t
sfc_mae_counter_thread(void *data)
{
	struct sfc_adapter *sa = data;
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	int32_t rc;

	while (__atomic_load_n(&counter_registry->polling.thread.run,
			       __ATOMIC_ACQUIRE)) {
		rc = sfc_mae_counter_poll_packets(sa);
		if (rc == 0) {
			/*
			 * The queue is empty. Do not burn CPU.
			 * An empty queue has just enough space for about
			 * SFC_MAE_COUNTERS_RXQ_SPACE counter updates which is
			 * more than 100K, so we can sleep a bit. The queue uses
			 * a credit-based flow control anyway, so the firmware
			 * will not enqueue more counter updates until the host
			 * supplies it with additional credits. The counters are
			 * 48 bits wide, so the timeout need only be short
			 * enough to ensure that the counter values do not
			 * overflow before the next counter update. Also we
			 * should not delay counter updates for a long time,
			 * otherwise the application may decide that the flow
			 * is idle and should be removed.
			 */
			rte_delay_ms(1);
		}
	}

	return 0;
}

static void
sfc_mae_counter_service_unregister(struct sfc_adapter *sa)
{
	struct sfc_mae_counter_registry *registry =
		&sa->mae.counter_registry;
	const unsigned int wait_ms = 10000;
	unsigned int i;

	rte_service_runstate_set(registry->polling.service.id, 0);
	rte_service_component_runstate_set(registry->polling.service.id, 0);

	/*
	 * Wait for the counter routine to finish the last iteration.
	 * Give up on timeout.
	 */
	for (i = 0; i < wait_ms; i++) {
		if (rte_service_may_be_active(registry->polling.service.id) == 0)
			break;

		rte_delay_ms(1);
	}
	if (i == wait_ms)
		sfc_warn(sa, "failed to wait for counter service to stop");

	rte_service_map_lcore_set(registry->polling.service.id,
				  registry->polling.service.core_id, 0);

	rte_service_component_unregister(registry->polling.service.id);
}

static struct sfc_rxq_info *
sfc_counter_rxq_info_get(struct sfc_adapter *sa)
{
	return &sfc_sa2shared(sa)->rxq_info[sa->counter_rxq.sw_index];
}

static void
sfc_mae_counter_registry_prepare(struct sfc_mae_counter_registry *registry,
				 struct sfc_adapter *sa,
				 uint32_t counter_stream_flags)
{
	registry->rx_pkt_burst = sa->eth_dev->rx_pkt_burst;
	registry->rx_dp = sfc_counter_rxq_info_get(sa)->dp;
	registry->pushed_n_buffers = 0;
	registry->use_credits = counter_stream_flags &
		EFX_MAE_COUNTERS_STREAM_OUT_USES_CREDITS;
}

static int
sfc_mae_counter_service_register(struct sfc_adapter *sa,
				 uint32_t counter_stream_flags)
{
	struct rte_service_spec service;
	char counter_service_name[sizeof(service.name)] = "counter_service";
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	uint32_t cid;
	uint32_t sid;
	int rc;

	sfc_log_init(sa, "entry");

	/* Prepare service info */
	memset(&service, 0, sizeof(service));
	rte_strscpy(service.name, counter_service_name, sizeof(service.name));
	service.socket_id = sa->socket_id;
	service.callback = sfc_mae_counter_service_routine;
	service.callback_userdata = sa;
	sfc_mae_counter_registry_prepare(counter_registry, sa,
					 counter_stream_flags);

	cid = sfc_get_service_lcore(sa->socket_id);
	if (cid == RTE_MAX_LCORE && sa->socket_id != SOCKET_ID_ANY) {
		/* Warn and try to allocate on any NUMA node */
		sfc_warn(sa,
			"failed to get service lcore for counter service at socket %d",
			sa->socket_id);

		cid = sfc_get_service_lcore(SOCKET_ID_ANY);
	}
	if (cid == RTE_MAX_LCORE) {
		rc = ENOTSUP;
		sfc_err(sa, "failed to get service lcore for counter service");
		goto fail_get_service_lcore;
	}

	/* Service core may be in "stopped" state, start it */
	rc = rte_service_lcore_start(cid);
	if (rc != 0 && rc != -EALREADY) {
		sfc_err(sa, "failed to start service core for counter service: %s",
			rte_strerror(-rc));
		rc = ENOTSUP;
		goto fail_start_core;
	}

	/* Register counter service */
	rc = rte_service_component_register(&service, &sid);
	if (rc != 0) {
		rc = ENOEXEC;
		sfc_err(sa, "failed to register counter service component");
		goto fail_register;
	}

	/* Map the service with the service core */
	rc = rte_service_map_lcore_set(sid, cid, 1);
	if (rc != 0) {
		rc = -rc;
		sfc_err(sa, "failed to map lcore for counter service: %s",
			rte_strerror(rc));
		goto fail_map_lcore;
	}

	/* Run the service */
	rc = rte_service_component_runstate_set(sid, 1);
	if (rc < 0) {
		rc = -rc;
		sfc_err(sa, "failed to run counter service component: %s",
			rte_strerror(rc));
		goto fail_component_runstate_set;
	}
	rc = rte_service_runstate_set(sid, 1);
	if (rc < 0) {
		rc = -rc;
		sfc_err(sa, "failed to run counter service");
		goto fail_runstate_set;
	}

	counter_registry->polling_mode = SFC_MAE_COUNTER_POLLING_SERVICE;
	counter_registry->polling.service.core_id = cid;
	counter_registry->polling.service.id = sid;

	sfc_log_init(sa, "done");

	return 0;

fail_runstate_set:
	rte_service_component_runstate_set(sid, 0);

fail_component_runstate_set:
	rte_service_map_lcore_set(sid, cid, 0);

fail_map_lcore:
	rte_service_component_unregister(sid);

fail_register:
fail_start_core:
fail_get_service_lcore:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}

static void
sfc_mae_counter_thread_stop(struct sfc_adapter *sa)
{
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	int rc;

	/* Ensure that the flag is set before attempting to join the thread */
	__atomic_store_n(&counter_registry->polling.thread.run, false,
			 __ATOMIC_RELEASE);

	rc = rte_thread_join(counter_registry->polling.thread.id, NULL);
	if (rc != 0)
		sfc_err(sa, "failed to join the MAE counter polling thread");

	counter_registry->polling_mode = SFC_MAE_COUNTER_POLLING_OFF;
}

static int
sfc_mae_counter_thread_spawn(struct sfc_adapter *sa,
			     uint32_t counter_stream_flags)
{
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_mae_counter_registry_prepare(counter_registry, sa,
					 counter_stream_flags);

	counter_registry->polling_mode = SFC_MAE_COUNTER_POLLING_THREAD;
	counter_registry->polling.thread.run = true;

	rc = rte_thread_create_internal_control(&sa->mae.counter_registry.polling.thread.id,
						"sfc-maecnt", sfc_mae_counter_thread, sa);

	return rc;
}

int
sfc_mae_counters_init(struct sfc_mae_counter_records *counters,
		      uint32_t nb_counters_max)
{
	int rc;

	SFC_GENERIC_LOG(DEBUG, "%s: entry", __func__);

	counters->mae_counters = rte_zmalloc("sfc_mae_counters",
		sizeof(*counters->mae_counters) * nb_counters_max, 0);
	if (counters->mae_counters == NULL) {
		rc = ENOMEM;
		SFC_GENERIC_LOG(ERR, "%s: failed: %s", __func__,
				rte_strerror(rc));
		return rc;
	}

	counters->n_mae_counters = nb_counters_max;

	SFC_GENERIC_LOG(DEBUG, "%s: done", __func__);

	return 0;
}

void
sfc_mae_counters_fini(struct sfc_mae_counter_records *counters)
{
	rte_free(counters->mae_counters);
	counters->mae_counters = NULL;
}

int
sfc_mae_counter_rxq_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	char name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;
	unsigned int n_elements;
	unsigned int cache_size;
	/* The mempool is internal and private area is not required */
	const uint16_t priv_size = 0;
	const uint16_t data_room_size = RTE_PKTMBUF_HEADROOM +
		SFC_MAE_COUNTER_STREAM_PACKET_SIZE;
	int rc;

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return 0;
	}

	/*
	 * At least one element in the ring is always unused to distinguish
	 * between empty and full ring cases.
	 */
	n_elements = SFC_COUNTER_RXQ_RX_DESC_COUNT - 1;

	/*
	 * The cache must have sufficient space to put received buffers
	 * before they are reused on refill.
	 */
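	/*
	 * For illustration only (the constants are defined elsewhere, so
	 * the numbers here are assumptions): with a refill level of 64 and
	 * a burst of 32, the sum minus one is 95 and the cache is sized to
	 * the next power of two, 128.
	 */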
	cache_size = rte_align32pow2(SFC_COUNTER_RXQ_REFILL_LEVEL +
				     SFC_MAE_COUNTER_RX_BURST - 1);

	if (snprintf(name, sizeof(name), "counter_rxq-pool-%u", sas->port_id) >=
	    (int)sizeof(name)) {
		sfc_err(sa, "failed: counter RxQ mempool name is too long");
		rc = ENAMETOOLONG;
		goto fail_long_name;
	}

	/*
	 * It could be a single-producer single-consumer ring mempool which
	 * requires minimal barriers. However, the cache size and the
	 * refill/burst policy are aligned, therefore it does not matter
	 * which mempool backend is chosen since the backend is unused.
	 */
	mp = rte_pktmbuf_pool_create(name, n_elements, cache_size,
				     priv_size, data_room_size, sa->socket_id);
	if (mp == NULL) {
		sfc_err(sa, "failed to create counter RxQ mempool");
		rc = rte_errno;
		goto fail_mp_create;
	}

	sa->counter_rxq.sw_index = sfc_counters_rxq_sw_index(sas);
	sa->counter_rxq.mp = mp;
	sa->counter_rxq.state |= SFC_COUNTER_RXQ_ATTACHED;

	sfc_log_init(sa, "done");

	return 0;

fail_mp_create:
fail_long_name:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}

void
sfc_mae_counter_rxq_detach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
		sfc_log_init(sa, "counter queue is not attached - skip");
		return;
	}

	rte_mempool_free(sa->counter_rxq.mp);
	sa->counter_rxq.mp = NULL;
	sa->counter_rxq.state &= ~SFC_COUNTER_RXQ_ATTACHED;

	sfc_log_init(sa, "done");
}

int
sfc_mae_counter_rxq_init(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const struct rte_eth_rxconf rxconf = {
		.rx_free_thresh = SFC_COUNTER_RXQ_REFILL_LEVEL,
		.rx_drop_en = 1,
	};
	uint16_t nb_rx_desc = SFC_COUNTER_RXQ_RX_DESC_COUNT;
	int rc;

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return 0;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
		sfc_log_init(sa, "counter queue is not attached - skip");
		return 0;
	}

	nb_rx_desc = RTE_MIN(nb_rx_desc, sa->rxq_max_entries);
	nb_rx_desc = RTE_MAX(nb_rx_desc, sa->rxq_min_entries);

	rc = sfc_rx_qinit_info(sa, sa->counter_rxq.sw_index,
			       EFX_RXQ_FLAG_USER_MARK);
	if (rc != 0)
		goto fail_counter_rxq_init_info;

	rc = sfc_rx_qinit(sa, sa->counter_rxq.sw_index, nb_rx_desc,
			  sa->socket_id, &rxconf, sa->counter_rxq.mp);
	if (rc != 0) {
		sfc_err(sa, "failed to init counter RxQ");
		goto fail_counter_rxq_init;
	}

	sa->counter_rxq.state |= SFC_COUNTER_RXQ_INITIALIZED;

	sfc_log_init(sa, "done");

	return 0;

fail_counter_rxq_init:
fail_counter_rxq_init_info:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}

void
sfc_mae_counter_rxq_fini(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
		sfc_log_init(sa, "counter queue is not initialized - skip");
		return;
	}

	sfc_rx_qfini(sa, sa->counter_rxq.sw_index);

	sfc_log_init(sa, "done");
}

void
sfc_mae_counter_stop(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;

	sfc_log_init(sa, "entry");

	if (!mae->counter_rxq_running) {
		sfc_log_init(sa, "counter queue is not running - skip");
		return;
	}

	SFC_ASSERT(mae->counter_registry.polling_mode !=
		   SFC_MAE_COUNTER_POLLING_OFF);

	if (mae->counter_registry.polling_mode ==
	    SFC_MAE_COUNTER_POLLING_SERVICE)
		sfc_mae_counter_service_unregister(sa);
	else
		sfc_mae_counter_thread_stop(sa);

	efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);

	mae->counter_rxq_running = false;

	sfc_log_init(sa, "done");
}

int
sfc_mae_counter_start(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	uint32_t flags;
	int rc;

	SFC_ASSERT(sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED);

	if (mae->counter_rxq_running)
		return 0;

	sfc_log_init(sa, "entry");

	rc = efx_mae_counters_stream_start(sa->nic, sa->counter_rxq.sw_index,
					   SFC_MAE_COUNTER_STREAM_PACKET_SIZE,
					   0 /* No flags required */, &flags);
	if (rc != 0) {
		sfc_err(sa, "failed to start MAE counters stream: %s",
			rte_strerror(rc));
		goto fail_counter_stream;
	}

	sfc_log_init(sa, "stream start flags: 0x%x", flags);

	if (sfc_mae_counter_get_service_lcore(sa) != RTE_MAX_LCORE) {
		rc = sfc_mae_counter_service_register(sa, flags);
		if (rc != 0)
			goto fail_service_register;
	} else {
		rc = sfc_mae_counter_thread_spawn(sa, flags);
		if (rc != 0)
			goto fail_thread_spawn;
	}

	mae->counter_rxq_running = true;

	return 0;

fail_service_register:
fail_thread_spawn:
	efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);

fail_counter_stream:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}

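/*
 * Query a counter on behalf of the flow API COUNT action query
 * (struct rte_flow_query_count). For tunnel offload (ft) counters,
 * the hit count combines the MAE counter with the switch hit counter
 * of the tunnel context, and byte counts are not reported.
 */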
int
sfc_mae_counter_get(struct sfc_adapter *sa,
		    const struct sfc_mae_counter *counter,
		    struct rte_flow_query_count *data)
{
	struct sfc_ft_ctx *ft_ctx = counter->ft_ctx;
	struct sfc_mae_counter_records *counters;
	uint64_t non_reset_tunnel_hit_counter;
	struct sfc_mae_counter_record *p;
	union sfc_pkts_bytes value;
	bool need_byte_count;

	switch (counter->type) {
	case EFX_COUNTER_TYPE_ACTION:
		counters = &sa->mae.counter_registry.action_counters;
		need_byte_count = true;
		break;
	case EFX_COUNTER_TYPE_CONNTRACK:
		counters = &sa->mae.counter_registry.conntrack_counters;
		need_byte_count = false;
		break;
	default:
		return EINVAL;
	}

	SFC_ASSERT(counter->fw_rsrc.counter_id.id < counters->n_mae_counters);
	p = &counters->mae_counters[counter->fw_rsrc.counter_id.id];

	/*
	 * Ordering is relaxed since it is the only operation on the counter
	 * value and it does not depend on other stores/loads in different
	 * threads. Paired with relaxed ordering in the counter increment.
	 */
	value.pkts_bytes.int128 = __atomic_load_n(&p->value.pkts_bytes.int128,
						  __ATOMIC_RELAXED);

	data->hits_set = 1;
	data->hits = value.pkts - p->reset.pkts;

	if (ft_ctx != NULL) {
		data->hits += ft_ctx->switch_hit_counter;
		non_reset_tunnel_hit_counter = data->hits;
		data->hits -= ft_ctx->reset_tunnel_hit_counter;
	} else if (need_byte_count) {
		data->bytes_set = 1;
		data->bytes = value.bytes - p->reset.bytes;
	}

	if (data->reset != 0) {
		if (ft_ctx != NULL) {
			ft_ctx->reset_tunnel_hit_counter =
				non_reset_tunnel_hit_counter;
		} else {
			p->reset.pkts = value.pkts;

			if (need_byte_count)
				p->reset.bytes = value.bytes;
		}
	}

	return 0;
}

bool
sfc_mae_counter_stream_enabled(struct sfc_adapter *sa)
{
	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0 ||
	    sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
		return B_FALSE;
	else
		return B_TRUE;
}