/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include <stdbool.h>

#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_launch.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_service.h>
#include <rte_service_component.h>
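
/* Pipeline sizing: maximum stages per pipeline, events per dequeue burst,
 * and the number of lcores the per-core arrays below can track.
 */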
#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64
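
/* Per-worker context: the event device and event port served by a worker. */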
struct __rte_cache_aligned worker_data {
	uint8_t dev_id;
	uint8_t port_id;
};
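
/* Function-pointer types that parameterize a worker model. */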
typedef int (*worker_loop)(void *);
typedef void (*schedule_loop)(unsigned int);
typedef int (*eventdev_setup)(struct worker_data *);
typedef void (*adapter_setup)(uint16_t nb_ports);
typedef void (*opt_check)(void);
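
/* Entry points selected at startup to match the eventdev and adapter
 * capabilities: worker loop, scheduler loop, device and adapter setup,
 * and option validation.
 */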
struct setup_data {
	worker_loop worker;
	schedule_loop scheduler;
	eventdev_setup evdev_setup;
	adapter_setup adptr_setup;
	opt_check check_opt;
};
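
/* Fast-path state shared by all lcores: the service ids to run, whether
 * each role runs on a single core, and the per-role lcore assignments.
 */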
struct __rte_cache_aligned fastpath_data {
	volatile int done;
	uint32_t evdev_service_id;
	uint32_t rxadptr_service_id;
	uint32_t txadptr_service_id;
	bool rx_single;
	bool tx_single;
	bool sched_single;
	uint64_t rx_core[MAX_NUM_CORE];
	uint64_t tx_core[MAX_NUM_CORE];
	uint64_t sched_core[MAX_NUM_CORE];
	uint64_t worker_core[MAX_NUM_CORE];
	struct setup_data cap;
};
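
/* Application configuration, mostly parsed from the command line. */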
struct config_data {
	unsigned int active_cores;
	unsigned int num_workers;
	int64_t num_packets;
	uint64_t num_mbuf;
	unsigned int num_fids;
	int queue_type;
	int worker_cycles;
	int enable_queue_priorities;
	int quiet;
	int dump_dev;
	int dump_dev_signal;
	int all_type_queues;
	unsigned int num_stages;
	unsigned int worker_cq_depth;
	unsigned int rx_stride;
	/* The Rx stride spreads packets from multiple eth ports across
	 * separate event queue pipelines, reducing congestion in the
	 * entry queue.
	 */
	int16_t next_qid[MAX_NUM_STAGES+2];
	int16_t qid[MAX_NUM_STAGES];
	uint8_t rx_adapter_id;
	uint8_t tx_adapter_id;
	uint8_t tx_queue_id;
	uint64_t worker_lcore_mask;
	uint64_t rx_lcore_mask;
	uint64_t tx_lcore_mask;
	uint64_t sched_lcore_mask;
};
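
/* Describes one queue-to-port link and the priority of that link. */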
struct port_link {
	uint8_t queue_id;
	uint8_t priority;
};

extern struct fastpath_data *fdata;
extern struct config_data cdata;
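
/* Touch the packet's ethernet header: the destination MAC is read and
 * written back unchanged, which forces the mbuf data to be accessed.
 */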
static __rte_always_inline void
exchange_mac(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth;
	struct rte_ether_addr addr;

	/* change mac addresses on packet (to use mbuf data) */
	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	rte_ether_addr_copy(&eth->dst_addr, &addr);
	rte_ether_addr_copy(&addr, &eth->dst_addr);
}
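
/* Emulate per-packet work by spinning for cdata.worker_cycles TSC cycles. */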
static __rte_always_inline void
work(void)
{
	/* do a number of cycles of work per packet */
	volatile uint64_t start_tsc = rte_rdtsc();
	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
		rte_pause();
}
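
/* Run whichever service(s) this lcore has been assigned: the Rx adapter,
 * the eventdev scheduler (plus any requested device dump) and the Tx
 * adapter. The second argument serializes the call when the service is
 * shared between multiple lcores.
 */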
static __rte_always_inline void
schedule_devices(unsigned int lcore_id)
{
	if (fdata->rx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
				!fdata->rx_single);
	}

	if (fdata->sched_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
				!fdata->sched_single);
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
	}

	if (fdata->tx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->txadptr_service_id,
				!fdata->tx_single);
	}
}
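
/*
 * Illustrative sketch (not part of this header) of how a worker model is
 * expected to combine schedule_devices() with event I/O; dev_id and
 * port_id come from the lcore's struct worker_data, and the processing
 * step is elided:
 *
 *	while (!fdata->done) {
 *		struct rte_event ev[BATCH_SIZE];
 *		uint16_t nb;
 *
 *		schedule_devices(rte_lcore_id());
 *		nb = rte_event_dequeue_burst(dev_id, port_id, ev,
 *				BATCH_SIZE, 0);
 *		... process ev[0..nb) and enqueue it to the next stage ...
 *	}
 */

/* Flush callback for rte_event_port_quiesce(): returns a flushed event's
 * buffer to the mempool supplied through args.
 */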
static void
event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
		void *args)
{
	rte_mempool_put(args, ev.event_ptr);
}
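
/* Shutdown helper: free the mbufs of events that were dequeued but not
 * re-enqueued, release every dequeued event back to the device, then
 * quiesce the port so the driver flushes whatever it still holds.
 */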
static inline void
worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
		uint16_t nb_enq, uint16_t nb_deq)
{
	int i;

	if (nb_deq == nb_enq)
		return;

	if (nb_deq) {
		for (i = nb_enq; i < nb_deq; i++) {
			if (events[i].op == RTE_EVENT_OP_RELEASE)
				continue;
			rte_pktmbuf_free(events[i].mbuf);
		}

		for (i = 0; i < nb_deq; i++)
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
	}

	rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
}
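
/* Fill in the setup_data callbacks for the two worker models (generic and
 * Tx enqueue); burst selects the burst-mode event I/O variants.
 */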
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);