/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include <stdbool.h>

#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_launch.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_service.h>
#include <rte_service_component.h>

#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64

struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
} __rte_cache_aligned;

typedef int (*worker_loop)(void *);
typedef void (*schedule_loop)(unsigned int);
typedef int (*eventdev_setup)(struct worker_data *);
typedef void (*adapter_setup)(uint16_t nb_ports);
typedef void (*opt_check)(void);

struct setup_data {
	worker_loop worker;
	schedule_loop scheduler;
	eventdev_setup evdev_setup;
	adapter_setup adptr_setup;
	opt_check check_opt;
};

struct fastpath_data {
	volatile int done;
	uint32_t evdev_service_id;
	uint32_t rxadptr_service_id;
	uint32_t txadptr_service_id;
	bool rx_single;
	bool tx_single;
	bool sched_single;
	uint64_t rx_core[MAX_NUM_CORE];
	uint64_t tx_core[MAX_NUM_CORE];
	uint64_t sched_core[MAX_NUM_CORE];
	uint64_t worker_core[MAX_NUM_CORE];
	struct setup_data cap;
} __rte_cache_aligned;

struct config_data {
	unsigned int active_cores;
	unsigned int num_workers;
	int64_t num_packets;
	uint64_t num_mbuf;
	unsigned int num_fids;
	int queue_type;
	int worker_cycles;
	int enable_queue_priorities;
	int quiet;
	int dump_dev;
	int dump_dev_signal;
	int all_type_queues;
	unsigned int num_stages;
	unsigned int worker_cq_depth;
	unsigned int rx_stride;
	/* Use rx stride value to reduce congestion in entry queue when using
	 * multiple eth ports by forming multiple event queue pipelines.
	 */
	int16_t next_qid[MAX_NUM_STAGES+2];
	int16_t qid[MAX_NUM_STAGES];
	uint8_t rx_adapter_id;
	uint8_t tx_adapter_id;
	uint8_t tx_queue_id;
	uint64_t worker_lcore_mask;
	uint64_t rx_lcore_mask;
	uint64_t tx_lcore_mask;
	uint64_t sched_lcore_mask;
};

struct port_link {
	uint8_t queue_id;
	uint8_t priority;
};

extern struct fastpath_data *fdata;
extern struct config_data cdata;

static __rte_always_inline void
exchange_mac(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth;
	struct rte_ether_addr addr;

	/* swap source and destination mac addresses on the packet
	 * (to touch the mbuf data)
	 */
	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	rte_ether_addr_copy(&eth->dst_addr, &addr);
	rte_ether_addr_copy(&eth->src_addr, &eth->dst_addr);
	rte_ether_addr_copy(&addr, &eth->src_addr);
}

static __rte_always_inline void
work(void)
{
	/* do a number of cycles of work per packet */
	volatile uint64_t start_tsc = rte_rdtsc();
	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
		rte_pause();
}

/* Run any Rx adapter, event scheduler, or Tx adapter service mapped to this
 * lcore. The second argument to rte_service_run_iter_on_app_lcore() requests
 * atomic serialization when more than one lcore may run the same service.
 */
static __rte_always_inline void
schedule_devices(unsigned int lcore_id)
{
	if (fdata->rx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
				!fdata->rx_single);
	}

	if (fdata->sched_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
				!fdata->sched_single);
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
	}

	if (fdata->tx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->txadptr_service_id,
				!fdata->tx_single);
	}
}

void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);
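
/*
 * Illustrative sketch (not part of the upstream header): a minimal worker
 * of the kind plugged into setup_data.worker via the worker_loop typedef
 * above. It dequeues a burst of events, swaps MAC addresses and burns the
 * configured per-packet cycles, then forwards each event to the next
 * pipeline queue via cdata.next_qid[]. The name example_worker is
 * hypothetical and the event handling is simplified (e.g. sched_type is
 * left untouched); see the real worker_generic/worker_tx_enq
 * implementations for full detail.
 */
static __rte_unused int
example_worker(void *arg)
{
	struct worker_data *w = arg;	/* event dev and port for this lcore */
	struct rte_event evts[BATCH_SIZE];

	while (!fdata->done) {
		uint16_t i, n, sent;

		/* also run any Rx/Tx/scheduler service mapped to this lcore */
		schedule_devices(rte_lcore_id());

		n = rte_event_dequeue_burst(w->dev_id, w->port_id, evts,
				BATCH_SIZE, 0);
		if (n == 0)
			continue;

		for (i = 0; i < n; i++) {
			exchange_mac(evts[i].mbuf);
			work();
			/* advance the event to the next stage's queue */
			evts[i].queue_id = cdata.next_qid[evts[i].queue_id];
			evts[i].op = RTE_EVENT_OP_FORWARD;
		}

		/* retry until the whole burst is enqueued */
		sent = rte_event_enqueue_burst(w->dev_id, w->port_id, evts, n);
		while (sent < n && !fdata->done)
			sent += rte_event_enqueue_burst(w->dev_id, w->port_id,
					evts + sent, n - sent);
	}
	return 0;
}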