Searched refs:flow (Results 1 – 25 of 119) sorted by relevance

/dpdk/examples/qos_sched/
main.c
52 struct flow_conf *flow = &qos_conf[i]; in app_main_loop() local
54 if (flow->rx_core == lcore_id) { in app_main_loop()
55 flow->rx_thread.rx_port = flow->rx_port; in app_main_loop()
56 flow->rx_thread.rx_ring = flow->rx_ring; in app_main_loop()
57 flow->rx_thread.rx_queue = flow->rx_queue; in app_main_loop()
58 flow->rx_thread.sched_port = flow->sched_port; in app_main_loop()
60 rx_confs[rx_idx++] = &flow->rx_thread; in app_main_loop()
64 if (flow->tx_core == lcore_id) { in app_main_loop()
65 flow->tx_thread.tx_port = flow->tx_port; in app_main_loop()
66 flow->tx_thread.tx_ring = flow->tx_ring; in app_main_loop()
[all …]
/dpdk/drivers/net/cnxk/
cnxk_eswitch_flow.c
21 struct roc_npc_flow *flow, *tvar; in cnxk_eswitch_flow_rules_remove_list() local
24 RTE_TAILQ_FOREACH_SAFE(flow, list, next, tvar) { in cnxk_eswitch_flow_rules_remove_list()
25 plt_esw_dbg("Removing flow %d", flow->mcam_id); in cnxk_eswitch_flow_rules_remove_list()
26 rc = roc_eswitch_npc_mcam_delete_rule(&eswitch_dev->npc, flow, in cnxk_eswitch_flow_rules_remove_list()
29 plt_err("Failed to delete rule %d", flow->mcam_id); in cnxk_eswitch_flow_rules_remove_list()
30 rc = roc_npc_mcam_free(&eswitch_dev->npc, flow); in cnxk_eswitch_flow_rules_remove_list()
32 plt_err("Failed to free entry %d", flow->mcam_id); in cnxk_eswitch_flow_rules_remove_list()
33 TAILQ_REMOVE(list, flow, next); in cnxk_eswitch_flow_rules_remove_list()
34 rte_free(flow); in cnxk_eswitch_flow_rules_remove_list()
41 eswitch_npc_vlan_rss_configure(struct roc_npc *roc_npc, struct roc_npc_flow *flow) in eswitch_npc_vlan_rss_configure() argument
[all …]
cn9k_flow.c
17 struct roc_npc_flow *flow; in cn9k_flow_create() local
21 flow = cnxk_flow_create(eth_dev, attr, pattern, actions, error); in cn9k_flow_create()
22 if (!flow) in cn9k_flow_create()
39 return (struct rte_flow *)flow; in cn9k_flow_create()
46 struct roc_npc_flow *flow = (struct roc_npc_flow *)rte_flow; in cn9k_flow_destroy() local
53 match_id = (flow->npc_action >> NPC_RX_ACT_MATCH_OFFSET) & in cn9k_flow_destroy()
65 if (flow->nix_intf == ROC_NPC_INTF_RX) { in cn9k_flow_destroy()
75 return cnxk_flow_destroy(eth_dev, flow, error); in cn9k_flow_destroy()
cnxk_rep_flow.c
274 process_flow_destroy(struct cnxk_rep_dev *rep_dev, void *flow, cnxk_rep_msg_ack_data_t *adata) in process_flow_destroy() argument
296 msg_fd_meta.flow = (uint64_t)flow; in process_flow_destroy()
297 plt_rep_dbg("Flow Destroy: flow 0x%" PRIu64 ", portid %d", msg_fd_meta.flow, in process_flow_destroy()
346 process_flow_dump(struct cnxk_rep_dev *rep_dev, struct rte_flow *flow, FILE *file, in process_flow_dump() argument
365 msg_fp_meta.flow = (uint64_t)flow; in process_flow_dump()
368 plt_rep_dbg("Flow Dump: flow 0x%" PRIu64 ", portid %d stdout %d", msg_fp_meta.flow, in process_flow_dump()
427 process_flow_query(struct cnxk_rep_dev *rep_dev, struct rte_flow *flow, in process_flow_query() argument
461 msg_fq_meta->flow = (uint64_t)flow; in process_flow_query()
467 "action sz %d", msg_fq_meta->flow, msg_fq_meta->portid, action->type, total_sz, in process_flow_query()
546 struct roc_npc_flow *flow; in cnxk_rep_flow_create_native() local
[all …]
cn10k_flow.c
136 struct roc_npc_flow *flow; in cn10k_flow_create() local
203 flow = cnxk_flow_create(eth_dev, attr, pattern, actions, error); in cn10k_flow_create()
204 if (!flow) { in cn10k_flow_create()
227 return (struct rte_flow *)flow; in cn10k_flow_create()
250 struct roc_npc_flow *flow = (struct roc_npc_flow *)rte_flow; in cn10k_flow_destroy() local
259 match_id = (flow->npc_action >> NPC_RX_ACT_MATCH_OFFSET) & in cn10k_flow_destroy()
271 if (flow->nix_intf == ROC_NPC_INTF_RX) { in cn10k_flow_destroy()
281 if (cnxk_eth_macsec_sess_get_by_sess(dev, (void *)flow) != NULL) { in cn10k_flow_destroy()
282 rc = cnxk_mcs_flow_destroy(dev, (void *)flow); in cn10k_flow_destroy()
289 mtr_id = flow->mtr_id; in cn10k_flow_destroy()
[all …]
cnxk_flow.h
29 int cnxk_flow_destroy(struct rte_eth_dev *dev, struct roc_npc_flow *flow,
41 int cnxk_flow_destroy_common(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow,
44 int cnxk_flow_query_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
47 int cnxk_flow_dev_dump_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FILE *file,
/dpdk/drivers/common/cnxk/
roc_npc_mcam.c
546 npc_mcam_set_channel(struct roc_npc_flow *flow, struct npc_mcam_write_entry_req *req, in npc_mcam_set_channel() argument
553 flow->mcam_data[0] &= ~(GENMASK(11, 0)); in npc_mcam_set_channel()
554 flow->mcam_mask[0] &= ~(GENMASK(11, 0)); in npc_mcam_set_channel()
563 if (!(flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) { in npc_mcam_set_channel()
578 flow->mcam_data[0] |= (uint64_t)chan; in npc_mcam_set_channel()
579 flow->mcam_mask[0] |= (uint64_t)mask; in npc_mcam_set_channel()
583 npc_mcam_set_pf_func(struct npc *npc, struct roc_npc_flow *flow, uint16_t pf_func) in npc_mcam_set_pf_func() argument
594 flow_mcam_data = (uint8_t *)flow->mcam_data; in npc_mcam_set_pf_func()
595 flow_mcam_mask = (uint8_t *)flow->mcam_mask; in npc_mcam_set_pf_func()
622 npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, struct npc_parse_state *pst) in npc_mcam_alloc_and_write() argument
[all …]
roc_npc_utils.c
476 dump = &pst->flow->dump_data[pst->flow->num_patterns++]; in npc_update_parse_state()
484 npc_mcam_init(struct npc *npc, struct roc_npc_flow *flow, int mcam_id) in npc_mcam_init() argument
500 req->intf = (flow->nix_intf == NIX_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX; in npc_mcam_init()
502 req->entry_data.action = flow->npc_action; in npc_mcam_init()
503 req->entry_data.vtag_action = flow->vtag_action; in npc_mcam_init()
510 if (flow->nix_intf == NIX_INTF_RX) { in npc_mcam_init()
514 uint16_t pf_func = (flow->npc_action >> 4) & 0xffff; in npc_mcam_init()
582 from_mcam_id = curr->flow->mcam_id; in npc_slide_mcam_entries()
592 rc = npc_mcam_init(npc, curr->flow, to_mcam_id); in npc_slide_mcam_entries()
598 curr->flow->mcam_id = to_mcam_id; in npc_slide_mcam_entries()
[all …]
/dpdk/drivers/net/failsafe/
failsafe_flow.c
23 struct rte_flow *flow; in fs_flow_allocate() local
39 flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + ret, in fs_flow_allocate()
41 if (flow == NULL) { in fs_flow_allocate()
45 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule, in fs_flow_allocate()
51 rte_free(flow); in fs_flow_allocate()
54 return flow; in fs_flow_allocate()
58 fs_flow_release(struct rte_flow **flow) in fs_flow_release() argument
60 rte_free(*flow); in fs_flow_release()
61 *flow = NULL; in fs_flow_release()
101 struct rte_flow *flow; in fs_flow_create() local
[all …]
/dpdk/drivers/net/bonding/
rte_eth_bond_flow.c
21 struct rte_flow *flow; in bond_flow_alloc() local
37 flow = rte_zmalloc_socket(NULL, offsetof(struct rte_flow, rule) + ret, in bond_flow_alloc()
39 if (unlikely(flow == NULL)) { in bond_flow_alloc()
43 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule, in bond_flow_alloc()
49 rte_free(flow); in bond_flow_alloc()
52 return flow; in bond_flow_alloc()
56 bond_flow_release(struct rte_flow **flow) in bond_flow_release() argument
58 rte_free(*flow); in bond_flow_release()
59 *flow = NULL; in bond_flow_release()
91 struct rte_flow *flow; in bond_flow_create() local
[all …]
/dpdk/lib/acl/
acl_run_avx512_common.h
105 _F_(first_trans)(const struct acl_flow_avx512 *flow, _T_simd next_input, in _F_()
111 tr = (const int32_t *)(uintptr_t)flow->trans; in _F_()
114 root = _M_I_(set1_epi32)(flow->root_index); in _F_()
121 sizeof(flow->trans[0])); in _F_()
125 sizeof(flow->trans[0])); in _F_()
135 _F_(get_next_bytes)(const struct acl_flow_avx512 *flow, _T_simd pdata[2], in _F_()
142 div = (const int32_t *)flow->data_index; in _F_()
184 _F_(start_flow)(struct acl_flow_avx512 *flow, uint32_t num, uint32_t msk, in _F_()
201 flow->idata + flow->num_packets); in _F_()
203 flow->idata + flow->num_packets + n); in _F_()
[all …]
acl_run_avx512.c
22 acl_set_flow_avx512(struct acl_flow_avx512 *flow, const struct rte_acl_ctx *ctx, in acl_set_flow_avx512() argument
26 flow->num_packets = 0; in acl_set_flow_avx512()
27 flow->total_packets = total_packets; in acl_set_flow_avx512()
28 flow->first_load_sz = ctx->first_load_sz; in acl_set_flow_avx512()
29 flow->root_index = ctx->trie[trie].root_index; in acl_set_flow_avx512()
30 flow->trans = ctx->trans_table; in acl_set_flow_avx512()
31 flow->data_index = ctx->trie[trie].data_index; in acl_set_flow_avx512()
32 flow->idata = data; in acl_set_flow_avx512()
33 flow->matches = matches; in acl_set_flow_avx512()
40 update_flow_mask(const struct acl_flow_avx512 *flow, uint32_t *fmsk, in update_flow_mask() argument
[all …]
/dpdk/lib/ethdev/
ethdev_trace_points.c
497 lib.ethdev.flow.copy)
500 lib.ethdev.flow.create)
503 lib.ethdev.flow.destroy)
506 lib.ethdev.flow.update)
509 lib.ethdev.flow.flush)
512 lib.ethdev.flow.isolate)
515 lib.ethdev.flow.query)
518 lib.ethdev.flow.validate)
521 lib.ethdev.flow.conv)
527 lib.ethdev.flow
[all …]
/dpdk/drivers/net/sfc/
sfc_flow.h
198 struct rte_flow *flow,
202 struct rte_flow *flow);
205 struct rte_flow *flow);
208 struct rte_flow *flow);
211 struct rte_flow *flow);
214 struct rte_flow *flow,
225 int sfc_flow_destroy_locked(struct sfc_adapter *sa, struct rte_flow *flow,
/dpdk/drivers/net/tap/bpf/
README
2 across multiple queues if required by a flow action. The program is
3 loaded into the kernel when the first RSS flow rule is created and is never unloaded.
5 When flow rules are used with the TAP device, packets are first handled by the
7 The first stage is the flow based classifier (flower); for RSS queue
9 the skb mark to a key based on the flow id; the final stage
10 is this BPF program which then maps flow id and packet header
27 - the number of flow rules using RSS is limited to 32.
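
The queue-selection idea this README describes can be pictured with a small sketch. The code below is illustrative only and is not the BPF program shipped with the tap PMD; rss_ctx, rss_table and select_queue() are hypothetical names. It mirrors the description above: the flow id carried in the skb mark selects a per-rule RSS context, and a hash of the packet headers picks one of that rule's queues.

/* Illustrative sketch only -- not the tap PMD's BPF program. */
#include <stdint.h>

#define MAX_RSS_RULES 32	/* the README notes a limit of 32 RSS flow rules */

struct rss_ctx {
	uint16_t nb_queues;	/* number of queues configured for this rule */
	uint16_t queues[16];	/* queue indices the rule spreads across */
};

static struct rss_ctx rss_table[MAX_RSS_RULES];

static uint16_t
select_queue(uint32_t skb_mark, uint32_t pkt_hash)
{
	const struct rss_ctx *ctx;

	if (skb_mark >= MAX_RSS_RULES)
		return 0;	/* not an RSS-marked packet: default queue */
	ctx = &rss_table[skb_mark];
	if (ctx->nb_queues == 0)
		return 0;
	/* spread packets belonging to the same rule across its queue set */
	return ctx->queues[pkt_hash % ctx->nb_queues];
}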
/dpdk/doc/guides/tools/
flow-perf.rst
17 give different flow each time, and all other items will have open masks.
19 To assess the rule insertion rate, the flow performance tool breaks
20 down the entire number of flow rule operations into windows of fixed size
21 (defaults to 100000 flow rule operations per window, but can be configured).
22 Then, the flow performance tool measures the total time per window and
25 The application also provides the ability to measure rte flow deletion rate,
26 in addition to memory consumption before and after the flow rules' creation.
35 The ``test-flow-perf`` application is compiled as part of the main compilation
55 The following are the command-line options for the flow performance application.
69 Set the total number of flow rules to insert,
[all …]
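
A minimal sketch of the windowed measurement described above follows; it is not the actual test-flow-perf source. build_items() and build_actions() are assumed helpers that fill a match pattern and an action list, and RULES_PER_WINDOW mirrors the documented default of 100000 rules per window.

/* Sketch of per-window insertion-rate measurement (assumed helpers). */
#include <stdio.h>
#include <rte_cycles.h>
#include <rte_flow.h>

#define RULES_PER_WINDOW 100000	/* default window size noted in the doc */

void build_items(struct rte_flow_item *items, uint32_t rule_nr);	/* hypothetical */
void build_actions(struct rte_flow_action *actions, uint32_t rule_nr);	/* hypothetical */

static void
insert_and_measure(uint16_t port_id, uint32_t total_rules)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item items[8];
	struct rte_flow_action actions[4];
	struct rte_flow_error error;
	uint64_t start = rte_rdtsc();
	uint32_t i;

	for (i = 0; i < total_rules; i++) {
		build_items(items, i);		/* e.g. vary the IPv4 dst per rule */
		build_actions(actions, i);
		if (rte_flow_create(port_id, &attr, items, actions, &error) == NULL)
			break;			/* insertion failed, stop measuring */
		if ((i + 1) % RULES_PER_WINDOW == 0) {
			uint64_t now = rte_rdtsc();
			double sec = (double)(now - start) / rte_get_tsc_hz();
			printf("window %u: %.0f rules/sec\n",
			       (i + 1) / RULES_PER_WINDOW,
			       RULES_PER_WINDOW / sec);
			start = now;	/* measure each window independently */
		}
	}
}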
/dpdk/drivers/net/mvpp2/
mrvl_mtr.c
215 struct rte_flow *flow; in mrvl_meter_enable() local
261 LIST_FOREACH(flow, &priv->flows, next) { in mrvl_meter_enable()
262 if (flow->mtr != mtr) in mrvl_meter_enable()
265 flow->action.plcr = mtr->plcr; in mrvl_meter_enable()
267 ret = pp2_cls_tbl_modify_rule(priv->cls_tbl, &flow->rule, in mrvl_meter_enable()
268 &flow->action); in mrvl_meter_enable()
292 struct rte_flow *flow; in mrvl_meter_disable() local
300 LIST_FOREACH(flow, &priv->flows, next) { in mrvl_meter_disable()
301 if (flow->mtr != mtr) in mrvl_meter_disable()
304 flow->action.plcr = NULL; in mrvl_meter_disable()
[all …]
/dpdk/app/test-eventdev/
test_order_common.h
100 const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows; in order_process_stage_1() local
102 if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) { in order_process_stage_1()
104 flow, *order_mbuf_seqn(t, ev->mbuf), in order_process_stage_1()
105 expected_flow_seq[flow]); in order_process_stage_1()
114 expected_flow_seq[flow]++; in order_process_stage_1()
/dpdk/drivers/net/mlx5/
mlx5_nta_rss.c
44 struct rte_flow_hw *flow; in mlx5_nta_ptype_rss_flow_create() local
69 ctx->external, &flow, ctx->error); in mlx5_nta_ptype_rss_flow_create()
70 if (flow) { in mlx5_nta_ptype_rss_flow_create()
71 SLIST_INSERT_HEAD(ctx->head, flow, nt2hws->next); in mlx5_nta_ptype_rss_flow_create()
278 struct rte_flow_hw *flow = NULL; in mlx5_hw_rss_ptype_create_miss_flow() local
304 external, &flow, error); in mlx5_hw_rss_ptype_create_miss_flow()
305 return flow; in mlx5_hw_rss_ptype_create_miss_flow()
319 struct rte_flow_hw *flow = NULL; in mlx5_hw_rss_ptype_create_base_flow() local
348 item_flags, action_flags, external, &flow, error); in mlx5_hw_rss_ptype_create_base_flow()
349 return flow; in mlx5_hw_rss_ptype_create_base_flow()
[all …]
/dpdk/app/test-flow-perf/
flow_gen.c
59 struct rte_flow *flow = NULL; in generate_flow() local
74 flow = rte_flow_create(port_id, &attr, items, actions, error); in generate_flow()
75 return flow; in generate_flow()
/dpdk/doc/guides/nics/
softnic.rst
16 The Soft NIC is configured through the standard DPDK ethdev API (ethdev, flow,
290 thus only supports creating a meter object private to the flow. Once the meter
291 object is successfully created, it can be linked to a specific flow by
292 specifying the ``meter`` flow action in the flow rule.
297 The SoftNIC PMD implements ethdev flow APIs ``rte_flow.h`` that allow validating
298 flow rules, adding flow rules to the SoftNIC pipeline as table rules, deleting
299 and querying the flow rules. The PMD provides a new CLI command for creating the
300 flow groups and their mapping to the SoftNIC pipeline and table. This CLI should
308 From the flow attributes, the PMD uses the group id to get the mapped
309 pipeline and table. The PMD supports a number of flow actions such as
[all …]
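
As a rough sketch of the meter linkage described above (not code taken from the SoftNIC PMD), a rule can reference an already-created meter object through the generic ``meter`` action of rte_flow; MTR_ID and the ETH/IPV4 pattern are placeholders, and the meter is assumed to have been created beforehand with rte_mtr_create().

/* Sketch: attach an existing meter object to a flow rule via the meter action. */
#include <rte_flow.h>

#define MTR_ID 1	/* placeholder meter object id */

static struct rte_flow *
attach_meter_flow(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1, .group = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_meter meter = { .mtr_id = MTR_ID };
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}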
ipn3ke.rst
52 --vdev 'ipn3ke_cfg0,afu=0|b3:00.0,fpga_acc={tm|flow}'
59 …--vdev 'ipn3ke_cfg0,afu=0|b3:00.0,fpga_acc={tm|flow},i40e_pf={0000:b1:00.0|0000:b1:00.1|0000:b1:00…
81 HQoS and flow acceleration
84 The HQoS and flow acceleration bitstream is used to offload HQoS and the flow classifier.
90 …cfg0,ifpga=b3:00.0,port=0' --vdev 'ipn3ke_cfg0,afu=0|b3:00.0,fpga_acc={tm|flow},i40e_pf={0000:b1:0…
/dpdk/examples/flow_filtering/
flow_blocks.c
/dpdk/drivers/net/enic/
enic_fm_flow.c
115 struct rte_flow *flow; member
2411 remove_jump_flow(struct enic_flowman *fm, struct rte_flow *flow) in remove_jump_flow() argument
2417 if (j->flow == flow) { in remove_jump_flow()
2427 struct rte_flow *flow, in save_jump_flow() argument
2438 j->flow = flow; in save_jump_flow()
2443 ENICPMD_LOG(DEBUG, "saved jump flow: flow=%p group=%u", flow, group); in save_jump_flow()
2466 enic_fm_flow_free(struct enic_flowman *fm, struct rte_flow *flow) in enic_fm_flow_free() argument
2468 struct enic_fm_flow *steer = flow->fm->hairpin_steer_flow; in enic_fm_flow_free()
2470 if (flow->fm->fet && flow->fm->fet->default_key) in enic_fm_flow_free()
2471 remove_jump_flow(fm, flow); in enic_fm_flow_free()
[all …]
/dpdk/drivers/vdpa/mlx5/
mlx5_vdpa_steer.c
21 if (priv->steer.rss[i].flow) { in mlx5_vdpa_rss_flows_destroy()
23 (priv->steer.rss[i].flow)); in mlx5_vdpa_rss_flows_destroy()
24 priv->steer.rss[i].flow = NULL; in mlx5_vdpa_rss_flows_destroy()
224 priv->steer.rss[i].flow = mlx5_glue->dv_create_flow in mlx5_vdpa_rss_flows_create()
227 if (!priv->steer.rss[i].flow) { in mlx5_vdpa_rss_flows_create()
254 } else if (!priv->steer.rss[0].flow) { in mlx5_vdpa_steer_update()
