/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

#define FLOW_LOG RTE_ETHDEV_LOG_LINE

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy a flow element described in a descriptor table.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc param or negative value for private flow elements.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/**
	 * Allow PMD private flow item
	 */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;

	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL,\
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = {\
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}
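
/*
 * Illustration (not part of the build): MK_FLOW_ITEM(ETH,
 * sizeof(struct rte_flow_item_eth)) expands to the designated
 * initializer below, so each table slot is indexed by its
 * RTE_FLOW_ITEM_TYPE_* value and stringizes its own name:
 *
 *	[RTE_FLOW_ITEM_TYPE_ETH] = {
 *		.name = "ETH",
 *		.size = sizeof(struct rte_flow_item_eth),
 *		.desc_fn = NULL,
 *	}
 */
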
/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL,\
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn,\
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
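
/*
 * Usage sketch (illustrative only): an application that matches or sets
 * METADATA registers the dynamic field before the port starts, then
 * accesses it per mbuf through the helpers declared in rte_flow.h:
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_panic("no room for metadata dynfield\n");
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *		meta = *RTE_FLOW_DYNF_METADATA(m);
 */
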
static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
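
/*
 * Usage sketch (illustrative only; error handling trimmed): validate,
 * then create, a rule steering all IPv4 traffic on port 0 to queue 3:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(0, &attr, pattern, actions, &err);
 */
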
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
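
/*
 * Usage sketch (illustrative only; port_id and flow are assumed to exist
 * in the caller): query the COUNT action of an existing rule. The queried
 * action must match one attached to the rule:
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &count, &err) == 0)
 *		printf("hits: %" PRIu64 "\n", count.hits);
 */
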
/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store action configuration regardless of
 *   @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * allow PMD private flow item
		 */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * allow PMD private flow action
		 */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * Indirect action conf fills the indirect action
			 * handler. Copy the action handle directly instead
			 * of duplicating the pointer memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
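
/*
 * Usage sketch (illustrative only; actions is assumed to exist in the
 * caller): like snprintf(), every conversion returns the space it needs
 * regardless of the buffer size, so a buffer can be sized with a first
 * NULL call and filled with a second:
 *
 *	struct rte_flow_error err;
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0,
 *				actions, &err);
 *	void *copy = (len < 0) ? NULL : malloc(len);
 *
 *	if (copy != NULL)
 *		rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, copy, len,
 *			      actions, &err);
 */
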
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
	 * to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
		  FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
			uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}
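
/*
 * Usage sketch (illustrative only): create a shared counter once, then
 * reference it from any number of rules through an INDIRECT action whose
 * conf field carries the handle (see the INDIRECT note in
 * rte_flow_desc_action above):
 *
 *	struct rte_flow_indir_action_conf ic = { .ingress = 1 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &ic, &count, &err);
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
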
int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			 "Device with port_id=%"PRIu16" is not configured.",
			 port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
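
/*
 * Usage sketch (illustrative only): after rte_eth_dev_configure() and
 * before rte_eth_dev_start(), size the flow engine from the reported
 * capabilities, here with one flow queue of 64 entries:
 *
 *	struct rte_flow_port_info pi;
 *	struct rte_flow_queue_info qi;
 *	struct rte_flow_port_attr pa = { .nb_counters = 128 };
 *	struct rte_flow_queue_attr qa = { .size = 64 };
 *	const struct rte_flow_queue_attr *qas[] = { &qa };
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_info_get(port_id, &pi, &qi, &err) == 0)
 *		rte_flow_configure(port_id, &pa, 1, qas, &err);
 */
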
int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			 "Device with port_id=%"PRIu16" is not configured.",
			 port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		FLOW_LOG(INFO,
			 "Device with port_id=%"PRIu16" already started.",
			 port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	    !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		FLOW_LOG(INFO,
			 "Flow engine on port_id=%"PRIu16" is not configured.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" template attr is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" pattern is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_pattern_template_create(port_id, template_attr,
						       pattern, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		ret = flow_err(port_id,
			       ops->pattern_template_destroy(dev,
							     pattern_template,
							     error),
			       error);

		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
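
/*
 * Usage sketch (illustrative only; err is assumed to exist in the
 * caller): templates fix the rule shape up front so later insertions
 * only fill in values. A pattern template masks the fields that rules
 * will match on:
 *
 *	struct rte_flow_pattern_template_attr pta = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_mask = {
 *		.hdr.ether_type = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(port_id, &pta, pattern,
 *						 &err);
 */
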
struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
		const struct rte_flow_actions_template_attr *template_attr,
		const struct rte_flow_action actions[],
		const struct rte_flow_action masks[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		FLOW_LOG(INFO,
			 "Flow engine on port_id=%"PRIu16" is not configured.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" template attr is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" actions is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (masks == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" masks is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->actions_template_create)) {
		template = ops->actions_template_create(dev, template_attr,
							actions, masks, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
						       masks, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_actions_template_destroy(uint16_t port_id,
		struct rte_flow_actions_template *actions_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		ret = flow_err(port_id,
			       ops->actions_template_destroy(dev,
							     actions_template,
							     error),
			       error);

		rte_flow_trace_actions_template_destroy(port_id, actions_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
		const struct rte_flow_template_table_attr *table_attr,
		struct rte_flow_pattern_template *pattern_templates[],
		uint8_t nb_pattern_templates,
		struct rte_flow_actions_template *actions_templates[],
		uint8_t nb_actions_templates,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		FLOW_LOG(INFO,
			 "Flow engine on port_id=%"PRIu16" is not configured.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" table attr is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" pattern templates is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" actions templates is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
				pattern_templates, nb_pattern_templates,
				actions_templates, nb_actions_templates,
				error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_template_table_create(port_id, table_attr,
						     pattern_templates,
						     nb_pattern_templates,
						     actions_templates,
						     nb_actions_templates,
						     table);

		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_template_table_destroy(uint16_t port_id,
		struct rte_flow_template_table *template_table,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		ret = flow_err(port_id,
			       ops->template_table_destroy(dev,
							   template_table,
							   error),
			       error);

		rte_flow_trace_template_table_destroy(port_id, template_table,
						      ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_group_set_miss_actions(uint16_t port_id,
				uint32_t group_id,
				const struct rte_flow_group_attr *attr,
				const struct rte_flow_action actions[],
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->group_set_miss_actions)) {
		return flow_err(port_id,
				ops->group_set_miss_actions(dev, group_id,
							    attr, actions,
							    error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	/* Fast path: unlike the synchronous calls above, ops and arguments
	 * are not re-validated here.
	 */
	flow = ops->async_create(dev, queue_id,
				 op_attr, template_table,
				 pattern, pattern_template_index,
				 actions, actions_template_index,
				 user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
				    pattern, pattern_template_index, actions,
				    actions_template_index, user_data, flow);

	return flow;
}
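
/*
 * Illustrative sketch (not compiled here): enqueueing a rule creation on
 * queue 0 from previously built templates. "table" is assumed to come from
 * rte_flow_template_table_create() above; "pattern" and "actions" must match
 * the templates at the given indices.
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_async_create(port_id, 0, &op_attr, table,
 *				     pattern, 0, actions, 0, NULL, &err);
 *	if (flow == NULL)
 *		handle_enqueue_failure(&err);	// hypothetical helper
 */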

struct rte_flow *
rte_flow_async_create_by_index(uint16_t port_id,
			       uint32_t queue_id,
			       const struct rte_flow_op_attr *op_attr,
			       struct rte_flow_template_table *template_table,
			       uint32_t rule_index,
			       const struct rte_flow_action actions[],
			       uint8_t actions_template_index,
			       void *user_data,
			       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create_by_index(dev, queue_id,
					  op_attr, template_table, rule_index,
					  actions, actions_template_index,
					  user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);
	return flow;
}

int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->async_destroy(dev, queue_id,
					  op_attr, flow,
					  user_data, error),
		       error);

	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
				     user_data, ret);

	return ret;
}

int
rte_flow_async_actions_update(uint16_t port_id,
			      uint32_t queue_id,
			      const struct rte_flow_op_attr *op_attr,
			      struct rte_flow *flow,
			      const struct rte_flow_action actions[],
			      uint8_t actions_template_index,
			      void *user_data,
			      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->async_actions_update(dev, queue_id, op_attr,
						 flow, actions,
						 actions_template_index,
						 user_data, error),
		       error);

	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
					    actions, actions_template_index,
					    user_data, ret);

	return ret;
}

int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->push(dev, queue_id, error),
		       error);

	rte_flow_trace_push(port_id, queue_id, ret);

	return ret;
}

int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;
	int rc;

	/* ops->pull() returns the number of completed operations
	 * or a negative errno.
	 */
	ret = ops->pull(dev, queue_id, res, n_res, error);
	rc = ret ? ret : flow_err(port_id, ret, error);

	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);

	return rc;
}
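
/*
 * Typical completion loop (sketch, values hypothetical): operations enqueued
 * with the async calls above are not guaranteed to reach hardware until
 * rte_flow_push(), and their results must be drained with rte_flow_pull().
 *
 *	struct rte_flow_op_result results[64];
 *	struct rte_flow_error err;
 *	int n;
 *
 *	rte_flow_push(port_id, queue_id, &err);
 *	do {
 *		n = rte_flow_pull(port_id, queue_id, results,
 *				  RTE_DIM(results), &err);
 *	} while (n == 0);
 */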

struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_action_handle *handle;

	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
						 indir_action_conf, action,
						 user_data, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
						  indir_action_conf, action,
						  user_data, handle);

	return handle;
}

int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
					       action_handle, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
						   action_handle, user_data,
						   ret);

	return ret;
}

int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
					      action_handle, update, user_data,
					      error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
						  action_handle, update,
						  user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_query(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_action_handle *action_handle,
		void *data,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
					     action_handle, data, user_data,
					     error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
						 action_handle, data, user_data,
						 ret);

	return ret;
}

int
rte_flow_action_handle_query_update(uint16_t port_id,
				    struct rte_flow_action_handle *handle,
				    const void *update, void *query,
				    enum rte_flow_query_update_mode mode,
				    struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_handle_query_update)
		return -ENOTSUP;
	ret = ops->action_handle_query_update(dev, handle, update,
					      query, mode, error);
	return flow_err(port_id, ret, error);
}
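
/*
 * Sketch of a combined query-and-update on an indirect action (illustrative;
 * the concrete types behind "update" and "query" depend on the action the
 * handle was created with). RTE_FLOW_QU_QUERY_FIRST asks the PMD to query
 * the old state before applying the update.
 *
 *	ret = rte_flow_action_handle_query_update(port_id, handle,
 *						  &new_conf, &query_buf,
 *						  RTE_FLOW_QU_QUERY_FIRST,
 *						  &err);
 */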

int
rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
					   const struct rte_flow_op_attr *attr,
					   struct rte_flow_action_handle *handle,
					   const void *update, void *query,
					   enum rte_flow_query_update_mode mode,
					   void *user_data,
					   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_handle_query_update)
		return -ENOTSUP;
	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
						    handle, update,
						    query, mode,
						    user_data, error);
	return flow_err(port_id, ret, error);
}

struct rte_flow_action_list_handle *
rte_flow_action_list_handle_create(uint16_t port_id,
				   const struct rte_flow_indir_action_conf *conf,
				   const struct rte_flow_action *actions,
				   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;
	struct rte_flow_action_list_handle *handle;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_create) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "action_list handle not supported");
		return NULL;
	}
	dev = &rte_eth_devices[port_id];
	handle = ops->action_list_handle_create(dev, conf, actions, error);
	ret = flow_err(port_id, -rte_errno, error);
	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
	return handle;
}

int
rte_flow_action_list_handle_destroy(uint16_t port_id,
				    struct rte_flow_action_list_handle *handle,
				    struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_destroy)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "action_list handle not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->action_list_handle_destroy(dev, handle, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
	return ret;
}

struct rte_flow_action_list_handle *
rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
					 const struct rte_flow_op_attr *attr,
					 const struct rte_flow_indir_action_conf *conf,
					 const struct rte_flow_action *actions,
					 void *user_data,
					 struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;
	struct rte_flow_action_list_handle *handle;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_list_handle_create) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "action_list handle not supported");
		return NULL;
	}
	dev = &rte_eth_devices[port_id];
	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
						      actions, user_data,
						      error);
	ret = flow_err(port_id, -rte_errno, error);
	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
						       conf, actions, user_data,
						       ret);
	return handle;
}
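
/*
 * Sketch (hypothetical values): an indirect action list bundles several
 * actions behind one handle, e.g. count-then-RSS, terminated by END like a
 * regular action array. "rss_conf" is a hypothetical struct
 * rte_flow_action_rss instance.
 *
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action list[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_list_handle *h =
 *		rte_flow_action_list_handle_create(port_id, &conf, list, &err);
 */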

int
rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_list_handle *handle,
		void *user_data, struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_list_handle_destroy)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "async action_list handle not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
						    handle, user_data, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
							op_attr, handle,
							user_data, ret);
	return ret;
}

int
rte_flow_action_list_handle_query_update(uint16_t port_id,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_query_update)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "action_list query_update not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->action_list_handle_query_update(dev, handle, update, query,
						   mode, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
						       query, mode, ret);
	return ret;
}

int
rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
			const struct rte_flow_op_attr *attr,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			void *user_data, struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_list_handle_query_update)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "action_list async query_update not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
							 handle, update, query,
							 mode, user_data,
							 error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
							     attr, handle,
							     update, query,
							     mode, user_data,
							     ret);
	return ret;
}
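
/*
 * Sketch for the list variant (illustrative, buffers hypothetical): "update"
 * and "query" are arrays of per-action pointers matching the order of the
 * handle's action list; entries for actions that take no update or query
 * may be left NULL.
 *
 *	const void *updates[] = { NULL, &new_rss_conf };
 *	void *queries[] = { &count_query, NULL };
 *
 *	ret = rte_flow_action_list_handle_query_update(port_id, h,
 *						       updates, queries,
 *						       RTE_FLOW_QU_QUERY_FIRST,
 *						       &err);
 */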

int
rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
			 uint32_t *hash, struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_calc_table_hash)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "calc_table_hash not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
					hash, error);
	return flow_err(port_id, ret, error);
}
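
/*
 * Sketch (illustrative): computing the hash a template table would derive
 * for a packet matching "pattern", e.g. to predict where a rule created
 * from pattern template 0 would land.
 *
 *	uint32_t hash;
 *
 *	ret = rte_flow_calc_table_hash(port_id, table, pattern, 0,
 *				       &hash, &err);
 */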