1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2016 6WIND S.A. 3 * Copyright 2016 Mellanox Technologies, Ltd 4 */ 5 6 #include <errno.h> 7 #include <stddef.h> 8 #include <stdint.h> 9 10 #include <rte_common.h> 11 #include <rte_errno.h> 12 #include <rte_branch_prediction.h> 13 #include <rte_string_fns.h> 14 #include <rte_mbuf_dyn.h> 15 #include "rte_ethdev.h" 16 #include "rte_flow_driver.h" 17 #include "rte_flow.h" 18 19 /* Mbuf dynamic field name for metadata. */ 20 int32_t rte_flow_dynf_metadata_offs = -1; 21 22 /* Mbuf dynamic field flag bit number for metadata. */ 23 uint64_t rte_flow_dynf_metadata_mask; 24 25 /** 26 * Flow elements description tables. 27 */ 28 struct rte_flow_desc_data { 29 const char *name; 30 size_t size; 31 size_t (*desc_fn)(void *dst, const void *src); 32 }; 33 34 /** 35 * 36 * @param buf 37 * Destination memory. 38 * @param data 39 * Source memory 40 * @param size 41 * Requested copy size 42 * @param desc 43 * rte_flow_desc_item - for flow item conversion. 44 * rte_flow_desc_action - for flow action conversion. 45 * @param type 46 * Offset into the desc param or negative value for private flow elements. 47 */ 48 static inline size_t 49 rte_flow_conv_copy(void *buf, const void *data, const size_t size, 50 const struct rte_flow_desc_data *desc, int type) 51 { 52 /** 53 * Allow PMD private flow item 54 */ 55 bool rte_type = type >= 0; 56 57 size_t sz = rte_type ? desc[type].size : sizeof(void *); 58 if (buf == NULL || data == NULL) 59 return 0; 60 rte_memcpy(buf, data, (size > sz ? sz : size)); 61 if (rte_type && desc[type].desc_fn) 62 sz += desc[type].desc_fn(size > 0 ? buf : NULL, data); 63 return sz; 64 } 65 66 static size_t 67 rte_flow_item_flex_conv(void *buf, const void *data) 68 { 69 struct rte_flow_item_flex *dst = buf; 70 const struct rte_flow_item_flex *src = data; 71 if (buf) { 72 dst->pattern = rte_memcpy 73 ((void *)((uintptr_t)(dst + 1)), src->pattern, 74 src->length); 75 } 76 return src->length; 77 } 78 79 /** Generate flow_item[] entry. */ 80 #define MK_FLOW_ITEM(t, s) \ 81 [RTE_FLOW_ITEM_TYPE_ ## t] = { \ 82 .name = # t, \ 83 .size = s, \ 84 .desc_fn = NULL,\ 85 } 86 87 #define MK_FLOW_ITEM_FN(t, s, fn) \ 88 [RTE_FLOW_ITEM_TYPE_ ## t] = {\ 89 .name = # t, \ 90 .size = s, \ 91 .desc_fn = fn, \ 92 } 93 94 /** Information about known flow pattern items. */ 95 static const struct rte_flow_desc_data rte_flow_desc_item[] = { 96 MK_FLOW_ITEM(END, 0), 97 MK_FLOW_ITEM(VOID, 0), 98 MK_FLOW_ITEM(INVERT, 0), 99 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), 100 MK_FLOW_ITEM(PF, 0), 101 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), 102 MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)), 103 MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)), 104 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), 105 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), 106 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), 107 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), 108 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), 109 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), 110 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)), 111 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), 112 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), 113 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), 114 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), 115 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), 116 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), 117 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), 118 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)), 119 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), 120 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), 121 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), 122 MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)), 123 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), 124 MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)), 125 MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)), 126 MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)), 127 MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)), 128 MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)), 129 MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)), 130 MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)), 131 MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)), 132 MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH, 133 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)), 134 MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH, 135 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)), 136 MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)), 137 MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)), 138 MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)), 139 MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)), 140 MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)), 141 MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)), 142 MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)), 143 MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)), 144 MK_FLOW_ITEM(PPPOE_PROTO_ID, 145 sizeof(struct rte_flow_item_pppoe_proto_id)), 146 MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)), 147 MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)), 148 MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)), 149 MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)), 150 MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)), 151 MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)), 152 MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)), 153 MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)), 154 MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)), 155 MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)), 156 MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)), 157 MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)), 158 MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex), 159 rte_flow_item_flex_conv), 160 MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)), 161 MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)), 162 }; 163 164 /** Generate flow_action[] entry. */ 165 #define MK_FLOW_ACTION(t, s) \ 166 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 167 .name = # t, \ 168 .size = s, \ 169 .desc_fn = NULL,\ 170 } 171 172 #define MK_FLOW_ACTION_FN(t, fn) \ 173 [RTE_FLOW_ACTION_TYPE_ ## t] = { \ 174 .name = # t, \ 175 .size = 0, \ 176 .desc_fn = fn,\ 177 } 178 179 180 /** Information about known flow actions. */ 181 static const struct rte_flow_desc_data rte_flow_desc_action[] = { 182 MK_FLOW_ACTION(END, 0), 183 MK_FLOW_ACTION(VOID, 0), 184 MK_FLOW_ACTION(PASSTHRU, 0), 185 MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)), 186 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)), 187 MK_FLOW_ACTION(FLAG, 0), 188 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), 189 MK_FLOW_ACTION(DROP, 0), 190 MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)), 191 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), 192 MK_FLOW_ACTION(PF, 0), 193 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), 194 MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)), 195 MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)), 196 MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)), 197 MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)), 198 MK_FLOW_ACTION(OF_SET_MPLS_TTL, 199 sizeof(struct rte_flow_action_of_set_mpls_ttl)), 200 MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0), 201 MK_FLOW_ACTION(OF_SET_NW_TTL, 202 sizeof(struct rte_flow_action_of_set_nw_ttl)), 203 MK_FLOW_ACTION(OF_DEC_NW_TTL, 0), 204 MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0), 205 MK_FLOW_ACTION(OF_COPY_TTL_IN, 0), 206 MK_FLOW_ACTION(OF_POP_VLAN, 0), 207 MK_FLOW_ACTION(OF_PUSH_VLAN, 208 sizeof(struct rte_flow_action_of_push_vlan)), 209 MK_FLOW_ACTION(OF_SET_VLAN_VID, 210 sizeof(struct rte_flow_action_of_set_vlan_vid)), 211 MK_FLOW_ACTION(OF_SET_VLAN_PCP, 212 sizeof(struct rte_flow_action_of_set_vlan_pcp)), 213 MK_FLOW_ACTION(OF_POP_MPLS, 214 sizeof(struct rte_flow_action_of_pop_mpls)), 215 MK_FLOW_ACTION(OF_PUSH_MPLS, 216 sizeof(struct rte_flow_action_of_push_mpls)), 217 MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)), 218 MK_FLOW_ACTION(VXLAN_DECAP, 0), 219 MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)), 220 MK_FLOW_ACTION(NVGRE_DECAP, 0), 221 MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)), 222 MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)), 223 MK_FLOW_ACTION(SET_IPV4_SRC, 224 sizeof(struct rte_flow_action_set_ipv4)), 225 MK_FLOW_ACTION(SET_IPV4_DST, 226 sizeof(struct rte_flow_action_set_ipv4)), 227 MK_FLOW_ACTION(SET_IPV6_SRC, 228 sizeof(struct rte_flow_action_set_ipv6)), 229 MK_FLOW_ACTION(SET_IPV6_DST, 230 sizeof(struct rte_flow_action_set_ipv6)), 231 MK_FLOW_ACTION(SET_TP_SRC, 232 sizeof(struct rte_flow_action_set_tp)), 233 MK_FLOW_ACTION(SET_TP_DST, 234 sizeof(struct rte_flow_action_set_tp)), 235 MK_FLOW_ACTION(MAC_SWAP, 0), 236 MK_FLOW_ACTION(DEC_TTL, 0), 237 MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)), 238 MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)), 239 MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)), 240 MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)), 241 MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)), 242 MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)), 243 MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)), 244 MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)), 245 MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)), 246 MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)), 247 MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)), 248 MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)), 249 MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)), 250 MK_FLOW_ACTION(MODIFY_FIELD, 251 sizeof(struct rte_flow_action_modify_field)), 252 /** 253 * Indirect action represented as handle of type 254 * (struct rte_flow_action_handle *) stored in conf field (see 255 * struct rte_flow_action); no need for additional structure to * store 256 * indirect action handle. 257 */ 258 MK_FLOW_ACTION(INDIRECT, 0), 259 MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)), 260 MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)), 261 MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)), 262 }; 263 264 int 265 rte_flow_dynf_metadata_register(void) 266 { 267 int offset; 268 int flag; 269 270 static const struct rte_mbuf_dynfield desc_offs = { 271 .name = RTE_MBUF_DYNFIELD_METADATA_NAME, 272 .size = sizeof(uint32_t), 273 .align = __alignof__(uint32_t), 274 }; 275 static const struct rte_mbuf_dynflag desc_flag = { 276 .name = RTE_MBUF_DYNFLAG_METADATA_NAME, 277 }; 278 279 offset = rte_mbuf_dynfield_register(&desc_offs); 280 if (offset < 0) 281 goto error; 282 flag = rte_mbuf_dynflag_register(&desc_flag); 283 if (flag < 0) 284 goto error; 285 rte_flow_dynf_metadata_offs = offset; 286 rte_flow_dynf_metadata_mask = RTE_BIT64(flag); 287 return 0; 288 289 error: 290 rte_flow_dynf_metadata_offs = -1; 291 rte_flow_dynf_metadata_mask = UINT64_C(0); 292 return -rte_errno; 293 } 294 295 static inline void 296 fts_enter(struct rte_eth_dev *dev) 297 { 298 if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE)) 299 pthread_mutex_lock(&dev->data->flow_ops_mutex); 300 } 301 302 static inline void 303 fts_exit(struct rte_eth_dev *dev) 304 { 305 if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE)) 306 pthread_mutex_unlock(&dev->data->flow_ops_mutex); 307 } 308 309 static int 310 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error) 311 { 312 if (ret == 0) 313 return 0; 314 if (rte_eth_dev_is_removed(port_id)) 315 return rte_flow_error_set(error, EIO, 316 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 317 NULL, rte_strerror(EIO)); 318 return ret; 319 } 320 321 /* Get generic flow operations structure from a port. */ 322 const struct rte_flow_ops * 323 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error) 324 { 325 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 326 const struct rte_flow_ops *ops; 327 int code; 328 329 if (unlikely(!rte_eth_dev_is_valid_port(port_id))) 330 code = ENODEV; 331 else if (unlikely(dev->dev_ops->flow_ops_get == NULL)) 332 /* flow API not supported with this driver dev_ops */ 333 code = ENOSYS; 334 else 335 code = dev->dev_ops->flow_ops_get(dev, &ops); 336 if (code == 0 && ops == NULL) 337 /* flow API not supported with this device */ 338 code = ENOSYS; 339 340 if (code != 0) { 341 rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 342 NULL, rte_strerror(code)); 343 return NULL; 344 } 345 return ops; 346 } 347 348 /* Check whether a flow rule can be created on a given port. */ 349 int 350 rte_flow_validate(uint16_t port_id, 351 const struct rte_flow_attr *attr, 352 const struct rte_flow_item pattern[], 353 const struct rte_flow_action actions[], 354 struct rte_flow_error *error) 355 { 356 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 357 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 358 int ret; 359 360 if (unlikely(!ops)) 361 return -rte_errno; 362 if (likely(!!ops->validate)) { 363 fts_enter(dev); 364 ret = ops->validate(dev, attr, pattern, actions, error); 365 fts_exit(dev); 366 return flow_err(port_id, ret, error); 367 } 368 return rte_flow_error_set(error, ENOSYS, 369 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 370 NULL, rte_strerror(ENOSYS)); 371 } 372 373 /* Create a flow rule on a given port. */ 374 struct rte_flow * 375 rte_flow_create(uint16_t port_id, 376 const struct rte_flow_attr *attr, 377 const struct rte_flow_item pattern[], 378 const struct rte_flow_action actions[], 379 struct rte_flow_error *error) 380 { 381 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 382 struct rte_flow *flow; 383 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 384 385 if (unlikely(!ops)) 386 return NULL; 387 if (likely(!!ops->create)) { 388 fts_enter(dev); 389 flow = ops->create(dev, attr, pattern, actions, error); 390 fts_exit(dev); 391 if (flow == NULL) 392 flow_err(port_id, -rte_errno, error); 393 return flow; 394 } 395 rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 396 NULL, rte_strerror(ENOSYS)); 397 return NULL; 398 } 399 400 /* Destroy a flow rule on a given port. */ 401 int 402 rte_flow_destroy(uint16_t port_id, 403 struct rte_flow *flow, 404 struct rte_flow_error *error) 405 { 406 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 407 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 408 int ret; 409 410 if (unlikely(!ops)) 411 return -rte_errno; 412 if (likely(!!ops->destroy)) { 413 fts_enter(dev); 414 ret = ops->destroy(dev, flow, error); 415 fts_exit(dev); 416 return flow_err(port_id, ret, error); 417 } 418 return rte_flow_error_set(error, ENOSYS, 419 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 420 NULL, rte_strerror(ENOSYS)); 421 } 422 423 /* Destroy all flow rules associated with a port. */ 424 int 425 rte_flow_flush(uint16_t port_id, 426 struct rte_flow_error *error) 427 { 428 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 429 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 430 int ret; 431 432 if (unlikely(!ops)) 433 return -rte_errno; 434 if (likely(!!ops->flush)) { 435 fts_enter(dev); 436 ret = ops->flush(dev, error); 437 fts_exit(dev); 438 return flow_err(port_id, ret, error); 439 } 440 return rte_flow_error_set(error, ENOSYS, 441 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 442 NULL, rte_strerror(ENOSYS)); 443 } 444 445 /* Query an existing flow rule. */ 446 int 447 rte_flow_query(uint16_t port_id, 448 struct rte_flow *flow, 449 const struct rte_flow_action *action, 450 void *data, 451 struct rte_flow_error *error) 452 { 453 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 454 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 455 int ret; 456 457 if (!ops) 458 return -rte_errno; 459 if (likely(!!ops->query)) { 460 fts_enter(dev); 461 ret = ops->query(dev, flow, action, data, error); 462 fts_exit(dev); 463 return flow_err(port_id, ret, error); 464 } 465 return rte_flow_error_set(error, ENOSYS, 466 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 467 NULL, rte_strerror(ENOSYS)); 468 } 469 470 /* Restrict ingress traffic to the defined flow rules. */ 471 int 472 rte_flow_isolate(uint16_t port_id, 473 int set, 474 struct rte_flow_error *error) 475 { 476 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 477 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 478 int ret; 479 480 if (!ops) 481 return -rte_errno; 482 if (likely(!!ops->isolate)) { 483 fts_enter(dev); 484 ret = ops->isolate(dev, set, error); 485 fts_exit(dev); 486 return flow_err(port_id, ret, error); 487 } 488 return rte_flow_error_set(error, ENOSYS, 489 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 490 NULL, rte_strerror(ENOSYS)); 491 } 492 493 /* Initialize flow error structure. */ 494 int 495 rte_flow_error_set(struct rte_flow_error *error, 496 int code, 497 enum rte_flow_error_type type, 498 const void *cause, 499 const char *message) 500 { 501 if (error) { 502 *error = (struct rte_flow_error){ 503 .type = type, 504 .cause = cause, 505 .message = message, 506 }; 507 } 508 rte_errno = code; 509 return -code; 510 } 511 512 /** Pattern item specification types. */ 513 enum rte_flow_conv_item_spec_type { 514 RTE_FLOW_CONV_ITEM_SPEC, 515 RTE_FLOW_CONV_ITEM_LAST, 516 RTE_FLOW_CONV_ITEM_MASK, 517 }; 518 519 /** 520 * Copy pattern item specification. 521 * 522 * @param[out] buf 523 * Output buffer. Can be NULL if @p size is zero. 524 * @param size 525 * Size of @p buf in bytes. 526 * @param[in] item 527 * Pattern item to copy specification from. 528 * @param type 529 * Specification selector for either @p spec, @p last or @p mask. 530 * 531 * @return 532 * Number of bytes needed to store pattern item specification regardless 533 * of @p size. @p buf contents are truncated to @p size if not large 534 * enough. 535 */ 536 static size_t 537 rte_flow_conv_item_spec(void *buf, const size_t size, 538 const struct rte_flow_item *item, 539 enum rte_flow_conv_item_spec_type type) 540 { 541 size_t off; 542 const void *data = 543 type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec : 544 type == RTE_FLOW_CONV_ITEM_LAST ? item->last : 545 type == RTE_FLOW_CONV_ITEM_MASK ? item->mask : 546 NULL; 547 548 switch (item->type) { 549 union { 550 const struct rte_flow_item_raw *raw; 551 } spec; 552 union { 553 const struct rte_flow_item_raw *raw; 554 } last; 555 union { 556 const struct rte_flow_item_raw *raw; 557 } mask; 558 union { 559 const struct rte_flow_item_raw *raw; 560 } src; 561 union { 562 struct rte_flow_item_raw *raw; 563 } dst; 564 size_t tmp; 565 566 case RTE_FLOW_ITEM_TYPE_RAW: 567 spec.raw = item->spec; 568 last.raw = item->last ? item->last : item->spec; 569 mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask; 570 src.raw = data; 571 dst.raw = buf; 572 rte_memcpy(dst.raw, 573 (&(struct rte_flow_item_raw){ 574 .relative = src.raw->relative, 575 .search = src.raw->search, 576 .reserved = src.raw->reserved, 577 .offset = src.raw->offset, 578 .limit = src.raw->limit, 579 .length = src.raw->length, 580 }), 581 size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size); 582 off = sizeof(*dst.raw); 583 if (type == RTE_FLOW_CONV_ITEM_SPEC || 584 (type == RTE_FLOW_CONV_ITEM_MASK && 585 ((spec.raw->length & mask.raw->length) >= 586 (last.raw->length & mask.raw->length)))) 587 tmp = spec.raw->length & mask.raw->length; 588 else 589 tmp = last.raw->length & mask.raw->length; 590 if (tmp) { 591 off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern)); 592 if (size >= off + tmp) 593 dst.raw->pattern = rte_memcpy 594 ((void *)((uintptr_t)dst.raw + off), 595 src.raw->pattern, tmp); 596 off += tmp; 597 } 598 break; 599 default: 600 off = rte_flow_conv_copy(buf, data, size, 601 rte_flow_desc_item, item->type); 602 break; 603 } 604 return off; 605 } 606 607 /** 608 * Copy action configuration. 609 * 610 * @param[out] buf 611 * Output buffer. Can be NULL if @p size is zero. 612 * @param size 613 * Size of @p buf in bytes. 614 * @param[in] action 615 * Action to copy configuration from. 616 * 617 * @return 618 * Number of bytes needed to store pattern item specification regardless 619 * of @p size. @p buf contents are truncated to @p size if not large 620 * enough. 621 */ 622 static size_t 623 rte_flow_conv_action_conf(void *buf, const size_t size, 624 const struct rte_flow_action *action) 625 { 626 size_t off; 627 628 switch (action->type) { 629 union { 630 const struct rte_flow_action_rss *rss; 631 const struct rte_flow_action_vxlan_encap *vxlan_encap; 632 const struct rte_flow_action_nvgre_encap *nvgre_encap; 633 } src; 634 union { 635 struct rte_flow_action_rss *rss; 636 struct rte_flow_action_vxlan_encap *vxlan_encap; 637 struct rte_flow_action_nvgre_encap *nvgre_encap; 638 } dst; 639 size_t tmp; 640 int ret; 641 642 case RTE_FLOW_ACTION_TYPE_RSS: 643 src.rss = action->conf; 644 dst.rss = buf; 645 rte_memcpy(dst.rss, 646 (&(struct rte_flow_action_rss){ 647 .func = src.rss->func, 648 .level = src.rss->level, 649 .types = src.rss->types, 650 .key_len = src.rss->key_len, 651 .queue_num = src.rss->queue_num, 652 }), 653 size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size); 654 off = sizeof(*dst.rss); 655 if (src.rss->key_len && src.rss->key) { 656 off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key)); 657 tmp = sizeof(*src.rss->key) * src.rss->key_len; 658 if (size >= off + tmp) 659 dst.rss->key = rte_memcpy 660 ((void *)((uintptr_t)dst.rss + off), 661 src.rss->key, tmp); 662 off += tmp; 663 } 664 if (src.rss->queue_num) { 665 off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue)); 666 tmp = sizeof(*src.rss->queue) * src.rss->queue_num; 667 if (size >= off + tmp) 668 dst.rss->queue = rte_memcpy 669 ((void *)((uintptr_t)dst.rss + off), 670 src.rss->queue, tmp); 671 off += tmp; 672 } 673 break; 674 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 675 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 676 src.vxlan_encap = action->conf; 677 dst.vxlan_encap = buf; 678 RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) != 679 sizeof(*src.nvgre_encap) || 680 offsetof(struct rte_flow_action_vxlan_encap, 681 definition) != 682 offsetof(struct rte_flow_action_nvgre_encap, 683 definition)); 684 off = sizeof(*dst.vxlan_encap); 685 if (src.vxlan_encap->definition) { 686 off = RTE_ALIGN_CEIL 687 (off, sizeof(*dst.vxlan_encap->definition)); 688 ret = rte_flow_conv 689 (RTE_FLOW_CONV_OP_PATTERN, 690 (void *)((uintptr_t)dst.vxlan_encap + off), 691 size > off ? size - off : 0, 692 src.vxlan_encap->definition, NULL); 693 if (ret < 0) 694 return 0; 695 if (size >= off + ret) 696 dst.vxlan_encap->definition = 697 (void *)((uintptr_t)dst.vxlan_encap + 698 off); 699 off += ret; 700 } 701 break; 702 default: 703 off = rte_flow_conv_copy(buf, action->conf, size, 704 rte_flow_desc_action, action->type); 705 break; 706 } 707 return off; 708 } 709 710 /** 711 * Copy a list of pattern items. 712 * 713 * @param[out] dst 714 * Destination buffer. Can be NULL if @p size is zero. 715 * @param size 716 * Size of @p dst in bytes. 717 * @param[in] src 718 * Source pattern items. 719 * @param num 720 * Maximum number of pattern items to process from @p src or 0 to process 721 * the entire list. In both cases, processing stops after 722 * RTE_FLOW_ITEM_TYPE_END is encountered. 723 * @param[out] error 724 * Perform verbose error reporting if not NULL. 725 * 726 * @return 727 * A positive value representing the number of bytes needed to store 728 * pattern items regardless of @p size on success (@p buf contents are 729 * truncated to @p size if not large enough), a negative errno value 730 * otherwise and rte_errno is set. 731 */ 732 static int 733 rte_flow_conv_pattern(struct rte_flow_item *dst, 734 const size_t size, 735 const struct rte_flow_item *src, 736 unsigned int num, 737 struct rte_flow_error *error) 738 { 739 uintptr_t data = (uintptr_t)dst; 740 size_t off; 741 size_t ret; 742 unsigned int i; 743 744 for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) { 745 /** 746 * allow PMD private flow item 747 */ 748 if (((int)src->type >= 0) && 749 ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) || 750 !rte_flow_desc_item[src->type].name)) 751 return rte_flow_error_set 752 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src, 753 "cannot convert unknown item type"); 754 if (size >= off + sizeof(*dst)) 755 *dst = (struct rte_flow_item){ 756 .type = src->type, 757 }; 758 off += sizeof(*dst); 759 if (!src->type) 760 num = i + 1; 761 } 762 num = i; 763 src -= num; 764 dst -= num; 765 do { 766 if (src->spec) { 767 off = RTE_ALIGN_CEIL(off, sizeof(double)); 768 ret = rte_flow_conv_item_spec 769 ((void *)(data + off), 770 size > off ? size - off : 0, src, 771 RTE_FLOW_CONV_ITEM_SPEC); 772 if (size && size >= off + ret) 773 dst->spec = (void *)(data + off); 774 off += ret; 775 776 } 777 if (src->last) { 778 off = RTE_ALIGN_CEIL(off, sizeof(double)); 779 ret = rte_flow_conv_item_spec 780 ((void *)(data + off), 781 size > off ? size - off : 0, src, 782 RTE_FLOW_CONV_ITEM_LAST); 783 if (size && size >= off + ret) 784 dst->last = (void *)(data + off); 785 off += ret; 786 } 787 if (src->mask) { 788 off = RTE_ALIGN_CEIL(off, sizeof(double)); 789 ret = rte_flow_conv_item_spec 790 ((void *)(data + off), 791 size > off ? size - off : 0, src, 792 RTE_FLOW_CONV_ITEM_MASK); 793 if (size && size >= off + ret) 794 dst->mask = (void *)(data + off); 795 off += ret; 796 } 797 ++src; 798 ++dst; 799 } while (--num); 800 return off; 801 } 802 803 /** 804 * Copy a list of actions. 805 * 806 * @param[out] dst 807 * Destination buffer. Can be NULL if @p size is zero. 808 * @param size 809 * Size of @p dst in bytes. 810 * @param[in] src 811 * Source actions. 812 * @param num 813 * Maximum number of actions to process from @p src or 0 to process the 814 * entire list. In both cases, processing stops after 815 * RTE_FLOW_ACTION_TYPE_END is encountered. 816 * @param[out] error 817 * Perform verbose error reporting if not NULL. 818 * 819 * @return 820 * A positive value representing the number of bytes needed to store 821 * actions regardless of @p size on success (@p buf contents are truncated 822 * to @p size if not large enough), a negative errno value otherwise and 823 * rte_errno is set. 824 */ 825 static int 826 rte_flow_conv_actions(struct rte_flow_action *dst, 827 const size_t size, 828 const struct rte_flow_action *src, 829 unsigned int num, 830 struct rte_flow_error *error) 831 { 832 uintptr_t data = (uintptr_t)dst; 833 size_t off; 834 size_t ret; 835 unsigned int i; 836 837 for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) { 838 /** 839 * allow PMD private flow action 840 */ 841 if (((int)src->type >= 0) && 842 ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) || 843 !rte_flow_desc_action[src->type].name)) 844 return rte_flow_error_set 845 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, 846 src, "cannot convert unknown action type"); 847 if (size >= off + sizeof(*dst)) 848 *dst = (struct rte_flow_action){ 849 .type = src->type, 850 }; 851 off += sizeof(*dst); 852 if (!src->type) 853 num = i + 1; 854 } 855 num = i; 856 src -= num; 857 dst -= num; 858 do { 859 if (src->conf) { 860 off = RTE_ALIGN_CEIL(off, sizeof(double)); 861 ret = rte_flow_conv_action_conf 862 ((void *)(data + off), 863 size > off ? size - off : 0, src); 864 if (size && size >= off + ret) 865 dst->conf = (void *)(data + off); 866 off += ret; 867 } 868 ++src; 869 ++dst; 870 } while (--num); 871 return off; 872 } 873 874 /** 875 * Copy flow rule components. 876 * 877 * This comprises the flow rule descriptor itself, attributes, pattern and 878 * actions list. NULL components in @p src are skipped. 879 * 880 * @param[out] dst 881 * Destination buffer. Can be NULL if @p size is zero. 882 * @param size 883 * Size of @p dst in bytes. 884 * @param[in] src 885 * Source flow rule descriptor. 886 * @param[out] error 887 * Perform verbose error reporting if not NULL. 888 * 889 * @return 890 * A positive value representing the number of bytes needed to store all 891 * components including the descriptor regardless of @p size on success 892 * (@p buf contents are truncated to @p size if not large enough), a 893 * negative errno value otherwise and rte_errno is set. 894 */ 895 static int 896 rte_flow_conv_rule(struct rte_flow_conv_rule *dst, 897 const size_t size, 898 const struct rte_flow_conv_rule *src, 899 struct rte_flow_error *error) 900 { 901 size_t off; 902 int ret; 903 904 rte_memcpy(dst, 905 (&(struct rte_flow_conv_rule){ 906 .attr = NULL, 907 .pattern = NULL, 908 .actions = NULL, 909 }), 910 size > sizeof(*dst) ? sizeof(*dst) : size); 911 off = sizeof(*dst); 912 if (src->attr_ro) { 913 off = RTE_ALIGN_CEIL(off, sizeof(double)); 914 if (size && size >= off + sizeof(*dst->attr)) 915 dst->attr = rte_memcpy 916 ((void *)((uintptr_t)dst + off), 917 src->attr_ro, sizeof(*dst->attr)); 918 off += sizeof(*dst->attr); 919 } 920 if (src->pattern_ro) { 921 off = RTE_ALIGN_CEIL(off, sizeof(double)); 922 ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off), 923 size > off ? size - off : 0, 924 src->pattern_ro, 0, error); 925 if (ret < 0) 926 return ret; 927 if (size && size >= off + (size_t)ret) 928 dst->pattern = (void *)((uintptr_t)dst + off); 929 off += ret; 930 } 931 if (src->actions_ro) { 932 off = RTE_ALIGN_CEIL(off, sizeof(double)); 933 ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off), 934 size > off ? size - off : 0, 935 src->actions_ro, 0, error); 936 if (ret < 0) 937 return ret; 938 if (size >= off + (size_t)ret) 939 dst->actions = (void *)((uintptr_t)dst + off); 940 off += ret; 941 } 942 return off; 943 } 944 945 /** 946 * Retrieve the name of a pattern item/action type. 947 * 948 * @param is_action 949 * Nonzero when @p src represents an action type instead of a pattern item 950 * type. 951 * @param is_ptr 952 * Nonzero to write string address instead of contents into @p dst. 953 * @param[out] dst 954 * Destination buffer. Can be NULL if @p size is zero. 955 * @param size 956 * Size of @p dst in bytes. 957 * @param[in] src 958 * Depending on @p is_action, source pattern item or action type cast as a 959 * pointer. 960 * @param[out] error 961 * Perform verbose error reporting if not NULL. 962 * 963 * @return 964 * A positive value representing the number of bytes needed to store the 965 * name or its address regardless of @p size on success (@p buf contents 966 * are truncated to @p size if not large enough), a negative errno value 967 * otherwise and rte_errno is set. 968 */ 969 static int 970 rte_flow_conv_name(int is_action, 971 int is_ptr, 972 char *dst, 973 const size_t size, 974 const void *src, 975 struct rte_flow_error *error) 976 { 977 struct desc_info { 978 const struct rte_flow_desc_data *data; 979 size_t num; 980 }; 981 static const struct desc_info info_rep[2] = { 982 { rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), }, 983 { rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), }, 984 }; 985 const struct desc_info *const info = &info_rep[!!is_action]; 986 unsigned int type = (uintptr_t)src; 987 988 if (type >= info->num) 989 return rte_flow_error_set 990 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 991 "unknown object type to retrieve the name of"); 992 if (!is_ptr) 993 return strlcpy(dst, info->data[type].name, size); 994 if (size >= sizeof(const char **)) 995 *((const char **)dst) = info->data[type].name; 996 return sizeof(const char **); 997 } 998 999 /** Helper function to convert flow API objects. */ 1000 int 1001 rte_flow_conv(enum rte_flow_conv_op op, 1002 void *dst, 1003 size_t size, 1004 const void *src, 1005 struct rte_flow_error *error) 1006 { 1007 switch (op) { 1008 const struct rte_flow_attr *attr; 1009 1010 case RTE_FLOW_CONV_OP_NONE: 1011 return 0; 1012 case RTE_FLOW_CONV_OP_ATTR: 1013 attr = src; 1014 if (size > sizeof(*attr)) 1015 size = sizeof(*attr); 1016 rte_memcpy(dst, attr, size); 1017 return sizeof(*attr); 1018 case RTE_FLOW_CONV_OP_ITEM: 1019 return rte_flow_conv_pattern(dst, size, src, 1, error); 1020 case RTE_FLOW_CONV_OP_ACTION: 1021 return rte_flow_conv_actions(dst, size, src, 1, error); 1022 case RTE_FLOW_CONV_OP_PATTERN: 1023 return rte_flow_conv_pattern(dst, size, src, 0, error); 1024 case RTE_FLOW_CONV_OP_ACTIONS: 1025 return rte_flow_conv_actions(dst, size, src, 0, error); 1026 case RTE_FLOW_CONV_OP_RULE: 1027 return rte_flow_conv_rule(dst, size, src, error); 1028 case RTE_FLOW_CONV_OP_ITEM_NAME: 1029 return rte_flow_conv_name(0, 0, dst, size, src, error); 1030 case RTE_FLOW_CONV_OP_ACTION_NAME: 1031 return rte_flow_conv_name(1, 0, dst, size, src, error); 1032 case RTE_FLOW_CONV_OP_ITEM_NAME_PTR: 1033 return rte_flow_conv_name(0, 1, dst, size, src, error); 1034 case RTE_FLOW_CONV_OP_ACTION_NAME_PTR: 1035 return rte_flow_conv_name(1, 1, dst, size, src, error); 1036 } 1037 return rte_flow_error_set 1038 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1039 "unknown object conversion operation"); 1040 } 1041 1042 /** Store a full rte_flow description. */ 1043 size_t 1044 rte_flow_copy(struct rte_flow_desc *desc, size_t len, 1045 const struct rte_flow_attr *attr, 1046 const struct rte_flow_item *items, 1047 const struct rte_flow_action *actions) 1048 { 1049 /* 1050 * Overlap struct rte_flow_conv with struct rte_flow_desc in order 1051 * to convert the former to the latter without wasting space. 1052 */ 1053 struct rte_flow_conv_rule *dst = 1054 len ? 1055 (void *)((uintptr_t)desc + 1056 (offsetof(struct rte_flow_desc, actions) - 1057 offsetof(struct rte_flow_conv_rule, actions))) : 1058 NULL; 1059 size_t dst_size = 1060 len > sizeof(*desc) - sizeof(*dst) ? 1061 len - (sizeof(*desc) - sizeof(*dst)) : 1062 0; 1063 struct rte_flow_conv_rule src = { 1064 .attr_ro = NULL, 1065 .pattern_ro = items, 1066 .actions_ro = actions, 1067 }; 1068 int ret; 1069 1070 RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) < 1071 sizeof(struct rte_flow_conv_rule)); 1072 if (dst_size && 1073 (&dst->pattern != &desc->items || 1074 &dst->actions != &desc->actions || 1075 (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) { 1076 rte_errno = EINVAL; 1077 return 0; 1078 } 1079 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL); 1080 if (ret < 0) 1081 return 0; 1082 ret += sizeof(*desc) - sizeof(*dst); 1083 rte_memcpy(desc, 1084 (&(struct rte_flow_desc){ 1085 .size = ret, 1086 .attr = *attr, 1087 .items = dst_size ? dst->pattern : NULL, 1088 .actions = dst_size ? dst->actions : NULL, 1089 }), 1090 len > sizeof(*desc) ? sizeof(*desc) : len); 1091 return ret; 1092 } 1093 1094 int 1095 rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow, 1096 FILE *file, struct rte_flow_error *error) 1097 { 1098 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1099 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1100 int ret; 1101 1102 if (unlikely(!ops)) 1103 return -rte_errno; 1104 if (likely(!!ops->dev_dump)) { 1105 fts_enter(dev); 1106 ret = ops->dev_dump(dev, flow, file, error); 1107 fts_exit(dev); 1108 return flow_err(port_id, ret, error); 1109 } 1110 return rte_flow_error_set(error, ENOSYS, 1111 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1112 NULL, rte_strerror(ENOSYS)); 1113 } 1114 1115 int 1116 rte_flow_get_aged_flows(uint16_t port_id, void **contexts, 1117 uint32_t nb_contexts, struct rte_flow_error *error) 1118 { 1119 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1120 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1121 int ret; 1122 1123 if (unlikely(!ops)) 1124 return -rte_errno; 1125 if (likely(!!ops->get_aged_flows)) { 1126 fts_enter(dev); 1127 ret = ops->get_aged_flows(dev, contexts, nb_contexts, error); 1128 fts_exit(dev); 1129 return flow_err(port_id, ret, error); 1130 } 1131 return rte_flow_error_set(error, ENOTSUP, 1132 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1133 NULL, rte_strerror(ENOTSUP)); 1134 } 1135 1136 struct rte_flow_action_handle * 1137 rte_flow_action_handle_create(uint16_t port_id, 1138 const struct rte_flow_indir_action_conf *conf, 1139 const struct rte_flow_action *action, 1140 struct rte_flow_error *error) 1141 { 1142 struct rte_flow_action_handle *handle; 1143 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1144 1145 if (unlikely(!ops)) 1146 return NULL; 1147 if (unlikely(!ops->action_handle_create)) { 1148 rte_flow_error_set(error, ENOSYS, 1149 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1150 rte_strerror(ENOSYS)); 1151 return NULL; 1152 } 1153 handle = ops->action_handle_create(&rte_eth_devices[port_id], 1154 conf, action, error); 1155 if (handle == NULL) 1156 flow_err(port_id, -rte_errno, error); 1157 return handle; 1158 } 1159 1160 int 1161 rte_flow_action_handle_destroy(uint16_t port_id, 1162 struct rte_flow_action_handle *handle, 1163 struct rte_flow_error *error) 1164 { 1165 int ret; 1166 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1167 1168 if (unlikely(!ops)) 1169 return -rte_errno; 1170 if (unlikely(!ops->action_handle_destroy)) 1171 return rte_flow_error_set(error, ENOSYS, 1172 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1173 NULL, rte_strerror(ENOSYS)); 1174 ret = ops->action_handle_destroy(&rte_eth_devices[port_id], 1175 handle, error); 1176 return flow_err(port_id, ret, error); 1177 } 1178 1179 int 1180 rte_flow_action_handle_update(uint16_t port_id, 1181 struct rte_flow_action_handle *handle, 1182 const void *update, 1183 struct rte_flow_error *error) 1184 { 1185 int ret; 1186 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1187 1188 if (unlikely(!ops)) 1189 return -rte_errno; 1190 if (unlikely(!ops->action_handle_update)) 1191 return rte_flow_error_set(error, ENOSYS, 1192 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1193 NULL, rte_strerror(ENOSYS)); 1194 ret = ops->action_handle_update(&rte_eth_devices[port_id], handle, 1195 update, error); 1196 return flow_err(port_id, ret, error); 1197 } 1198 1199 int 1200 rte_flow_action_handle_query(uint16_t port_id, 1201 const struct rte_flow_action_handle *handle, 1202 void *data, 1203 struct rte_flow_error *error) 1204 { 1205 int ret; 1206 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1207 1208 if (unlikely(!ops)) 1209 return -rte_errno; 1210 if (unlikely(!ops->action_handle_query)) 1211 return rte_flow_error_set(error, ENOSYS, 1212 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1213 NULL, rte_strerror(ENOSYS)); 1214 ret = ops->action_handle_query(&rte_eth_devices[port_id], handle, 1215 data, error); 1216 return flow_err(port_id, ret, error); 1217 } 1218 1219 int 1220 rte_flow_tunnel_decap_set(uint16_t port_id, 1221 struct rte_flow_tunnel *tunnel, 1222 struct rte_flow_action **actions, 1223 uint32_t *num_of_actions, 1224 struct rte_flow_error *error) 1225 { 1226 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1227 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1228 1229 if (unlikely(!ops)) 1230 return -rte_errno; 1231 if (likely(!!ops->tunnel_decap_set)) { 1232 return flow_err(port_id, 1233 ops->tunnel_decap_set(dev, tunnel, actions, 1234 num_of_actions, error), 1235 error); 1236 } 1237 return rte_flow_error_set(error, ENOTSUP, 1238 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1239 NULL, rte_strerror(ENOTSUP)); 1240 } 1241 1242 int 1243 rte_flow_tunnel_match(uint16_t port_id, 1244 struct rte_flow_tunnel *tunnel, 1245 struct rte_flow_item **items, 1246 uint32_t *num_of_items, 1247 struct rte_flow_error *error) 1248 { 1249 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1250 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1251 1252 if (unlikely(!ops)) 1253 return -rte_errno; 1254 if (likely(!!ops->tunnel_match)) { 1255 return flow_err(port_id, 1256 ops->tunnel_match(dev, tunnel, items, 1257 num_of_items, error), 1258 error); 1259 } 1260 return rte_flow_error_set(error, ENOTSUP, 1261 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1262 NULL, rte_strerror(ENOTSUP)); 1263 } 1264 1265 int 1266 rte_flow_get_restore_info(uint16_t port_id, 1267 struct rte_mbuf *m, 1268 struct rte_flow_restore_info *restore_info, 1269 struct rte_flow_error *error) 1270 { 1271 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1272 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1273 1274 if (unlikely(!ops)) 1275 return -rte_errno; 1276 if (likely(!!ops->get_restore_info)) { 1277 return flow_err(port_id, 1278 ops->get_restore_info(dev, m, restore_info, 1279 error), 1280 error); 1281 } 1282 return rte_flow_error_set(error, ENOTSUP, 1283 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1284 NULL, rte_strerror(ENOTSUP)); 1285 } 1286 1287 int 1288 rte_flow_tunnel_action_decap_release(uint16_t port_id, 1289 struct rte_flow_action *actions, 1290 uint32_t num_of_actions, 1291 struct rte_flow_error *error) 1292 { 1293 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1294 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1295 1296 if (unlikely(!ops)) 1297 return -rte_errno; 1298 if (likely(!!ops->tunnel_action_decap_release)) { 1299 return flow_err(port_id, 1300 ops->tunnel_action_decap_release(dev, actions, 1301 num_of_actions, 1302 error), 1303 error); 1304 } 1305 return rte_flow_error_set(error, ENOTSUP, 1306 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1307 NULL, rte_strerror(ENOTSUP)); 1308 } 1309 1310 int 1311 rte_flow_tunnel_item_release(uint16_t port_id, 1312 struct rte_flow_item *items, 1313 uint32_t num_of_items, 1314 struct rte_flow_error *error) 1315 { 1316 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1317 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1318 1319 if (unlikely(!ops)) 1320 return -rte_errno; 1321 if (likely(!!ops->tunnel_item_release)) { 1322 return flow_err(port_id, 1323 ops->tunnel_item_release(dev, items, 1324 num_of_items, error), 1325 error); 1326 } 1327 return rte_flow_error_set(error, ENOTSUP, 1328 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1329 NULL, rte_strerror(ENOTSUP)); 1330 } 1331 1332 int 1333 rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id, 1334 struct rte_flow_error *error) 1335 { 1336 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1337 struct rte_eth_dev *dev; 1338 1339 if (unlikely(ops == NULL)) 1340 return -rte_errno; 1341 1342 if (ops->pick_transfer_proxy == NULL) { 1343 *proxy_port_id = port_id; 1344 return 0; 1345 } 1346 1347 dev = &rte_eth_devices[port_id]; 1348 1349 return flow_err(port_id, 1350 ops->pick_transfer_proxy(dev, proxy_port_id, error), 1351 error); 1352 } 1353 1354 struct rte_flow_item_flex_handle * 1355 rte_flow_flex_item_create(uint16_t port_id, 1356 const struct rte_flow_item_flex_conf *conf, 1357 struct rte_flow_error *error) 1358 { 1359 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1360 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1361 struct rte_flow_item_flex_handle *handle; 1362 1363 if (unlikely(!ops)) 1364 return NULL; 1365 if (unlikely(!ops->flex_item_create)) { 1366 rte_flow_error_set(error, ENOTSUP, 1367 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1368 NULL, rte_strerror(ENOTSUP)); 1369 return NULL; 1370 } 1371 handle = ops->flex_item_create(dev, conf, error); 1372 if (handle == NULL) 1373 flow_err(port_id, -rte_errno, error); 1374 return handle; 1375 } 1376 1377 int 1378 rte_flow_flex_item_release(uint16_t port_id, 1379 const struct rte_flow_item_flex_handle *handle, 1380 struct rte_flow_error *error) 1381 { 1382 int ret; 1383 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1384 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1385 1386 if (unlikely(!ops || !ops->flex_item_release)) 1387 return rte_flow_error_set(error, ENOTSUP, 1388 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1389 NULL, rte_strerror(ENOTSUP)); 1390 ret = ops->flex_item_release(dev, handle, error); 1391 return flow_err(port_id, ret, error); 1392 } 1393 1394 int 1395 rte_flow_info_get(uint16_t port_id, 1396 struct rte_flow_port_info *port_info, 1397 struct rte_flow_queue_info *queue_info, 1398 struct rte_flow_error *error) 1399 { 1400 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1401 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1402 1403 if (unlikely(!ops)) 1404 return -rte_errno; 1405 if (dev->data->dev_configured == 0) { 1406 RTE_FLOW_LOG(INFO, 1407 "Device with port_id=%"PRIu16" is not configured.\n", 1408 port_id); 1409 return -EINVAL; 1410 } 1411 if (port_info == NULL) { 1412 RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id); 1413 return -EINVAL; 1414 } 1415 if (likely(!!ops->info_get)) { 1416 return flow_err(port_id, 1417 ops->info_get(dev, port_info, queue_info, error), 1418 error); 1419 } 1420 return rte_flow_error_set(error, ENOTSUP, 1421 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1422 NULL, rte_strerror(ENOTSUP)); 1423 } 1424 1425 int 1426 rte_flow_configure(uint16_t port_id, 1427 const struct rte_flow_port_attr *port_attr, 1428 uint16_t nb_queue, 1429 const struct rte_flow_queue_attr *queue_attr[], 1430 struct rte_flow_error *error) 1431 { 1432 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1433 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1434 int ret; 1435 1436 if (unlikely(!ops)) 1437 return -rte_errno; 1438 if (dev->data->dev_configured == 0) { 1439 RTE_FLOW_LOG(INFO, 1440 "Device with port_id=%"PRIu16" is not configured.\n", 1441 port_id); 1442 return -EINVAL; 1443 } 1444 if (dev->data->dev_started != 0) { 1445 RTE_FLOW_LOG(INFO, 1446 "Device with port_id=%"PRIu16" already started.\n", 1447 port_id); 1448 return -EINVAL; 1449 } 1450 if (port_attr == NULL) { 1451 RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id); 1452 return -EINVAL; 1453 } 1454 if (queue_attr == NULL) { 1455 RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id); 1456 return -EINVAL; 1457 } 1458 if (likely(!!ops->configure)) { 1459 ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error); 1460 if (ret == 0) 1461 dev->data->flow_configured = 1; 1462 return flow_err(port_id, ret, error); 1463 } 1464 return rte_flow_error_set(error, ENOTSUP, 1465 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1466 NULL, rte_strerror(ENOTSUP)); 1467 } 1468 1469 struct rte_flow_pattern_template * 1470 rte_flow_pattern_template_create(uint16_t port_id, 1471 const struct rte_flow_pattern_template_attr *template_attr, 1472 const struct rte_flow_item pattern[], 1473 struct rte_flow_error *error) 1474 { 1475 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1476 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1477 struct rte_flow_pattern_template *template; 1478 1479 if (unlikely(!ops)) 1480 return NULL; 1481 if (dev->data->flow_configured == 0) { 1482 RTE_FLOW_LOG(INFO, 1483 "Flow engine on port_id=%"PRIu16" is not configured.\n", 1484 port_id); 1485 rte_flow_error_set(error, EINVAL, 1486 RTE_FLOW_ERROR_TYPE_STATE, 1487 NULL, rte_strerror(EINVAL)); 1488 return NULL; 1489 } 1490 if (template_attr == NULL) { 1491 RTE_FLOW_LOG(ERR, 1492 "Port %"PRIu16" template attr is NULL.\n", 1493 port_id); 1494 rte_flow_error_set(error, EINVAL, 1495 RTE_FLOW_ERROR_TYPE_ATTR, 1496 NULL, rte_strerror(EINVAL)); 1497 return NULL; 1498 } 1499 if (pattern == NULL) { 1500 RTE_FLOW_LOG(ERR, 1501 "Port %"PRIu16" pattern is NULL.\n", 1502 port_id); 1503 rte_flow_error_set(error, EINVAL, 1504 RTE_FLOW_ERROR_TYPE_ATTR, 1505 NULL, rte_strerror(EINVAL)); 1506 return NULL; 1507 } 1508 if (likely(!!ops->pattern_template_create)) { 1509 template = ops->pattern_template_create(dev, template_attr, 1510 pattern, error); 1511 if (template == NULL) 1512 flow_err(port_id, -rte_errno, error); 1513 return template; 1514 } 1515 rte_flow_error_set(error, ENOTSUP, 1516 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1517 NULL, rte_strerror(ENOTSUP)); 1518 return NULL; 1519 } 1520 1521 int 1522 rte_flow_pattern_template_destroy(uint16_t port_id, 1523 struct rte_flow_pattern_template *pattern_template, 1524 struct rte_flow_error *error) 1525 { 1526 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1527 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1528 1529 if (unlikely(!ops)) 1530 return -rte_errno; 1531 if (unlikely(pattern_template == NULL)) 1532 return 0; 1533 if (likely(!!ops->pattern_template_destroy)) { 1534 return flow_err(port_id, 1535 ops->pattern_template_destroy(dev, 1536 pattern_template, 1537 error), 1538 error); 1539 } 1540 return rte_flow_error_set(error, ENOTSUP, 1541 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1542 NULL, rte_strerror(ENOTSUP)); 1543 } 1544 1545 struct rte_flow_actions_template * 1546 rte_flow_actions_template_create(uint16_t port_id, 1547 const struct rte_flow_actions_template_attr *template_attr, 1548 const struct rte_flow_action actions[], 1549 const struct rte_flow_action masks[], 1550 struct rte_flow_error *error) 1551 { 1552 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1553 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1554 struct rte_flow_actions_template *template; 1555 1556 if (unlikely(!ops)) 1557 return NULL; 1558 if (dev->data->flow_configured == 0) { 1559 RTE_FLOW_LOG(INFO, 1560 "Flow engine on port_id=%"PRIu16" is not configured.\n", 1561 port_id); 1562 rte_flow_error_set(error, EINVAL, 1563 RTE_FLOW_ERROR_TYPE_STATE, 1564 NULL, rte_strerror(EINVAL)); 1565 return NULL; 1566 } 1567 if (template_attr == NULL) { 1568 RTE_FLOW_LOG(ERR, 1569 "Port %"PRIu16" template attr is NULL.\n", 1570 port_id); 1571 rte_flow_error_set(error, EINVAL, 1572 RTE_FLOW_ERROR_TYPE_ATTR, 1573 NULL, rte_strerror(EINVAL)); 1574 return NULL; 1575 } 1576 if (actions == NULL) { 1577 RTE_FLOW_LOG(ERR, 1578 "Port %"PRIu16" actions is NULL.\n", 1579 port_id); 1580 rte_flow_error_set(error, EINVAL, 1581 RTE_FLOW_ERROR_TYPE_ATTR, 1582 NULL, rte_strerror(EINVAL)); 1583 return NULL; 1584 } 1585 if (masks == NULL) { 1586 RTE_FLOW_LOG(ERR, 1587 "Port %"PRIu16" masks is NULL.\n", 1588 port_id); 1589 rte_flow_error_set(error, EINVAL, 1590 RTE_FLOW_ERROR_TYPE_ATTR, 1591 NULL, rte_strerror(EINVAL)); 1592 1593 } 1594 if (likely(!!ops->actions_template_create)) { 1595 template = ops->actions_template_create(dev, template_attr, 1596 actions, masks, error); 1597 if (template == NULL) 1598 flow_err(port_id, -rte_errno, error); 1599 return template; 1600 } 1601 rte_flow_error_set(error, ENOTSUP, 1602 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1603 NULL, rte_strerror(ENOTSUP)); 1604 return NULL; 1605 } 1606 1607 int 1608 rte_flow_actions_template_destroy(uint16_t port_id, 1609 struct rte_flow_actions_template *actions_template, 1610 struct rte_flow_error *error) 1611 { 1612 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1613 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1614 1615 if (unlikely(!ops)) 1616 return -rte_errno; 1617 if (unlikely(actions_template == NULL)) 1618 return 0; 1619 if (likely(!!ops->actions_template_destroy)) { 1620 return flow_err(port_id, 1621 ops->actions_template_destroy(dev, 1622 actions_template, 1623 error), 1624 error); 1625 } 1626 return rte_flow_error_set(error, ENOTSUP, 1627 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1628 NULL, rte_strerror(ENOTSUP)); 1629 } 1630 1631 struct rte_flow_template_table * 1632 rte_flow_template_table_create(uint16_t port_id, 1633 const struct rte_flow_template_table_attr *table_attr, 1634 struct rte_flow_pattern_template *pattern_templates[], 1635 uint8_t nb_pattern_templates, 1636 struct rte_flow_actions_template *actions_templates[], 1637 uint8_t nb_actions_templates, 1638 struct rte_flow_error *error) 1639 { 1640 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1641 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1642 struct rte_flow_template_table *table; 1643 1644 if (unlikely(!ops)) 1645 return NULL; 1646 if (dev->data->flow_configured == 0) { 1647 RTE_FLOW_LOG(INFO, 1648 "Flow engine on port_id=%"PRIu16" is not configured.\n", 1649 port_id); 1650 rte_flow_error_set(error, EINVAL, 1651 RTE_FLOW_ERROR_TYPE_STATE, 1652 NULL, rte_strerror(EINVAL)); 1653 return NULL; 1654 } 1655 if (table_attr == NULL) { 1656 RTE_FLOW_LOG(ERR, 1657 "Port %"PRIu16" table attr is NULL.\n", 1658 port_id); 1659 rte_flow_error_set(error, EINVAL, 1660 RTE_FLOW_ERROR_TYPE_ATTR, 1661 NULL, rte_strerror(EINVAL)); 1662 return NULL; 1663 } 1664 if (pattern_templates == NULL) { 1665 RTE_FLOW_LOG(ERR, 1666 "Port %"PRIu16" pattern templates is NULL.\n", 1667 port_id); 1668 rte_flow_error_set(error, EINVAL, 1669 RTE_FLOW_ERROR_TYPE_ATTR, 1670 NULL, rte_strerror(EINVAL)); 1671 return NULL; 1672 } 1673 if (actions_templates == NULL) { 1674 RTE_FLOW_LOG(ERR, 1675 "Port %"PRIu16" actions templates is NULL.\n", 1676 port_id); 1677 rte_flow_error_set(error, EINVAL, 1678 RTE_FLOW_ERROR_TYPE_ATTR, 1679 NULL, rte_strerror(EINVAL)); 1680 return NULL; 1681 } 1682 if (likely(!!ops->template_table_create)) { 1683 table = ops->template_table_create(dev, table_attr, 1684 pattern_templates, nb_pattern_templates, 1685 actions_templates, nb_actions_templates, 1686 error); 1687 if (table == NULL) 1688 flow_err(port_id, -rte_errno, error); 1689 return table; 1690 } 1691 rte_flow_error_set(error, ENOTSUP, 1692 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1693 NULL, rte_strerror(ENOTSUP)); 1694 return NULL; 1695 } 1696 1697 int 1698 rte_flow_template_table_destroy(uint16_t port_id, 1699 struct rte_flow_template_table *template_table, 1700 struct rte_flow_error *error) 1701 { 1702 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1703 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1704 1705 if (unlikely(!ops)) 1706 return -rte_errno; 1707 if (unlikely(template_table == NULL)) 1708 return 0; 1709 if (likely(!!ops->template_table_destroy)) { 1710 return flow_err(port_id, 1711 ops->template_table_destroy(dev, 1712 template_table, 1713 error), 1714 error); 1715 } 1716 return rte_flow_error_set(error, ENOTSUP, 1717 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1718 NULL, rte_strerror(ENOTSUP)); 1719 } 1720 1721 struct rte_flow * 1722 rte_flow_async_create(uint16_t port_id, 1723 uint32_t queue_id, 1724 const struct rte_flow_op_attr *op_attr, 1725 struct rte_flow_template_table *template_table, 1726 const struct rte_flow_item pattern[], 1727 uint8_t pattern_template_index, 1728 const struct rte_flow_action actions[], 1729 uint8_t actions_template_index, 1730 void *user_data, 1731 struct rte_flow_error *error) 1732 { 1733 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1734 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1735 struct rte_flow *flow; 1736 1737 flow = ops->async_create(dev, queue_id, 1738 op_attr, template_table, 1739 pattern, pattern_template_index, 1740 actions, actions_template_index, 1741 user_data, error); 1742 if (flow == NULL) 1743 flow_err(port_id, -rte_errno, error); 1744 return flow; 1745 } 1746 1747 int 1748 rte_flow_async_destroy(uint16_t port_id, 1749 uint32_t queue_id, 1750 const struct rte_flow_op_attr *op_attr, 1751 struct rte_flow *flow, 1752 void *user_data, 1753 struct rte_flow_error *error) 1754 { 1755 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1756 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1757 1758 return flow_err(port_id, 1759 ops->async_destroy(dev, queue_id, 1760 op_attr, flow, 1761 user_data, error), 1762 error); 1763 } 1764 1765 int 1766 rte_flow_push(uint16_t port_id, 1767 uint32_t queue_id, 1768 struct rte_flow_error *error) 1769 { 1770 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1771 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1772 1773 return flow_err(port_id, 1774 ops->push(dev, queue_id, error), 1775 error); 1776 } 1777 1778 int 1779 rte_flow_pull(uint16_t port_id, 1780 uint32_t queue_id, 1781 struct rte_flow_op_result res[], 1782 uint16_t n_res, 1783 struct rte_flow_error *error) 1784 { 1785 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1786 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1787 int ret; 1788 1789 ret = ops->pull(dev, queue_id, res, n_res, error); 1790 return ret ? ret : flow_err(port_id, ret, error); 1791 } 1792 1793 struct rte_flow_action_handle * 1794 rte_flow_async_action_handle_create(uint16_t port_id, 1795 uint32_t queue_id, 1796 const struct rte_flow_op_attr *op_attr, 1797 const struct rte_flow_indir_action_conf *indir_action_conf, 1798 const struct rte_flow_action *action, 1799 void *user_data, 1800 struct rte_flow_error *error) 1801 { 1802 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1803 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1804 struct rte_flow_action_handle *handle; 1805 1806 handle = ops->async_action_handle_create(dev, queue_id, op_attr, 1807 indir_action_conf, action, user_data, error); 1808 if (handle == NULL) 1809 flow_err(port_id, -rte_errno, error); 1810 return handle; 1811 } 1812 1813 int 1814 rte_flow_async_action_handle_destroy(uint16_t port_id, 1815 uint32_t queue_id, 1816 const struct rte_flow_op_attr *op_attr, 1817 struct rte_flow_action_handle *action_handle, 1818 void *user_data, 1819 struct rte_flow_error *error) 1820 { 1821 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1822 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1823 int ret; 1824 1825 ret = ops->async_action_handle_destroy(dev, queue_id, op_attr, 1826 action_handle, user_data, error); 1827 return flow_err(port_id, ret, error); 1828 } 1829 1830 int 1831 rte_flow_async_action_handle_update(uint16_t port_id, 1832 uint32_t queue_id, 1833 const struct rte_flow_op_attr *op_attr, 1834 struct rte_flow_action_handle *action_handle, 1835 const void *update, 1836 void *user_data, 1837 struct rte_flow_error *error) 1838 { 1839 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1840 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); 1841 int ret; 1842 1843 ret = ops->async_action_handle_update(dev, queue_id, op_attr, 1844 action_handle, update, user_data, error); 1845 return flow_err(port_id, ret, error); 1846 } 1847