/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element description (item or action) into a user buffer.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc param or negative value for private flow elements.
 *
 * @return
 *   Number of bytes needed to store the element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/**
	 * Allow PMD private flow item.
	 */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;

	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}
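
/*
 * Illustrative sketch (not compiled in): for a FLEX item the conversion
 * helpers above report the fixed structure size plus the variable pattern
 * length, and place the pattern bytes right behind the structure. A NULL
 * destination can be used as a pure size query:
 *
 *	struct rte_flow_item_flex src = { .length = 8, .pattern = data8 };
 *	size_t need = sizeof(src) + rte_flow_item_flex_conv(NULL, &src);
 *	struct rte_flow_item_flex *dst = malloc(need);
 *
 *	memcpy(dst, &src, sizeof(src));      // fixed part
 *	rte_flow_item_flex_conv(dst, &src);  // trailing pattern bytes
 */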

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL,\
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = {\
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL,\
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn,\
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);
	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
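
/*
 * Illustrative usage sketch for rte_flow_dynf_metadata_register() above
 * (error handling trimmed): an application registers the metadata dynamic
 * field before configuring the port, then reads it from received mbufs
 * through the RTE_FLOW_DYNF_METADATA() accessor declared in rte_flow.h:
 *
 *	uint32_t meta;
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata field");
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *		meta = *RTE_FLOW_DYNF_METADATA(m);
 */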

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
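
/*
 * Typical call sequence (illustrative, error handling trimmed): a rule is
 * validated first, then created with the same attr/pattern/actions arrays,
 * and eventually destroyed:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	...
 *	if (flow != NULL)
 *		rte_flow_destroy(port_id, flow, &err);
 */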

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
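
/*
 * Driver callbacks typically report failures through this helper, e.g.
 * (hypothetical PMD code):
 *
 *	if (attr->egress)
 *		return rte_flow_error_set(error, ENOTSUP,
 *					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 *					  NULL, "egress is not supported");
 */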

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * Allow PMD private flow item.
		 */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * Allow PMD private flow action.
		 */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		return rte_flow_conv_name(0, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		return rte_flow_conv_name(1, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		return rte_flow_conv_name(0, 1, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		return rte_flow_conv_name(1, 1, dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}
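
/*
 * All conversion operations share the same contract: a first call with a
 * NULL/zero destination returns the number of bytes required, a second
 * call fills the buffer. Illustrative sketch for duplicating a pattern
 * (error handling trimmed):
 *
 *	struct rte_flow_error err;
 *	int len;
 *
 *	len = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, pattern, &err);
 *	if (len > 0) {
 *		struct rte_flow_item *dup = malloc(len);
 *
 *		rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, dup, len,
 *			      pattern, &err);
 *	}
 */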

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
	 * to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
		  FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
			uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);
	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	return flow_err(port_id, ret, error);
}
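
/*
 * Illustrative indirect action lifecycle (error handling trimmed): a shared
 * counter is created once, referenced from flow rules through
 * RTE_FLOW_ACTION_TYPE_INDIRECT, and destroyed after the rules that use it:
 *
 *	struct rte_flow_error err;
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &conf, &count, &err);
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	...
 *	rte_flow_action_handle_destroy(port_id, h, &err);
 */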

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		return flow_err(port_id,
				ops->tunnel_decap_set(dev, tunnel, actions,
						      num_of_actions, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		return flow_err(port_id,
				ops->tunnel_match(dev, tunnel, items,
						  num_of_items, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		return flow_err(port_id,
				ops->get_restore_info(dev, m, restore_info,
						      error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		return flow_err(port_id,
				ops->tunnel_action_decap_release(dev, actions,
								 num_of_actions,
								 error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
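
/*
 * Illustrative Rx-path sketch for tunnel offload (handle_tunnel_packet() is
 * a hypothetical application helper, error handling trimmed): after a
 * partially offloaded packet is received, the application asks the PMD
 * whether it carries tunnel restore information:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_restore_info info;
 *
 *	if (rte_flow_get_restore_info(port_id, m, &info, &err) == 0 &&
 *	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL))
 *		handle_tunnel_packet(m, &info.tunnel);
 */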

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		return flow_err(port_id,
				ops->tunnel_item_release(dev, items,
							 num_of_items, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	return flow_err(port_id,
			ops->pick_transfer_proxy(dev, proxy_port_id, error),
			error);
}

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);
	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			     "Device with port_id=%"PRIu16" is not configured.\n",
			     port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		return flow_err(port_id,
				ops->info_get(dev, port_info, queue_info, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
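
/*
 * Illustrative pre-start sequence (error handling trimmed, attribute values
 * are arbitrary): query the flow engine limits, then pre-allocate flow
 * resources with rte_flow_configure() below, before rte_eth_dev_start():
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_port_info pinfo;
 *	struct rte_flow_queue_info qinfo;
 *	const struct rte_flow_port_attr pattr = { .nb_counters = 1024 };
 *	const struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *
 *	rte_flow_info_get(port_id, &pinfo, &qinfo, &err);
 *	rte_flow_configure(port_id, &pattr, 1, qattrs, &err);
 */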

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			     "Device with port_id=%"PRIu16" is not configured.\n",
			     port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			     "Device with port_id=%"PRIu16" already started.\n",
			     port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			     "Flow engine on port_id=%"PRIu16" is not configured.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);
		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		return flow_err(port_id,
				ops->pattern_template_destroy(dev,
							      pattern_template,
							      error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
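
/*
 * Illustrative template creation (error handling trimmed): in the template
 * API the pattern items carry masks only; the actual values ("spec") are
 * provided per rule at enqueue time:
 *
 *	struct rte_flow_error err;
 *	const struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .mask = &(struct rte_flow_item_ipv4){
 *			.hdr.dst_addr = RTE_BE32(0xffffffff),
 *		  } },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(port_id, &attr, pattern, &err);
 */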

struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
		const struct rte_flow_actions_template_attr *template_attr,
		const struct rte_flow_action actions[],
		const struct rte_flow_action masks[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			     "Flow engine on port_id=%"PRIu16" is not configured.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (masks == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" masks is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->actions_template_create)) {
		template = ops->actions_template_create(dev, template_attr,
							actions, masks, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);
		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_actions_template_destroy(uint16_t port_id,
		struct rte_flow_actions_template *actions_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		return flow_err(port_id,
				ops->actions_template_destroy(dev,
							      actions_template,
							      error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
		const struct rte_flow_template_table_attr *table_attr,
		struct rte_flow_pattern_template *pattern_templates[],
		uint8_t nb_pattern_templates,
		struct rte_flow_actions_template *actions_templates[],
		uint8_t nb_actions_templates,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			     "Flow engine on port_id=%"PRIu16" is not configured.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" table attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
					pattern_templates, nb_pattern_templates,
					actions_templates, nb_actions_templates,
					error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);
		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_template_table_destroy(uint16_t port_id,
		struct rte_flow_template_table *template_table,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		return flow_err(port_id,
				ops->template_table_destroy(dev,
							    template_table,
							    error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create(dev, queue_id,
				 op_attr, template_table,
				 pattern, pattern_template_index,
				 actions, actions_template_index,
				 user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);
	return flow;
}

int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	return flow_err(port_id,
			ops->async_destroy(dev, queue_id,
					   op_attr, flow,
					   user_data, error),
			error);
}

int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	return flow_err(port_id,
			ops->push(dev, queue_id, error),
			error);
}

int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->pull(dev, queue_id, res, n_res, error);
	return ret ? ret : flow_err(port_id, ret, error);
}
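
/*
 * Illustrative asynchronous insertion (hypothetical table, pattern and
 * actions objects, error handling trimmed): rules are enqueued on a flow
 * queue, pushed to the hardware, and their completions polled back with
 * rte_flow_pull() above:
 *
 *	struct rte_flow_error err;
 *	const struct rte_flow_op_attr op = { .postpone = 1 };
 *	struct rte_flow_op_result res[32];
 *	struct rte_flow *flow;
 *	int n;
 *
 *	flow = rte_flow_async_create(port_id, queue_id, &op, table,
 *				     pattern, 0, actions, 0, NULL, &err);
 *	rte_flow_push(port_id, queue_id, &err);
 *	n = rte_flow_pull(port_id, queue_id, res, 32, &err);
 */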

struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_action_handle *handle;

	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
					indir_action_conf, action, user_data, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);
	return handle;
}

int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
					       action_handle, user_data, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
					      action_handle, update, user_data, error);
	return flow_err(port_id, ret, error);
}