/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

#define WIRE_PORT 0xFFFF

#define MLX5DR_ACTION_METER_INIT_COLOR_OFFSET 1
/* Header removal size limited to 128B (64 words) */
#define MLX5DR_ACTION_REMOVE_HEADER_MAX_SIZE 128

/* This is the maximum allowed action order for each table type:
 * TX: POP_VLAN, CTR, ASO_METER, ASO_CT, PUSH_VLAN, MODIFY, ENCAP, Term
 * RX: TAG, DECAP, POP_VLAN, CTR, ASO_METER, ASO_CT, PUSH_VLAN, MODIFY,
 *     ENCAP, Term
 * FDB: DECAP, POP_VLAN, CTR, ASO_METER, ASO_CT, PUSH_VLAN, MODIFY,
 *      ENCAP, Term
 */
static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_MAX] = {
	[MLX5DR_TABLE_TYPE_NIC_RX] = {
		BIT(MLX5DR_ACTION_TYP_TAG),
		BIT(MLX5DR_ACTION_TYP_REMOVE_HEADER) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) |
		BIT(MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_CTR),
		BIT(MLX5DR_ACTION_TYP_ASO_METER),
		BIT(MLX5DR_ACTION_TYP_ASO_CT),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_NAT64),
		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
		BIT(MLX5DR_ACTION_TYP_TBL) |
		BIT(MLX5DR_ACTION_TYP_MISS) |
		BIT(MLX5DR_ACTION_TYP_TIR) |
		BIT(MLX5DR_ACTION_TYP_DROP) |
		BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
		BIT(MLX5DR_ACTION_TYP_DEST_ARRAY) |
		BIT(MLX5DR_ACTION_TYP_JUMP_TO_MATCHER),
		BIT(MLX5DR_ACTION_TYP_LAST),
	},
	[MLX5DR_TABLE_TYPE_NIC_TX] = {
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_CTR),
		BIT(MLX5DR_ACTION_TYP_ASO_METER),
		BIT(MLX5DR_ACTION_TYP_ASO_CT),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_NAT64),
		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
		BIT(MLX5DR_ACTION_TYP_TBL) |
		BIT(MLX5DR_ACTION_TYP_MISS) |
		BIT(MLX5DR_ACTION_TYP_DROP) |
		BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
		BIT(MLX5DR_ACTION_TYP_JUMP_TO_MATCHER),
		BIT(MLX5DR_ACTION_TYP_LAST),
	},
	[MLX5DR_TABLE_TYPE_FDB] = {
		BIT(MLX5DR_ACTION_TYP_REMOVE_HEADER) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) |
		BIT(MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_CTR),
		BIT(MLX5DR_ACTION_TYP_ASO_METER),
		BIT(MLX5DR_ACTION_TYP_ASO_CT),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_NAT64),
		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
		BIT(MLX5DR_ACTION_TYP_TBL) |
		BIT(MLX5DR_ACTION_TYP_MISS) |
		BIT(MLX5DR_ACTION_TYP_VPORT) |
		BIT(MLX5DR_ACTION_TYP_DROP) |
		BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
		BIT(MLX5DR_ACTION_TYP_DEST_ARRAY) |
		BIT(MLX5DR_ACTION_TYP_JUMP_TO_MATCHER),
		BIT(MLX5DR_ACTION_TYP_LAST),
	},
};
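
/* Each entry in a row above is a bitmask of the action types accepted at that
 * position in a rule, and every row is terminated by BIT(MLX5DR_ACTION_TYP_LAST).
 * mlx5dr_action_check_combo() walks a user supplied action sequence against
 * the row of the requested table type to validate its ordering.
 */
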
static int mlx5dr_action_get_shared_stc_nic(struct mlx5dr_context *ctx,
					    enum mlx5dr_context_shared_stc_type stc_type,
					    uint8_t tbl_type)
{
	struct mlx5dr_cmd_stc_modify_attr stc_attr = {0};
	struct mlx5dr_action_shared_stc *shared_stc;
	int ret;

	pthread_spin_lock(&ctx->ctrl_lock);
	if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
		ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
		pthread_spin_unlock(&ctx->ctrl_lock);
		return 0;
	}

	shared_stc = simple_calloc(1, sizeof(*shared_stc));
	if (!shared_stc) {
		DR_LOG(ERR, "Failed to allocate memory for shared STCs");
		rte_errno = ENOMEM;
		goto unlock_and_out;
	}
	switch (stc_type) {
	case MLX5DR_CONTEXT_SHARED_STC_DECAP_L3:
		stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
		stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5;
		stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
		stc_attr.remove_header.decap = 0;
		stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
		stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
		break;
	case MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP:
		stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
		stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5;
		stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
		stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
		stc_attr.remove_words.num_of_words = MLX5DR_ACTION_HDR_LEN_L2_VLAN;
		break;
	default:
		DR_LOG(ERR, "No such shared STC type: %d", stc_type);
		assert(false);
		rte_errno = EINVAL;
		goto unlock_and_out;
	}

	ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
					     &shared_stc->remove_header);
	if (ret) {
		DR_LOG(ERR, "Failed to allocate shared STC (type: %d)", stc_type);
		goto free_shared_stc;
	}

	ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
	ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;

	pthread_spin_unlock(&ctx->ctrl_lock);

	return 0;

free_shared_stc:
	simple_free(shared_stc);
unlock_and_out:
	pthread_spin_unlock(&ctx->ctrl_lock);
	return rte_errno;
}

static void mlx5dr_action_put_shared_stc_nic(struct mlx5dr_context *ctx,
					     enum mlx5dr_context_shared_stc_type stc_type,
					     uint8_t tbl_type)
{
	struct mlx5dr_action_shared_stc *shared_stc;

	pthread_spin_lock(&ctx->ctrl_lock);
	if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
		pthread_spin_unlock(&ctx->ctrl_lock);
		return;
	}

	shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];

	mlx5dr_action_free_single_stc(ctx, tbl_type, &shared_stc->remove_header);
	simple_free(shared_stc);
	ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
	pthread_spin_unlock(&ctx->ctrl_lock);
}

static int mlx5dr_action_get_shared_stc(struct mlx5dr_action *action,
					enum mlx5dr_context_shared_stc_type stc_type)
{
	struct mlx5dr_context *ctx = action->ctx;
	int ret;

	if (stc_type >= MLX5DR_CONTEXT_SHARED_STC_MAX) {
		assert(false);
		rte_errno = EINVAL;
		return rte_errno;
	}

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) {
		ret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate memory for RX shared STCs (type: %d)",
			       stc_type);
			return ret;
		}
	}

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) {
		ret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate memory for TX shared STCs (type: %d)",
			       stc_type);
			goto clean_nic_rx_stc;
		}
	}

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB) {
		ret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_FDB);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate memory for FDB shared STCs (type: %d)",
			       stc_type);
			goto clean_nic_tx_stc;
		}
	}

	return 0;

clean_nic_tx_stc:
	if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX)
		mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX);
clean_nic_rx_stc:
	if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX)
		mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX);

	return ret;
}

static void mlx5dr_action_put_shared_stc(struct mlx5dr_action *action,
					 enum mlx5dr_context_shared_stc_type stc_type)
{
	struct mlx5dr_context *ctx = action->ctx;

	if (stc_type >= MLX5DR_CONTEXT_SHARED_STC_MAX) {
		assert(false);
		return;
	}

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX)
		mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX);

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX)
		mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX);

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB)
		mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_FDB);
}

static void
mlx5dr_action_create_nat64_zero_all_addr(uint8_t **action_ptr, bool is_v4_to_v6)
{
	if (is_v4_to_v6) {
		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV4);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV4);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	} else {
		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV6_127_96);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV6_95_64);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV6_63_32);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV6_31_0);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV6_127_96);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV6_95_64);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV6_63_32);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV6_31_0);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}
}
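
/* Overview of the NAT64 modify-header helpers below:
 * copy_state saves the original addresses (optionally) and the IP length,
 * protocol and TOS/ECN fields into the caller supplied registers,
 * repalce_state swaps the L3 header for a template of the other IP version,
 * copy_proto_state writes the saved protocol back, and copy_back_state
 * restores the length, TTL, TOS and (optionally) the saved addresses.
 */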
static struct mlx5dr_action *
mlx5dr_action_create_nat64_copy_state(struct mlx5dr_context *ctx,
				      struct mlx5dr_action_nat64_attr *attr,
				      uint32_t flags)
{
	__be64 modify_action_data[MLX5DR_ACTION_NAT64_MAX_MODIFY_ACTIONS];
	struct mlx5dr_action_mh_pattern pat[2];
	struct mlx5dr_action *action;
	uint32_t packet_len_field;
	uint8_t *action_ptr;
	uint32_t tos_field;
	uint32_t tos_size;
	uint32_t src_addr;
	uint32_t dst_addr;
	bool is_v4_to_v6;
	uint32_t ecn;

	is_v4_to_v6 = attr->flags & MLX5DR_ACTION_NAT64_V4_TO_V6;

	if (is_v4_to_v6) {
		packet_len_field = MLX5_MODI_OUT_IPV4_TOTAL_LEN;
		tos_field = MLX5_MODI_OUT_IP_DSCP;
		tos_size = 6;
		ecn = MLX5_MODI_OUT_IP_ECN;
		src_addr = MLX5_MODI_OUT_SIPV4;
		dst_addr = MLX5_MODI_OUT_DIPV4;
	} else {
		packet_len_field = MLX5_MODI_OUT_IPV6_PAYLOAD_LEN;
		tos_field = MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS;
		tos_size = 8;
		ecn = 0;
		src_addr = MLX5_MODI_OUT_SIPV6_31_0;
		dst_addr = MLX5_MODI_OUT_DIPV6_31_0;
	}

	memset(modify_action_data, 0, sizeof(modify_action_data));
	action_ptr = (uint8_t *)modify_action_data;

	if (attr->flags & MLX5DR_ACTION_NAT64_BACKUP_ADDR) {
		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field, src_addr);
		MLX5_SET(copy_action_in, action_ptr, dst_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_SRC_IP]);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field, dst_addr);
		MLX5_SET(copy_action_in, action_ptr, dst_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_DST_IP]);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	/* The control register layout is:
	 * | 8 bit - 8 bit    - 16 bit     |
	 * | TOS   - protocol - packet-len |
	 */
	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field, packet_len_field);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_offset, 0); /* 16 bits in the LSB */
	MLX5_SET(copy_action_in, action_ptr, length, 16);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field, MLX5_MODI_OUT_IP_PROTOCOL);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_offset, 16);
	MLX5_SET(copy_action_in, action_ptr, length, 8);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field, tos_field);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_offset, 24);
	MLX5_SET(copy_action_in, action_ptr, length, tos_size);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	/* In IPv4, TOS = { dscp (6 bits), ecn (2 bits) } */
	if (ecn) {
		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field, ecn);
		MLX5_SET(copy_action_in, action_ptr, dst_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
		MLX5_SET(copy_action_in, action_ptr, dst_offset, 24 + tos_size);
		MLX5_SET(copy_action_in, action_ptr, length, MLX5DR_ACTION_NAT64_ECN_SIZE);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	/* Set SIP and DIP to 0 in order to get a correct new checksum */
	mlx5dr_action_create_nat64_zero_all_addr(&action_ptr, is_v4_to_v6);

	pat[0].data = modify_action_data;
	pat[0].sz = (action_ptr - (uint8_t *)modify_action_data);

	action = mlx5dr_action_create_modify_header(ctx, 1, pat, 0, flags);
	if (!action) {
		DR_LOG(ERR, "Failed to create copy for NAT64: action_sz: %zu, flags: 0x%x\n",
		       pat[0].sz, flags);
		return NULL;
	}

	return action;
}

static struct mlx5dr_action *
mlx5dr_action_create_nat64_repalce_state(struct mlx5dr_context *ctx,
					 struct mlx5dr_action_nat64_attr *attr,
					 uint32_t flags)
{
	uint32_t address_prefix[MLX5DR_ACTION_NAT64_HEADER_MINUS_ONE] = {0};
	__be64 modify_action_data[MLX5DR_ACTION_NAT64_MAX_MODIFY_ACTIONS];
	struct mlx5dr_action_mh_pattern pat[2];
	struct mlx5dr_action *action;
	uint8_t header_size_in_dw;
	uint8_t *action_ptr;
	uint32_t eth_type;
	bool is_v4_to_v6;
	uint32_t ip_ver;
	int i;

	is_v4_to_v6 = attr->flags & MLX5DR_ACTION_NAT64_V4_TO_V6;

	if (is_v4_to_v6) {
		uint32_t nat64_well_known_pref[] = {0x00010000,
						    0x9bff6400, 0x0, 0x0, 0x0,
						    0x9bff6400, 0x0, 0x0, 0x0};

		header_size_in_dw = MLX5DR_ACTION_NAT64_IPV6_HEADER;
		ip_ver = MLX5DR_ACTION_NAT64_IPV6_VER;
		eth_type = RTE_ETHER_TYPE_IPV6;
		memcpy(address_prefix, nat64_well_known_pref,
		       MLX5DR_ACTION_NAT64_HEADER_MINUS_ONE * sizeof(uint32_t));
	} else {
		/* In order to avoid the HW csum issue, make the prefix ready */
		uint32_t ipv4_pref[] = {0x0, 0xffba0000, 0x0, 0x0, 0x0};

		header_size_in_dw = MLX5DR_ACTION_NAT64_IPV4_HEADER;
		ip_ver = MLX5DR_ACTION_NAT64_IPV4_VER;
		eth_type = RTE_ETHER_TYPE_IPV4;
		memcpy(address_prefix, ipv4_pref,
		       MLX5DR_ACTION_NAT64_IPV4_HEADER * sizeof(uint32_t));
	}

	memset(modify_action_data, 0, sizeof(modify_action_data));
	action_ptr = (uint8_t *)modify_action_data;

	MLX5_SET(set_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
	MLX5_SET(set_action_in, action_ptr, field, MLX5_MODI_OUT_ETHERTYPE);
	MLX5_SET(set_action_in, action_ptr, length, 16);
	MLX5_SET(set_action_in, action_ptr, data, eth_type);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	/* Push an empty header with the correct IP version */
	MLX5_SET(stc_ste_param_insert, action_ptr, action_type,
		 MLX5_MODIFICATION_TYPE_INSERT);
	MLX5_SET(stc_ste_param_insert, action_ptr, inline_data, 0x1);
	MLX5_SET(stc_ste_param_insert, action_ptr, insert_anchor,
		 MLX5_HEADER_ANCHOR_IPV6_IPV4);
	MLX5_SET(stc_ste_param_insert, action_ptr, insert_size, 2);
	MLX5_SET(stc_ste_param_insert, action_ptr, insert_argument, ip_ver);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	for (i = 0; i < header_size_in_dw - 1; i++) {
		MLX5_SET(stc_ste_param_insert, action_ptr, action_type,
			 MLX5_MODIFICATION_TYPE_INSERT);
		MLX5_SET(stc_ste_param_insert, action_ptr, inline_data, 0x1);
		MLX5_SET(stc_ste_param_insert, action_ptr, insert_anchor,
			 MLX5_HEADER_ANCHOR_IPV6_IPV4);
		MLX5_SET(stc_ste_param_insert, action_ptr, insert_size, 2);
		MLX5_SET(stc_ste_param_insert, action_ptr, insert_argument,
			 htobe32(address_prefix[i]));
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	/* Remove orig src/dst addr (8 bytes, 4 words) */
	MLX5_SET(stc_ste_param_remove, action_ptr, action_type,
		 MLX5_MODIFICATION_TYPE_REMOVE);
	MLX5_SET(stc_ste_param_remove, action_ptr, remove_start_anchor,
		 MLX5_HEADER_ANCHOR_IPV6_IPV4);
	MLX5_SET(stc_ste_param_remove, action_ptr, remove_end_anchor,
		 MLX5_HEADER_ANCHOR_TCP_UDP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	pat[0].data = modify_action_data;
	pat[0].sz = action_ptr - (uint8_t *)modify_action_data;

	action = mlx5dr_action_create_modify_header(ctx, 1, pat, 0, flags);
	if (!action) {
		DR_LOG(ERR, "Failed to create action: action_sz: %zu flags: 0x%x\n",
		       pat[0].sz, flags);
		return NULL;
	}

	return action;
}

static struct mlx5dr_action *
mlx5dr_action_create_nat64_copy_proto_state(struct mlx5dr_context *ctx,
					    struct mlx5dr_action_nat64_attr *attr,
					    uint32_t flags)
{
	__be64 modify_action_data[MLX5DR_ACTION_NAT64_MAX_MODIFY_ACTIONS];
	struct mlx5dr_action_mh_pattern pat[2];
	struct mlx5dr_action *action;
	uint8_t *action_ptr;

	memset(modify_action_data, 0, sizeof(modify_action_data));
	action_ptr = (uint8_t *)modify_action_data;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 MLX5_MODI_OUT_IP_PROTOCOL);
	MLX5_SET(copy_action_in, action_ptr, src_offset, 16);
	MLX5_SET(copy_action_in, action_ptr, dst_offset, 0);
	MLX5_SET(copy_action_in, action_ptr, length, 8);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	pat[0].data = modify_action_data;
	pat[0].sz = action_ptr - (uint8_t *)modify_action_data;

	action = mlx5dr_action_create_modify_header_reparse(ctx, 1, pat, 0, flags,
							    MLX5DR_ACTION_STC_REPARSE_ON);
	if (!action) {
		DR_LOG(ERR, "Failed to create action: action_sz: %zu, flags: 0x%x\n",
		       pat[0].sz, flags);
		return NULL;
	}

	return action;
}

static struct mlx5dr_action *
mlx5dr_action_create_nat64_copy_back_state(struct mlx5dr_context *ctx,
					   struct mlx5dr_action_nat64_attr *attr,
					   uint32_t flags)
{
	__be64 modify_action_data[MLX5DR_ACTION_NAT64_MAX_MODIFY_ACTIONS];
	struct mlx5dr_action_mh_pattern pat[2];
	struct mlx5dr_action *action;
	uint32_t packet_len_field;
	uint32_t packet_len_add;
	uint8_t *action_ptr;
	uint32_t tos_field;
	uint32_t ttl_field;
	uint32_t tos_size;
	uint32_t src_addr;
	uint32_t dst_addr;
	bool is_v4_to_v6;
	uint32_t ecn;

	is_v4_to_v6 = attr->flags & MLX5DR_ACTION_NAT64_V4_TO_V6;

	if (is_v4_to_v6) {
		packet_len_field = MLX5_MODI_OUT_IPV6_PAYLOAD_LEN;
		/* 2's complement of 20, to get -20 in the add operation */
		packet_len_add = MLX5DR_ACTION_NAT64_DEC_20;
		ttl_field = MLX5_MODI_OUT_IPV6_HOPLIMIT;
		src_addr = MLX5_MODI_OUT_SIPV6_31_0;
		dst_addr = MLX5_MODI_OUT_DIPV6_31_0;
		tos_field = MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS;
		tos_size = 8;
		ecn = 0;
	} else {
		packet_len_field = MLX5_MODI_OUT_IPV4_TOTAL_LEN;
		/* The IPv4 length includes the 20B header, so add 20 over the IPv6 length */
		packet_len_add = MLX5DR_ACTION_NAT64_ADD_20;
		ttl_field = MLX5_MODI_OUT_IPV4_TTL;
		src_addr = MLX5_MODI_OUT_SIPV4;
		dst_addr = MLX5_MODI_OUT_DIPV4;
		tos_field = MLX5_MODI_OUT_IP_DSCP;
		tos_size = 6;
		ecn = MLX5_MODI_OUT_IP_ECN;
	}

	memset(modify_action_data, 0, sizeof(modify_action_data));
	action_ptr = (uint8_t *)modify_action_data;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 packet_len_field);
	MLX5_SET(copy_action_in, action_ptr, src_offset, 32);
	MLX5_SET(copy_action_in, action_ptr, length, 16);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(set_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
	MLX5_SET(set_action_in, action_ptr, field, ttl_field);
	MLX5_SET(set_action_in, action_ptr, length, 8);
	MLX5_SET(set_action_in, action_ptr, data, MLX5DR_ACTION_NAT64_TTL_DEFAULT_VAL);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	/* Copy TOS */
	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_field, tos_field);
	MLX5_SET(copy_action_in, action_ptr, src_offset, 24 + (ecn ?
		 MLX5DR_ACTION_NAT64_ECN_SIZE : 0));
	MLX5_SET(copy_action_in, action_ptr, length, tos_size);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	if (ecn) {
		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
		MLX5_SET(copy_action_in, action_ptr, dst_field, ecn);
		MLX5_SET(copy_action_in, action_ptr, src_offset, 24);
		MLX5_SET(copy_action_in, action_ptr, length, MLX5DR_ACTION_NAT64_ECN_SIZE);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	/* Copy the original addresses back, if required */
	if (attr->flags & MLX5DR_ACTION_NAT64_BACKUP_ADDR) {
		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_SRC_IP]);
		MLX5_SET(copy_action_in, action_ptr, dst_field, src_addr);
		MLX5_SET(copy_action_in, action_ptr, src_offset, 0);
		MLX5_SET(copy_action_in, action_ptr, length, 32);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_DST_IP]);
		MLX5_SET(copy_action_in, action_ptr, dst_field, dst_addr);
		MLX5_SET(copy_action_in, action_ptr, src_offset, 0);
		MLX5_SET(copy_action_in, action_ptr, length, 32);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	/* Add or subtract the 20B IPv4 header size to/from the total size */
	MLX5_SET(set_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_ADD);
	MLX5_SET(set_action_in, action_ptr, field, packet_len_field);
	MLX5_SET(set_action_in, action_ptr, data, packet_len_add);
	MLX5_SET(set_action_in, action_ptr, length, 16);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	pat[0].data = modify_action_data;
	pat[0].sz = action_ptr - (uint8_t *)modify_action_data;

	action = mlx5dr_action_create_modify_header(ctx, 1, pat, 0, flags);
	if (!action) {
		DR_LOG(ERR, "Failed to create action: action_sz: %zu, flags: 0x%x\n",
		       pat[0].sz, flags);
		return NULL;
	}

	return action;
}

static void mlx5dr_action_print_combo(enum mlx5dr_action_type *user_actions)
{
	DR_LOG(ERR, "Invalid action_type sequence");
	while (*user_actions != MLX5DR_ACTION_TYP_LAST) {
		DR_LOG(ERR, "%s", mlx5dr_debug_action_type_to_str(*user_actions));
		user_actions++;
	}
}

bool mlx5dr_action_check_combo(enum mlx5dr_action_type *user_actions,
			       enum mlx5dr_table_type table_type)
{
	const uint32_t *order_arr = action_order_arr[table_type];
	uint8_t order_idx = 0;
	uint8_t user_idx = 0;
	bool valid_combo;

	while (order_arr[order_idx] != BIT(MLX5DR_ACTION_TYP_LAST)) {
		/* User action order validated, move to the next user action */
		if (BIT(user_actions[user_idx]) & order_arr[order_idx])
			user_idx++;

		/* Iterate to the next supported action in the order */
		order_idx++;
	}

	/* The combination is valid if all user actions were processed */
	valid_combo = user_actions[user_idx] == MLX5DR_ACTION_TYP_LAST;
	if (!valid_combo)
		mlx5dr_action_print_combo(user_actions);

	return valid_combo;
}
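
/* Illustrative example (not used by the code): for MLX5DR_TABLE_TYPE_NIC_RX,
 * the sequence {CTR, MODIFY_HDR, TBL, LAST} satisfies the order table above,
 * while {MODIFY_HDR, CTR, TBL, LAST} does not, since CTR is only accepted
 * before MODIFY_HDR in the RX row.
 */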

int mlx5dr_action_root_build_attr(struct mlx5dr_rule_action rule_actions[],
				  uint32_t num_actions,
				  struct mlx5dv_flow_action_attr *attr)
{
	struct mlx5dr_action *action;
	uint32_t i;

	for (i = 0; i < num_actions; i++) {
		action = rule_actions[i].action;

		switch (action->type) {
		case MLX5DR_ACTION_TYP_TBL:
		case MLX5DR_ACTION_TYP_TIR:
			attr[i].type = MLX5DV_FLOW_ACTION_DEST_DEVX;
			attr[i].obj = action->devx_obj;
			break;
		case MLX5DR_ACTION_TYP_TAG:
			attr[i].type = MLX5DV_FLOW_ACTION_TAG;
			attr[i].tag_value = rule_actions[i].tag.value;
			break;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_DEFAULT_MISS
		case MLX5DR_ACTION_TYP_MISS:
			attr[i].type = MLX5DV_FLOW_ACTION_DEFAULT_MISS;
			break;
#endif
		case MLX5DR_ACTION_TYP_DROP:
			attr[i].type = MLX5DV_FLOW_ACTION_DROP;
			break;
		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
		case MLX5DR_ACTION_TYP_MODIFY_HDR:
			attr[i].type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
			attr[i].action = action->flow_action;
			break;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
		case MLX5DR_ACTION_TYP_CTR:
			attr[i].type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX;
			attr[i].obj = action->devx_obj;

			if (rule_actions[i].counter.offset) {
				DR_LOG(ERR, "Counter offset not supported over root");
				rte_errno = ENOTSUP;
				return rte_errno;
			}
			break;
#endif
		default:
			DR_LOG(ERR, "Found unsupported action type: %d", action->type);
			rte_errno = ENOTSUP;
			return rte_errno;
		}
	}

	return 0;
}

static bool
mlx5dr_action_fixup_stc_attr(struct mlx5dr_context *ctx,
			     struct mlx5dr_cmd_stc_modify_attr *stc_attr,
			     struct mlx5dr_cmd_stc_modify_attr *fixup_stc_attr,
			     enum mlx5dr_table_type table_type,
			     bool is_mirror)
{
	struct mlx5dr_devx_obj *devx_obj;
	bool use_fixup = false;
	uint32_t fw_tbl_type;

	fw_tbl_type = mlx5dr_table_get_res_fw_ft_type(table_type, is_mirror);

	switch (stc_attr->action_type) {
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
		if (!is_mirror)
			devx_obj = mlx5dr_pool_chunk_get_base_devx_obj(stc_attr->ste_table.ste_pool,
								       &stc_attr->ste_table.ste);
		else
			devx_obj =
				mlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_attr->ste_table.ste_pool,
									   &stc_attr->ste_table.ste);

		*fixup_stc_attr = *stc_attr;
		fixup_stc_attr->ste_table.ste_obj_id = devx_obj->id;
		use_fixup = true;
		break;

	case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
		if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
			fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
			fixup_stc_attr->action_offset = stc_attr->action_offset;
			fixup_stc_attr->stc_offset = stc_attr->stc_offset;
			fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id;
			fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number;
			fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
				ctx->caps->merged_eswitch;
			use_fixup = true;
		}
		break;

	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
		if (stc_attr->vport.vport_num != WIRE_PORT)
			break;

		if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
			/* The FW doesn't allow to go to the wire in TX/RX by JUMP_TO_VPORT */
			fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;
			fixup_stc_attr->action_offset = stc_attr->action_offset;
			fixup_stc_attr->stc_offset = stc_attr->stc_offset;
			fixup_stc_attr->vport.vport_num = 0;
			fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;
			fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
				stc_attr->vport.eswitch_owner_vhca_id_valid;
		}
		use_fixup = true;
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
		/* TIR is allowed on RX side, requires mask in case of FDB */
		if (fw_tbl_type == FS_FT_FDB_TX) {
			fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
			fixup_stc_attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
			fixup_stc_attr->stc_offset = stc_attr->stc_offset;
			use_fixup = true;
		}
		break;
	default:
		break;
	}

	return use_fixup;
}

int mlx5dr_action_alloc_single_stc(struct mlx5dr_context *ctx,
				   struct mlx5dr_cmd_stc_modify_attr *stc_attr,
				   uint32_t table_type,
				   struct mlx5dr_pool_chunk *stc)
{
	struct mlx5dr_cmd_stc_modify_attr cleanup_stc_attr = {0};
	struct mlx5dr_pool *stc_pool = ctx->stc_pool[table_type];
	struct mlx5dr_cmd_stc_modify_attr fixup_stc_attr = {0};
	struct mlx5dr_devx_obj *devx_obj_0;
	bool use_fixup;
	int ret;

	ret = mlx5dr_pool_chunk_alloc(stc_pool, stc);
	if (ret) {
		DR_LOG(ERR, "Failed to allocate single action STC");
		return ret;
	}

	stc_attr->stc_offset = stc->offset;

	/* Dynamic reparse is not supported, overwrite and use the default */
	if (!mlx5dr_context_cap_dynamic_reparse(ctx))
		stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;

	devx_obj_0 = mlx5dr_pool_chunk_get_base_devx_obj(stc_pool, stc);

	/* Adjust the stc_attr according to table/action limitations */
	use_fixup = mlx5dr_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
	ret = mlx5dr_cmd_stc_modify(devx_obj_0, use_fixup ? &fixup_stc_attr : stc_attr);
	if (ret) {
		DR_LOG(ERR, "Failed to modify STC action_type %d tbl_type %d",
		       stc_attr->action_type, table_type);
		goto free_chunk;
	}

	/* Modify the FDB peer */
	if (table_type == MLX5DR_TABLE_TYPE_FDB) {
		struct mlx5dr_devx_obj *devx_obj_1;

		devx_obj_1 = mlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_pool, stc);

		use_fixup = mlx5dr_action_fixup_stc_attr(ctx, stc_attr,
							 &fixup_stc_attr,
							 table_type, true);
		ret = mlx5dr_cmd_stc_modify(devx_obj_1, use_fixup ?
					    &fixup_stc_attr : stc_attr);
		if (ret) {
			DR_LOG(ERR, "Failed to modify peer STC action_type %d tbl_type %d",
			       stc_attr->action_type, table_type);
			goto clean_devx_obj_0;
		}
	}

	return 0;

clean_devx_obj_0:
	cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
	cleanup_stc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT;
	cleanup_stc_attr.stc_offset = stc->offset;
	mlx5dr_cmd_stc_modify(devx_obj_0, &cleanup_stc_attr);
free_chunk:
	mlx5dr_pool_chunk_free(stc_pool, stc);
	return rte_errno;
}

void mlx5dr_action_free_single_stc(struct mlx5dr_context *ctx,
				   uint32_t table_type,
				   struct mlx5dr_pool_chunk *stc)
{
	struct mlx5dr_pool *stc_pool = ctx->stc_pool[table_type];
	struct mlx5dr_cmd_stc_modify_attr stc_attr = {0};
	struct mlx5dr_devx_obj *devx_obj;

	/* Modify the STC not to point to an object */
	stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
	stc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT;
	stc_attr.stc_offset = stc->offset;
	devx_obj = mlx5dr_pool_chunk_get_base_devx_obj(stc_pool, stc);
	mlx5dr_cmd_stc_modify(devx_obj, &stc_attr);

	if (table_type == MLX5DR_TABLE_TYPE_FDB) {
		devx_obj = mlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_pool, stc);
		mlx5dr_cmd_stc_modify(devx_obj, &stc_attr);
	}

	mlx5dr_pool_chunk_free(stc_pool, stc);
}

static uint32_t mlx5dr_action_get_mh_stc_type(__be64 pattern)
{
	uint8_t action_type = MLX5_GET(set_action_in, &pattern, action_type);

	switch (action_type) {
	case MLX5_MODIFICATION_TYPE_SET:
		return MLX5_IFC_STC_ACTION_TYPE_SET;
	case MLX5_MODIFICATION_TYPE_ADD:
		return MLX5_IFC_STC_ACTION_TYPE_ADD;
	case MLX5_MODIFICATION_TYPE_COPY:
		return MLX5_IFC_STC_ACTION_TYPE_COPY;
	case MLX5_MODIFICATION_TYPE_ADD_FIELD:
		return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD;
	default:
		assert(false);
		DR_LOG(ERR, "Unsupported action type: 0x%x", action_type);
		rte_errno = ENOTSUP;
		return MLX5_IFC_STC_ACTION_TYPE_NOP;
	}
}

static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
					struct mlx5dr_devx_obj *obj,
					struct mlx5dr_cmd_stc_modify_attr *attr)
{
	attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;

	switch (action->type) {
	case MLX5DR_ACTION_TYP_TAG:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW5;
		break;
	case MLX5DR_ACTION_TYP_DROP:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
		attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
		break;
	case MLX5DR_ACTION_TYP_MISS:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
		attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
		break;
	case MLX5DR_ACTION_TYP_CTR:
		attr->id = obj->id;
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW0;
		break;
	case MLX5DR_ACTION_TYP_TIR:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR;
		attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
		attr->dest_tir_num = obj->id;
		break;
	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
	case MLX5DR_ACTION_TYP_MODIFY_HDR:
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW6;
		attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
		if (action->modify_header.require_reparse)
			attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;

		if (action->modify_header.num_of_actions == 1) {
			attr->modify_action.data = action->modify_header.single_action;
			attr->action_type = mlx5dr_action_get_mh_stc_type(attr->modify_action.data);

			if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||
			    attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)
				MLX5_SET(set_action_in, &attr->modify_action.data, data, 0);
		} else {
			attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;
			attr->modify_header.arg_id = action->modify_header.arg_obj->id;
			attr->modify_header.pattern_id = action->modify_header.pat_obj->id;
		}
		break;
	case MLX5DR_ACTION_TYP_TBL:
	case MLX5DR_ACTION_TYP_DEST_ARRAY:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
		attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
		attr->dest_table_id = obj->id;
		break;
	case MLX5DR_ACTION_TYP_DEST_ROOT:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
		attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
		attr->dest_table_id = action->root_tbl.sa->id;
		break;
	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW5;
		attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
		attr->remove_header.decap = 1;
		attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
		attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;
		break;
	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
	case MLX5DR_ACTION_TYP_INSERT_HEADER:
		attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
		if (!action->reformat.require_reparse)
			attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;

		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW6;
		attr->insert_header.encap = action->reformat.encap;
		attr->insert_header.push_esp = action->reformat.push_esp;
		attr->insert_header.insert_anchor = action->reformat.anchor;
		attr->insert_header.arg_id = action->reformat.arg_obj->id;
		attr->insert_header.header_size = action->reformat.header_size;
		attr->insert_header.insert_offset = action->reformat.offset;
		break;
	case MLX5DR_ACTION_TYP_ASO_METER:
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW6;
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
		attr->aso.aso_type = ASO_OPC_MOD_POLICER;
		attr->aso.devx_obj_id = obj->id;
		attr->aso.return_reg_id = action->aso.return_reg_id;
		break;
	case MLX5DR_ACTION_TYP_ASO_CT:
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW6;
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
		attr->aso.aso_type = ASO_OPC_MOD_CONNECTION_TRACKING;
		attr->aso.devx_obj_id = obj->id;
		attr->aso.return_reg_id = action->aso.return_reg_id;
		break;
	case MLX5DR_ACTION_TYP_VPORT:
		attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
		attr->vport.vport_num = action->vport.vport_num;
		attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id;
		attr->vport.eswitch_owner_vhca_id_valid = action->ctx->caps->merged_eswitch;
		break;
	case MLX5DR_ACTION_TYP_POP_VLAN:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW5;
		attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
		attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
		attr->remove_words.num_of_words = MLX5DR_ACTION_HDR_LEN_L2_VLAN / 2;
		break;
	case MLX5DR_ACTION_TYP_PUSH_VLAN:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW6;
		attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
		attr->insert_header.encap = 0;
		attr->insert_header.push_esp = 0;
		attr->insert_header.is_inline = 1;
		attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
		attr->insert_header.insert_offset = MLX5DR_ACTION_HDR_LEN_L2_MACS;
		attr->insert_header.header_size = MLX5DR_ACTION_HDR_LEN_L2_VLAN;
		break;
	case MLX5DR_ACTION_TYP_REMOVE_HEADER:
		if (action->remove_header.type == MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER) {
			attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
			attr->remove_header.decap = action->remove_header.decap;
			attr->remove_header.start_anchor = action->remove_header.start_anchor;
			attr->remove_header.end_anchor = action->remove_header.end_anchor;
		} else {
			attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
			attr->remove_words.start_anchor = action->remove_header.start_anchor;
			attr->remove_words.num_of_words = action->remove_header.num_of_words;
		}
		attr->action_offset = MLX5DR_ACTION_OFFSET_DW5;
		attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
		break;
	case MLX5DR_ACTION_TYP_JUMP_TO_MATCHER:
		attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
		attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
		attr->ste_table.ste = action->jump_to_matcher.matcher->match_ste.ste;
		attr->ste_table.ste_pool = action->jump_to_matcher.matcher->match_ste.pool;
		attr->ste_table.match_definer_id = action->ctx->caps->trivial_match_definer;
		break;
	default:
		DR_LOG(ERR, "Invalid action type %d", action->type);
		assert(false);
	}
}

static int
mlx5dr_action_create_stcs(struct mlx5dr_action *action,
			  struct mlx5dr_devx_obj *obj)
{
	struct mlx5dr_cmd_stc_modify_attr stc_attr = {0};
	struct mlx5dr_context *ctx = action->ctx;
	int ret;

	mlx5dr_action_fill_stc_attr(action, obj, &stc_attr);

	/* Block unsupported parallel devx obj modify over the same base */
	pthread_spin_lock(&ctx->ctrl_lock);

	/* Allocate STC for RX */
	if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) {
		ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr,
						     MLX5DR_TABLE_TYPE_NIC_RX,
						     &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]);
		if (ret)
			goto out_err;
	}

	/* Allocate STC for TX */
	if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) {
		ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr,
						     MLX5DR_TABLE_TYPE_NIC_TX,
						     &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]);
		if (ret)
			goto free_nic_rx_stc;
	}

	/* Allocate STC for FDB */
	if (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB) {
		ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr,
						     MLX5DR_TABLE_TYPE_FDB,
						     &action->stc[MLX5DR_TABLE_TYPE_FDB]);
		if (ret)
			goto free_nic_tx_stc;
	}

	pthread_spin_unlock(&ctx->ctrl_lock);

	return 0;

free_nic_tx_stc:
	if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX)
		mlx5dr_action_free_single_stc(ctx,
					      MLX5DR_TABLE_TYPE_NIC_TX,
					      &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]);
free_nic_rx_stc:
	if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX)
		mlx5dr_action_free_single_stc(ctx,
					      MLX5DR_TABLE_TYPE_NIC_RX,
					      &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]);
out_err:
	pthread_spin_unlock(&ctx->ctrl_lock);
	return rte_errno;
}

static void
mlx5dr_action_destroy_stcs(struct mlx5dr_action *action)
{
	struct mlx5dr_context *ctx = action->ctx;

	/* Block unsupported parallel devx obj modify over the same base */
	pthread_spin_lock(&ctx->ctrl_lock);

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX)
		mlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_NIC_RX,
					      &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]);

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX)
		mlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_NIC_TX,
					      &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]);

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB)
		mlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_FDB,
					      &action->stc[MLX5DR_TABLE_TYPE_FDB]);

	pthread_spin_unlock(&ctx->ctrl_lock);
}

static bool
mlx5dr_action_is_root_flags(uint32_t flags)
{
	return flags & (MLX5DR_ACTION_FLAG_ROOT_RX |
			MLX5DR_ACTION_FLAG_ROOT_TX |
			MLX5DR_ACTION_FLAG_ROOT_FDB);
}

static bool
mlx5dr_action_is_hws_flags(uint32_t flags)
{
	return flags & (MLX5DR_ACTION_FLAG_HWS_RX |
			MLX5DR_ACTION_FLAG_HWS_TX |
			MLX5DR_ACTION_FLAG_HWS_FDB);
}

static struct mlx5dr_action *
mlx5dr_action_create_generic_bulk(struct mlx5dr_context *ctx,
				  uint32_t flags,
				  enum mlx5dr_action_type action_type,
				  uint8_t bulk_sz)
{
	struct mlx5dr_action *action;
	int i;

	if (!mlx5dr_action_is_root_flags(flags) &&
	    !mlx5dr_action_is_hws_flags(flags)) {
		DR_LOG(ERR, "Action flags must specify root or non root (HWS)");
		rte_errno = ENOTSUP;
		return NULL;
	}

	if (mlx5dr_action_is_hws_flags(flags) &&
	    !(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT)) {
		DR_LOG(ERR, "Cannot create HWS action since HWS is not supported");
		rte_errno = ENOTSUP;
		return NULL;
	}

	action = simple_calloc(bulk_sz, sizeof(*action));
	if (!action) {
		DR_LOG(ERR, "Failed to allocate memory for action [%d]", action_type);
		rte_errno = ENOMEM;
		return NULL;
	}

	for (i = 0; i < bulk_sz; i++) {
		action[i].ctx = ctx;
		action[i].flags = flags;
		action[i].type = action_type;
	}

	return action;
}

static struct mlx5dr_action *
mlx5dr_action_create_generic(struct mlx5dr_context *ctx,
			     uint32_t flags,
			     enum mlx5dr_action_type action_type)
{
	return mlx5dr_action_create_generic_bulk(ctx, flags, action_type, 1);
}

struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
				struct mlx5dr_table *tbl,
				uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	if (mlx5dr_table_is_root(tbl)) {
		DR_LOG(ERR, "Root table cannot be set as destination");
		rte_errno = ENOTSUP;
		return NULL;
	}

	if (mlx5dr_action_is_hws_flags(flags) &&
	    mlx5dr_action_is_root_flags(flags)) {
		DR_LOG(ERR, "Same action cannot be used for root and non root");
		rte_errno = ENOTSUP;
		return NULL;
	}

	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_TBL);
	if (!action)
		return NULL;

	if (mlx5dr_action_is_root_flags(flags)) {
		if (mlx5dr_context_shared_gvmi_used(ctx))
			action->devx_obj = tbl->local_ft->obj;
		else
			action->devx_obj = tbl->ft->obj;
	} else {
		ret = mlx5dr_action_create_stcs(action, tbl->ft);
		if (ret)
			goto free_action;

		action->devx_dest.devx_obj = tbl->ft;
	}

	return action;

free_action:
	simple_free(action);
	return NULL;
}
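
/* Illustrative usage sketch (names are hypothetical, not used in this file):
 *	to_tbl = mlx5dr_action_create_dest_table(ctx, next_tbl,
 *						 MLX5DR_ACTION_FLAG_HWS_FDB);
 * The returned action is then referenced from the rule_actions[] array of
 * every rule that should jump to "next_tbl".
 */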

static int mlx5dr_action_get_dest_tir_obj(struct mlx5dr_context *ctx,
					  struct mlx5dr_action *action,
					  struct mlx5dr_devx_obj *obj,
					  struct mlx5dr_devx_obj **ret_obj)
{
	int ret;

	if (mlx5dr_context_shared_gvmi_used(ctx)) {
		ret = mlx5dr_matcher_create_aliased_obj(ctx,
							ctx->local_ibv_ctx,
							ctx->ibv_ctx,
							ctx->caps->vhca_id,
							obj->id,
							MLX5_GENERAL_OBJ_TYPE_TIR_ALIAS,
							&action->alias.devx_obj);
		if (ret) {
			DR_LOG(ERR, "Failed to create tir alias");
			return rte_errno;
		}
		*ret_obj = action->alias.devx_obj;
	} else {
		*ret_obj = obj;
	}

	return 0;
}

struct mlx5dr_action *
mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx,
			      struct mlx5dr_devx_obj *obj,
			      uint32_t flags,
			      bool is_local)
{
	struct mlx5dr_action *action;
	int ret;

	if (mlx5dr_action_is_hws_flags(flags) &&
	    mlx5dr_action_is_root_flags(flags)) {
		DR_LOG(ERR, "Same action cannot be used for root and non root");
		rte_errno = ENOTSUP;
		return NULL;
	}

	if ((flags & MLX5DR_ACTION_FLAG_ROOT_FDB) ||
	    (flags & MLX5DR_ACTION_FLAG_HWS_FDB && !ctx->caps->fdb_tir_stc)) {
		DR_LOG(ERR, "TIR action is not supported on FDB");
		rte_errno = ENOTSUP;
		return NULL;
	}

	if (!is_local) {
		DR_LOG(ERR, "TIR should be created on the local ibv_device, flags: 0x%x",
		       flags);
		rte_errno = ENOTSUP;
		return NULL;
	}

	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_TIR);
	if (!action)
		return NULL;

	if (mlx5dr_action_is_root_flags(flags)) {
		action->devx_obj = obj->obj;
	} else {
		struct mlx5dr_devx_obj *cur_obj = NULL; /* Avoid compilation warning */

		ret = mlx5dr_action_get_dest_tir_obj(ctx, action, obj, &cur_obj);
		if (ret) {
			DR_LOG(ERR, "Failed to create tir alias (flags: %d)", flags);
			goto free_action;
		}

		ret = mlx5dr_action_create_stcs(action, cur_obj);
		if (ret)
			goto clean_obj;

		action->devx_dest.devx_obj = cur_obj;
	}

	return action;

clean_obj:
	mlx5dr_cmd_destroy_obj(action->alias.devx_obj);
free_action:
	simple_free(action);
	return NULL;
}

struct mlx5dr_action *
mlx5dr_action_create_dest_drop(struct mlx5dr_context *ctx,
			       uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DROP);
	if (!action)
		return NULL;

	if (mlx5dr_action_is_hws_flags(flags)) {
		ret = mlx5dr_action_create_stcs(action, NULL);
		if (ret)
			goto free_action;
	}

	return action;

free_action:
	simple_free(action);
	return NULL;
}

struct mlx5dr_action *
mlx5dr_action_create_default_miss(struct mlx5dr_context *ctx,
				  uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_MISS);
	if (!action)
		return NULL;

	if (mlx5dr_action_is_hws_flags(flags)) {
		ret = mlx5dr_action_create_stcs(action, NULL);
		if (ret)
			goto free_action;
	}

	return action;

free_action:
	simple_free(action);
	return NULL;
}

struct mlx5dr_action *
mlx5dr_action_create_tag(struct mlx5dr_context *ctx,
			 uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_TAG);
	if (!action)
		return NULL;

	if (mlx5dr_action_is_hws_flags(flags)) {
		ret = mlx5dr_action_create_stcs(action, NULL);
		if (ret)
			goto free_action;
	}

	return action;

free_action:
	simple_free(action);
	return NULL;
}

struct mlx5dr_action *
mlx5dr_action_create_last(struct mlx5dr_context *ctx,
			  uint32_t flags)
{
	return mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_LAST);
}

static struct mlx5dr_action *
mlx5dr_action_create_aso(struct mlx5dr_context *ctx,
			 enum mlx5dr_action_type action_type,
			 struct mlx5dr_devx_obj *devx_obj,
			 uint8_t return_reg_id,
			 uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	if (mlx5dr_action_is_root_flags(flags)) {
		DR_LOG(ERR, "ASO action cannot be used over root table");
		rte_errno = ENOTSUP;
		return NULL;
	}

	action = mlx5dr_action_create_generic(ctx, flags, action_type);
	if (!action)
		return NULL;

	action->aso.devx_obj = devx_obj;
	action->aso.return_reg_id = return_reg_id;

	ret = mlx5dr_action_create_stcs(action, devx_obj);
	if (ret)
		goto free_action;

	return action;

free_action:
	simple_free(action);
	return NULL;
}

struct mlx5dr_action *
mlx5dr_action_create_aso_meter(struct mlx5dr_context *ctx,
			       struct mlx5dr_devx_obj *devx_obj,
			       uint8_t return_reg_id,
			       uint32_t flags)
{
	return mlx5dr_action_create_aso(ctx, MLX5DR_ACTION_TYP_ASO_METER,
					devx_obj, return_reg_id, flags);
}

struct mlx5dr_action *
mlx5dr_action_create_aso_ct(struct mlx5dr_context *ctx,
			    struct mlx5dr_devx_obj *devx_obj,
			    uint8_t return_reg_id,
			    uint32_t flags)
{
	return mlx5dr_action_create_aso(ctx, MLX5DR_ACTION_TYP_ASO_CT,
					devx_obj, return_reg_id, flags);
}

struct mlx5dr_action *
mlx5dr_action_create_counter(struct mlx5dr_context *ctx,
			     struct mlx5dr_devx_obj *obj,
			     uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	if (mlx5dr_action_is_hws_flags(flags) &&
	    mlx5dr_action_is_root_flags(flags)) {
		DR_LOG(ERR, "Same action cannot be used for root and non root");
		rte_errno = ENOTSUP;
		return NULL;
	}

	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_CTR);
	if (!action)
		return NULL;

	if (mlx5dr_action_is_root_flags(flags)) {
		action->devx_obj = obj->obj;
	} else {
		ret = mlx5dr_action_create_stcs(action, obj);
		if (ret)
			goto free_action;
	}

	return action;

free_action:
	simple_free(action);
	return NULL;
}

static int mlx5dr_action_create_dest_vport_hws(struct mlx5dr_context *ctx,
					       struct mlx5dr_action *action,
					       uint32_t ib_port_num)
{
	struct mlx5dr_cmd_query_vport_caps vport_caps = {0};
	int ret;

	ret = mlx5dr_cmd_query_ib_port(ctx->ibv_ctx, &vport_caps, ib_port_num);
	if (ret) {
		DR_LOG(ERR, "Failed querying port %d", ib_port_num);
		return ret;
	}

	action->vport.vport_num = vport_caps.vport_num;
	action->vport.esw_owner_vhca_id = vport_caps.esw_owner_vhca_id;

	if (!ctx->caps->merged_eswitch &&
	    action->vport.esw_owner_vhca_id != ctx->caps->vhca_id) {
		DR_LOG(ERR, "Not merged-eswitch (%d), not allowed to send to other vhca_id (%d)",
		       ctx->caps->vhca_id, action->vport.esw_owner_vhca_id);
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	ret = mlx5dr_action_create_stcs(action, NULL);
	if (ret) {
		DR_LOG(ERR, "Failed creating stc for port %d", ib_port_num);
		return ret;
	}

	return 0;
}

struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_context *ctx,
				uint32_t ib_port_num,
				uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	if (!(flags & MLX5DR_ACTION_FLAG_HWS_FDB)) {
		DR_LOG(ERR, "Vport action is supported for FDB only");
		rte_errno = EINVAL;
		return NULL;
	}

	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_VPORT);
	if (!action)
		return NULL;

	ret = mlx5dr_action_create_dest_vport_hws(ctx, action, ib_port_num);
	if (ret) {
		DR_LOG(ERR, "Failed to create vport action HWS");
		goto free_action;
	}

	return action;

free_action:
	simple_free(action);
	return NULL;
}

struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	if (mlx5dr_action_is_root_flags(flags)) {
		DR_LOG(ERR, "Push vlan action not supported for root");
		rte_errno = ENOTSUP;
		return NULL;
	}

	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_PUSH_VLAN);
	if (!action)
		return NULL;

	ret = mlx5dr_action_create_stcs(action, NULL);
	if (ret) {
		DR_LOG(ERR, "Failed creating stc for push vlan");
		goto free_action;
	}

	return action;

free_action:
	simple_free(action);
	return NULL;
}

struct mlx5dr_action *
mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags)
{
	struct mlx5dr_action *action;
	int ret;

	if (mlx5dr_action_is_root_flags(flags)) {
		DR_LOG(ERR, "Pop vlan action not supported for root");
		rte_errno = ENOTSUP;
		return NULL;
	}
	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_POP_VLAN);
	if (!action)
		return NULL;

	ret = mlx5dr_action_get_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP);
	if (ret) {
		DR_LOG(ERR, "Failed to create the shared remove stc for pop vlan");
		goto free_action;
	}

	ret = mlx5dr_action_create_stcs(action, NULL);
	if (ret) {
		DR_LOG(ERR, "Failed creating stc for pop vlan");
		goto free_shared;
	}

	return action;

free_shared:
	mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP);
free_action:
	simple_free(action);
	return NULL;
}

static int
mlx5dr_action_conv_reformat_to_verbs(uint32_t action_type,
				     uint32_t *verb_reformat_type)
{
	switch (action_type) {
	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
		*verb_reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2;
		return 0;
	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
		*verb_reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
1691 return 0; 1692 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 1693 *verb_reformat_type = 1694 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2; 1695 return 0; 1696 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 1697 *verb_reformat_type = 1698 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 1699 return 0; 1700 default: 1701 DR_LOG(ERR, "Invalid root reformat action type"); 1702 rte_errno = EINVAL; 1703 return rte_errno; 1704 } 1705 } 1706 1707 static int 1708 mlx5dr_action_conv_flags_to_ft_type(uint32_t flags, enum mlx5dv_flow_table_type *ft_type) 1709 { 1710 if (flags & (MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX)) { 1711 *ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 1712 } else if (flags & (MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX)) { 1713 *ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX; 1714 #ifdef HAVE_MLX5DV_FLOW_MATCHER_FT_TYPE 1715 } else if (flags & (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB)) { 1716 *ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 1717 #endif 1718 } else { 1719 rte_errno = ENOTSUP; 1720 return 1; 1721 } 1722 1723 return 0; 1724 } 1725 1726 static int 1727 mlx5dr_action_create_reformat_root(struct mlx5dr_action *action, 1728 size_t data_sz, 1729 void *data) 1730 { 1731 enum mlx5dv_flow_table_type ft_type = 0; /*fix compilation warn*/ 1732 uint32_t verb_reformat_type = 0; 1733 struct ibv_context *ibv_ctx; 1734 int ret; 1735 1736 /* Convert action to FT type and verbs reformat type */ 1737 ret = mlx5dr_action_conv_flags_to_ft_type(action->flags, &ft_type); 1738 if (ret) 1739 return rte_errno; 1740 1741 ret = mlx5dr_action_conv_reformat_to_verbs(action->type, &verb_reformat_type); 1742 if (ret) 1743 return rte_errno; 1744 1745 /* Create the reformat type for root table */ 1746 ibv_ctx = mlx5dr_context_get_local_ibv(action->ctx); 1747 action->flow_action = 1748 mlx5_glue->dv_create_flow_action_packet_reformat_root(ibv_ctx, 1749 data_sz, 1750 data, 1751 verb_reformat_type, 1752 ft_type); 1753 if (!action->flow_action) { 1754 DR_LOG(ERR, "Failed to create dv_create_flow reformat"); 1755 rte_errno = errno; 1756 return rte_errno; 1757 } 1758 1759 return 0; 1760 } 1761 1762 static int 1763 mlx5dr_action_handle_insert_with_ptr(struct mlx5dr_action *action, 1764 uint8_t num_of_hdrs, 1765 struct mlx5dr_action_reformat_header *hdrs, 1766 uint32_t log_bulk_sz, uint32_t reparse) 1767 { 1768 struct mlx5dr_devx_obj *arg_obj; 1769 size_t max_sz = 0; 1770 int ret, i; 1771 1772 for (i = 0; i < num_of_hdrs; i++) { 1773 if (hdrs[i].sz % W_SIZE != 0) { 1774 DR_LOG(ERR, "Header data size should be in WORD granularity"); 1775 rte_errno = EINVAL; 1776 return rte_errno; 1777 } 1778 max_sz = RTE_MAX(hdrs[i].sz, max_sz); 1779 } 1780 1781 /* Allocate single shared arg object for all headers */ 1782 arg_obj = mlx5dr_arg_create(action->ctx, 1783 hdrs->data, 1784 max_sz, 1785 log_bulk_sz, 1786 action->flags & MLX5DR_ACTION_FLAG_SHARED); 1787 if (!arg_obj) 1788 return rte_errno; 1789 1790 for (i = 0; i < num_of_hdrs; i++) { 1791 action[i].reformat.arg_obj = arg_obj; 1792 action[i].reformat.header_size = hdrs[i].sz; 1793 action[i].reformat.num_of_hdrs = num_of_hdrs; 1794 action[i].reformat.max_hdr_sz = max_sz; 1795 1796 if (action[i].type == MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 || 1797 action[i].type == MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) { 1798 action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START; 1799 action[i].reformat.offset = 0; 1800 action[i].reformat.encap = 1; 1801 action[i].reformat.push_esp = 0; 1802 } 1803 1804 if (likely(reparse == 
MLX5DR_ACTION_STC_REPARSE_DEFAULT)) 1805 action[i].reformat.require_reparse = true; 1806 else if (reparse == MLX5DR_ACTION_STC_REPARSE_ON) 1807 action[i].reformat.require_reparse = true; 1808 1809 ret = mlx5dr_action_create_stcs(&action[i], NULL); 1810 if (ret) { 1811 DR_LOG(ERR, "Failed to create stc for reformat"); 1812 goto free_stc; 1813 } 1814 } 1815 1816 return 0; 1817 1818 free_stc: 1819 while (i--) 1820 mlx5dr_action_destroy_stcs(&action[i]); 1821 1822 mlx5dr_cmd_destroy_obj(arg_obj); 1823 return ret; 1824 } 1825 1826 static int 1827 mlx5dr_action_handle_l2_to_tunnel_l3(struct mlx5dr_action *action, 1828 uint8_t num_of_hdrs, 1829 struct mlx5dr_action_reformat_header *hdrs, 1830 uint32_t log_bulk_sz) 1831 { 1832 int ret; 1833 1834 /* The action is remove-l2-header + insert-l3-header */ 1835 ret = mlx5dr_action_get_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP_L3); 1836 if (ret) { 1837 DR_LOG(ERR, "Failed to create remove stc for reformat"); 1838 return ret; 1839 } 1840 1841 /* Reuse the insert with pointer for the L2L3 header */ 1842 ret = mlx5dr_action_handle_insert_with_ptr(action, 1843 num_of_hdrs, 1844 hdrs, 1845 log_bulk_sz, 1846 MLX5DR_ACTION_STC_REPARSE_DEFAULT); 1847 if (ret) 1848 goto put_shared_stc; 1849 1850 return 0; 1851 1852 put_shared_stc: 1853 mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP_L3); 1854 return ret; 1855 } 1856 1857 static void mlx5dr_action_prepare_decap_l3_actions(size_t data_sz, 1858 uint8_t *mh_data, 1859 int *num_of_actions) 1860 { 1861 int actions; 1862 uint32_t i; 1863 1864 /* Remove L2L3 outer headers */ 1865 MLX5_SET(stc_ste_param_remove, mh_data, action_type, 1866 MLX5_MODIFICATION_TYPE_REMOVE); 1867 MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1); 1868 MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor, 1869 MLX5_HEADER_ANCHOR_PACKET_START); 1870 MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor, 1871 MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4); 1872 mh_data += MLX5DR_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */ 1873 actions = 1; 1874 1875 /* Add the new header using inline action 4Byte at a time, the header 1876 * is added in reversed order to the beginning of the packet to avoid 1877 * incorrect parsing by the HW. Since header is 14B or 18B an extra 1878 * two bytes are padded and later removed. 
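* For example, a 14B L2 header is rebuilt with four 4B inline inserts (16B in total) and the extra 2B of padding is then removed; an 18B L2+VLAN header takes five inserts.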
1879 */ 1880 for (i = 0; i < data_sz / MLX5DR_ACTION_INLINE_DATA_SIZE + 1; i++) { 1881 MLX5_SET(stc_ste_param_insert, mh_data, action_type, 1882 MLX5_MODIFICATION_TYPE_INSERT); 1883 MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1); 1884 MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor, 1885 MLX5_HEADER_ANCHOR_PACKET_START); 1886 MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2); 1887 mh_data += MLX5DR_ACTION_DOUBLE_SIZE; 1888 actions++; 1889 } 1890 1891 /* Remove first 2 extra bytes */ 1892 MLX5_SET(stc_ste_param_remove_words, mh_data, action_type, 1893 MLX5_MODIFICATION_TYPE_REMOVE_WORDS); 1894 MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor, 1895 MLX5_HEADER_ANCHOR_PACKET_START); 1896 /* The hardware expects here size in words (2 bytes) */ 1897 MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1); 1898 actions++; 1899 1900 *num_of_actions = actions; 1901 } 1902 1903 static int 1904 mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action, 1905 uint8_t num_of_hdrs, 1906 struct mlx5dr_action_reformat_header *hdrs, 1907 uint32_t log_bulk_sz) 1908 { 1909 uint8_t mh_data[MLX5DR_ACTION_REFORMAT_DATA_SIZE] = {0}; 1910 struct mlx5dr_devx_obj *arg_obj, *pat_obj; 1911 struct mlx5dr_context *ctx = action->ctx; 1912 int num_of_actions; 1913 int mh_data_size; 1914 int ret, i; 1915 1916 for (i = 0; i < num_of_hdrs; i++) { 1917 if (hdrs[i].sz != MLX5DR_ACTION_HDR_LEN_L2 && 1918 hdrs[i].sz != MLX5DR_ACTION_HDR_LEN_L2_W_VLAN) { 1919 DR_LOG(ERR, "Data size is not supported for decap-l3"); 1920 rte_errno = EINVAL; 1921 return rte_errno; 1922 } 1923 } 1924 1925 /* Create a full modify header action list in case shared */ 1926 mlx5dr_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions); 1927 1928 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) 1929 mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions); 1930 1931 /* All DecapL3 cases require the same max arg size */ 1932 arg_obj = mlx5dr_arg_create_modify_header_arg(ctx, 1933 (__be64 *)mh_data, 1934 num_of_actions, 1935 log_bulk_sz, 1936 action->flags & MLX5DR_ACTION_FLAG_SHARED); 1937 if (!arg_obj) 1938 return rte_errno; 1939 1940 for (i = 0; i < num_of_hdrs; i++) { 1941 memset(mh_data, 0, MLX5DR_ACTION_REFORMAT_DATA_SIZE); 1942 mlx5dr_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions); 1943 mh_data_size = num_of_actions * MLX5DR_MODIFY_ACTION_SIZE; 1944 1945 pat_obj = mlx5dr_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size); 1946 if (!pat_obj) { 1947 DR_LOG(ERR, "Failed to allocate pattern for DecapL3"); 1948 goto free_stc_and_pat; 1949 } 1950 1951 action[i].modify_header.max_num_of_actions = num_of_actions; 1952 action[i].modify_header.num_of_actions = num_of_actions; 1953 action[i].modify_header.num_of_patterns = num_of_hdrs; 1954 action[i].modify_header.arg_obj = arg_obj; 1955 action[i].modify_header.pat_obj = pat_obj; 1956 action[i].modify_header.require_reparse = 1957 mlx5dr_pat_require_reparse((__be64 *)mh_data, num_of_actions); 1958 1959 ret = mlx5dr_action_create_stcs(&action[i], NULL); 1960 if (ret) { 1961 mlx5dr_pat_put_pattern(ctx, pat_obj); 1962 goto free_stc_and_pat; 1963 } 1964 } 1965 1966 return 0; 1967 1968 1969 free_stc_and_pat: 1970 while (i--) { 1971 mlx5dr_action_destroy_stcs(&action[i]); 1972 mlx5dr_pat_put_pattern(ctx, action[i].modify_header.pat_obj); 1973 } 1974 1975 mlx5dr_cmd_destroy_obj(arg_obj); 1976 return rte_errno; 1977 } 1978 1979 static int 1980 mlx5dr_action_create_reformat_hws(struct mlx5dr_action *action, 1981 uint8_t
num_of_hdrs, 1982 struct mlx5dr_action_reformat_header *hdrs, 1983 uint32_t bulk_size) 1984 { 1985 int ret; 1986 1987 switch (action->type) { 1988 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 1989 ret = mlx5dr_action_create_stcs(action, NULL); 1990 break; 1991 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 1992 ret = mlx5dr_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size, 1993 MLX5DR_ACTION_STC_REPARSE_DEFAULT); 1994 break; 1995 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 1996 ret = mlx5dr_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size); 1997 break; 1998 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 1999 ret = mlx5dr_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size); 2000 break; 2001 default: 2002 DR_LOG(ERR, "Invalid HWS reformat action type"); 2003 rte_errno = EINVAL; 2004 return rte_errno; 2005 } 2006 2007 return ret; 2008 } 2009 2010 struct mlx5dr_action * 2011 mlx5dr_action_create_reformat(struct mlx5dr_context *ctx, 2012 enum mlx5dr_action_type reformat_type, 2013 uint8_t num_of_hdrs, 2014 struct mlx5dr_action_reformat_header *hdrs, 2015 uint32_t log_bulk_size, 2016 uint32_t flags) 2017 { 2018 struct mlx5dr_action *action; 2019 int ret; 2020 2021 if (!num_of_hdrs) { 2022 DR_LOG(ERR, "Reformat num_of_hdrs cannot be zero"); 2023 rte_errno = EINVAL; 2024 return NULL; 2025 } 2026 2027 action = mlx5dr_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs); 2028 if (!action) 2029 return NULL; 2030 2031 if (mlx5dr_action_is_root_flags(flags)) { 2032 if (log_bulk_size) { 2033 DR_LOG(ERR, "Bulk reformat not supported over root"); 2034 rte_errno = ENOTSUP; 2035 goto free_action; 2036 } 2037 2038 ret = mlx5dr_action_create_reformat_root(action, 2039 hdrs ? hdrs->sz : 0, 2040 hdrs ? hdrs->data : NULL); 2041 if (ret) { 2042 DR_LOG(ERR, "Failed to create root reformat action"); 2043 goto free_action; 2044 } 2045 2046 return action; 2047 } 2048 2049 if (!mlx5dr_action_is_hws_flags(flags) || 2050 ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1))) { 2051 DR_LOG(ERR, "Reformat flags don't fit HWS (flags: 0x%x)", flags); 2052 rte_errno = EINVAL; 2053 goto free_action; 2054 } 2055 2056 ret = mlx5dr_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size); 2057 if (ret) { 2058 DR_LOG(ERR, "Failed to create HWS reformat action"); 2059 goto free_action; 2060 } 2061 2062 return action; 2063 2064 free_action: 2065 simple_free(action); 2066 return NULL; 2067 } 2068 2069 static int 2070 mlx5dr_action_create_modify_header_root(struct mlx5dr_action *action, 2071 size_t actions_sz, 2072 __be64 *actions) 2073 { 2074 enum mlx5dv_flow_table_type ft_type = 0; 2075 struct ibv_context *local_ibv_ctx; 2076 int ret; 2077 2078 ret = mlx5dr_action_conv_flags_to_ft_type(action->flags, &ft_type); 2079 if (ret) 2080 return rte_errno; 2081 2082 local_ibv_ctx = mlx5dr_context_get_local_ibv(action->ctx); 2083 2084 action->flow_action = 2085 mlx5_glue->dv_create_flow_action_modify_header_root(local_ibv_ctx, 2086 actions_sz, 2087 (uint64_t *)actions, 2088 ft_type); 2089 if (!action->flow_action) { 2090 rte_errno = errno; 2091 return rte_errno; 2092 } 2093 2094 return 0; 2095 } 2096 2097 static int 2098 mlx5dr_action_create_modify_header_hws(struct mlx5dr_action *action, 2099 uint8_t num_of_patterns, 2100 struct mlx5dr_action_mh_pattern *pattern, 2101 uint32_t log_bulk_size, 2102 uint32_t reparse) 2103 { 2104 struct mlx5dr_devx_obj *pat_obj, *arg_obj = NULL; 2105 struct mlx5dr_context *ctx = action->ctx; 2106 uint16_t 
num_actions, max_mh_actions = 0; 2107 int i, ret; 2108 2109 /* Calculate maximum number of mh actions for shared arg allocation */ 2110 for (i = 0; i < num_of_patterns; i++) 2111 max_mh_actions = RTE_MAX(max_mh_actions, pattern[i].sz / MLX5DR_MODIFY_ACTION_SIZE); 2112 2113 /* Allocate single shared arg for all patterns based on the max size */ 2114 if (max_mh_actions > 1) { 2115 arg_obj = mlx5dr_arg_create_modify_header_arg(ctx, 2116 pattern->data, 2117 max_mh_actions, 2118 log_bulk_size, 2119 action->flags & 2120 MLX5DR_ACTION_FLAG_SHARED); 2121 if (!arg_obj) 2122 return rte_errno; 2123 } 2124 2125 for (i = 0; i < num_of_patterns; i++) { 2126 if (!mlx5dr_pat_verify_actions(pattern[i].data, pattern[i].sz)) { 2127 DR_LOG(ERR, "Fail to verify pattern modify actions"); 2128 rte_errno = EINVAL; 2129 goto free_stc_and_pat; 2130 } 2131 2132 num_actions = pattern[i].sz / MLX5DR_MODIFY_ACTION_SIZE; 2133 action[i].modify_header.num_of_patterns = num_of_patterns; 2134 action[i].modify_header.max_num_of_actions = max_mh_actions; 2135 action[i].modify_header.num_of_actions = num_actions; 2136 2137 if (likely(reparse == MLX5DR_ACTION_STC_REPARSE_DEFAULT)) 2138 action[i].modify_header.require_reparse = 2139 mlx5dr_pat_require_reparse(pattern[i].data, num_actions); 2140 else if (reparse == MLX5DR_ACTION_STC_REPARSE_ON) 2141 action[i].modify_header.require_reparse = true; 2142 2143 if (num_actions == 1) { 2144 pat_obj = NULL; 2145 /* Optimize single modify action to be used inline */ 2146 action[i].modify_header.single_action = pattern[i].data[0]; 2147 action[i].modify_header.single_action_type = 2148 MLX5_GET(set_action_in, pattern[i].data, action_type); 2149 } else { 2150 /* Multiple modify actions require a pattern */ 2151 pat_obj = mlx5dr_pat_get_pattern(ctx, pattern[i].data, pattern[i].sz); 2152 if (!pat_obj) { 2153 DR_LOG(ERR, "Failed to allocate pattern for modify header"); 2154 goto free_stc_and_pat; 2155 } 2156 2157 action[i].modify_header.arg_obj = arg_obj; 2158 action[i].modify_header.pat_obj = pat_obj; 2159 } 2160 /* Allocate STC for each action representing a header */ 2161 ret = mlx5dr_action_create_stcs(&action[i], NULL); 2162 if (ret) { 2163 if (pat_obj) 2164 mlx5dr_pat_put_pattern(ctx, pat_obj); 2165 goto free_stc_and_pat; 2166 } 2167 } 2168 2169 return 0; 2170 2171 free_stc_and_pat: 2172 while (i--) { 2173 mlx5dr_action_destroy_stcs(&action[i]); 2174 if (action[i].modify_header.pat_obj) 2175 mlx5dr_pat_put_pattern(ctx, action[i].modify_header.pat_obj); 2176 } 2177 2178 if (arg_obj) 2179 mlx5dr_cmd_destroy_obj(arg_obj); 2180 2181 return rte_errno; 2182 } 2183 2184 struct mlx5dr_action * 2185 mlx5dr_action_create_modify_header_reparse(struct mlx5dr_context *ctx, 2186 uint8_t num_of_patterns, 2187 struct mlx5dr_action_mh_pattern *patterns, 2188 uint32_t log_bulk_size, 2189 uint32_t flags, uint32_t reparse) 2190 { 2191 struct mlx5dr_action *action; 2192 int ret; 2193 2194 if (!num_of_patterns) { 2195 DR_LOG(ERR, "Invalid number of patterns"); 2196 rte_errno = ENOTSUP; 2197 return NULL; 2198 } 2199 2200 action = mlx5dr_action_create_generic_bulk(ctx, flags, 2201 MLX5DR_ACTION_TYP_MODIFY_HDR, 2202 num_of_patterns); 2203 if (!action) 2204 return NULL; 2205 2206 if (mlx5dr_action_is_root_flags(flags)) { 2207 if (log_bulk_size) { 2208 DR_LOG(ERR, "Bulk modify-header not supported over root"); 2209 rte_errno = ENOTSUP; 2210 goto free_action; 2211 } 2212 2213 if (num_of_patterns != 1) { 2214 DR_LOG(ERR, "Only a single pattern supported over root"); 2215 rte_errno = ENOTSUP; 2216 goto free_action; 
2217 } 2218 2219 ret = mlx5dr_action_create_modify_header_root(action, 2220 patterns->sz, 2221 patterns->data); 2222 if (ret) 2223 goto free_action; 2224 2225 return action; 2226 } 2227 2228 if ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) { 2229 DR_LOG(ERR, "Action cannot be shared with requested pattern or size"); 2230 rte_errno = EINVAL; 2231 goto free_action; 2232 } 2233 2234 ret = mlx5dr_action_create_modify_header_hws(action, 2235 num_of_patterns, 2236 patterns, 2237 log_bulk_size, 2238 reparse); 2239 if (ret) 2240 goto free_action; 2241 2242 return action; 2243 2244 free_action: 2245 simple_free(action); 2246 return NULL; 2247 } 2248 2249 struct mlx5dr_action * 2250 mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx, 2251 uint8_t num_of_patterns, 2252 struct mlx5dr_action_mh_pattern *patterns, 2253 uint32_t log_bulk_size, 2254 uint32_t flags) 2255 { 2256 return mlx5dr_action_create_modify_header_reparse(ctx, num_of_patterns, patterns, 2257 log_bulk_size, flags, 2258 MLX5DR_ACTION_STC_REPARSE_DEFAULT); 2259 } 2260 static struct mlx5dr_devx_obj * 2261 mlx5dr_action_dest_array_process_reformat(struct mlx5dr_context *ctx, 2262 enum mlx5dr_action_type type, 2263 void *reformat_data, 2264 size_t reformat_data_sz) 2265 { 2266 struct mlx5dr_cmd_packet_reformat_create_attr pr_attr = {0}; 2267 struct mlx5dr_devx_obj *reformat_devx_obj; 2268 2269 if (!reformat_data || !reformat_data_sz) { 2270 DR_LOG(ERR, "Empty reformat action or data"); 2271 rte_errno = EINVAL; 2272 return NULL; 2273 } 2274 2275 switch (type) { 2276 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 2277 pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL; 2278 break; 2279 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 2280 pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 2281 break; 2282 default: 2283 DR_LOG(ERR, "Invalid value for reformat type"); 2284 rte_errno = EINVAL; 2285 return NULL; 2286 } 2287 pr_attr.reformat_param_0 = 0; 2288 pr_attr.data_sz = reformat_data_sz; 2289 pr_attr.data = reformat_data; 2290 2291 reformat_devx_obj = mlx5dr_cmd_packet_reformat_create(ctx->ibv_ctx, &pr_attr); 2292 if (!reformat_devx_obj) 2293 return NULL; 2294 2295 return reformat_devx_obj; 2296 } 2297 2298 struct mlx5dr_action * 2299 mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx, 2300 size_t num_dest, 2301 struct mlx5dr_action_dest_attr *dests, 2302 uint32_t flags) 2303 { 2304 struct mlx5dr_cmd_set_fte_dest *dest_list = NULL; 2305 struct mlx5dr_devx_obj *packet_reformat = NULL; 2306 struct mlx5dr_cmd_ft_create_attr ft_attr = {0}; 2307 struct mlx5dr_cmd_set_fte_attr fte_attr = {0}; 2308 struct mlx5dr_cmd_forward_tbl *fw_island; 2309 enum mlx5dr_table_type table_type; 2310 struct mlx5dr_action *action; 2311 uint32_t i; 2312 int ret; 2313 2314 if (num_dest <= 1) { 2315 rte_errno = EINVAL; 2316 DR_LOG(ERR, "Action must have multiple dests"); 2317 return NULL; 2318 } 2319 2320 if (flags == (MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED)) { 2321 ft_attr.type = FS_FT_NIC_RX; 2322 ft_attr.level = MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL - 1; 2323 table_type = MLX5DR_TABLE_TYPE_NIC_RX; 2324 } else if (flags == (MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED)) { 2325 ft_attr.type = FS_FT_FDB; 2326 ft_attr.level = ctx->caps->fdb_ft.max_level - 1; 2327 table_type = MLX5DR_TABLE_TYPE_FDB; 2328 } else { 2329 DR_LOG(ERR, "Action flags not supported"); 2330 rte_errno = ENOTSUP; 2331 return NULL; 2332 } 2333 2334 if 
(mlx5dr_context_shared_gvmi_used(ctx)) { 2335 DR_LOG(ERR, "Cannot use this action in shared GVMI context"); 2336 rte_errno = ENOTSUP; 2337 return NULL; 2338 } 2339 2340 dest_list = simple_calloc(num_dest, sizeof(*dest_list)); 2341 if (!dest_list) { 2342 DR_LOG(ERR, "Failed to allocate memory for destinations"); 2343 rte_errno = ENOMEM; 2344 return NULL; 2345 } 2346 2347 for (i = 0; i < num_dest; i++) { 2348 enum mlx5dr_action_type *action_type = dests[i].action_type; 2349 2350 if (!mlx5dr_action_check_combo(dests[i].action_type, table_type)) { 2351 DR_LOG(ERR, "Invalid combination of actions"); 2352 rte_errno = EINVAL; 2353 goto free_dest_list; 2354 } 2355 2356 for (; *action_type != MLX5DR_ACTION_TYP_LAST; action_type++) { 2357 switch (*action_type) { 2358 case MLX5DR_ACTION_TYP_TBL: 2359 dest_list[i].destination_type = 2360 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 2361 dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id; 2362 fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2363 fte_attr.ignore_flow_level = 1; 2364 break; 2365 case MLX5DR_ACTION_TYP_MISS: 2366 if (table_type != MLX5DR_TABLE_TYPE_FDB) { 2367 DR_LOG(ERR, "Miss action supported for FDB only"); 2368 rte_errno = ENOTSUP; 2369 goto free_dest_list; 2370 } 2371 dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2372 dest_list[i].destination_id = 2373 ctx->caps->eswitch_manager_vport_number; 2374 fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2375 break; 2376 case MLX5DR_ACTION_TYP_VPORT: 2377 dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2378 dest_list[i].destination_id = dests[i].dest->vport.vport_num; 2379 fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2380 if (ctx->caps->merged_eswitch) { 2381 dest_list[i].ext_flags |= 2382 MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID; 2383 dest_list[i].esw_owner_vhca_id = 2384 dests[i].dest->vport.esw_owner_vhca_id; 2385 } 2386 break; 2387 case MLX5DR_ACTION_TYP_TIR: 2388 dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_TIR; 2389 dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id; 2390 fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2391 break; 2392 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 2393 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 2394 packet_reformat = mlx5dr_action_dest_array_process_reformat 2395 (ctx, 2396 *action_type, 2397 dests[i].reformat.reformat_data, 2398 dests[i].reformat.reformat_data_sz); 2399 if (!packet_reformat) 2400 goto free_dest_list; 2401 2402 dest_list[i].ext_flags |= MLX5DR_CMD_EXT_DEST_REFORMAT; 2403 dest_list[i].ext_reformat = packet_reformat; 2404 ft_attr.reformat_en = true; 2405 fte_attr.extended_dest = 1; 2406 break; 2407 default: 2408 DR_LOG(ERR, "Unsupported action in dest_array"); 2409 rte_errno = ENOTSUP; 2410 goto free_dest_list; 2411 } 2412 } 2413 } 2414 fte_attr.dests_num = num_dest; 2415 fte_attr.dests = dest_list; 2416 2417 fw_island = mlx5dr_cmd_forward_tbl_create(ctx->ibv_ctx, &ft_attr, &fte_attr); 2418 if (!fw_island) 2419 goto free_dest_list; 2420 2421 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ARRAY); 2422 if (!action) 2423 goto destroy_fw_island; 2424 2425 ret = mlx5dr_action_create_stcs(action, fw_island->ft); 2426 if (ret) 2427 goto free_action; 2428 2429 action->dest_array.fw_island = fw_island; 2430 action->dest_array.num_dest = num_dest; 2431 action->dest_array.dest_list = dest_list; 2432 2433 return action; 2434 2435 free_action: 2436 simple_free(action); 2437 destroy_fw_island: 2438 
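/* Error rollback: resources are released in reverse order of creation */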
mlx5dr_cmd_forward_tbl_destroy(fw_island); 2439 free_dest_list: 2440 for (i = 0; i < num_dest; i++) { 2441 if (dest_list[i].ext_reformat) 2442 mlx5dr_cmd_destroy_obj(dest_list[i].ext_reformat); 2443 } 2444 simple_free(dest_list); 2445 return NULL; 2446 } 2447 2448 struct mlx5dr_action * 2449 mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx, 2450 uint16_t priority, 2451 uint32_t flags) 2452 { 2453 struct mlx5dv_steering_anchor_attr attr = {0}; 2454 struct mlx5dv_steering_anchor *sa; 2455 struct mlx5dr_action *action; 2456 int ret; 2457 2458 if (mlx5dr_action_is_root_flags(flags)) { 2459 DR_LOG(ERR, "Action flags must be only non root (HWS)"); 2460 rte_errno = ENOTSUP; 2461 return NULL; 2462 } 2463 2464 if (mlx5dr_context_shared_gvmi_used(ctx)) { 2465 DR_LOG(ERR, "Cannot use this action in shared GVMI context"); 2466 rte_errno = ENOTSUP; 2467 return NULL; 2468 } 2469 2470 if (mlx5dr_action_conv_flags_to_ft_type(flags, &attr.ft_type)) 2471 return NULL; 2472 2473 attr.priority = priority; 2474 2475 sa = mlx5_glue->create_steering_anchor(ctx->ibv_ctx, &attr); 2476 if (!sa) { 2477 DR_LOG(ERR, "Creation of steering anchor failed"); 2478 return NULL; 2479 } 2480 2481 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ROOT); 2482 if (!action) 2483 goto free_steering_anchor; 2484 2485 action->root_tbl.sa = sa; 2486 2487 ret = mlx5dr_action_create_stcs(action, NULL); 2488 if (ret) 2489 goto free_action; 2490 2491 return action; 2492 2493 free_action: 2494 simple_free(action); 2495 free_steering_anchor: 2496 mlx5_glue->destroy_steering_anchor(sa); 2497 return NULL; 2498 } 2499 2500 static struct mlx5dr_action * 2501 mlx5dr_action_create_insert_header_reparse(struct mlx5dr_context *ctx, 2502 uint8_t num_of_hdrs, 2503 struct mlx5dr_action_insert_header *hdrs, 2504 uint32_t log_bulk_size, 2505 uint32_t flags, uint32_t reparse) 2506 { 2507 struct mlx5dr_action_reformat_header *reformat_hdrs; 2508 struct mlx5dr_action *action; 2509 int i, ret; 2510 2511 if (!num_of_hdrs) { 2512 DR_LOG(ERR, "Reformat num_of_hdrs cannot be zero"); 2513 rte_errno = EINVAL; 2514 return NULL; 2515 } 2516 2517 if (mlx5dr_action_is_root_flags(flags)) { 2518 DR_LOG(ERR, "Dynamic reformat action not supported over root"); 2519 rte_errno = ENOTSUP; 2520 return NULL; 2521 } 2522 2523 if (!mlx5dr_action_is_hws_flags(flags) || 2524 ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1))) { 2525 DR_LOG(ERR, "Reformat flags don't fit HWS (flags: 0x%x)", flags); 2526 rte_errno = EINVAL; 2527 return NULL; 2528 } 2529 2530 action = mlx5dr_action_create_generic_bulk(ctx, flags, 2531 MLX5DR_ACTION_TYP_INSERT_HEADER, 2532 num_of_hdrs); 2533 if (!action) 2534 return NULL; 2535 2536 reformat_hdrs = simple_calloc(num_of_hdrs, sizeof(*reformat_hdrs)); 2537 if (!reformat_hdrs) { 2538 DR_LOG(ERR, "Failed to allocate memory for reformat_hdrs"); 2539 rte_errno = ENOMEM; 2540 goto free_action; 2541 } 2542 2543 for (i = 0; i < num_of_hdrs; i++) { 2544 if (hdrs[i].offset % W_SIZE != 0) { 2545 DR_LOG(ERR, "Header offset should be in WORD granularity"); 2546 rte_errno = EINVAL; 2547 goto free_reformat_hdrs; 2548 } 2549 2550 action[i].reformat.anchor = hdrs[i].anchor; 2551 action[i].reformat.encap = hdrs[i].encap; 2552 action[i].reformat.push_esp = hdrs[i].push_esp; 2553 action[i].reformat.offset = hdrs[i].offset; 2554 reformat_hdrs[i].sz = hdrs[i].hdr.sz; 2555 reformat_hdrs[i].data = hdrs[i].hdr.data; 2556 } 2557 2558 ret = mlx5dr_action_handle_insert_with_ptr(action, num_of_hdrs, 2559 reformat_hdrs, 
log_bulk_size, 2560 reparse); 2561 if (ret) { 2562 DR_LOG(ERR, "Failed to create HWS reformat action"); 2563 goto free_reformat_hdrs; 2564 } 2565 2566 simple_free(reformat_hdrs); 2567 2568 return action; 2569 2570 free_reformat_hdrs: 2571 simple_free(reformat_hdrs); 2572 free_action: 2573 simple_free(action); 2574 return NULL; 2575 } 2576 2577 struct mlx5dr_action * 2578 mlx5dr_action_create_insert_header(struct mlx5dr_context *ctx, 2579 uint8_t num_of_hdrs, 2580 struct mlx5dr_action_insert_header *hdrs, 2581 uint32_t log_bulk_size, 2582 uint32_t flags) 2583 { 2584 return mlx5dr_action_create_insert_header_reparse(ctx, num_of_hdrs, hdrs, 2585 log_bulk_size, flags, 2586 MLX5DR_ACTION_STC_REPARSE_DEFAULT); 2587 } 2588 2589 struct mlx5dr_action * 2590 mlx5dr_action_create_remove_header(struct mlx5dr_context *ctx, 2591 struct mlx5dr_action_remove_header_attr *attr, 2592 uint32_t flags) 2593 { 2594 struct mlx5dr_action *action; 2595 2596 if (mlx5dr_action_is_root_flags(flags)) { 2597 DR_LOG(ERR, "Remove header action not supported over root"); 2598 rte_errno = ENOTSUP; 2599 return NULL; 2600 } 2601 2602 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_REMOVE_HEADER); 2603 if (!action) 2604 return NULL; 2605 2606 switch (attr->type) { 2607 case MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER: 2608 action->remove_header.type = MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER; 2609 action->remove_header.start_anchor = attr->by_anchor.start_anchor; 2610 action->remove_header.end_anchor = attr->by_anchor.end_anchor; 2611 action->remove_header.decap = attr->by_anchor.decap; 2612 break; 2613 case MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_OFFSET: 2614 if (attr->by_offset.size % W_SIZE != 0) { 2615 DR_LOG(ERR, "Invalid size, HW supports header remove in WORD granularity"); 2616 rte_errno = EINVAL; 2617 goto free_action; 2618 } 2619 2620 if (attr->by_offset.size > MLX5DR_ACTION_REMOVE_HEADER_MAX_SIZE) { 2621 DR_LOG(ERR, "Header removal size limited to %u bytes", 2622 MLX5DR_ACTION_REMOVE_HEADER_MAX_SIZE); 2623 rte_errno = EINVAL; 2624 goto free_action; 2625 } 2626 2627 action->remove_header.type = MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_OFFSET; 2628 action->remove_header.start_anchor = attr->by_offset.start_anchor; 2629 action->remove_header.num_of_words = attr->by_offset.size / W_SIZE; 2630 break; 2631 default: 2632 DR_LOG(ERR, "Unsupported remove header type %u", attr->type); 2633 rte_errno = ENOTSUP; 2634 goto free_action; 2635 } 2636 2637 if (mlx5dr_action_create_stcs(action, NULL)) 2638 goto free_action; 2639 2640 return action; 2641 2642 free_action: 2643 simple_free(action); 2644 return NULL; 2645 } 2646 2647 static void * 2648 mlx5dr_action_create_pop_ipv6_route_ext_mhdr1(struct mlx5dr_action *action) 2649 { 2650 struct mlx5dr_action_mh_pattern pattern; 2651 __be64 cmd[3] = {0}; 2652 uint16_t mod_id; 2653 2654 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0); 2655 if (!mod_id) { 2656 rte_errno = EINVAL; 2657 return NULL; 2658 } 2659 2660 /* 2661 * Backup ipv6_route_ext.next_hdr to ipv6_route_ext.seg_left. 2662 * Next_hdr will be copied to ipv6.protocol after pop done. 
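* This is the first of three modify-header sub-actions built for the pop; the routing extension modify field id is resolved from the flex parser via flow_hw_get_ipv6_route_ext_mod_id_from_ctx().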
2663 */ 2664 MLX5_SET(copy_action_in, &cmd[0], action_type, MLX5_MODIFICATION_TYPE_COPY); 2665 MLX5_SET(copy_action_in, &cmd[0], length, 8); 2666 MLX5_SET(copy_action_in, &cmd[0], src_offset, 24); 2667 MLX5_SET(copy_action_in, &cmd[0], src_field, mod_id); 2668 MLX5_SET(copy_action_in, &cmd[0], dst_field, mod_id); 2669 2670 /* Add nop between the continuous same modify field id */ 2671 MLX5_SET(copy_action_in, &cmd[1], action_type, MLX5_MODIFICATION_TYPE_NOP); 2672 2673 /* Clear next_hdr for right checksum */ 2674 MLX5_SET(set_action_in, &cmd[2], action_type, MLX5_MODIFICATION_TYPE_SET); 2675 MLX5_SET(set_action_in, &cmd[2], length, 8); 2676 MLX5_SET(set_action_in, &cmd[2], offset, 24); 2677 MLX5_SET(set_action_in, &cmd[2], field, mod_id); 2678 2679 pattern.data = cmd; 2680 pattern.sz = sizeof(cmd); 2681 2682 return mlx5dr_action_create_modify_header_reparse(action->ctx, 1, &pattern, 0, 2683 action->flags, 2684 MLX5DR_ACTION_STC_REPARSE_ON); 2685 } 2686 2687 static void * 2688 mlx5dr_action_create_pop_ipv6_route_ext_mhdr2(struct mlx5dr_action *action) 2689 { 2690 enum mlx5_modification_field field[MLX5_ST_SZ_DW(definer_hl_ipv6_addr)] = { 2691 MLX5_MODI_OUT_DIPV6_127_96, 2692 MLX5_MODI_OUT_DIPV6_95_64, 2693 MLX5_MODI_OUT_DIPV6_63_32, 2694 MLX5_MODI_OUT_DIPV6_31_0 2695 }; 2696 struct mlx5dr_action_mh_pattern pattern; 2697 __be64 cmd[5] = {0}; 2698 uint16_t mod_id; 2699 uint32_t i; 2700 2701 /* Copy ipv6_route_ext[first_segment].dst_addr by flex parser to ipv6.dst_addr */ 2702 for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) { 2703 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, i + 1); 2704 if (!mod_id) { 2705 rte_errno = EINVAL; 2706 return NULL; 2707 } 2708 2709 MLX5_SET(copy_action_in, &cmd[i], action_type, MLX5_MODIFICATION_TYPE_COPY); 2710 MLX5_SET(copy_action_in, &cmd[i], dst_field, field[i]); 2711 MLX5_SET(copy_action_in, &cmd[i], src_field, mod_id); 2712 } 2713 2714 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0); 2715 if (!mod_id) { 2716 rte_errno = EINVAL; 2717 return NULL; 2718 } 2719 2720 /* Restore next_hdr from seg_left for flex parser identifying */ 2721 MLX5_SET(copy_action_in, &cmd[4], action_type, MLX5_MODIFICATION_TYPE_COPY); 2722 MLX5_SET(copy_action_in, &cmd[4], length, 8); 2723 MLX5_SET(copy_action_in, &cmd[4], dst_offset, 24); 2724 MLX5_SET(copy_action_in, &cmd[4], src_field, mod_id); 2725 MLX5_SET(copy_action_in, &cmd[4], dst_field, mod_id); 2726 2727 pattern.data = cmd; 2728 pattern.sz = sizeof(cmd); 2729 2730 return mlx5dr_action_create_modify_header_reparse(action->ctx, 1, &pattern, 0, 2731 action->flags, 2732 MLX5DR_ACTION_STC_REPARSE_ON); 2733 } 2734 2735 static void * 2736 mlx5dr_action_create_pop_ipv6_route_ext_mhdr3(struct mlx5dr_action *action) 2737 { 2738 uint8_t cmd[MLX5DR_MODIFY_ACTION_SIZE] = {0}; 2739 struct mlx5dr_action_mh_pattern pattern; 2740 uint16_t mod_id; 2741 2742 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0); 2743 if (!mod_id) { 2744 rte_errno = EINVAL; 2745 return NULL; 2746 } 2747 2748 /* Copy ipv6_route_ext.next_hdr to ipv6.protocol */ 2749 MLX5_SET(copy_action_in, cmd, action_type, MLX5_MODIFICATION_TYPE_COPY); 2750 MLX5_SET(copy_action_in, cmd, length, 8); 2751 MLX5_SET(copy_action_in, cmd, src_offset, 24); 2752 MLX5_SET(copy_action_in, cmd, src_field, mod_id); 2753 MLX5_SET(copy_action_in, cmd, dst_field, MLX5_MODI_OUT_IP_PROTOCOL); 2754 2755 pattern.data = (__be64 *)cmd; 2756 pattern.sz = sizeof(cmd); 2757 2758 return 
mlx5dr_action_create_modify_header_reparse(action->ctx, 1, &pattern, 0, 2759 action->flags, 2760 MLX5DR_ACTION_STC_REPARSE_OFF); 2761 } 2762 2763 static int 2764 mlx5dr_action_create_pop_ipv6_route_ext(struct mlx5dr_action *action) 2765 { 2766 uint8_t anchor_id = flow_hw_get_ipv6_route_ext_anchor_from_ctx(action->ctx); 2767 struct mlx5dr_action_remove_header_attr hdr_attr; 2768 uint32_t i; 2769 2770 if (!anchor_id) { 2771 rte_errno = EINVAL; 2772 return rte_errno; 2773 } 2774 2775 action->ipv6_route_ext.action[0] = 2776 mlx5dr_action_create_pop_ipv6_route_ext_mhdr1(action); 2777 action->ipv6_route_ext.action[1] = 2778 mlx5dr_action_create_pop_ipv6_route_ext_mhdr2(action); 2779 action->ipv6_route_ext.action[2] = 2780 mlx5dr_action_create_pop_ipv6_route_ext_mhdr3(action); 2781 2782 hdr_attr.by_anchor.decap = 1; 2783 hdr_attr.by_anchor.start_anchor = anchor_id; 2784 hdr_attr.by_anchor.end_anchor = MLX5_HEADER_ANCHOR_TCP_UDP; 2785 hdr_attr.type = MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER; 2786 action->ipv6_route_ext.action[3] = 2787 mlx5dr_action_create_remove_header(action->ctx, &hdr_attr, action->flags); 2788 2789 if (!action->ipv6_route_ext.action[0] || !action->ipv6_route_ext.action[1] || 2790 !action->ipv6_route_ext.action[2] || !action->ipv6_route_ext.action[3]) { 2791 DR_LOG(ERR, "Failed to create ipv6_route_ext pop subaction"); 2792 goto err; 2793 } 2794 2795 return 0; 2796 2797 err: 2798 for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++) 2799 if (action->ipv6_route_ext.action[i]) 2800 mlx5dr_action_destroy(action->ipv6_route_ext.action[i]); 2801 2802 return rte_errno; 2803 } 2804 2805 static void * 2806 mlx5dr_action_create_push_ipv6_route_ext_mhdr1(struct mlx5dr_action *action) 2807 { 2808 uint8_t cmd[MLX5DR_MODIFY_ACTION_SIZE] = {0}; 2809 struct mlx5dr_action_mh_pattern pattern; 2810 2811 /* Set ipv6.protocol to IPPROTO_ROUTING */ 2812 MLX5_SET(set_action_in, cmd, action_type, MLX5_MODIFICATION_TYPE_SET); 2813 MLX5_SET(set_action_in, cmd, length, 8); 2814 MLX5_SET(set_action_in, cmd, field, MLX5_MODI_OUT_IP_PROTOCOL); 2815 MLX5_SET(set_action_in, cmd, data, IPPROTO_ROUTING); 2816 2817 pattern.data = (__be64 *)cmd; 2818 pattern.sz = sizeof(cmd); 2819 2820 return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern, 0, 2821 action->flags | MLX5DR_ACTION_FLAG_SHARED); 2822 } 2823 2824 static void * 2825 mlx5dr_action_create_push_ipv6_route_ext_mhdr2(struct mlx5dr_action *action, 2826 uint32_t bulk_size, 2827 uint8_t *data) 2828 { 2829 enum mlx5_modification_field field[MLX5_ST_SZ_DW(definer_hl_ipv6_addr)] = { 2830 MLX5_MODI_OUT_DIPV6_127_96, 2831 MLX5_MODI_OUT_DIPV6_95_64, 2832 MLX5_MODI_OUT_DIPV6_63_32, 2833 MLX5_MODI_OUT_DIPV6_31_0 2834 }; 2835 struct mlx5dr_action_mh_pattern pattern; 2836 uint32_t *ipv6_dst_addr = NULL; 2837 uint8_t seg_left, next_hdr; 2838 __be64 cmd[5] = {0}; 2839 uint16_t mod_id; 2840 uint32_t i; 2841 2842 /* Fetch the last IPv6 address in the segment list */ 2843 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) { 2844 seg_left = MLX5_GET(header_ipv6_routing_ext, data, segments_left) - 1; 2845 ipv6_dst_addr = (uint32_t *)data + MLX5_ST_SZ_DW(header_ipv6_routing_ext) + 2846 seg_left * MLX5_ST_SZ_DW(definer_hl_ipv6_addr); 2847 } 2848 2849 /* Copy IPv6 destination address from ipv6_route_ext.last_segment */ 2850 for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) { 2851 MLX5_SET(set_action_in, &cmd[i], action_type, MLX5_MODIFICATION_TYPE_SET); 2852 MLX5_SET(set_action_in, &cmd[i], field, field[i]); 2853 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) 
2854 MLX5_SET(set_action_in, &cmd[i], data, be32toh(*ipv6_dst_addr++)); 2855 } 2856 2857 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0); 2858 if (!mod_id) { 2859 rte_errno = EINVAL; 2860 return NULL; 2861 } 2862 2863 /* Set ipv6_route_ext.next_hdr since initially pushed as 0 for right checksum */ 2864 MLX5_SET(set_action_in, &cmd[4], action_type, MLX5_MODIFICATION_TYPE_SET); 2865 MLX5_SET(set_action_in, &cmd[4], length, 8); 2866 MLX5_SET(set_action_in, &cmd[4], offset, 24); 2867 MLX5_SET(set_action_in, &cmd[4], field, mod_id); 2868 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) { 2869 next_hdr = MLX5_GET(header_ipv6_routing_ext, data, next_hdr); 2870 MLX5_SET(set_action_in, &cmd[4], data, next_hdr); 2871 } 2872 2873 pattern.data = cmd; 2874 pattern.sz = sizeof(cmd); 2875 2876 return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern, 2877 bulk_size, action->flags); 2878 } 2879 2880 static int 2881 mlx5dr_action_create_push_ipv6_route_ext(struct mlx5dr_action *action, 2882 struct mlx5dr_action_reformat_header *hdr, 2883 uint32_t bulk_size) 2884 { 2885 struct mlx5dr_action_insert_header insert_hdr = { {0} }; 2886 uint8_t header[MLX5_PUSH_MAX_LEN]; 2887 uint32_t i; 2888 2889 if (!hdr || !hdr->sz || hdr->sz > MLX5_PUSH_MAX_LEN || 2890 ((action->flags & MLX5DR_ACTION_FLAG_SHARED) && !hdr->data)) { 2891 DR_LOG(ERR, "Invalid ipv6_route_ext header"); 2892 rte_errno = EINVAL; 2893 return rte_errno; 2894 } 2895 2896 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) { 2897 memcpy(header, hdr->data, hdr->sz); 2898 /* Clear ipv6_route_ext.next_hdr for right checksum */ 2899 MLX5_SET(header_ipv6_routing_ext, header, next_hdr, 0); 2900 } 2901 2902 insert_hdr.anchor = MLX5_HEADER_ANCHOR_TCP_UDP; 2903 insert_hdr.encap = 1; 2904 insert_hdr.hdr.sz = hdr->sz; 2905 insert_hdr.hdr.data = header; 2906 action->ipv6_route_ext.action[0] = 2907 mlx5dr_action_create_insert_header_reparse(action->ctx, 1, &insert_hdr, 2908 bulk_size, action->flags, 2909 MLX5DR_ACTION_STC_REPARSE_OFF); 2910 action->ipv6_route_ext.action[1] = 2911 mlx5dr_action_create_push_ipv6_route_ext_mhdr1(action); 2912 action->ipv6_route_ext.action[2] = 2913 mlx5dr_action_create_push_ipv6_route_ext_mhdr2(action, bulk_size, hdr->data); 2914 2915 if (!action->ipv6_route_ext.action[0] || 2916 !action->ipv6_route_ext.action[1] || 2917 !action->ipv6_route_ext.action[2]) { 2918 DR_LOG(ERR, "Failed to create ipv6_route_ext push subaction"); 2919 goto err; 2920 } 2921 2922 return 0; 2923 2924 err: 2925 for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++) 2926 if (action->ipv6_route_ext.action[i]) 2927 mlx5dr_action_destroy(action->ipv6_route_ext.action[i]); 2928 2929 return rte_errno; 2930 } 2931 2932 struct mlx5dr_action * 2933 mlx5dr_action_create_reformat_ipv6_ext(struct mlx5dr_context *ctx, 2934 enum mlx5dr_action_type action_type, 2935 struct mlx5dr_action_reformat_header *hdr, 2936 uint32_t log_bulk_size, 2937 uint32_t flags) 2938 { 2939 struct mlx5dr_action *action; 2940 int ret; 2941 2942 if (!mlx5dr_action_is_hws_flags(flags) || 2943 ((flags & MLX5DR_ACTION_FLAG_SHARED) && log_bulk_size)) { 2944 DR_LOG(ERR, "IPv6 extension flags don't fit HWS (flags: 0x%x)", flags); 2945 rte_errno = EINVAL; 2946 return NULL; 2947 } 2948 2949 action = mlx5dr_action_create_generic(ctx, flags, action_type); 2950 if (!action) { 2951 rte_errno = ENOMEM; 2952 return NULL; 2953 } 2954 2955 switch (action_type) { 2956 case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT: 2957 if (!(flags & MLX5DR_ACTION_FLAG_SHARED)) { 2958 DR_LOG(ERR, "Pop ipv6_route_ext 
must be shared"); 2959 rte_errno = EINVAL; 2960 goto free_action; 2961 } 2962 2963 ret = mlx5dr_action_create_pop_ipv6_route_ext(action); 2964 break; 2965 case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT: 2966 if (!mlx5dr_context_cap_dynamic_reparse(ctx)) { 2967 DR_LOG(ERR, "IPv6 routing extension push actions is not supported"); 2968 rte_errno = ENOTSUP; 2969 goto free_action; 2970 } 2971 2972 ret = mlx5dr_action_create_push_ipv6_route_ext(action, hdr, log_bulk_size); 2973 break; 2974 default: 2975 DR_LOG(ERR, "Unsupported action type %d\n", action_type); 2976 rte_errno = ENOTSUP; 2977 goto free_action; 2978 } 2979 2980 if (ret) { 2981 DR_LOG(ERR, "Failed to create IPv6 extension reformat action"); 2982 goto free_action; 2983 } 2984 2985 return action; 2986 2987 free_action: 2988 simple_free(action); 2989 return NULL; 2990 } 2991 2992 static bool 2993 mlx5dr_action_nat64_validate_param(struct mlx5dr_action_nat64_attr *attr, 2994 uint32_t flags) 2995 { 2996 if (mlx5dr_action_is_root_flags(flags)) { 2997 DR_LOG(ERR, "Nat64 action not supported for root"); 2998 rte_errno = ENOTSUP; 2999 return false; 3000 } 3001 3002 if (!(flags & MLX5DR_ACTION_FLAG_SHARED)) { 3003 DR_LOG(ERR, "Nat64 action must be with SHARED flag"); 3004 rte_errno = EINVAL; 3005 return false; 3006 } 3007 3008 if (attr->num_of_registers > MLX5DR_ACTION_NAT64_REG_MAX) { 3009 DR_LOG(ERR, "Nat64 action doesn't support more than %d registers", 3010 MLX5DR_ACTION_NAT64_REG_MAX); 3011 rte_errno = EINVAL; 3012 return false; 3013 } 3014 3015 if (attr->flags & MLX5DR_ACTION_NAT64_BACKUP_ADDR && 3016 attr->num_of_registers != MLX5DR_ACTION_NAT64_REG_MAX) { 3017 DR_LOG(ERR, "Nat64 backup addr requires %d registers", 3018 MLX5DR_ACTION_NAT64_REG_MAX); 3019 rte_errno = EINVAL; 3020 return false; 3021 } 3022 3023 if (!(attr->flags & MLX5DR_ACTION_NAT64_V4_TO_V6 || 3024 attr->flags & MLX5DR_ACTION_NAT64_V6_TO_V4)) { 3025 DR_LOG(ERR, "Nat64 backup addr requires one mode at least"); 3026 rte_errno = EINVAL; 3027 return false; 3028 } 3029 3030 return true; 3031 } 3032 3033 struct mlx5dr_action * 3034 mlx5dr_action_create_nat64(struct mlx5dr_context *ctx, 3035 struct mlx5dr_action_nat64_attr *attr, 3036 uint32_t flags) 3037 { 3038 struct mlx5dr_action *action; 3039 3040 if (!mlx5dr_action_nat64_validate_param(attr, flags)) 3041 return NULL; 3042 3043 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_NAT64); 3044 if (!action) 3045 return NULL; 3046 3047 action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY] = 3048 mlx5dr_action_create_nat64_copy_state(ctx, attr, flags); 3049 if (!action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY]) { 3050 DR_LOG(ERR, "Nat64 failed creating copy state"); 3051 goto free_action; 3052 } 3053 3054 action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_REPLACE] = 3055 mlx5dr_action_create_nat64_repalce_state(ctx, attr, flags); 3056 if (!action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_REPLACE]) { 3057 DR_LOG(ERR, "Nat64 failed creating replace state"); 3058 goto free_copy; 3059 } 3060 action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY_PROTOCOL] = 3061 mlx5dr_action_create_nat64_copy_proto_state(ctx, attr, flags); 3062 if (!action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY_PROTOCOL]) { 3063 DR_LOG(ERR, "Nat64 failed creating copy protocol state"); 3064 goto free_replace; 3065 } 3066 3067 action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPYBACK] = 3068 mlx5dr_action_create_nat64_copy_back_state(ctx, attr, flags); 3069 if (!action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPYBACK]) { 3070 DR_LOG(ERR, "Nat64 failed 
creating copyback state"); 3071 goto free_copy_proto; 3072 } 3073 3074 return action; 3075 3076 free_copy_proto: 3077 mlx5dr_action_destroy(action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY_PROTOCOL]); 3078 free_replace: 3079 mlx5dr_action_destroy(action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_REPLACE]); 3080 free_copy: 3081 mlx5dr_action_destroy(action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY]); 3082 free_action: 3083 simple_free(action); 3084 return NULL; 3085 } 3086 3087 struct mlx5dr_action * 3088 mlx5dr_action_create_jump_to_matcher(struct mlx5dr_context *ctx, 3089 struct mlx5dr_action_jump_to_matcher_attr *attr, 3090 uint32_t flags) 3091 { 3092 struct mlx5dr_matcher *matcher = attr->matcher; 3093 struct mlx5dr_matcher_attr *m_attr; 3094 struct mlx5dr_action *action; 3095 3096 if (attr->type != MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX) { 3097 DR_LOG(ERR, "Only jump to matcher by index is supported"); 3098 goto enotsup; 3099 } 3100 3101 if (mlx5dr_action_is_root_flags(flags)) { 3102 DR_LOG(ERR, "Action flags must be only non root (HWS)"); 3103 goto enotsup; 3104 } 3105 3106 if (mlx5dr_table_is_root(matcher->tbl)) { 3107 DR_LOG(ERR, "Root matcher cannot be set as destination"); 3108 goto enotsup; 3109 } 3110 3111 m_attr = &matcher->attr; 3112 3113 if (!(matcher->flags & MLX5DR_MATCHER_FLAGS_STE_ARRAY) && 3114 (m_attr->resizable || m_attr->table.sz_col_log || m_attr->table.sz_row_log)) { 3115 DR_LOG(ERR, "Only STE array or matcher of size 1 can be set as destination"); 3116 goto enotsup; 3117 } 3118 3119 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_JUMP_TO_MATCHER); 3120 if (!action) 3121 return NULL; 3122 3123 action->jump_to_matcher.matcher = matcher; 3124 3125 if (mlx5dr_action_create_stcs(action, NULL)) { 3126 DR_LOG(ERR, "Failed to create action jump to matcher STC"); 3127 simple_free(action); 3128 return NULL; 3129 } 3130 3131 return action; 3132 3133 enotsup: 3134 rte_errno = ENOTSUP; 3135 return NULL; 3136 } 3137 3138 static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) 3139 { 3140 struct mlx5dr_devx_obj *obj = NULL; 3141 uint32_t i; 3142 3143 switch (action->type) { 3144 case MLX5DR_ACTION_TYP_TIR: 3145 mlx5dr_action_destroy_stcs(action); 3146 if (mlx5dr_context_shared_gvmi_used(action->ctx)) 3147 mlx5dr_cmd_destroy_obj(action->alias.devx_obj); 3148 break; 3149 case MLX5DR_ACTION_TYP_MISS: 3150 case MLX5DR_ACTION_TYP_TAG: 3151 case MLX5DR_ACTION_TYP_DROP: 3152 case MLX5DR_ACTION_TYP_CTR: 3153 case MLX5DR_ACTION_TYP_TBL: 3154 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 3155 case MLX5DR_ACTION_TYP_ASO_METER: 3156 case MLX5DR_ACTION_TYP_ASO_CT: 3157 case MLX5DR_ACTION_TYP_PUSH_VLAN: 3158 case MLX5DR_ACTION_TYP_REMOVE_HEADER: 3159 case MLX5DR_ACTION_TYP_VPORT: 3160 case MLX5DR_ACTION_TYP_JUMP_TO_MATCHER: 3161 mlx5dr_action_destroy_stcs(action); 3162 break; 3163 case MLX5DR_ACTION_TYP_DEST_ROOT: 3164 mlx5dr_action_destroy_stcs(action); 3165 mlx5_glue->destroy_steering_anchor(action->root_tbl.sa); 3166 break; 3167 case MLX5DR_ACTION_TYP_POP_VLAN: 3168 mlx5dr_action_destroy_stcs(action); 3169 mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP); 3170 break; 3171 case MLX5DR_ACTION_TYP_DEST_ARRAY: 3172 mlx5dr_action_destroy_stcs(action); 3173 mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island); 3174 for (i = 0; i < action->dest_array.num_dest; i++) { 3175 if (action->dest_array.dest_list[i].ext_reformat) 3176 mlx5dr_cmd_destroy_obj 3177 (action->dest_array.dest_list[i].ext_reformat); 3178 } 3179 
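/* Free the destination list only after its per-entry reformat objects were destroyed above */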
simple_free(action->dest_array.dest_list); 3180 break; 3181 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 3182 case MLX5DR_ACTION_TYP_MODIFY_HDR: 3183 for (i = 0; i < action->modify_header.num_of_patterns; i++) { 3184 mlx5dr_action_destroy_stcs(&action[i]); 3185 if (action[i].modify_header.num_of_actions > 1) { 3186 mlx5dr_pat_put_pattern(action[i].ctx, 3187 action[i].modify_header.pat_obj); 3188 /* Save shared arg object if was used to free */ 3189 if (action[i].modify_header.arg_obj) 3190 obj = action[i].modify_header.arg_obj; 3191 } 3192 } 3193 if (obj) 3194 mlx5dr_cmd_destroy_obj(obj); 3195 break; 3196 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 3197 mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP_L3); 3198 for (i = 0; i < action->reformat.num_of_hdrs; i++) 3199 mlx5dr_action_destroy_stcs(&action[i]); 3200 mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); 3201 break; 3202 case MLX5DR_ACTION_TYP_INSERT_HEADER: 3203 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 3204 for (i = 0; i < action->reformat.num_of_hdrs; i++) 3205 mlx5dr_action_destroy_stcs(&action[i]); 3206 mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); 3207 break; 3208 case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT: 3209 case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT: 3210 for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++) 3211 if (action->ipv6_route_ext.action[i]) 3212 mlx5dr_action_destroy(action->ipv6_route_ext.action[i]); 3213 break; 3214 case MLX5DR_ACTION_TYP_NAT64: 3215 for (i = 0; i < MLX5DR_ACTION_NAT64_STAGES; i++) 3216 mlx5dr_action_destroy(action->nat64.stages[i]); 3217 break; 3218 case MLX5DR_ACTION_TYP_LAST: 3219 break; 3220 default: 3221 DR_LOG(ERR, "Not supported action type: %d", action->type); 3222 assert(false); 3223 } 3224 } 3225 3226 static void mlx5dr_action_destroy_root(struct mlx5dr_action *action) 3227 { 3228 switch (action->type) { 3229 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 3230 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 3231 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 3232 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 3233 case MLX5DR_ACTION_TYP_MODIFY_HDR: 3234 ibv_destroy_flow_action(action->flow_action); 3235 break; 3236 } 3237 } 3238 3239 int mlx5dr_action_destroy(struct mlx5dr_action *action) 3240 { 3241 if (mlx5dr_action_is_root_flags(action->flags)) 3242 mlx5dr_action_destroy_root(action); 3243 else 3244 mlx5dr_action_destroy_hws(action); 3245 3246 simple_free(action); 3247 return 0; 3248 } 3249 3250 /* Called under pthread_spin_lock(&ctx->ctrl_lock) */ 3251 int mlx5dr_action_get_default_stc(struct mlx5dr_context *ctx, 3252 uint8_t tbl_type) 3253 { 3254 struct mlx5dr_cmd_stc_modify_attr stc_attr = {0}; 3255 struct mlx5dr_action_default_stc *default_stc; 3256 int ret; 3257 3258 if (ctx->common_res[tbl_type].default_stc) { 3259 ctx->common_res[tbl_type].default_stc->refcount++; 3260 return 0; 3261 } 3262 3263 default_stc = simple_calloc(1, sizeof(*default_stc)); 3264 if (!default_stc) { 3265 DR_LOG(ERR, "Failed to allocate memory for default STCs"); 3266 rte_errno = ENOMEM; 3267 return rte_errno; 3268 } 3269 3270 stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP; 3271 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW0; 3272 stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; 3273 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3274 &default_stc->nop_ctr); 3275 if (ret) { 3276 DR_LOG(ERR, "Failed to allocate default counter STC"); 3277 goto free_default_stc; 3278 } 3279 3280 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5; 3281 ret = 
mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3282 &default_stc->nop_dw5); 3283 if (ret) { 3284 DR_LOG(ERR, "Failed to allocate default NOP DW5 STC"); 3285 goto free_nop_ctr; 3286 } 3287 3288 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW6; 3289 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3290 &default_stc->nop_dw6); 3291 if (ret) { 3292 DR_LOG(ERR, "Failed to allocate default NOP DW6 STC"); 3293 goto free_nop_dw5; 3294 } 3295 3296 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW7; 3297 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3298 &default_stc->nop_dw7); 3299 if (ret) { 3300 DR_LOG(ERR, "Failed to allocate default NOP DW7 STC"); 3301 goto free_nop_dw6; 3302 } 3303 3304 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT; 3305 if (!mlx5dr_context_shared_gvmi_used(ctx)) { 3306 stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW; 3307 } else { 3308 /* On shared gvmi the default hit behavior is jump to alias end ft */ 3309 stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT; 3310 stc_attr.dest_table_id = ctx->gvmi_res[tbl_type].aliased_end_ft->id; 3311 } 3312 3313 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3314 &default_stc->default_hit); 3315 if (ret) { 3316 DR_LOG(ERR, "Failed to allocate default allow STC"); 3317 goto free_nop_dw7; 3318 } 3319 3320 ctx->common_res[tbl_type].default_stc = default_stc; 3321 ctx->common_res[tbl_type].default_stc->refcount++; 3322 3323 return 0; 3324 3325 free_nop_dw7: 3326 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7); 3327 free_nop_dw6: 3328 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6); 3329 free_nop_dw5: 3330 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5); 3331 free_nop_ctr: 3332 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr); 3333 free_default_stc: 3334 simple_free(default_stc); 3335 return rte_errno; 3336 } 3337 3338 void mlx5dr_action_put_default_stc(struct mlx5dr_context *ctx, 3339 uint8_t tbl_type) 3340 { 3341 struct mlx5dr_action_default_stc *default_stc; 3342 3343 default_stc = ctx->common_res[tbl_type].default_stc; 3344 3345 3346 if (--default_stc->refcount) 3347 return; 3348 3349 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit); 3350 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7); 3351 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6); 3352 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5); 3353 mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr); 3354 simple_free(default_stc); 3355 ctx->common_res[tbl_type].default_stc = NULL; 3356 } 3357 3358 static void mlx5dr_action_modify_write(struct mlx5dr_send_engine *queue, 3359 uint32_t arg_idx, 3360 uint8_t *arg_data, 3361 uint16_t num_of_actions) 3362 { 3363 mlx5dr_arg_write(queue, NULL, arg_idx, arg_data, 3364 num_of_actions * MLX5DR_MODIFY_ACTION_SIZE); 3365 } 3366 3367 void 3368 mlx5dr_action_prepare_decap_l3_data(uint8_t *src, uint8_t *dst, 3369 uint16_t num_of_actions) 3370 { 3371 uint8_t *e_src; 3372 int i; 3373 3374 /* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes 3375 * copy from end of src to the start of dst.
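* (the inline data is copied 4B at a time, walking backwards from the end of src).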
3376 * move to the end, 2 is the leftover from 14B or 18B 3377 */ 3378 if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN) 3379 e_src = src + MLX5DR_ACTION_HDR_LEN_L2; 3380 else 3381 e_src = src + MLX5DR_ACTION_HDR_LEN_L2_W_VLAN; 3382 3383 /* Move dst over the first remove action + zero data */ 3384 dst += MLX5DR_ACTION_DOUBLE_SIZE; 3385 /* Move dst over the first insert ctrl action */ 3386 dst += MLX5DR_ACTION_DOUBLE_SIZE / 2; 3387 /* Actions: 3388 * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b. 3389 * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b. 3390 * the loop is without the last insertion. 3391 */ 3392 for (i = 0; i < num_of_actions - 3; i++) { 3393 e_src -= MLX5DR_ACTION_INLINE_DATA_SIZE; 3394 memcpy(dst, e_src, MLX5DR_ACTION_INLINE_DATA_SIZE); /* data */ 3395 dst += MLX5DR_ACTION_DOUBLE_SIZE; 3396 } 3397 /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */ 3398 e_src -= MLX5DR_ACTION_INLINE_DATA_SIZE / 2; 3399 dst += MLX5DR_ACTION_INLINE_DATA_SIZE / 2; 3400 memcpy(dst, e_src, 2); 3401 } 3402 3403 static int mlx5dr_action_get_shared_stc_offset(struct mlx5dr_context_common_res *common_res, 3404 enum mlx5dr_context_shared_stc_type stc_type) 3405 { 3406 return common_res->shared_stc[stc_type]->remove_header.offset; 3407 } 3408 3409 static struct mlx5dr_actions_wqe_setter * 3410 mlx5dr_action_setter_find_first(struct mlx5dr_actions_wqe_setter *setter, 3411 uint8_t req_flags) 3412 { 3413 /* Use a new setter if requested flags are taken */ 3414 while (setter->flags & req_flags) 3415 setter++; 3416 3417 /* Use current setter in required flags are not used */ 3418 return setter; 3419 } 3420 3421 static void 3422 mlx5dr_action_apply_stc(struct mlx5dr_actions_apply_data *apply, 3423 enum mlx5dr_action_stc_idx stc_idx, 3424 uint8_t action_idx) 3425 { 3426 struct mlx5dr_action *action = apply->rule_action[action_idx].action; 3427 3428 apply->wqe_ctrl->stc_ix[stc_idx] = 3429 htobe32(action->stc[apply->tbl_type].offset); 3430 } 3431 3432 static void 3433 mlx5dr_action_setter_push_vlan(struct mlx5dr_actions_apply_data *apply, 3434 struct mlx5dr_actions_wqe_setter *setter) 3435 { 3436 struct mlx5dr_rule_action *rule_action; 3437 3438 rule_action = &apply->rule_action[setter->idx_double]; 3439 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; 3440 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr; 3441 3442 mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double); 3443 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3444 } 3445 3446 static void 3447 mlx5dr_action_setter_modify_header(struct mlx5dr_actions_apply_data *apply, 3448 struct mlx5dr_actions_wqe_setter *setter) 3449 { 3450 struct mlx5dr_rule_action *rule_action; 3451 uint32_t stc_idx, arg_sz, arg_idx; 3452 struct mlx5dr_action *action; 3453 uint8_t *single_action; 3454 3455 rule_action = &apply->rule_action[setter->idx_double]; 3456 action = rule_action->action + rule_action->modify_header.pattern_idx; 3457 3458 stc_idx = htobe32(action->stc[apply->tbl_type].offset); 3459 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; 3460 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3461 3462 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; 3463 3464 if (action->modify_header.num_of_actions == 1) { 3465 if (action->modify_header.single_action_type == 3466 MLX5_MODIFICATION_TYPE_COPY || 3467 action->modify_header.single_action_type == 3468 MLX5_MODIFICATION_TYPE_ADD_FIELD) { 3469 
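/* A single copy or add_field action carries no inline data to program into DW7 */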
apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0; 3470 return; 3471 } 3472 3473 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) 3474 single_action = (uint8_t *)&action->modify_header.single_action; 3475 else 3476 single_action = rule_action->modify_header.data; 3477 3478 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 3479 *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data); 3480 } else { 3481 /* Argument offset is a multiple of the argument size required for these actions */ 3482 arg_sz = mlx5dr_arg_get_arg_size(action->modify_header.max_num_of_actions); 3483 arg_idx = rule_action->modify_header.offset * arg_sz; 3484 3485 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx); 3486 3487 if (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) { 3488 apply->require_dep = 1; 3489 mlx5dr_action_modify_write(apply->queue, 3490 action->modify_header.arg_obj->id + arg_idx, 3491 rule_action->modify_header.data, 3492 action->modify_header.num_of_actions); 3493 } 3494 } 3495 } 3496 3497 static void 3498 mlx5dr_action_setter_nat64(struct mlx5dr_actions_apply_data *apply, 3499 struct mlx5dr_actions_wqe_setter *setter) 3500 { 3501 struct mlx5dr_rule_action *rule_action; 3502 struct mlx5dr_action *cur_stage_action; 3503 struct mlx5dr_action *action; 3504 uint32_t stc_idx; 3505 3506 rule_action = &apply->rule_action[setter->idx_double]; 3507 action = rule_action->action; 3508 cur_stage_action = action->nat64.stages[setter->stage_idx]; 3509 3510 stc_idx = htobe32(cur_stage_action->stc[apply->tbl_type].offset); 3511 3512 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; 3513 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3514 3515 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; 3516 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0; 3517 } 3518 3519 static void 3520 mlx5dr_action_setter_insert_ptr(struct mlx5dr_actions_apply_data *apply, 3521 struct mlx5dr_actions_wqe_setter *setter) 3522 { 3523 struct mlx5dr_rule_action *rule_action; 3524 uint32_t stc_idx, arg_idx, arg_sz; 3525 struct mlx5dr_action *action; 3526 3527 rule_action = &apply->rule_action[setter->idx_double]; 3528 action = rule_action->action + rule_action->reformat.hdr_idx; 3529 3530 /* Argument offset is a multiple of the argument size required for the header size */ 3531 arg_sz = mlx5dr_arg_data_size_to_arg_size(action->reformat.max_hdr_sz); 3532 arg_idx = rule_action->reformat.offset * arg_sz; 3533 3534 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; 3535 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx); 3536 3537 stc_idx = htobe32(action->stc[apply->tbl_type].offset); 3538 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; 3539 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3540 3541 if (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) { 3542 apply->require_dep = 1; 3543 mlx5dr_arg_write(apply->queue, NULL, 3544 action->reformat.arg_obj->id + arg_idx, 3545 rule_action->reformat.data, 3546 action->reformat.header_size); 3547 } 3548 } 3549 3550 static void 3551 mlx5dr_action_setter_tnl_l3_to_l2(struct mlx5dr_actions_apply_data *apply, 3552 struct mlx5dr_actions_wqe_setter *setter) 3553 { 3554 struct mlx5dr_rule_action *rule_action; 3555 uint32_t stc_idx, arg_sz, arg_idx; 3556 struct mlx5dr_action *action; 3557 3558 rule_action = &apply->rule_action[setter->idx_double]; 3559 action = rule_action->action + rule_action->reformat.hdr_idx; 3560 3561 /* Argument offset is a multiple of the argument size required for the number of actions */ 3562 arg_sz = mlx5dr_arg_get_arg_size(action->modify_header.max_num_of_actions); 3563 arg_idx = rule_action->reformat.offset
* arg_sz; 3564 3565 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; 3566 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx); 3567 3568 stc_idx = htobe32(action->stc[apply->tbl_type].offset); 3569 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; 3570 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3571 3572 if (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) { 3573 apply->require_dep = 1; 3574 mlx5dr_arg_decapl3_write(apply->queue, 3575 action->modify_header.arg_obj->id + arg_idx, 3576 rule_action->reformat.data, 3577 action->modify_header.num_of_actions); 3578 } 3579 } 3580 3581 static void 3582 mlx5dr_action_setter_aso(struct mlx5dr_actions_apply_data *apply, 3583 struct mlx5dr_actions_wqe_setter *setter) 3584 { 3585 struct mlx5dr_rule_action *rule_action; 3586 uint32_t exe_aso_ctrl; 3587 uint32_t offset; 3588 3589 rule_action = &apply->rule_action[setter->idx_double]; 3590 3591 switch (rule_action->action->type) { 3592 case MLX5DR_ACTION_TYP_ASO_METER: 3593 /* exe_aso_ctrl format: 3594 * [STC only and reserved bits 29b][init_color 2b][meter_id 1b] 3595 */ 3596 offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ; 3597 exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ; 3598 exe_aso_ctrl |= rule_action->aso_meter.init_color << 3599 MLX5DR_ACTION_METER_INIT_COLOR_OFFSET; 3600 break; 3601 case MLX5DR_ACTION_TYP_ASO_CT: 3602 /* exe_aso_ctrl CT format: 3603 * [STC only and reserved bits 31b][direction 1b] 3604 */ 3605 offset = rule_action->aso_ct.offset / MLX5_ASO_CT_NUM_PER_OBJ; 3606 exe_aso_ctrl = rule_action->aso_ct.direction; 3607 break; 3608 default: 3609 DR_LOG(ERR, "Unsupported ASO action type: %d", rule_action->action->type); 3610 rte_errno = ENOTSUP; 3611 return; 3612 } 3613 3614 /* aso_object_offset format: [24B] */ 3615 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = htobe32(offset); 3616 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(exe_aso_ctrl); 3617 3618 mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double); 3619 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3620 } 3621 3622 static void 3623 mlx5dr_action_setter_tag(struct mlx5dr_actions_apply_data *apply, 3624 struct mlx5dr_actions_wqe_setter *setter) 3625 { 3626 struct mlx5dr_rule_action *rule_action; 3627 3628 rule_action = &apply->rule_action[setter->idx_single]; 3629 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = htobe32(rule_action->tag.value); 3630 mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW5, setter->idx_single); 3631 } 3632 3633 static void 3634 mlx5dr_action_setter_ctrl_ctr(struct mlx5dr_actions_apply_data *apply, 3635 struct mlx5dr_actions_wqe_setter *setter) 3636 { 3637 struct mlx5dr_rule_action *rule_action; 3638 3639 rule_action = &apply->rule_action[setter->idx_ctr]; 3640 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW0] = htobe32(rule_action->counter.offset); 3641 mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_CTRL, setter->idx_ctr); 3642 } 3643 3644 static void 3645 mlx5dr_action_setter_single(struct mlx5dr_actions_apply_data *apply, 3646 struct mlx5dr_actions_wqe_setter *setter) 3647 { 3648 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0; 3649 mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW5, setter->idx_single); 3650 } 3651 3652 static void 3653 mlx5dr_action_setter_single_double_pop(struct mlx5dr_actions_apply_data *apply, 3654 __rte_unused struct mlx5dr_actions_wqe_setter *setter) 3655 { 3656 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0; 3657 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] 
= 3658 htobe32(mlx5dr_action_get_shared_stc_offset(apply->common_res, 3659 MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP)); 3660 } 3661 3662 static void 3663 mlx5dr_action_setter_hit(struct mlx5dr_actions_apply_data *apply, 3664 struct mlx5dr_actions_wqe_setter *setter) 3665 { 3666 apply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = 0; 3667 mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_HIT, setter->idx_hit); 3668 } 3669 3670 static void 3671 mlx5dr_action_setter_default_hit(struct mlx5dr_actions_apply_data *apply, 3672 __rte_unused struct mlx5dr_actions_wqe_setter *setter) 3673 { 3674 apply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = 0; 3675 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_HIT] = 3676 htobe32(apply->common_res->default_stc->default_hit.offset); 3677 } 3678 3679 static void 3680 mlx5dr_action_setter_hit_matcher(struct mlx5dr_actions_apply_data *apply, 3681 struct mlx5dr_actions_wqe_setter *setter) 3682 { 3683 struct mlx5dr_rule_action *rule_action; 3684 3685 rule_action = &apply->rule_action[setter->idx_hit]; 3686 3687 apply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = 3688 htobe32(rule_action->jump_to_matcher.offset << 6); 3689 mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_HIT, setter->idx_hit); 3690 } 3691 3692 static void 3693 mlx5dr_action_setter_hit_next_action(struct mlx5dr_actions_apply_data *apply, 3694 __rte_unused struct mlx5dr_actions_wqe_setter *setter) 3695 { 3696 apply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = htobe32(apply->next_direct_idx << 6); 3697 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_HIT] = htobe32(apply->jump_to_action_stc); 3698 } 3699 3700 static void 3701 mlx5dr_action_setter_common_decap(struct mlx5dr_actions_apply_data *apply, 3702 __rte_unused struct mlx5dr_actions_wqe_setter *setter) 3703 { 3704 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0; 3705 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] = 3706 htobe32(mlx5dr_action_get_shared_stc_offset(apply->common_res, 3707 MLX5DR_CONTEXT_SHARED_STC_DECAP_L3)); 3708 } 3709 3710 static void 3711 mlx5dr_action_setter_ipv6_route_ext_gen_push_mhdr(uint8_t *data, void *mh_data) 3712 { 3713 uint8_t *action_ptr = mh_data; 3714 uint32_t *ipv6_dst_addr; 3715 uint8_t seg_left; 3716 uint32_t i; 3717 3718 /* Fetch the last IPv6 address in the segment list which is the next hop */ 3719 seg_left = MLX5_GET(header_ipv6_routing_ext, data, segments_left) - 1; 3720 ipv6_dst_addr = (uint32_t *)data + MLX5_ST_SZ_DW(header_ipv6_routing_ext) 3721 + seg_left * MLX5_ST_SZ_DW(definer_hl_ipv6_addr); 3722 3723 /* Load next hop IPv6 address in reverse order to ipv6.dst_address */ 3724 for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) { 3725 MLX5_SET(set_action_in, action_ptr, data, be32toh(*ipv6_dst_addr++)); 3726 action_ptr += MLX5DR_MODIFY_ACTION_SIZE; 3727 } 3728 3729 /* Set ipv6_route_ext.next_hdr per user input */ 3730 MLX5_SET(set_action_in, action_ptr, data, *data); 3731 } 3732 3733 static void 3734 mlx5dr_action_setter_ipv6_route_ext_mhdr(struct mlx5dr_actions_apply_data *apply, 3735 struct mlx5dr_actions_wqe_setter *setter) 3736 { 3737 struct mlx5dr_rule_action *rule_action = apply->rule_action; 3738 struct mlx5dr_actions_wqe_setter tmp_setter = {0}; 3739 struct mlx5dr_rule_action tmp_rule_action; 3740 __be64 cmd[MLX5_SRV6_SAMPLE_NUM] = {0}; 3741 struct mlx5dr_action *ipv6_ext_action; 3742 uint8_t *header; 3743 3744 header = rule_action[setter->idx_double].ipv6_ext.header; 3745 ipv6_ext_action = rule_action[setter->idx_double].action; 3746 tmp_rule_action.action = 
ipv6_ext_action->ipv6_route_ext.action[setter->extra_data]; 3747 3748 if (tmp_rule_action.action->flags & MLX5DR_ACTION_FLAG_SHARED) { 3749 tmp_rule_action.modify_header.offset = 0; 3750 tmp_rule_action.modify_header.pattern_idx = 0; 3751 tmp_rule_action.modify_header.data = NULL; 3752 } else { 3753 /* 3754 * Copy ipv6_dst from ipv6_route_ext.last_seg. 3755 * Set ipv6_route_ext.next_hdr. 3756 */ 3757 mlx5dr_action_setter_ipv6_route_ext_gen_push_mhdr(header, cmd); 3758 tmp_rule_action.modify_header.data = (uint8_t *)cmd; 3759 tmp_rule_action.modify_header.pattern_idx = 0; 3760 tmp_rule_action.modify_header.offset = 3761 rule_action[setter->idx_double].ipv6_ext.offset; 3762 } 3763 3764 apply->rule_action = &tmp_rule_action; 3765 3766 /* Reuse the regular modify header setter */ 3767 mlx5dr_action_setter_modify_header(apply, &tmp_setter); 3768 3769 /* Restore the original rule actions */ 3770 apply->rule_action = rule_action; 3771 } 3772 3773 static void 3774 mlx5dr_action_setter_ipv6_route_ext_insert_ptr(struct mlx5dr_actions_apply_data *apply, 3775 struct mlx5dr_actions_wqe_setter *setter) 3776 { 3777 struct mlx5dr_rule_action *rule_action = apply->rule_action; 3778 struct mlx5dr_actions_wqe_setter tmp_setter = {0}; 3779 struct mlx5dr_rule_action tmp_rule_action; 3780 struct mlx5dr_action *ipv6_ext_action; 3781 uint8_t header[MLX5_PUSH_MAX_LEN]; 3782 3783 ipv6_ext_action = rule_action[setter->idx_double].action; 3784 tmp_rule_action.action = ipv6_ext_action->ipv6_route_ext.action[setter->extra_data]; 3785 3786 if (tmp_rule_action.action->flags & MLX5DR_ACTION_FLAG_SHARED) { 3787 tmp_rule_action.reformat.offset = 0; 3788 tmp_rule_action.reformat.hdr_idx = 0; 3789 tmp_rule_action.reformat.data = NULL; 3790 } else { 3791 memcpy(header, rule_action[setter->idx_double].ipv6_ext.header, 3792 tmp_rule_action.action->reformat.header_size); 3793 /* Clear ipv6_route_ext.next_hdr so the checksum is computed correctly */ 3794 MLX5_SET(header_ipv6_routing_ext, header, next_hdr, 0); 3795 tmp_rule_action.reformat.data = header; 3796 tmp_rule_action.reformat.hdr_idx = 0; 3797 tmp_rule_action.reformat.offset = 3798 rule_action[setter->idx_double].ipv6_ext.offset; 3799 } 3800 3801 apply->rule_action = &tmp_rule_action; 3802 3803 /* Reuse the regular insert pointer setter */ 3804 mlx5dr_action_setter_insert_ptr(apply, &tmp_setter); 3805 3806 /* Restore the original rule actions */ 3807 apply->rule_action = rule_action; 3808 } 3809 3810 static void 3811 mlx5dr_action_setter_ipv6_route_ext_pop(struct mlx5dr_actions_apply_data *apply, 3812 struct mlx5dr_actions_wqe_setter *setter) 3813 { 3814 struct mlx5dr_rule_action *rule_action = &apply->rule_action[setter->idx_single]; 3815 uint8_t idx = MLX5DR_ACTION_IPV6_EXT_MAX_SA - 1; 3816 struct mlx5dr_action *action; 3817 3818 /* Pop the ipv6_route_ext using the set_single logic */ 3819 action = rule_action->action->ipv6_route_ext.action[idx]; 3820 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0; 3821 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] = 3822 htobe32(action->stc[apply->tbl_type].offset); 3823 } 3824 3825 int mlx5dr_action_template_process(struct mlx5dr_action_template *at) 3826 { 3827 struct mlx5dr_actions_wqe_setter *start_setter = at->setters + 1; 3828 enum mlx5dr_action_type *action_type = at->action_type_arr; 3829 struct mlx5dr_actions_wqe_setter *setter = at->setters; 3830 struct mlx5dr_actions_wqe_setter *pop_setter = NULL; 3831 struct mlx5dr_actions_wqe_setter *last_setter; 3832 int i, j; 3833 3834 /* Note: Given action combination must be valid */ 3835 3836 /* Check if the actions were already processed */ 3837 if
(at->num_of_action_stes) 3838 return 0; 3839 3840 for (i = 0; i < MLX5DR_ACTION_MAX_STE; i++) 3841 setter[i].set_hit = &mlx5dr_action_setter_hit_next_action; 3842 3843 /* The same action template setters can be used with a jumbo or match 3844 * STE; to support both cases we reserve the first setter for the 3845 * jumbo STE case, to allow a jump to the first action STE. 3846 * This extra setter can be skipped in some cases at rule creation. 3847 */ 3848 setter = start_setter; 3849 last_setter = start_setter; 3850 3851 for (i = 0; i < at->num_actions; i++) { 3852 switch (action_type[i]) { 3853 case MLX5DR_ACTION_TYP_DROP: 3854 case MLX5DR_ACTION_TYP_TIR: 3855 case MLX5DR_ACTION_TYP_TBL: 3856 case MLX5DR_ACTION_TYP_DEST_ROOT: 3857 case MLX5DR_ACTION_TYP_DEST_ARRAY: 3858 case MLX5DR_ACTION_TYP_VPORT: 3859 case MLX5DR_ACTION_TYP_MISS: 3860 /* Hit action */ 3861 last_setter->flags |= ASF_HIT; 3862 last_setter->set_hit = &mlx5dr_action_setter_hit; 3863 last_setter->idx_hit = i; 3864 break; 3865 3866 case MLX5DR_ACTION_TYP_POP_VLAN: 3867 /* Single remove header to header */ 3868 if (pop_setter) { 3869 /* We have 2 pops, use the shared double-pop STC */ 3870 pop_setter->set_single = &mlx5dr_action_setter_single_double_pop; 3871 break; 3872 } 3873 setter = mlx5dr_action_setter_find_first(last_setter, ASF_SINGLE1 | ASF_MODIFY | ASF_INSERT); 3874 setter->flags |= ASF_SINGLE1 | ASF_REMOVE; 3875 setter->set_single = &mlx5dr_action_setter_single; 3876 setter->idx_single = i; 3877 pop_setter = setter; 3878 break; 3879 3880 case MLX5DR_ACTION_TYP_PUSH_VLAN: 3881 /* Double insert inline */ 3882 setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); 3883 setter->flags |= ASF_DOUBLE | ASF_INSERT; 3884 setter->set_double = &mlx5dr_action_setter_push_vlan; 3885 setter->idx_double = i; 3886 break; 3887 3888 case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT: 3889 /* 3890 * Backup ipv6_route_ext.next_hdr to ipv6_route_ext.seg_left. 3891 * Set ipv6_route_ext.next_hdr to 0 to work around the checksum bug. 3892 */ 3893 setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); 3894 setter->flags |= ASF_DOUBLE | ASF_MODIFY; 3895 setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr; 3896 setter->idx_double = i; 3897 setter->extra_data = 0; 3898 setter++; 3899 3900 /* 3901 * Restore ipv6_route_ext.next_hdr from ipv6_route_ext.seg_left. 3902 * Load the final destination address from flex parser sample 1->4.
3903 */ 3904 setter->flags |= ASF_DOUBLE | ASF_MODIFY; 3905 setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr; 3906 setter->idx_double = i; 3907 setter->extra_data = 1; 3908 setter++; 3909 3910 /* Set the ipv6.protocol per ipv6_route_ext.next_hdr */ 3911 setter->flags |= ASF_DOUBLE | ASF_MODIFY; 3912 setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr; 3913 setter->idx_double = i; 3914 setter->extra_data = 2; 3915 /* Pop ipv6_route_ext */ 3916 setter->flags |= ASF_SINGLE1 | ASF_REMOVE; 3917 setter->set_single = &mlx5dr_action_setter_ipv6_route_ext_pop; 3918 setter->idx_single = i; 3919 at->need_dep_write = true; 3920 break; 3921 3922 case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT: 3923 /* Insert ipv6_route_ext with next_hdr as 0 due to checksum bug */ 3924 setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); 3925 setter->flags |= ASF_DOUBLE | ASF_INSERT; 3926 setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_insert_ptr; 3927 setter->idx_double = i; 3928 setter->extra_data = 0; 3929 setter++; 3930 3931 /* Set ipv6.protocol as IPPROTO_ROUTING: 0x2b */ 3932 setter->flags |= ASF_DOUBLE | ASF_MODIFY; 3933 setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr; 3934 setter->idx_double = i; 3935 setter->extra_data = 1; 3936 setter++; 3937 3938 /* 3939 * Load the right ipv6_route_ext.next_hdr per user input buffer. 3940 * Load the next dest_addr from the ipv6_route_ext.seg_list[last]. 3941 */ 3942 setter->flags |= ASF_DOUBLE | ASF_MODIFY; 3943 setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr; 3944 setter->idx_double = i; 3945 setter->extra_data = 2; 3946 at->need_dep_write = true; 3947 break; 3948 3949 case MLX5DR_ACTION_TYP_MODIFY_HDR: 3950 /* Double modify header list */ 3951 setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); 3952 setter->flags |= ASF_DOUBLE | ASF_MODIFY; 3953 setter->set_double = &mlx5dr_action_setter_modify_header; 3954 setter->idx_double = i; 3955 at->need_dep_write = true; 3956 break; 3957 3958 case MLX5DR_ACTION_TYP_ASO_METER: 3959 case MLX5DR_ACTION_TYP_ASO_CT: 3960 setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE); 3961 setter->flags |= ASF_DOUBLE; 3962 setter->set_double = &mlx5dr_action_setter_aso; 3963 setter->idx_double = i; 3964 break; 3965 3966 case MLX5DR_ACTION_TYP_REMOVE_HEADER: 3967 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 3968 /* Single remove header to header */ 3969 setter = mlx5dr_action_setter_find_first(last_setter, 3970 ASF_SINGLE1 | ASF_MODIFY | ASF_INSERT); 3971 setter->flags |= ASF_SINGLE1 | ASF_REMOVE; 3972 setter->set_single = &mlx5dr_action_setter_single; 3973 setter->idx_single = i; 3974 break; 3975 3976 case MLX5DR_ACTION_TYP_INSERT_HEADER: 3977 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 3978 /* Double insert header with pointer */ 3979 setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); 3980 setter->flags |= ASF_DOUBLE | ASF_INSERT; 3981 setter->set_double = &mlx5dr_action_setter_insert_ptr; 3982 setter->idx_double = i; 3983 at->need_dep_write = true; 3984 break; 3985 3986 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 3987 /* Single remove + Double insert header with pointer */ 3988 setter = mlx5dr_action_setter_find_first(last_setter, ASF_SINGLE1 | ASF_DOUBLE); 3989 setter->flags |= ASF_SINGLE1 | ASF_DOUBLE; 3990 setter->set_double = &mlx5dr_action_setter_insert_ptr; 3991 setter->idx_double = i; 3992 setter->set_single = &mlx5dr_action_setter_common_decap; 3993 setter->idx_single = i; 3994 
at->need_dep_write = true; 3995 break; 3996 3997 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 3998 /* Double modify header list with remove and push inline */ 3999 setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); 4000 setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT; 4001 setter->set_double = &mlx5dr_action_setter_tnl_l3_to_l2; 4002 setter->idx_double = i; 4003 at->need_dep_write = true; 4004 break; 4005 4006 case MLX5DR_ACTION_TYP_TAG: 4007 /* Single TAG action, search for any room from the start */ 4008 setter = mlx5dr_action_setter_find_first(start_setter, ASF_SINGLE1); 4009 setter->flags |= ASF_SINGLE1; 4010 setter->set_single = &mlx5dr_action_setter_tag; 4011 setter->idx_single = i; 4012 break; 4013 4014 case MLX5DR_ACTION_TYP_CTR: 4015 /* Control counter action 4016 * TODO: Currently the counter is executed first. Support is needed 4017 * for a single-action counter action which is done last. 4018 * Example: Decap + CTR 4019 */ 4020 setter = mlx5dr_action_setter_find_first(start_setter, ASF_CTR); 4021 setter->flags |= ASF_CTR; 4022 setter->set_ctr = &mlx5dr_action_setter_ctrl_ctr; 4023 setter->idx_ctr = i; 4024 break; 4025 4026 case MLX5DR_ACTION_TYP_NAT64: 4027 /* NAT64 requires 3 setters, each of which applies a specific modify header */ 4028 for (j = 0; j < MLX5DR_ACTION_NAT64_STAGES; j++) { 4029 setter = mlx5dr_action_setter_find_first(last_setter, 4030 ASF_DOUBLE | ASF_REMOVE); 4031 setter->flags |= ASF_DOUBLE | ASF_MODIFY; 4032 setter->set_double = &mlx5dr_action_setter_nat64; 4033 setter->idx_double = i; 4034 /* The stage indicates which modify-header to push */ 4035 setter->stage_idx = j; 4036 } 4037 break; 4038 4039 case MLX5DR_ACTION_TYP_JUMP_TO_MATCHER: 4040 last_setter->flags |= ASF_HIT; 4041 last_setter->set_hit = &mlx5dr_action_setter_hit_matcher; 4042 last_setter->idx_hit = i; 4043 break; 4044 4045 default: 4046 DR_LOG(ERR, "Unsupported action type: %d", action_type[i]); 4047 rte_errno = ENOTSUP; 4048 assert(false); 4049 return rte_errno; 4050 } 4051 4052 last_setter = RTE_MAX(setter, last_setter); 4053 } 4054 4055 /* Set default hit on the last STE if no hit action provided */ 4056 if (!(last_setter->flags & ASF_HIT)) 4057 last_setter->set_hit = &mlx5dr_action_setter_default_hit; 4058 4059 at->num_of_action_stes = last_setter - start_setter + 1; 4060 4061 /* Check if action template doesn't require any action DWs */ 4062 at->only_term = (at->num_of_action_stes == 1) && 4063 !(last_setter->flags & ~(ASF_CTR | ASF_HIT)); 4064 4065 return 0; 4066 } 4067 4068 struct mlx5dr_action_template * 4069 mlx5dr_action_template_create(const enum mlx5dr_action_type action_type[], 4070 uint32_t flags) 4071 { 4072 struct mlx5dr_action_template *at; 4073 uint8_t num_actions = 0; 4074 int i; 4075 4076 if (flags > MLX5DR_ACTION_TEMPLATE_FLAG_RELAXED_ORDER) { 4077 DR_LOG(ERR, "Unsupported action template flag provided"); 4078 rte_errno = EINVAL; 4079 return NULL; 4080 } 4081 4082 at = simple_calloc(1, sizeof(*at)); 4083 if (!at) { 4084 DR_LOG(ERR, "Failed to allocate action template"); 4085 rte_errno = ENOMEM; 4086 return NULL; 4087 } 4088 4089 at->flags = flags; 4090 4091 while (action_type[num_actions++] != MLX5DR_ACTION_TYP_LAST) 4092 ; 4093 4094 at->num_actions = num_actions - 1; 4095 at->action_type_arr = simple_calloc(num_actions, sizeof(*action_type)); 4096 if (!at->action_type_arr) { 4097 DR_LOG(ERR, "Failed to allocate action type array"); 4098 rte_errno = ENOMEM; 4099 goto free_at; 4100 } 4101 4102 for (i = 0; i < num_actions; i++) 4103 at->action_type_arr[i]
= action_type[i]; 4104 4105 return at; 4106 4107 free_at: 4108 simple_free(at); 4109 return NULL; 4110 } 4111 4112 int mlx5dr_action_template_destroy(struct mlx5dr_action_template *at) 4113 { 4114 simple_free(at->action_type_arr); 4115 simple_free(at); 4116 return 0; 4117 } 4118
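/* Usage sketch (illustrative only; the caller and flow below are assumptions,
 * not part of this file): an action template is built from an array terminated
 * by MLX5DR_ACTION_TYP_LAST, respecting the per-table action order, and its
 * setters are computed by mlx5dr_action_template_process(), typically invoked
 * once the template is attached to a matcher. A hypothetical caller:
 *
 *	enum mlx5dr_action_type types[] = {
 *		MLX5DR_ACTION_TYP_CTR,
 *		MLX5DR_ACTION_TYP_MODIFY_HDR,
 *		MLX5DR_ACTION_TYP_TBL,
 *		MLX5DR_ACTION_TYP_LAST,
 *	};
 *	struct mlx5dr_action_template *at;
 *
 *	at = mlx5dr_action_template_create(types, 0);
 *	if (!at)
 *		return -rte_errno;
 *
 *	// ... pass "at" when creating a matcher; processing fills
 *	// at->num_of_action_stes and the per-STE setters ...
 *
 *	mlx5dr_action_template_destroy(at);
 */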