/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

#define WIRE_PORT 0xFFFF

#define MLX5DR_ACTION_METER_INIT_COLOR_OFFSET 1

/* Header removal size limited to 128B (64 words) */
#define MLX5DR_ACTION_REMOVE_HEADER_MAX_SIZE 128

/* This is the maximum allowed action order for each table type:
 * TX: POP_VLAN, CTR, ASO_METER, ASO_CT, PUSH_VLAN, MODIFY, ENCAP, Term
 * RX: TAG, DECAP, POP_VLAN, CTR, ASO_METER, ASO_CT, PUSH_VLAN, MODIFY,
 *     ENCAP, Term
 * FDB: DECAP, POP_VLAN, CTR, ASO_METER, ASO_CT, PUSH_VLAN, MODIFY,
 *      ENCAP, Term
 */
static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_MAX] = {
	[MLX5DR_TABLE_TYPE_NIC_RX] = {
		BIT(MLX5DR_ACTION_TYP_TAG),
		BIT(MLX5DR_ACTION_TYP_REMOVE_HEADER) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) |
		BIT(MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_CTR),
		BIT(MLX5DR_ACTION_TYP_ASO_METER),
		BIT(MLX5DR_ACTION_TYP_ASO_CT),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_NAT64),
		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
		BIT(MLX5DR_ACTION_TYP_TBL) |
		BIT(MLX5DR_ACTION_TYP_MISS) |
		BIT(MLX5DR_ACTION_TYP_TIR) |
		BIT(MLX5DR_ACTION_TYP_DROP) |
		BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
		BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
		BIT(MLX5DR_ACTION_TYP_LAST),
	},
	[MLX5DR_TABLE_TYPE_NIC_TX] = {
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_CTR),
		BIT(MLX5DR_ACTION_TYP_ASO_METER),
		BIT(MLX5DR_ACTION_TYP_ASO_CT),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_NAT64),
		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
		BIT(MLX5DR_ACTION_TYP_TBL) |
		BIT(MLX5DR_ACTION_TYP_MISS) |
		BIT(MLX5DR_ACTION_TYP_DROP) |
		BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
		BIT(MLX5DR_ACTION_TYP_LAST),
	},
	[MLX5DR_TABLE_TYPE_FDB] = {
		BIT(MLX5DR_ACTION_TYP_REMOVE_HEADER) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) |
		BIT(MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
		BIT(MLX5DR_ACTION_TYP_CTR),
		BIT(MLX5DR_ACTION_TYP_ASO_METER),
		BIT(MLX5DR_ACTION_TYP_ASO_CT),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
		BIT(MLX5DR_ACTION_TYP_NAT64),
		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
		BIT(MLX5DR_ACTION_TYP_TBL) |
		BIT(MLX5DR_ACTION_TYP_MISS) |
		BIT(MLX5DR_ACTION_TYP_VPORT) |
		BIT(MLX5DR_ACTION_TYP_DROP) |
		BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
		BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
		BIT(MLX5DR_ACTION_TYP_LAST),
	},
};
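/* Note on how the table above is consumed (illustrative summary, not part of
 * the original comment): each row is an ordered list of bitmask "slots", and
 * mlx5dr_action_check_combo() below walks a user's action sequence against the
 * row, accepting it only if every requested action matches a slot that comes
 * at or after the slot matched by the previous action.
 */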
static int mlx5dr_action_get_shared_stc_nic(struct mlx5dr_context *ctx,
					    enum mlx5dr_context_shared_stc_type stc_type,
					    uint8_t tbl_type)
{
	struct mlx5dr_cmd_stc_modify_attr stc_attr = {0};
	struct mlx5dr_action_shared_stc *shared_stc;
	int ret;

	pthread_spin_lock(&ctx->ctrl_lock);
	if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
		ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
		pthread_spin_unlock(&ctx->ctrl_lock);
		return 0;
	}

	shared_stc = simple_calloc(1, sizeof(*shared_stc));
	if (!shared_stc) {
		DR_LOG(ERR, "Failed to allocate memory for shared STCs");
		rte_errno = ENOMEM;
		goto unlock_and_out;
	}
	switch (stc_type) {
	case MLX5DR_CONTEXT_SHARED_STC_DECAP_L3:
		stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
		stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5;
		stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
		stc_attr.remove_header.decap = 0;
		stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
		stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
		break;
	case MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP:
		stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
		stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5;
		stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
		stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
		stc_attr.remove_words.num_of_words = MLX5DR_ACTION_HDR_LEN_L2_VLAN;
		break;
	default:
		DR_LOG(ERR, "No such shared STC type: %d", stc_type);
		assert(false);
		rte_errno = EINVAL;
		goto unlock_and_out;
	}

	ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
					     &shared_stc->remove_header);
	if (ret) {
		DR_LOG(ERR, "Failed to allocate shared decap l2 STC");
		goto free_shared_stc;
	}

	ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
	ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;

	pthread_spin_unlock(&ctx->ctrl_lock);

	return 0;

free_shared_stc:
	simple_free(shared_stc);
unlock_and_out:
	pthread_spin_unlock(&ctx->ctrl_lock);
	return rte_errno;
}

static void mlx5dr_action_put_shared_stc_nic(struct mlx5dr_context *ctx,
					     enum mlx5dr_context_shared_stc_type stc_type,
					     uint8_t tbl_type)
{
	struct mlx5dr_action_shared_stc *shared_stc;

	pthread_spin_lock(&ctx->ctrl_lock);
	if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
		pthread_spin_unlock(&ctx->ctrl_lock);
		return;
	}

	shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];

	mlx5dr_action_free_single_stc(ctx, tbl_type, &shared_stc->remove_header);
	simple_free(shared_stc);
	ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
	pthread_spin_unlock(&ctx->ctrl_lock);
}

static int mlx5dr_action_get_shared_stc(struct mlx5dr_action *action,
					enum mlx5dr_context_shared_stc_type stc_type)
{
	struct mlx5dr_context *ctx = action->ctx;
	int ret;

	if (stc_type >= MLX5DR_CONTEXT_SHARED_STC_MAX) {
		assert(false);
		rte_errno = EINVAL;
		return rte_errno;
	}

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) {
		ret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate memory for RX shared STCs (type: %d)",
			       stc_type);
			return ret;
		}
	}

	if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) {
		ret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX);
		if (ret) {
DR_LOG(ERR, "Failed to allocate memory for TX shared STCs(type: %d)", 206 stc_type); 207 goto clean_nic_rx_stc; 208 } 209 } 210 211 if (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB) { 212 ret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_FDB); 213 if (ret) { 214 DR_LOG(ERR, "Failed to allocate memory for FDB shared STCs (type: %d)", 215 stc_type); 216 goto clean_nic_tx_stc; 217 } 218 } 219 220 return 0; 221 222 clean_nic_tx_stc: 223 if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) 224 mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX); 225 clean_nic_rx_stc: 226 if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) 227 mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX); 228 229 return ret; 230 } 231 232 static void mlx5dr_action_put_shared_stc(struct mlx5dr_action *action, 233 enum mlx5dr_context_shared_stc_type stc_type) 234 { 235 struct mlx5dr_context *ctx = action->ctx; 236 237 if (stc_type >= MLX5DR_CONTEXT_SHARED_STC_MAX) { 238 assert(false); 239 return; 240 } 241 242 if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) 243 mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX); 244 245 if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) 246 mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX); 247 248 if (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB) 249 mlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_FDB); 250 } 251 252 static void 253 mlx5dr_action_create_nat64_zero_all_addr(uint8_t **action_ptr, bool is_v4_to_v6) 254 { 255 if (is_v4_to_v6) { 256 MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET); 257 MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV4); 258 MLX5_SET(set_action_in, *action_ptr, data, 0); 259 *action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; 260 261 MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET); 262 MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV4); 263 MLX5_SET(set_action_in, *action_ptr, data, 0); 264 *action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; 265 } else { 266 MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET); 267 MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV6_127_96); 268 MLX5_SET(set_action_in, *action_ptr, data, 0); 269 *action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; 270 271 MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET); 272 MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV6_95_64); 273 MLX5_SET(set_action_in, *action_ptr, data, 0); 274 *action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; 275 276 MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET); 277 MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV6_63_32); 278 MLX5_SET(set_action_in, *action_ptr, data, 0); 279 *action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; 280 281 MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET); 282 MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_SIPV6_31_0); 283 MLX5_SET(set_action_in, *action_ptr, data, 0); 284 *action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; 285 286 MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET); 287 MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV6_127_96); 288 MLX5_SET(set_action_in, *action_ptr, data, 0); 289 *action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; 290 291 MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET); 292 MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV6_95_64); 
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV6_63_32);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(set_action_in, *action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, *action_ptr, field, MLX5_MODI_OUT_DIPV6_31_0);
		MLX5_SET(set_action_in, *action_ptr, data, 0);
		*action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}
}

static struct mlx5dr_action *
mlx5dr_action_create_nat64_copy_state(struct mlx5dr_context *ctx,
				      struct mlx5dr_action_nat64_attr *attr,
				      uint32_t flags)
{
	__be64 modify_action_data[MLX5DR_ACTION_NAT64_MAX_MODIFY_ACTIONS];
	struct mlx5dr_action_mh_pattern pat[2];
	struct mlx5dr_action *action;
	uint32_t packet_len_field;
	uint8_t *action_ptr;
	uint32_t tos_field;
	uint32_t tos_size;
	uint32_t src_addr;
	uint32_t dst_addr;
	bool is_v4_to_v6;
	uint32_t ecn;

	is_v4_to_v6 = attr->flags & MLX5DR_ACTION_NAT64_V4_TO_V6;

	if (is_v4_to_v6) {
		packet_len_field = MLX5_MODI_OUT_IPV4_TOTAL_LEN;
		tos_field = MLX5_MODI_OUT_IP_DSCP;
		tos_size = 6;
		ecn = MLX5_MODI_OUT_IP_ECN;
		src_addr = MLX5_MODI_OUT_SIPV4;
		dst_addr = MLX5_MODI_OUT_DIPV4;
	} else {
		packet_len_field = MLX5_MODI_OUT_IPV6_PAYLOAD_LEN;
		tos_field = MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS;
		tos_size = 8;
		ecn = 0;
		src_addr = MLX5_MODI_OUT_SIPV6_31_0;
		dst_addr = MLX5_MODI_OUT_DIPV6_31_0;
	}

	memset(modify_action_data, 0, sizeof(modify_action_data));
	action_ptr = (uint8_t *)modify_action_data;

	if (attr->flags & MLX5DR_ACTION_NAT64_BACKUP_ADDR) {
		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field, src_addr);
		MLX5_SET(copy_action_in, action_ptr, dst_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_SRC_IP]);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field, dst_addr);
		MLX5_SET(copy_action_in, action_ptr, dst_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_DST_IP]);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	/* | 8 bit - 8 bit - 16 bit |
	 * | TOS - protocol - packet-len |
	 */
	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field, packet_len_field);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_offset, 0); /* 16 bits in the lsb */
	MLX5_SET(copy_action_in, action_ptr, length, 16);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field, MLX5_MODI_OUT_IP_PROTOCOL);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_offset, 16);
	MLX5_SET(copy_action_in, action_ptr, length, 8);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field, tos_field);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_offset, 24);
	MLX5_SET(copy_action_in, action_ptr, length, tos_size);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	/* In IPv4, TOS = {dscp (6 bits) | ecn (2 bits)} */
	if (ecn) {
		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field, ecn);
		MLX5_SET(copy_action_in, action_ptr, dst_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
		MLX5_SET(copy_action_in, action_ptr, dst_offset, 24 + tos_size);
		MLX5_SET(copy_action_in, action_ptr, length, MLX5DR_ACTION_NAT64_ECN_SIZE);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	/* Set sip and dip to 0 in order to get a new csum */
	mlx5dr_action_create_nat64_zero_all_addr(&action_ptr, is_v4_to_v6);

	pat[0].data = modify_action_data;
	pat[0].sz = (action_ptr - (uint8_t *)modify_action_data);

	action = mlx5dr_action_create_modify_header(ctx, 1, pat, 0, flags);
	if (!action) {
		DR_LOG(ERR, "Failed to create copy for NAT64: action_sz: %zu, flags: 0x%x\n",
		       pat[0].sz, flags);
		return NULL;
	}

	return action;
}

static struct mlx5dr_action *
mlx5dr_action_create_nat64_repalce_state(struct mlx5dr_context *ctx,
					 struct mlx5dr_action_nat64_attr *attr,
					 uint32_t flags)
{
	uint32_t address_prefix[MLX5DR_ACTION_NAT64_HEADER_MINUS_ONE] = {0};
	__be64 modify_action_data[MLX5DR_ACTION_NAT64_MAX_MODIFY_ACTIONS];
	struct mlx5dr_action_mh_pattern pat[2];
	struct mlx5dr_action *action;
	uint8_t header_size_in_dw;
	uint8_t *action_ptr;
	uint32_t eth_type;
	bool is_v4_to_v6;
	uint32_t ip_ver;
	int i;

	is_v4_to_v6 = attr->flags & MLX5DR_ACTION_NAT64_V4_TO_V6;

	if (is_v4_to_v6) {
		uint32_t nat64_well_known_pref[] = {0x00010000,
						    0x9bff6400, 0x0, 0x0, 0x0,
						    0x9bff6400, 0x0, 0x0, 0x0};

		header_size_in_dw = MLX5DR_ACTION_NAT64_IPV6_HEADER;
		ip_ver = MLX5DR_ACTION_NAT64_IPV6_VER;
		eth_type = RTE_ETHER_TYPE_IPV6;
		memcpy(address_prefix, nat64_well_known_pref,
		       MLX5DR_ACTION_NAT64_HEADER_MINUS_ONE * sizeof(uint32_t));
	} else {
		/* In order to fix the HW csum issue, make the prefix ready */
		uint32_t ipv4_pref[] = {0x0, 0xffba0000, 0x0, 0x0, 0x0};

		header_size_in_dw = MLX5DR_ACTION_NAT64_IPV4_HEADER;
		ip_ver = MLX5DR_ACTION_NAT64_IPV4_VER;
		eth_type = RTE_ETHER_TYPE_IPV4;
		memcpy(address_prefix, ipv4_pref,
		       MLX5DR_ACTION_NAT64_IPV4_HEADER * sizeof(uint32_t));
	}

	memset(modify_action_data, 0, sizeof(modify_action_data));
	action_ptr = (uint8_t *)modify_action_data;

	MLX5_SET(set_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
	MLX5_SET(set_action_in, action_ptr, field, MLX5_MODI_OUT_ETHERTYPE);
	MLX5_SET(set_action_in, action_ptr, length, 16);
	MLX5_SET(set_action_in, action_ptr, data, eth_type);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	/* Push an empty header carrying the new IP version */
	MLX5_SET(stc_ste_param_insert, action_ptr, action_type,
		 MLX5_MODIFICATION_TYPE_INSERT);
	MLX5_SET(stc_ste_param_insert, action_ptr, inline_data, 0x1);
	MLX5_SET(stc_ste_param_insert, action_ptr, insert_anchor,
		 MLX5_HEADER_ANCHOR_IPV6_IPV4);
	MLX5_SET(stc_ste_param_insert, action_ptr, insert_size, 2);
	MLX5_SET(stc_ste_param_insert, action_ptr, insert_argument, ip_ver);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	for (i = 0; i < header_size_in_dw - 1; i++) {
		MLX5_SET(stc_ste_param_insert, action_ptr, action_type,
			 MLX5_MODIFICATION_TYPE_INSERT);
		MLX5_SET(stc_ste_param_insert, action_ptr, inline_data, 0x1);
		MLX5_SET(stc_ste_param_insert, action_ptr, insert_anchor,
			 MLX5_HEADER_ANCHOR_IPV6_IPV4);
		MLX5_SET(stc_ste_param_insert, action_ptr, insert_size, 2);
		MLX5_SET(stc_ste_param_insert, action_ptr, insert_argument,
			 htobe32(address_prefix[i]));
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	/* Remove orig src/dst addr (8 bytes, 4 words) */
	MLX5_SET(stc_ste_param_remove, action_ptr, action_type,
		 MLX5_MODIFICATION_TYPE_REMOVE);
	MLX5_SET(stc_ste_param_remove, action_ptr, remove_start_anchor,
		 MLX5_HEADER_ANCHOR_IPV6_IPV4);
	MLX5_SET(stc_ste_param_remove, action_ptr, remove_end_anchor,
		 MLX5_HEADER_ANCHOR_TCP_UDP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	pat[0].data = modify_action_data;
	pat[0].sz = action_ptr - (uint8_t *)modify_action_data;

	action = mlx5dr_action_create_modify_header(ctx, 1, pat, 0, flags);
	if (!action) {
		DR_LOG(ERR, "Failed to create action: action_sz: %zu flags: 0x%x\n",
		       pat[0].sz, flags);
		return NULL;
	}

	return action;
}
static struct mlx5dr_action *
mlx5dr_action_create_nat64_copy_proto_state(struct mlx5dr_context *ctx,
					    struct mlx5dr_action_nat64_attr *attr,
					    uint32_t flags)
{
	__be64 modify_action_data[MLX5DR_ACTION_NAT64_MAX_MODIFY_ACTIONS];
	struct mlx5dr_action_mh_pattern pat[2];
	struct mlx5dr_action *action;
	uint8_t *action_ptr;

	memset(modify_action_data, 0, sizeof(modify_action_data));
	action_ptr = (uint8_t *)modify_action_data;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 MLX5_MODI_OUT_IP_PROTOCOL);
	MLX5_SET(copy_action_in, action_ptr, src_offset, 16);
	MLX5_SET(copy_action_in, action_ptr, dst_offset, 0);
	MLX5_SET(copy_action_in, action_ptr, length, 8);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	pat[0].data = modify_action_data;
	pat[0].sz = action_ptr - (uint8_t *)modify_action_data;

	action = mlx5dr_action_create_modify_header_reparse(ctx, 1, pat, 0, flags,
							    MLX5DR_ACTION_STC_REPARSE_ON);
	if (!action) {
		DR_LOG(ERR, "Failed to create action: action_sz: %zu, flags: 0x%x\n",
		       pat[0].sz, flags);
		return NULL;
	}

	return action;
}
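/* Summary of the NAT64 helpers in this area (descriptive note): copy-state
 * saves the packet length, protocol, TOS and optionally the addresses into the
 * configured registers, replace-state swaps the IP header for one of the other
 * version, copy-proto-state writes the saved protocol back, and the
 * copy-back-state helper below restores the remaining saved fields into the
 * new header. The NAT64 action constructor is expected to chain these stages
 * in that order.
 */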
static struct mlx5dr_action *
mlx5dr_action_create_nat64_copy_back_state(struct mlx5dr_context *ctx,
					   struct mlx5dr_action_nat64_attr *attr,
					   uint32_t flags)
{
	__be64 modify_action_data[MLX5DR_ACTION_NAT64_MAX_MODIFY_ACTIONS];
	struct mlx5dr_action_mh_pattern pat[2];
	struct mlx5dr_action *action;
	uint32_t packet_len_field;
	uint32_t packet_len_add;
	uint8_t *action_ptr;
	uint32_t tos_field;
	uint32_t ttl_field;
	uint32_t tos_size;
	uint32_t src_addr;
	uint32_t dst_addr;
	bool is_v4_to_v6;
	uint32_t ecn;

	is_v4_to_v6 = attr->flags & MLX5DR_ACTION_NAT64_V4_TO_V6;

	if (is_v4_to_v6) {
		packet_len_field = MLX5_MODI_OUT_IPV6_PAYLOAD_LEN;
		/* Two's complement of 20, to get -20 in the add operation */
		packet_len_add = MLX5DR_ACTION_NAT64_DEC_20;
		ttl_field = MLX5_MODI_OUT_IPV6_HOPLIMIT;
		src_addr = MLX5_MODI_OUT_SIPV6_31_0;
		dst_addr = MLX5_MODI_OUT_DIPV6_31_0;
		tos_field = MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS;
		tos_size = 8;
		ecn = 0;
	} else {
		packet_len_field = MLX5_MODI_OUT_IPV4_TOTAL_LEN;
		/* IPv4 len includes the 20 bytes of the header, so add 20 over the IPv6 len */
		packet_len_add = MLX5DR_ACTION_NAT64_ADD_20;
		ttl_field = MLX5_MODI_OUT_IPV4_TTL;
		src_addr = MLX5_MODI_OUT_SIPV4;
		dst_addr = MLX5_MODI_OUT_DIPV4;
		tos_field = MLX5_MODI_OUT_IP_DSCP;
		tos_size = 6;
		ecn = MLX5_MODI_OUT_IP_ECN;
	}

	memset(modify_action_data, 0, sizeof(modify_action_data));
	action_ptr = (uint8_t *)modify_action_data;

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_field,
		 packet_len_field);
	MLX5_SET(copy_action_in, action_ptr, src_offset, 32);
	MLX5_SET(copy_action_in, action_ptr, length, 16);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	MLX5_SET(set_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_SET);
	MLX5_SET(set_action_in, action_ptr, field, ttl_field);
	MLX5_SET(set_action_in, action_ptr, length, 8);
	MLX5_SET(set_action_in, action_ptr, data, MLX5DR_ACTION_NAT64_TTL_DEFAULT_VAL);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	/* Copy TOS */
	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
	MLX5_SET(copy_action_in, action_ptr, src_field,
		 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
	MLX5_SET(copy_action_in, action_ptr, dst_field, tos_field);
	MLX5_SET(copy_action_in, action_ptr, src_offset, 24);
	MLX5_SET(copy_action_in, action_ptr, length, tos_size);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	if (ecn) {
		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]);
		MLX5_SET(copy_action_in, action_ptr, dst_field, ecn);
		MLX5_SET(copy_action_in, action_ptr, src_offset, 24 + tos_size);
		MLX5_SET(copy_action_in, action_ptr, length, MLX5DR_ACTION_NAT64_ECN_SIZE);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_NOP);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	/* Copy the original addresses back, if required */
	if (attr->flags & MLX5DR_ACTION_NAT64_BACKUP_ADDR) {
		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_SRC_IP]);
		MLX5_SET(copy_action_in, action_ptr, dst_field, src_addr);
		MLX5_SET(copy_action_in, action_ptr, src_offset, 0);
		MLX5_SET(copy_action_in, action_ptr, length, 32);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

		MLX5_SET(copy_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_COPY);
		MLX5_SET(copy_action_in, action_ptr, src_field,
			 attr->registers[MLX5DR_ACTION_NAT64_REG_DST_IP]);
		MLX5_SET(copy_action_in, action_ptr, dst_field, dst_addr);
		MLX5_SET(copy_action_in, action_ptr, src_offset, 0);
		MLX5_SET(copy_action_in, action_ptr, length, 32);
		action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;
	}

	/* Take off/add 20 bytes of IPv4/IPv6 header from/to the total size */
	MLX5_SET(set_action_in, action_ptr, action_type, MLX5_MODIFICATION_TYPE_ADD);
	MLX5_SET(set_action_in, action_ptr, field, packet_len_field);
	MLX5_SET(set_action_in, action_ptr, data, packet_len_add);
	MLX5_SET(set_action_in, action_ptr, length, 16);
	action_ptr += MLX5DR_ACTION_DOUBLE_SIZE;

	pat[0].data = modify_action_data;
	pat[0].sz = action_ptr - (uint8_t *)modify_action_data;

	action = mlx5dr_action_create_modify_header(ctx, 1, pat, 0, flags);
	if (!action) {
		DR_LOG(ERR, "Failed to create action: action_sz: %zu, flags: 0x%x\n",
		       pat[0].sz, flags);
		return NULL;
	}

	return action;
}
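/* Example of the ordering check (illustrative, derived from the NIC RX row of
 * action_order_arr): a user sequence {CTR, MODIFY_HDR, TBL, LAST} is accepted
 * because each action matches a later slot than the previous one, while
 * {MODIFY_HDR, CTR, TBL, LAST} fails and is dumped by
 * mlx5dr_action_print_combo(), since CTR's slot precedes MODIFY_HDR's.
 */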
static void mlx5dr_action_print_combo(enum mlx5dr_action_type *user_actions)
{
	DR_LOG(ERR, "Invalid action_type sequence");
	while (*user_actions != MLX5DR_ACTION_TYP_LAST) {
		DR_LOG(ERR, "%s", mlx5dr_debug_action_type_to_str(*user_actions));
		user_actions++;
	}
}

bool mlx5dr_action_check_combo(enum mlx5dr_action_type *user_actions,
			       enum mlx5dr_table_type table_type)
{
	const uint32_t *order_arr = action_order_arr[table_type];
	uint8_t order_idx = 0;
	uint8_t user_idx = 0;
	bool valid_combo;

	while (order_arr[order_idx] != BIT(MLX5DR_ACTION_TYP_LAST)) {
		/* User action order validated, move to the next user action */
		if (BIT(user_actions[user_idx]) & order_arr[order_idx])
			user_idx++;

		/* Iterate to the next supported action in the order */
		order_idx++;
	}

	/* The combination is valid if all user actions were processed */
	valid_combo = user_actions[user_idx] == MLX5DR_ACTION_TYP_LAST;
	if (!valid_combo)
		mlx5dr_action_print_combo(user_actions);

	return valid_combo;
}

int mlx5dr_action_root_build_attr(struct mlx5dr_rule_action rule_actions[],
				  uint32_t num_actions,
				  struct mlx5dv_flow_action_attr *attr)
{
	struct mlx5dr_action *action;
	uint32_t i;

	for (i = 0; i < num_actions; i++) {
		action = rule_actions[i].action;

		switch (action->type) {
		case MLX5DR_ACTION_TYP_TBL:
		case MLX5DR_ACTION_TYP_TIR:
			attr[i].type = MLX5DV_FLOW_ACTION_DEST_DEVX;
			attr[i].obj = action->devx_obj;
			break;
		case MLX5DR_ACTION_TYP_TAG:
			attr[i].type = MLX5DV_FLOW_ACTION_TAG;
			attr[i].tag_value = rule_actions[i].tag.value;
			break;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_DEFAULT_MISS
		case MLX5DR_ACTION_TYP_MISS:
			attr[i].type = MLX5DV_FLOW_ACTION_DEFAULT_MISS;
			break;
#endif
		case MLX5DR_ACTION_TYP_DROP:
			attr[i].type = MLX5DV_FLOW_ACTION_DROP;
			break;
		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
		case MLX5DR_ACTION_TYP_MODIFY_HDR:
			attr[i].type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
			attr[i].action = action->flow_action;
break; 749 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS 750 case MLX5DR_ACTION_TYP_CTR: 751 attr[i].type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX; 752 attr[i].obj = action->devx_obj; 753 754 if (rule_actions[i].counter.offset) { 755 DR_LOG(ERR, "Counter offset not supported over root"); 756 rte_errno = ENOTSUP; 757 return rte_errno; 758 } 759 break; 760 #endif 761 default: 762 DR_LOG(ERR, "Found unsupported action type: %d", action->type); 763 rte_errno = ENOTSUP; 764 return rte_errno; 765 } 766 } 767 768 return 0; 769 } 770 771 static bool 772 mlx5dr_action_fixup_stc_attr(struct mlx5dr_context *ctx, 773 struct mlx5dr_cmd_stc_modify_attr *stc_attr, 774 struct mlx5dr_cmd_stc_modify_attr *fixup_stc_attr, 775 enum mlx5dr_table_type table_type, 776 bool is_mirror) 777 { 778 struct mlx5dr_devx_obj *devx_obj; 779 bool use_fixup = false; 780 uint32_t fw_tbl_type; 781 782 fw_tbl_type = mlx5dr_table_get_res_fw_ft_type(table_type, is_mirror); 783 784 switch (stc_attr->action_type) { 785 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: 786 if (!is_mirror) 787 devx_obj = mlx5dr_pool_chunk_get_base_devx_obj(stc_attr->ste_table.ste_pool, 788 &stc_attr->ste_table.ste); 789 else 790 devx_obj = 791 mlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_attr->ste_table.ste_pool, 792 &stc_attr->ste_table.ste); 793 794 *fixup_stc_attr = *stc_attr; 795 fixup_stc_attr->ste_table.ste_obj_id = devx_obj->id; 796 use_fixup = true; 797 break; 798 799 case MLX5_IFC_STC_ACTION_TYPE_ALLOW: 800 if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) { 801 fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT; 802 fixup_stc_attr->action_offset = stc_attr->action_offset; 803 fixup_stc_attr->stc_offset = stc_attr->stc_offset; 804 fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id; 805 fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number; 806 fixup_stc_attr->vport.eswitch_owner_vhca_id_valid = 807 ctx->caps->merged_eswitch; 808 use_fixup = true; 809 } 810 break; 811 812 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: 813 if (stc_attr->vport.vport_num != WIRE_PORT) 814 break; 815 816 if (fw_tbl_type == FS_FT_FDB_RX) { 817 /* The FW doesn't allow to go back to wire in RX, so change it to DROP */ 818 fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; 819 fixup_stc_attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; 820 fixup_stc_attr->stc_offset = stc_attr->stc_offset; 821 } else if (fw_tbl_type == FS_FT_FDB_TX) { 822 /*The FW doesn't allow to go to wire in the TX by JUMP_TO_VPORT*/ 823 fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK; 824 fixup_stc_attr->action_offset = stc_attr->action_offset; 825 fixup_stc_attr->stc_offset = stc_attr->stc_offset; 826 fixup_stc_attr->vport.vport_num = 0; 827 fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id; 828 fixup_stc_attr->vport.eswitch_owner_vhca_id_valid = 829 stc_attr->vport.eswitch_owner_vhca_id_valid; 830 } 831 use_fixup = true; 832 break; 833 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR: 834 /* TIR is allowed on RX side, requires mask in case of FDB */ 835 if (fw_tbl_type == FS_FT_FDB_TX) { 836 fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; 837 fixup_stc_attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; 838 fixup_stc_attr->stc_offset = stc_attr->stc_offset; 839 use_fixup = true; 840 } 841 break; 842 default: 843 break; 844 } 845 846 return use_fixup; 847 } 848 849 int mlx5dr_action_alloc_single_stc(struct mlx5dr_context *ctx, 850 struct mlx5dr_cmd_stc_modify_attr *stc_attr, 851 uint32_t 
table_type, 852 struct mlx5dr_pool_chunk *stc) 853 { 854 struct mlx5dr_cmd_stc_modify_attr cleanup_stc_attr = {0}; 855 struct mlx5dr_pool *stc_pool = ctx->stc_pool[table_type]; 856 struct mlx5dr_cmd_stc_modify_attr fixup_stc_attr = {0}; 857 struct mlx5dr_devx_obj *devx_obj_0; 858 bool use_fixup; 859 int ret; 860 861 ret = mlx5dr_pool_chunk_alloc(stc_pool, stc); 862 if (ret) { 863 DR_LOG(ERR, "Failed to allocate single action STC"); 864 return ret; 865 } 866 867 stc_attr->stc_offset = stc->offset; 868 869 /* Dynamic reparse not supported, overwrite and use default */ 870 if (!mlx5dr_context_cap_dynamic_reparse(ctx)) 871 stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; 872 873 devx_obj_0 = mlx5dr_pool_chunk_get_base_devx_obj(stc_pool, stc); 874 875 /* According to table/action limitation change the stc_attr */ 876 use_fixup = mlx5dr_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false); 877 ret = mlx5dr_cmd_stc_modify(devx_obj_0, use_fixup ? &fixup_stc_attr : stc_attr); 878 if (ret) { 879 DR_LOG(ERR, "Failed to modify STC action_type %d tbl_type %d", 880 stc_attr->action_type, table_type); 881 goto free_chunk; 882 } 883 884 /* Modify the FDB peer */ 885 if (table_type == MLX5DR_TABLE_TYPE_FDB) { 886 struct mlx5dr_devx_obj *devx_obj_1; 887 888 devx_obj_1 = mlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_pool, stc); 889 890 use_fixup = mlx5dr_action_fixup_stc_attr(ctx, stc_attr, 891 &fixup_stc_attr, 892 table_type, true); 893 ret = mlx5dr_cmd_stc_modify(devx_obj_1, use_fixup ? &fixup_stc_attr : stc_attr); 894 if (ret) { 895 DR_LOG(ERR, "Failed to modify peer STC action_type %d tbl_type %d", 896 stc_attr->action_type, table_type); 897 goto clean_devx_obj_0; 898 } 899 } 900 901 return 0; 902 903 clean_devx_obj_0: 904 cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; 905 cleanup_stc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT; 906 cleanup_stc_attr.stc_offset = stc->offset; 907 mlx5dr_cmd_stc_modify(devx_obj_0, &cleanup_stc_attr); 908 free_chunk: 909 mlx5dr_pool_chunk_free(stc_pool, stc); 910 return rte_errno; 911 } 912 913 void mlx5dr_action_free_single_stc(struct mlx5dr_context *ctx, 914 uint32_t table_type, 915 struct mlx5dr_pool_chunk *stc) 916 { 917 struct mlx5dr_pool *stc_pool = ctx->stc_pool[table_type]; 918 struct mlx5dr_cmd_stc_modify_attr stc_attr = {0}; 919 struct mlx5dr_devx_obj *devx_obj; 920 921 /* Modify the STC not to point to an object */ 922 stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; 923 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT; 924 stc_attr.stc_offset = stc->offset; 925 devx_obj = mlx5dr_pool_chunk_get_base_devx_obj(stc_pool, stc); 926 mlx5dr_cmd_stc_modify(devx_obj, &stc_attr); 927 928 if (table_type == MLX5DR_TABLE_TYPE_FDB) { 929 devx_obj = mlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_pool, stc); 930 mlx5dr_cmd_stc_modify(devx_obj, &stc_attr); 931 } 932 933 mlx5dr_pool_chunk_free(stc_pool, stc); 934 } 935 936 static uint32_t mlx5dr_action_get_mh_stc_type(__be64 pattern) 937 { 938 uint8_t action_type = MLX5_GET(set_action_in, &pattern, action_type); 939 940 switch (action_type) { 941 case MLX5_MODIFICATION_TYPE_SET: 942 return MLX5_IFC_STC_ACTION_TYPE_SET; 943 case MLX5_MODIFICATION_TYPE_ADD: 944 return MLX5_IFC_STC_ACTION_TYPE_ADD; 945 case MLX5_MODIFICATION_TYPE_COPY: 946 return MLX5_IFC_STC_ACTION_TYPE_COPY; 947 case MLX5_MODIFICATION_TYPE_ADD_FIELD: 948 return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD; 949 default: 950 assert(false); 951 DR_LOG(ERR, "Unsupported action type: 0x%x", action_type); 952 rte_errno = ENOTSUP; 
953 return MLX5_IFC_STC_ACTION_TYPE_NOP; 954 } 955 } 956 957 static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action, 958 struct mlx5dr_devx_obj *obj, 959 struct mlx5dr_cmd_stc_modify_attr *attr) 960 { 961 attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; 962 963 switch (action->type) { 964 case MLX5DR_ACTION_TYP_TAG: 965 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG; 966 attr->action_offset = MLX5DR_ACTION_OFFSET_DW5; 967 break; 968 case MLX5DR_ACTION_TYP_DROP: 969 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; 970 attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; 971 break; 972 case MLX5DR_ACTION_TYP_MISS: 973 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW; 974 attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; 975 break; 976 case MLX5DR_ACTION_TYP_CTR: 977 attr->id = obj->id; 978 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER; 979 attr->action_offset = MLX5DR_ACTION_OFFSET_DW0; 980 break; 981 case MLX5DR_ACTION_TYP_TIR: 982 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR; 983 attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; 984 attr->dest_tir_num = obj->id; 985 break; 986 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 987 case MLX5DR_ACTION_TYP_MODIFY_HDR: 988 attr->action_offset = MLX5DR_ACTION_OFFSET_DW6; 989 attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; 990 if (action->modify_header.require_reparse) 991 attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; 992 993 if (action->modify_header.num_of_actions == 1) { 994 attr->modify_action.data = action->modify_header.single_action; 995 attr->action_type = mlx5dr_action_get_mh_stc_type(attr->modify_action.data); 996 997 if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD || 998 attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET) 999 MLX5_SET(set_action_in, &attr->modify_action.data, data, 0); 1000 } else { 1001 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST; 1002 attr->modify_header.arg_id = action->modify_header.arg_obj->id; 1003 attr->modify_header.pattern_id = action->modify_header.pat_obj->id; 1004 } 1005 break; 1006 case MLX5DR_ACTION_TYP_TBL: 1007 case MLX5DR_ACTION_TYP_DEST_ARRAY: 1008 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT; 1009 attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; 1010 attr->dest_table_id = obj->id; 1011 break; 1012 case MLX5DR_ACTION_TYP_DEST_ROOT: 1013 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT; 1014 attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; 1015 attr->dest_table_id = action->root_tbl.sa->id; 1016 break; 1017 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 1018 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE; 1019 attr->action_offset = MLX5DR_ACTION_OFFSET_DW5; 1020 attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; 1021 attr->remove_header.decap = 1; 1022 attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START; 1023 attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC; 1024 break; 1025 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 1026 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 1027 case MLX5DR_ACTION_TYP_INSERT_HEADER: 1028 attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; 1029 if (!action->reformat.require_reparse) 1030 attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; 1031 1032 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT; 1033 attr->action_offset = MLX5DR_ACTION_OFFSET_DW6; 1034 attr->insert_header.encap = action->reformat.encap; 1035 attr->insert_header.push_esp = action->reformat.push_esp; 1036 attr->insert_header.insert_anchor = action->reformat.anchor; 1037 
attr->insert_header.arg_id = action->reformat.arg_obj->id; 1038 attr->insert_header.header_size = action->reformat.header_size; 1039 attr->insert_header.insert_offset = action->reformat.offset; 1040 break; 1041 case MLX5DR_ACTION_TYP_ASO_METER: 1042 attr->action_offset = MLX5DR_ACTION_OFFSET_DW6; 1043 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO; 1044 attr->aso.aso_type = ASO_OPC_MOD_POLICER; 1045 attr->aso.devx_obj_id = obj->id; 1046 attr->aso.return_reg_id = action->aso.return_reg_id; 1047 break; 1048 case MLX5DR_ACTION_TYP_ASO_CT: 1049 attr->action_offset = MLX5DR_ACTION_OFFSET_DW6; 1050 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO; 1051 attr->aso.aso_type = ASO_OPC_MOD_CONNECTION_TRACKING; 1052 attr->aso.devx_obj_id = obj->id; 1053 attr->aso.return_reg_id = action->aso.return_reg_id; 1054 break; 1055 case MLX5DR_ACTION_TYP_VPORT: 1056 attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; 1057 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT; 1058 attr->vport.vport_num = action->vport.vport_num; 1059 attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id; 1060 attr->vport.eswitch_owner_vhca_id_valid = action->ctx->caps->merged_eswitch; 1061 break; 1062 case MLX5DR_ACTION_TYP_POP_VLAN: 1063 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS; 1064 attr->action_offset = MLX5DR_ACTION_OFFSET_DW5; 1065 attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; 1066 attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START; 1067 attr->remove_words.num_of_words = MLX5DR_ACTION_HDR_LEN_L2_VLAN / 2; 1068 break; 1069 case MLX5DR_ACTION_TYP_PUSH_VLAN: 1070 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT; 1071 attr->action_offset = MLX5DR_ACTION_OFFSET_DW6; 1072 attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; 1073 attr->insert_header.encap = 0; 1074 attr->insert_header.push_esp = 0; 1075 attr->insert_header.is_inline = 1; 1076 attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START; 1077 attr->insert_header.insert_offset = MLX5DR_ACTION_HDR_LEN_L2_MACS; 1078 attr->insert_header.header_size = MLX5DR_ACTION_HDR_LEN_L2_VLAN; 1079 break; 1080 case MLX5DR_ACTION_TYP_REMOVE_HEADER: 1081 if (action->remove_header.type == MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER) { 1082 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE; 1083 attr->remove_header.decap = action->remove_header.decap; 1084 attr->remove_header.start_anchor = action->remove_header.start_anchor; 1085 attr->remove_header.end_anchor = action->remove_header.end_anchor; 1086 } else { 1087 attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS; 1088 attr->remove_words.start_anchor = action->remove_header.start_anchor; 1089 attr->remove_words.num_of_words = action->remove_header.num_of_words; 1090 } 1091 attr->action_offset = MLX5DR_ACTION_OFFSET_DW5; 1092 attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; 1093 break; 1094 default: 1095 DR_LOG(ERR, "Invalid action type %d", action->type); 1096 assert(false); 1097 } 1098 } 1099 1100 static int 1101 mlx5dr_action_create_stcs(struct mlx5dr_action *action, 1102 struct mlx5dr_devx_obj *obj) 1103 { 1104 struct mlx5dr_cmd_stc_modify_attr stc_attr = {0}; 1105 struct mlx5dr_context *ctx = action->ctx; 1106 int ret; 1107 1108 mlx5dr_action_fill_stc_attr(action, obj, &stc_attr); 1109 1110 /* Block unsupported parallel devx obj modify over the same base */ 1111 pthread_spin_lock(&ctx->ctrl_lock); 1112 1113 /* Allocate STC for RX */ 1114 if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) { 1115 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, 1116 
MLX5DR_TABLE_TYPE_NIC_RX, 1117 &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]); 1118 if (ret) 1119 goto out_err; 1120 } 1121 1122 /* Allocate STC for TX */ 1123 if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) { 1124 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, 1125 MLX5DR_TABLE_TYPE_NIC_TX, 1126 &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]); 1127 if (ret) 1128 goto free_nic_rx_stc; 1129 } 1130 1131 /* Allocate STC for FDB */ 1132 if (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB) { 1133 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, 1134 MLX5DR_TABLE_TYPE_FDB, 1135 &action->stc[MLX5DR_TABLE_TYPE_FDB]); 1136 if (ret) 1137 goto free_nic_tx_stc; 1138 } 1139 1140 pthread_spin_unlock(&ctx->ctrl_lock); 1141 1142 return 0; 1143 1144 free_nic_tx_stc: 1145 if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) 1146 mlx5dr_action_free_single_stc(ctx, 1147 MLX5DR_TABLE_TYPE_NIC_TX, 1148 &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]); 1149 free_nic_rx_stc: 1150 if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) 1151 mlx5dr_action_free_single_stc(ctx, 1152 MLX5DR_TABLE_TYPE_NIC_RX, 1153 &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]); 1154 out_err: 1155 pthread_spin_unlock(&ctx->ctrl_lock); 1156 return rte_errno; 1157 } 1158 1159 static void 1160 mlx5dr_action_destroy_stcs(struct mlx5dr_action *action) 1161 { 1162 struct mlx5dr_context *ctx = action->ctx; 1163 1164 /* Block unsupported parallel devx obj modify over the same base */ 1165 pthread_spin_lock(&ctx->ctrl_lock); 1166 1167 if (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) 1168 mlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_NIC_RX, 1169 &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]); 1170 1171 if (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) 1172 mlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_NIC_TX, 1173 &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]); 1174 1175 if (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB) 1176 mlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_FDB, 1177 &action->stc[MLX5DR_TABLE_TYPE_FDB]); 1178 1179 pthread_spin_unlock(&ctx->ctrl_lock); 1180 } 1181 1182 static bool 1183 mlx5dr_action_is_root_flags(uint32_t flags) 1184 { 1185 return flags & (MLX5DR_ACTION_FLAG_ROOT_RX | 1186 MLX5DR_ACTION_FLAG_ROOT_TX | 1187 MLX5DR_ACTION_FLAG_ROOT_FDB); 1188 } 1189 1190 static bool 1191 mlx5dr_action_is_hws_flags(uint32_t flags) 1192 { 1193 return flags & (MLX5DR_ACTION_FLAG_HWS_RX | 1194 MLX5DR_ACTION_FLAG_HWS_TX | 1195 MLX5DR_ACTION_FLAG_HWS_FDB); 1196 } 1197 1198 static struct mlx5dr_action * 1199 mlx5dr_action_create_generic_bulk(struct mlx5dr_context *ctx, 1200 uint32_t flags, 1201 enum mlx5dr_action_type action_type, 1202 uint8_t bulk_sz) 1203 { 1204 struct mlx5dr_action *action; 1205 int i; 1206 1207 if (!mlx5dr_action_is_root_flags(flags) && 1208 !mlx5dr_action_is_hws_flags(flags)) { 1209 DR_LOG(ERR, "Action flags must specify root or non root (HWS)"); 1210 rte_errno = ENOTSUP; 1211 return NULL; 1212 } 1213 1214 if (mlx5dr_action_is_hws_flags(flags) && 1215 !(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT)) { 1216 DR_LOG(ERR, "Cannot create HWS action since HWS is not supported"); 1217 rte_errno = ENOTSUP; 1218 return NULL; 1219 } 1220 1221 action = simple_calloc(bulk_sz, sizeof(*action)); 1222 if (!action) { 1223 DR_LOG(ERR, "Failed to allocate memory for action [%d]", action_type); 1224 rte_errno = ENOMEM; 1225 return NULL; 1226 } 1227 1228 for (i = 0; i < bulk_sz; i++) { 1229 action[i].ctx = ctx; 1230 action[i].flags = flags; 1231 action[i].type = action_type; 1232 } 1233 1234 return action; 1235 } 1236 1237 static struct mlx5dr_action * 1238 
mlx5dr_action_create_generic(struct mlx5dr_context *ctx, 1239 uint32_t flags, 1240 enum mlx5dr_action_type action_type) 1241 { 1242 return mlx5dr_action_create_generic_bulk(ctx, flags, action_type, 1); 1243 } 1244 1245 struct mlx5dr_action * 1246 mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx, 1247 struct mlx5dr_table *tbl, 1248 uint32_t flags) 1249 { 1250 struct mlx5dr_action *action; 1251 int ret; 1252 1253 if (mlx5dr_table_is_root(tbl)) { 1254 DR_LOG(ERR, "Root table cannot be set as destination"); 1255 rte_errno = ENOTSUP; 1256 return NULL; 1257 } 1258 1259 if (mlx5dr_action_is_hws_flags(flags) && 1260 mlx5dr_action_is_root_flags(flags)) { 1261 DR_LOG(ERR, "Same action cannot be used for root and non root"); 1262 rte_errno = ENOTSUP; 1263 return NULL; 1264 } 1265 1266 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_TBL); 1267 if (!action) 1268 return NULL; 1269 1270 if (mlx5dr_action_is_root_flags(flags)) { 1271 if (mlx5dr_context_shared_gvmi_used(ctx)) 1272 action->devx_obj = tbl->local_ft->obj; 1273 else 1274 action->devx_obj = tbl->ft->obj; 1275 } else { 1276 ret = mlx5dr_action_create_stcs(action, tbl->ft); 1277 if (ret) 1278 goto free_action; 1279 1280 action->devx_dest.devx_obj = tbl->ft; 1281 } 1282 1283 return action; 1284 1285 free_action: 1286 simple_free(action); 1287 return NULL; 1288 } 1289 1290 static int mlx5dr_action_get_dest_tir_obj(struct mlx5dr_context *ctx, 1291 struct mlx5dr_action *action, 1292 struct mlx5dr_devx_obj *obj, 1293 struct mlx5dr_devx_obj **ret_obj) 1294 { 1295 int ret; 1296 1297 if (mlx5dr_context_shared_gvmi_used(ctx)) { 1298 ret = mlx5dr_matcher_create_aliased_obj(ctx, 1299 ctx->local_ibv_ctx, 1300 ctx->ibv_ctx, 1301 ctx->caps->vhca_id, 1302 obj->id, 1303 MLX5_GENERAL_OBJ_TYPE_TIR_ALIAS, 1304 &action->alias.devx_obj); 1305 if (ret) { 1306 DR_LOG(ERR, "Failed to create tir alias"); 1307 return rte_errno; 1308 } 1309 *ret_obj = action->alias.devx_obj; 1310 } else { 1311 *ret_obj = obj; 1312 } 1313 1314 return 0; 1315 } 1316 1317 struct mlx5dr_action * 1318 mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx, 1319 struct mlx5dr_devx_obj *obj, 1320 uint32_t flags, 1321 bool is_local) 1322 { 1323 struct mlx5dr_action *action; 1324 int ret; 1325 1326 if (mlx5dr_action_is_hws_flags(flags) && 1327 mlx5dr_action_is_root_flags(flags)) { 1328 DR_LOG(ERR, "Same action cannot be used for root and non root"); 1329 rte_errno = ENOTSUP; 1330 return NULL; 1331 } 1332 1333 if ((flags & MLX5DR_ACTION_FLAG_ROOT_FDB) || 1334 (flags & MLX5DR_ACTION_FLAG_HWS_FDB && !ctx->caps->fdb_tir_stc)) { 1335 DR_LOG(ERR, "TIR action not support on FDB"); 1336 rte_errno = ENOTSUP; 1337 return NULL; 1338 } 1339 1340 if (!is_local) { 1341 DR_LOG(ERR, "TIR should be created on local ibv_device, flags: 0x%x", 1342 flags); 1343 rte_errno = ENOTSUP; 1344 return NULL; 1345 } 1346 1347 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_TIR); 1348 if (!action) 1349 return NULL; 1350 1351 if (mlx5dr_action_is_root_flags(flags)) { 1352 action->devx_obj = obj->obj; 1353 } else { 1354 struct mlx5dr_devx_obj *cur_obj = NULL; /*compilation warn*/ 1355 1356 ret = mlx5dr_action_get_dest_tir_obj(ctx, action, obj, &cur_obj); 1357 if (ret) { 1358 DR_LOG(ERR, "Failed to create tir alias (flags: %d)", flags); 1359 goto free_action; 1360 } 1361 1362 ret = mlx5dr_action_create_stcs(action, cur_obj); 1363 if (ret) 1364 goto clean_obj; 1365 1366 action->devx_dest.devx_obj = cur_obj; 1367 } 1368 1369 return action; 1370 1371 clean_obj: 1372 
mlx5dr_cmd_destroy_obj(action->alias.devx_obj); 1373 free_action: 1374 simple_free(action); 1375 return NULL; 1376 } 1377 1378 struct mlx5dr_action * 1379 mlx5dr_action_create_dest_drop(struct mlx5dr_context *ctx, 1380 uint32_t flags) 1381 { 1382 struct mlx5dr_action *action; 1383 int ret; 1384 1385 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DROP); 1386 if (!action) 1387 return NULL; 1388 1389 if (mlx5dr_action_is_hws_flags(flags)) { 1390 ret = mlx5dr_action_create_stcs(action, NULL); 1391 if (ret) 1392 goto free_action; 1393 } 1394 1395 return action; 1396 1397 free_action: 1398 simple_free(action); 1399 return NULL; 1400 } 1401 1402 struct mlx5dr_action * 1403 mlx5dr_action_create_default_miss(struct mlx5dr_context *ctx, 1404 uint32_t flags) 1405 { 1406 struct mlx5dr_action *action; 1407 int ret; 1408 1409 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_MISS); 1410 if (!action) 1411 return NULL; 1412 1413 if (mlx5dr_action_is_hws_flags(flags)) { 1414 ret = mlx5dr_action_create_stcs(action, NULL); 1415 if (ret) 1416 goto free_action; 1417 } 1418 1419 return action; 1420 1421 free_action: 1422 simple_free(action); 1423 return NULL; 1424 } 1425 1426 struct mlx5dr_action * 1427 mlx5dr_action_create_tag(struct mlx5dr_context *ctx, 1428 uint32_t flags) 1429 { 1430 struct mlx5dr_action *action; 1431 int ret; 1432 1433 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_TAG); 1434 if (!action) 1435 return NULL; 1436 1437 if (mlx5dr_action_is_hws_flags(flags)) { 1438 ret = mlx5dr_action_create_stcs(action, NULL); 1439 if (ret) 1440 goto free_action; 1441 } 1442 1443 return action; 1444 1445 free_action: 1446 simple_free(action); 1447 return NULL; 1448 } 1449 1450 struct mlx5dr_action * 1451 mlx5dr_action_create_last(struct mlx5dr_context *ctx, 1452 uint32_t flags) 1453 { 1454 return mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_LAST); 1455 } 1456 1457 static struct mlx5dr_action * 1458 mlx5dr_action_create_aso(struct mlx5dr_context *ctx, 1459 enum mlx5dr_action_type action_type, 1460 struct mlx5dr_devx_obj *devx_obj, 1461 uint8_t return_reg_id, 1462 uint32_t flags) 1463 { 1464 struct mlx5dr_action *action; 1465 int ret; 1466 1467 if (mlx5dr_action_is_root_flags(flags)) { 1468 DR_LOG(ERR, "ASO action cannot be used over root table"); 1469 rte_errno = ENOTSUP; 1470 return NULL; 1471 } 1472 1473 action = mlx5dr_action_create_generic(ctx, flags, action_type); 1474 if (!action) 1475 return NULL; 1476 1477 action->aso.devx_obj = devx_obj; 1478 action->aso.return_reg_id = return_reg_id; 1479 1480 ret = mlx5dr_action_create_stcs(action, devx_obj); 1481 if (ret) 1482 goto free_action; 1483 1484 return action; 1485 1486 free_action: 1487 simple_free(action); 1488 return NULL; 1489 } 1490 1491 struct mlx5dr_action * 1492 mlx5dr_action_create_aso_meter(struct mlx5dr_context *ctx, 1493 struct mlx5dr_devx_obj *devx_obj, 1494 uint8_t return_reg_id, 1495 uint32_t flags) 1496 { 1497 return mlx5dr_action_create_aso(ctx, MLX5DR_ACTION_TYP_ASO_METER, 1498 devx_obj, return_reg_id, flags); 1499 } 1500 1501 struct mlx5dr_action * 1502 mlx5dr_action_create_aso_ct(struct mlx5dr_context *ctx, 1503 struct mlx5dr_devx_obj *devx_obj, 1504 uint8_t return_reg_id, 1505 uint32_t flags) 1506 { 1507 return mlx5dr_action_create_aso(ctx, MLX5DR_ACTION_TYP_ASO_CT, 1508 devx_obj, return_reg_id, flags); 1509 } 1510 1511 struct mlx5dr_action * 1512 mlx5dr_action_create_counter(struct mlx5dr_context *ctx, 1513 struct mlx5dr_devx_obj *obj, 1514 uint32_t flags) 1515 
{ 1516 struct mlx5dr_action *action; 1517 int ret; 1518 1519 if (mlx5dr_action_is_hws_flags(flags) && 1520 mlx5dr_action_is_root_flags(flags)) { 1521 DR_LOG(ERR, "Same action cannot be used for root and non root"); 1522 rte_errno = ENOTSUP; 1523 return NULL; 1524 } 1525 1526 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_CTR); 1527 if (!action) 1528 return NULL; 1529 1530 if (mlx5dr_action_is_root_flags(flags)) { 1531 action->devx_obj = obj->obj; 1532 } else { 1533 ret = mlx5dr_action_create_stcs(action, obj); 1534 if (ret) 1535 goto free_action; 1536 } 1537 1538 return action; 1539 1540 free_action: 1541 simple_free(action); 1542 return NULL; 1543 } 1544 1545 static int mlx5dr_action_create_dest_vport_hws(struct mlx5dr_context *ctx, 1546 struct mlx5dr_action *action, 1547 uint32_t ib_port_num) 1548 { 1549 struct mlx5dr_cmd_query_vport_caps vport_caps = {0}; 1550 int ret; 1551 1552 ret = mlx5dr_cmd_query_ib_port(ctx->ibv_ctx, &vport_caps, ib_port_num); 1553 if (ret) { 1554 DR_LOG(ERR, "Failed querying port %d", ib_port_num); 1555 return ret; 1556 } 1557 action->vport.vport_num = vport_caps.vport_num; 1558 action->vport.esw_owner_vhca_id = vport_caps.esw_owner_vhca_id; 1559 1560 if (!ctx->caps->merged_eswitch && 1561 action->vport.esw_owner_vhca_id != ctx->caps->vhca_id) { 1562 DR_LOG(ERR, "Not merged-eswitch (%d), not allowed to send to other vhca_id (%d)", 1563 ctx->caps->vhca_id, action->vport.esw_owner_vhca_id); 1564 rte_errno = ENOTSUP; 1565 return rte_errno; 1566 } 1567 1568 ret = mlx5dr_action_create_stcs(action, NULL); 1569 if (ret) { 1570 DR_LOG(ERR, "Failed creating stc for port %d", ib_port_num); 1571 return ret; 1572 } 1573 1574 return 0; 1575 } 1576 1577 struct mlx5dr_action * 1578 mlx5dr_action_create_dest_vport(struct mlx5dr_context *ctx, 1579 uint32_t ib_port_num, 1580 uint32_t flags) 1581 { 1582 struct mlx5dr_action *action; 1583 int ret; 1584 1585 if (!(flags & MLX5DR_ACTION_FLAG_HWS_FDB)) { 1586 DR_LOG(ERR, "Vport action is supported for FDB only"); 1587 rte_errno = EINVAL; 1588 return NULL; 1589 } 1590 1591 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_VPORT); 1592 if (!action) 1593 return NULL; 1594 1595 ret = mlx5dr_action_create_dest_vport_hws(ctx, action, ib_port_num); 1596 if (ret) { 1597 DR_LOG(ERR, "Failed to create vport action HWS"); 1598 goto free_action; 1599 } 1600 1601 return action; 1602 1603 free_action: 1604 simple_free(action); 1605 return NULL; 1606 } 1607 1608 struct mlx5dr_action * 1609 mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags) 1610 { 1611 struct mlx5dr_action *action; 1612 int ret; 1613 1614 if (mlx5dr_action_is_root_flags(flags)) { 1615 DR_LOG(ERR, "Push vlan action not supported for root"); 1616 rte_errno = ENOTSUP; 1617 return NULL; 1618 } 1619 1620 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_PUSH_VLAN); 1621 if (!action) 1622 return NULL; 1623 1624 ret = mlx5dr_action_create_stcs(action, NULL); 1625 if (ret) { 1626 DR_LOG(ERR, "Failed creating stc for push vlan"); 1627 goto free_action; 1628 } 1629 1630 return action; 1631 1632 free_action: 1633 simple_free(action); 1634 return NULL; 1635 } 1636 1637 struct mlx5dr_action * 1638 mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags) 1639 { 1640 struct mlx5dr_action *action; 1641 int ret; 1642 1643 if (mlx5dr_action_is_root_flags(flags)) { 1644 DR_LOG(ERR, "Pop vlan action not supported for root"); 1645 rte_errno = ENOTSUP; 1646 return NULL; 1647 } 1648 action = 
mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_POP_VLAN); 1649 if (!action) 1650 return NULL; 1651 1652 ret = mlx5dr_action_get_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP); 1653 if (ret) { 1654 DR_LOG(ERR, "Failed to create remove stc for reformat"); 1655 goto free_action; 1656 } 1657 1658 ret = mlx5dr_action_create_stcs(action, NULL); 1659 if (ret) { 1660 DR_LOG(ERR, "Failed creating stc for pop vlan"); 1661 goto free_shared; 1662 } 1663 1664 return action; 1665 1666 free_shared: 1667 mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP); 1668 free_action: 1669 simple_free(action); 1670 return NULL; 1671 } 1672 1673 static int 1674 mlx5dr_action_conv_reformat_to_verbs(uint32_t action_type, 1675 uint32_t *verb_reformat_type) 1676 { 1677 switch (action_type) { 1678 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 1679 *verb_reformat_type = 1680 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2; 1681 return 0; 1682 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 1683 *verb_reformat_type = 1684 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL; 1685 return 0; 1686 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 1687 *verb_reformat_type = 1688 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2; 1689 return 0; 1690 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 1691 *verb_reformat_type = 1692 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 1693 return 0; 1694 default: 1695 DR_LOG(ERR, "Invalid root reformat action type"); 1696 rte_errno = EINVAL; 1697 return rte_errno; 1698 } 1699 } 1700 1701 static int 1702 mlx5dr_action_conv_flags_to_ft_type(uint32_t flags, enum mlx5dv_flow_table_type *ft_type) 1703 { 1704 if (flags & (MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX)) { 1705 *ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 1706 } else if (flags & (MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX)) { 1707 *ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX; 1708 #ifdef HAVE_MLX5DV_FLOW_MATCHER_FT_TYPE 1709 } else if (flags & (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB)) { 1710 *ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 1711 #endif 1712 } else { 1713 rte_errno = ENOTSUP; 1714 return 1; 1715 } 1716 1717 return 0; 1718 } 1719 1720 static int 1721 mlx5dr_action_create_reformat_root(struct mlx5dr_action *action, 1722 size_t data_sz, 1723 void *data) 1724 { 1725 enum mlx5dv_flow_table_type ft_type = 0; /*fix compilation warn*/ 1726 uint32_t verb_reformat_type = 0; 1727 struct ibv_context *ibv_ctx; 1728 int ret; 1729 1730 /* Convert action to FT type and verbs reformat type */ 1731 ret = mlx5dr_action_conv_flags_to_ft_type(action->flags, &ft_type); 1732 if (ret) 1733 return rte_errno; 1734 1735 ret = mlx5dr_action_conv_reformat_to_verbs(action->type, &verb_reformat_type); 1736 if (ret) 1737 return rte_errno; 1738 1739 /* Create the reformat type for root table */ 1740 ibv_ctx = mlx5dr_context_get_local_ibv(action->ctx); 1741 action->flow_action = 1742 mlx5_glue->dv_create_flow_action_packet_reformat_root(ibv_ctx, 1743 data_sz, 1744 data, 1745 verb_reformat_type, 1746 ft_type); 1747 if (!action->flow_action) { 1748 DR_LOG(ERR, "Failed to create dv_create_flow reformat"); 1749 rte_errno = errno; 1750 return rte_errno; 1751 } 1752 1753 return 0; 1754 } 1755 1756 static int 1757 mlx5dr_action_handle_insert_with_ptr(struct mlx5dr_action *action, 1758 uint8_t num_of_hdrs, 1759 struct mlx5dr_action_reformat_header *hdrs, 1760 uint32_t log_bulk_sz, uint32_t reparse) 1761 { 1762 struct mlx5dr_devx_obj *arg_obj; 1763 size_t max_sz = 0; 1764 
int ret, i; 1765 1766 for (i = 0; i < num_of_hdrs; i++) { 1767 if (hdrs[i].sz % W_SIZE != 0) { 1768 DR_LOG(ERR, "Header data size should be in WORD granularity"); 1769 rte_errno = EINVAL; 1770 return rte_errno; 1771 } 1772 max_sz = RTE_MAX(hdrs[i].sz, max_sz); 1773 } 1774 1775 /* Allocate single shared arg object for all headers */ 1776 arg_obj = mlx5dr_arg_create(action->ctx, 1777 hdrs->data, 1778 max_sz, 1779 log_bulk_sz, 1780 action->flags & MLX5DR_ACTION_FLAG_SHARED); 1781 if (!arg_obj) 1782 return rte_errno; 1783 1784 for (i = 0; i < num_of_hdrs; i++) { 1785 action[i].reformat.arg_obj = arg_obj; 1786 action[i].reformat.header_size = hdrs[i].sz; 1787 action[i].reformat.num_of_hdrs = num_of_hdrs; 1788 action[i].reformat.max_hdr_sz = max_sz; 1789 1790 if (action[i].type == MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 || 1791 action[i].type == MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) { 1792 action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START; 1793 action[i].reformat.offset = 0; 1794 action[i].reformat.encap = 1; 1795 action[i].reformat.push_esp = 0; 1796 } 1797 1798 if (likely(reparse == MLX5DR_ACTION_STC_REPARSE_DEFAULT)) 1799 action[i].reformat.require_reparse = true; 1800 else if (reparse == MLX5DR_ACTION_STC_REPARSE_ON) 1801 action[i].reformat.require_reparse = true; 1802 1803 ret = mlx5dr_action_create_stcs(&action[i], NULL); 1804 if (ret) { 1805 DR_LOG(ERR, "Failed to create stc for reformat"); 1806 goto free_stc; 1807 } 1808 } 1809 1810 return 0; 1811 1812 free_stc: 1813 while (i--) 1814 mlx5dr_action_destroy_stcs(&action[i]); 1815 1816 mlx5dr_cmd_destroy_obj(arg_obj); 1817 return ret; 1818 } 1819 1820 static int 1821 mlx5dr_action_handle_l2_to_tunnel_l3(struct mlx5dr_action *action, 1822 uint8_t num_of_hdrs, 1823 struct mlx5dr_action_reformat_header *hdrs, 1824 uint32_t log_bulk_sz) 1825 { 1826 int ret; 1827 1828 /* The action is remove-l2-header + insert-l3-header */ 1829 ret = mlx5dr_action_get_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP_L3); 1830 if (ret) { 1831 DR_LOG(ERR, "Failed to create remove stc for reformat"); 1832 return ret; 1833 } 1834 1835 /* Reuse the insert with pointer for the L2L3 header */ 1836 ret = mlx5dr_action_handle_insert_with_ptr(action, 1837 num_of_hdrs, 1838 hdrs, 1839 log_bulk_sz, 1840 MLX5DR_ACTION_STC_REPARSE_DEFAULT); 1841 if (ret) 1842 goto put_shared_stc; 1843 1844 return 0; 1845 1846 put_shared_stc: 1847 mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP_L3); 1848 return ret; 1849 } 1850 1851 static void mlx5dr_action_prepare_decap_l3_actions(size_t data_sz, 1852 uint8_t *mh_data, 1853 int *num_of_actions) 1854 { 1855 int actions; 1856 uint32_t i; 1857 1858 /* Remove L2L3 outer headers */ 1859 MLX5_SET(stc_ste_param_remove, mh_data, action_type, 1860 MLX5_MODIFICATION_TYPE_REMOVE); 1861 MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1); 1862 MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor, 1863 MLX5_HEADER_ANCHOR_PACKET_START); 1864 MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor, 1865 MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4); 1866 mh_data += MLX5DR_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */ 1867 actions = 1; 1868 1869 /* Add the new header using inline action 4Byte at a time, the header 1870 * is added in reversed order to the beginning of the packet to avoid 1871 * incorrect parsing by the HW. Since header is 14B or 18B an extra 1872 * two bytes are padded and later removed. 
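	 * For example, a 14B L2 header (no VLAN) takes 14 / 4 + 1 = 4 inline
	 * inserts of 4B (16B in total), and the trailing remove-words action
	 * then strips the 2 extra bytes from the packet start; an 18B header
	 * with VLAN takes 5 inserts in the same way.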
1873 	 */
1874 	for (i = 0; i < data_sz / MLX5DR_ACTION_INLINE_DATA_SIZE + 1; i++) {
1875 		MLX5_SET(stc_ste_param_insert, mh_data, action_type,
1876 			 MLX5_MODIFICATION_TYPE_INSERT);
1877 		MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);
1878 		MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,
1879 			 MLX5_HEADER_ANCHOR_PACKET_START);
1880 		MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);
1881 		mh_data += MLX5DR_ACTION_DOUBLE_SIZE;
1882 		actions++;
1883 	}
1884 
1885 	/* Remove first 2 extra bytes */
1886 	MLX5_SET(stc_ste_param_remove_words, mh_data, action_type,
1887 		 MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
1888 	MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,
1889 		 MLX5_HEADER_ANCHOR_PACKET_START);
1890 	/* The hardware expects here size in words (2 bytes) */
1891 	MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);
1892 	actions++;
1893 
1894 	*num_of_actions = actions;
1895 }
1896 
1897 static int
1898 mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action,
1899 				     uint8_t num_of_hdrs,
1900 				     struct mlx5dr_action_reformat_header *hdrs,
1901 				     uint32_t log_bulk_sz)
1902 {
1903 	uint8_t mh_data[MLX5DR_ACTION_REFORMAT_DATA_SIZE] = {0};
1904 	struct mlx5dr_devx_obj *arg_obj, *pat_obj;
1905 	struct mlx5dr_context *ctx = action->ctx;
1906 	int num_of_actions;
1907 	int mh_data_size;
1908 	int ret, i;
1909 
1910 	for (i = 0; i < num_of_hdrs; i++) {
1911 		if (hdrs[i].sz != MLX5DR_ACTION_HDR_LEN_L2 &&
1912 		    hdrs[i].sz != MLX5DR_ACTION_HDR_LEN_L2_W_VLAN) {
1913 			DR_LOG(ERR, "Data size is not supported for decap-l3");
1914 			rte_errno = EINVAL;
1915 			return rte_errno;
1916 		}
1917 	}
1918 
1919 	/* Create a full modify header action list in case shared */
1920 	mlx5dr_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
1921 
1922 	if (action->flags & MLX5DR_ACTION_FLAG_SHARED)
1923 		mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
1924 
1925 	/* All DecapL3 cases require the same max arg size */
1926 	arg_obj = mlx5dr_arg_create_modify_header_arg(ctx,
1927 						      (__be64 *)mh_data,
1928 						      num_of_actions,
1929 						      log_bulk_sz,
1930 						      action->flags & MLX5DR_ACTION_FLAG_SHARED);
1931 	if (!arg_obj)
1932 		return rte_errno;
1933 
1934 	for (i = 0; i < num_of_hdrs; i++) {
1935 		memset(mh_data, 0, MLX5DR_ACTION_REFORMAT_DATA_SIZE);
1936 		mlx5dr_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions);
1937 		mh_data_size = num_of_actions * MLX5DR_MODIFY_ACTION_SIZE;
1938 
1939 		pat_obj = mlx5dr_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size);
1940 		if (!pat_obj) {
1941 			DR_LOG(ERR, "Failed to allocate pattern for DecapL3");
1942 			goto free_stc_and_pat;
1943 		}
1944 
1945 		action[i].modify_header.max_num_of_actions = num_of_actions;
1946 		action[i].modify_header.num_of_actions = num_of_actions;
1947 		action[i].modify_header.num_of_patterns = num_of_hdrs;
1948 		action[i].modify_header.arg_obj = arg_obj;
1949 		action[i].modify_header.pat_obj = pat_obj;
1950 		action[i].modify_header.require_reparse =
1951 			mlx5dr_pat_require_reparse((__be64 *)mh_data, num_of_actions);
1952 
1953 		ret = mlx5dr_action_create_stcs(&action[i], NULL);
1954 		if (ret) {
1955 			mlx5dr_pat_put_pattern(ctx, pat_obj);
1956 			goto free_stc_and_pat;
1957 		}
1958 	}
1959 
1960 	return 0;
1961 
1962 
1963 free_stc_and_pat:
1964 	while (i--) {
1965 		mlx5dr_action_destroy_stcs(&action[i]);
1966 		mlx5dr_pat_put_pattern(ctx, action[i].modify_header.pat_obj);
1967 	}
1968 
1969 	mlx5dr_cmd_destroy_obj(arg_obj);
1970 	return rte_errno;
1971 }
1972 
1973 static int
1974 mlx5dr_action_create_reformat_hws(struct mlx5dr_action *action,
1975 				  uint8_t
num_of_hdrs, 1976 struct mlx5dr_action_reformat_header *hdrs, 1977 uint32_t bulk_size) 1978 { 1979 int ret; 1980 1981 switch (action->type) { 1982 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 1983 ret = mlx5dr_action_create_stcs(action, NULL); 1984 break; 1985 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 1986 ret = mlx5dr_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size, 1987 MLX5DR_ACTION_STC_REPARSE_DEFAULT); 1988 break; 1989 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 1990 ret = mlx5dr_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size); 1991 break; 1992 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 1993 ret = mlx5dr_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size); 1994 break; 1995 default: 1996 DR_LOG(ERR, "Invalid HWS reformat action type"); 1997 rte_errno = EINVAL; 1998 return rte_errno; 1999 } 2000 2001 return ret; 2002 } 2003 2004 struct mlx5dr_action * 2005 mlx5dr_action_create_reformat(struct mlx5dr_context *ctx, 2006 enum mlx5dr_action_type reformat_type, 2007 uint8_t num_of_hdrs, 2008 struct mlx5dr_action_reformat_header *hdrs, 2009 uint32_t log_bulk_size, 2010 uint32_t flags) 2011 { 2012 struct mlx5dr_action *action; 2013 int ret; 2014 2015 if (!num_of_hdrs) { 2016 DR_LOG(ERR, "Reformat num_of_hdrs cannot be zero"); 2017 rte_errno = EINVAL; 2018 return NULL; 2019 } 2020 2021 action = mlx5dr_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs); 2022 if (!action) 2023 return NULL; 2024 2025 if (mlx5dr_action_is_root_flags(flags)) { 2026 if (log_bulk_size) { 2027 DR_LOG(ERR, "Bulk reformat not supported over root"); 2028 rte_errno = ENOTSUP; 2029 goto free_action; 2030 } 2031 2032 ret = mlx5dr_action_create_reformat_root(action, 2033 hdrs ? hdrs->sz : 0, 2034 hdrs ? hdrs->data : NULL); 2035 if (ret) { 2036 DR_LOG(ERR, "Failed to create root reformat action"); 2037 goto free_action; 2038 } 2039 2040 return action; 2041 } 2042 2043 if (!mlx5dr_action_is_hws_flags(flags) || 2044 ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1))) { 2045 DR_LOG(ERR, "Reformat flags don't fit HWS (flags: 0x%x)", flags); 2046 rte_errno = EINVAL; 2047 goto free_action; 2048 } 2049 2050 ret = mlx5dr_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size); 2051 if (ret) { 2052 DR_LOG(ERR, "Failed to create HWS reformat action"); 2053 goto free_action; 2054 } 2055 2056 return action; 2057 2058 free_action: 2059 simple_free(action); 2060 return NULL; 2061 } 2062 2063 static int 2064 mlx5dr_action_create_modify_header_root(struct mlx5dr_action *action, 2065 size_t actions_sz, 2066 __be64 *actions) 2067 { 2068 enum mlx5dv_flow_table_type ft_type = 0; 2069 struct ibv_context *local_ibv_ctx; 2070 int ret; 2071 2072 ret = mlx5dr_action_conv_flags_to_ft_type(action->flags, &ft_type); 2073 if (ret) 2074 return rte_errno; 2075 2076 local_ibv_ctx = mlx5dr_context_get_local_ibv(action->ctx); 2077 2078 action->flow_action = 2079 mlx5_glue->dv_create_flow_action_modify_header_root(local_ibv_ctx, 2080 actions_sz, 2081 (uint64_t *)actions, 2082 ft_type); 2083 if (!action->flow_action) { 2084 rte_errno = errno; 2085 return rte_errno; 2086 } 2087 2088 return 0; 2089 } 2090 2091 static int 2092 mlx5dr_action_create_modify_header_hws(struct mlx5dr_action *action, 2093 uint8_t num_of_patterns, 2094 struct mlx5dr_action_mh_pattern *pattern, 2095 uint32_t log_bulk_size, 2096 uint32_t reparse) 2097 { 2098 struct mlx5dr_devx_obj *pat_obj, *arg_obj = NULL; 2099 struct mlx5dr_context *ctx = action->ctx; 2100 uint16_t 
num_actions, max_mh_actions = 0; 2101 int i, ret; 2102 2103 /* Calculate maximum number of mh actions for shared arg allocation */ 2104 for (i = 0; i < num_of_patterns; i++) 2105 max_mh_actions = RTE_MAX(max_mh_actions, pattern[i].sz / MLX5DR_MODIFY_ACTION_SIZE); 2106 2107 /* Allocate single shared arg for all patterns based on the max size */ 2108 if (max_mh_actions > 1) { 2109 arg_obj = mlx5dr_arg_create_modify_header_arg(ctx, 2110 pattern->data, 2111 max_mh_actions, 2112 log_bulk_size, 2113 action->flags & 2114 MLX5DR_ACTION_FLAG_SHARED); 2115 if (!arg_obj) 2116 return rte_errno; 2117 } 2118 2119 for (i = 0; i < num_of_patterns; i++) { 2120 if (!mlx5dr_pat_verify_actions(pattern[i].data, pattern[i].sz)) { 2121 DR_LOG(ERR, "Fail to verify pattern modify actions"); 2122 rte_errno = EINVAL; 2123 goto free_stc_and_pat; 2124 } 2125 2126 num_actions = pattern[i].sz / MLX5DR_MODIFY_ACTION_SIZE; 2127 action[i].modify_header.num_of_patterns = num_of_patterns; 2128 action[i].modify_header.max_num_of_actions = max_mh_actions; 2129 action[i].modify_header.num_of_actions = num_actions; 2130 2131 if (likely(reparse == MLX5DR_ACTION_STC_REPARSE_DEFAULT)) 2132 action[i].modify_header.require_reparse = 2133 mlx5dr_pat_require_reparse(pattern[i].data, num_actions); 2134 else if (reparse == MLX5DR_ACTION_STC_REPARSE_ON) 2135 action[i].modify_header.require_reparse = true; 2136 2137 if (num_actions == 1) { 2138 pat_obj = NULL; 2139 /* Optimize single modify action to be used inline */ 2140 action[i].modify_header.single_action = pattern[i].data[0]; 2141 action[i].modify_header.single_action_type = 2142 MLX5_GET(set_action_in, pattern[i].data, action_type); 2143 } else { 2144 /* Multiple modify actions require a pattern */ 2145 pat_obj = mlx5dr_pat_get_pattern(ctx, pattern[i].data, pattern[i].sz); 2146 if (!pat_obj) { 2147 DR_LOG(ERR, "Failed to allocate pattern for modify header"); 2148 goto free_stc_and_pat; 2149 } 2150 2151 action[i].modify_header.arg_obj = arg_obj; 2152 action[i].modify_header.pat_obj = pat_obj; 2153 } 2154 /* Allocate STC for each action representing a header */ 2155 ret = mlx5dr_action_create_stcs(&action[i], NULL); 2156 if (ret) { 2157 if (pat_obj) 2158 mlx5dr_pat_put_pattern(ctx, pat_obj); 2159 goto free_stc_and_pat; 2160 } 2161 } 2162 2163 return 0; 2164 2165 free_stc_and_pat: 2166 while (i--) { 2167 mlx5dr_action_destroy_stcs(&action[i]); 2168 if (action[i].modify_header.pat_obj) 2169 mlx5dr_pat_put_pattern(ctx, action[i].modify_header.pat_obj); 2170 } 2171 2172 if (arg_obj) 2173 mlx5dr_cmd_destroy_obj(arg_obj); 2174 2175 return rte_errno; 2176 } 2177 2178 struct mlx5dr_action * 2179 mlx5dr_action_create_modify_header_reparse(struct mlx5dr_context *ctx, 2180 uint8_t num_of_patterns, 2181 struct mlx5dr_action_mh_pattern *patterns, 2182 uint32_t log_bulk_size, 2183 uint32_t flags, uint32_t reparse) 2184 { 2185 struct mlx5dr_action *action; 2186 int ret; 2187 2188 if (!num_of_patterns) { 2189 DR_LOG(ERR, "Invalid number of patterns"); 2190 rte_errno = ENOTSUP; 2191 return NULL; 2192 } 2193 2194 action = mlx5dr_action_create_generic_bulk(ctx, flags, 2195 MLX5DR_ACTION_TYP_MODIFY_HDR, 2196 num_of_patterns); 2197 if (!action) 2198 return NULL; 2199 2200 if (mlx5dr_action_is_root_flags(flags)) { 2201 if (log_bulk_size) { 2202 DR_LOG(ERR, "Bulk modify-header not supported over root"); 2203 rte_errno = ENOTSUP; 2204 goto free_action; 2205 } 2206 2207 if (num_of_patterns != 1) { 2208 DR_LOG(ERR, "Only a single pattern supported over root"); 2209 rte_errno = ENOTSUP; 2210 goto free_action; 
2211 } 2212 2213 ret = mlx5dr_action_create_modify_header_root(action, 2214 patterns->sz, 2215 patterns->data); 2216 if (ret) 2217 goto free_action; 2218 2219 return action; 2220 } 2221 2222 if ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) { 2223 DR_LOG(ERR, "Action cannot be shared with requested pattern or size"); 2224 rte_errno = EINVAL; 2225 goto free_action; 2226 } 2227 2228 ret = mlx5dr_action_create_modify_header_hws(action, 2229 num_of_patterns, 2230 patterns, 2231 log_bulk_size, 2232 reparse); 2233 if (ret) 2234 goto free_action; 2235 2236 return action; 2237 2238 free_action: 2239 simple_free(action); 2240 return NULL; 2241 } 2242 2243 struct mlx5dr_action * 2244 mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx, 2245 uint8_t num_of_patterns, 2246 struct mlx5dr_action_mh_pattern *patterns, 2247 uint32_t log_bulk_size, 2248 uint32_t flags) 2249 { 2250 return mlx5dr_action_create_modify_header_reparse(ctx, num_of_patterns, patterns, 2251 log_bulk_size, flags, 2252 MLX5DR_ACTION_STC_REPARSE_DEFAULT); 2253 } 2254 static struct mlx5dr_devx_obj * 2255 mlx5dr_action_dest_array_process_reformat(struct mlx5dr_context *ctx, 2256 enum mlx5dr_action_type type, 2257 void *reformat_data, 2258 size_t reformat_data_sz) 2259 { 2260 struct mlx5dr_cmd_packet_reformat_create_attr pr_attr = {0}; 2261 struct mlx5dr_devx_obj *reformat_devx_obj; 2262 2263 if (!reformat_data || !reformat_data_sz) { 2264 DR_LOG(ERR, "Empty reformat action or data"); 2265 rte_errno = EINVAL; 2266 return NULL; 2267 } 2268 2269 switch (type) { 2270 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 2271 pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL; 2272 break; 2273 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 2274 pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 2275 break; 2276 default: 2277 DR_LOG(ERR, "Invalid value for reformat type"); 2278 rte_errno = EINVAL; 2279 return NULL; 2280 } 2281 pr_attr.reformat_param_0 = 0; 2282 pr_attr.data_sz = reformat_data_sz; 2283 pr_attr.data = reformat_data; 2284 2285 reformat_devx_obj = mlx5dr_cmd_packet_reformat_create(ctx->ibv_ctx, &pr_attr); 2286 if (!reformat_devx_obj) 2287 return NULL; 2288 2289 return reformat_devx_obj; 2290 } 2291 2292 struct mlx5dr_action * 2293 mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx, 2294 size_t num_dest, 2295 struct mlx5dr_action_dest_attr *dests, 2296 uint32_t flags) 2297 { 2298 struct mlx5dr_cmd_set_fte_dest *dest_list = NULL; 2299 struct mlx5dr_devx_obj *packet_reformat = NULL; 2300 struct mlx5dr_cmd_ft_create_attr ft_attr = {0}; 2301 struct mlx5dr_cmd_set_fte_attr fte_attr = {0}; 2302 struct mlx5dr_cmd_forward_tbl *fw_island; 2303 enum mlx5dr_table_type table_type; 2304 struct mlx5dr_action *action; 2305 uint32_t i; 2306 int ret; 2307 2308 if (num_dest <= 1) { 2309 rte_errno = EINVAL; 2310 DR_LOG(ERR, "Action must have multiple dests"); 2311 return NULL; 2312 } 2313 2314 if (flags == (MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED)) { 2315 ft_attr.type = FS_FT_NIC_RX; 2316 ft_attr.level = MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL - 1; 2317 table_type = MLX5DR_TABLE_TYPE_NIC_RX; 2318 } else if (flags == (MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED)) { 2319 ft_attr.type = FS_FT_FDB; 2320 ft_attr.level = ctx->caps->fdb_ft.max_level - 1; 2321 table_type = MLX5DR_TABLE_TYPE_FDB; 2322 } else { 2323 DR_LOG(ERR, "Action flags not supported"); 2324 rte_errno = ENOTSUP; 2325 return NULL; 2326 } 2327 2328 if 
(mlx5dr_context_shared_gvmi_used(ctx)) { 2329 DR_LOG(ERR, "Cannot use this action in shared GVMI context"); 2330 rte_errno = ENOTSUP; 2331 return NULL; 2332 } 2333 2334 dest_list = simple_calloc(num_dest, sizeof(*dest_list)); 2335 if (!dest_list) { 2336 DR_LOG(ERR, "Failed to allocate memory for destinations"); 2337 rte_errno = ENOMEM; 2338 return NULL; 2339 } 2340 2341 for (i = 0; i < num_dest; i++) { 2342 enum mlx5dr_action_type *action_type = dests[i].action_type; 2343 2344 if (!mlx5dr_action_check_combo(dests[i].action_type, table_type)) { 2345 DR_LOG(ERR, "Invalid combination of actions"); 2346 rte_errno = EINVAL; 2347 goto free_dest_list; 2348 } 2349 2350 for (; *action_type != MLX5DR_ACTION_TYP_LAST; action_type++) { 2351 switch (*action_type) { 2352 case MLX5DR_ACTION_TYP_TBL: 2353 dest_list[i].destination_type = 2354 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 2355 dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id; 2356 fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2357 fte_attr.ignore_flow_level = 1; 2358 break; 2359 case MLX5DR_ACTION_TYP_MISS: 2360 if (table_type != MLX5DR_TABLE_TYPE_FDB) { 2361 DR_LOG(ERR, "Miss action supported for FDB only"); 2362 rte_errno = ENOTSUP; 2363 goto free_dest_list; 2364 } 2365 dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2366 dest_list[i].destination_id = 2367 ctx->caps->eswitch_manager_vport_number; 2368 fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2369 break; 2370 case MLX5DR_ACTION_TYP_VPORT: 2371 dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2372 dest_list[i].destination_id = dests[i].dest->vport.vport_num; 2373 fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2374 if (ctx->caps->merged_eswitch) { 2375 dest_list[i].ext_flags |= 2376 MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID; 2377 dest_list[i].esw_owner_vhca_id = 2378 dests[i].dest->vport.esw_owner_vhca_id; 2379 } 2380 break; 2381 case MLX5DR_ACTION_TYP_TIR: 2382 dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_TIR; 2383 dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id; 2384 fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2385 break; 2386 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 2387 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 2388 packet_reformat = mlx5dr_action_dest_array_process_reformat 2389 (ctx, 2390 *action_type, 2391 dests[i].reformat.reformat_data, 2392 dests[i].reformat.reformat_data_sz); 2393 if (!packet_reformat) 2394 goto free_dest_list; 2395 2396 dest_list[i].ext_flags |= MLX5DR_CMD_EXT_DEST_REFORMAT; 2397 dest_list[i].ext_reformat = packet_reformat; 2398 ft_attr.reformat_en = true; 2399 fte_attr.extended_dest = 1; 2400 break; 2401 default: 2402 DR_LOG(ERR, "Unsupported action in dest_array"); 2403 rte_errno = ENOTSUP; 2404 goto free_dest_list; 2405 } 2406 } 2407 } 2408 fte_attr.dests_num = num_dest; 2409 fte_attr.dests = dest_list; 2410 2411 fw_island = mlx5dr_cmd_forward_tbl_create(ctx->ibv_ctx, &ft_attr, &fte_attr); 2412 if (!fw_island) 2413 goto free_dest_list; 2414 2415 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ARRAY); 2416 if (!action) 2417 goto destroy_fw_island; 2418 2419 ret = mlx5dr_action_create_stcs(action, fw_island->ft); 2420 if (ret) 2421 goto free_action; 2422 2423 action->dest_array.fw_island = fw_island; 2424 action->dest_array.num_dest = num_dest; 2425 action->dest_array.dest_list = dest_list; 2426 2427 return action; 2428 2429 free_action: 2430 simple_free(action); 2431 destroy_fw_island: 2432 
mlx5dr_cmd_forward_tbl_destroy(fw_island); 2433 free_dest_list: 2434 for (i = 0; i < num_dest; i++) { 2435 if (dest_list[i].ext_reformat) 2436 mlx5dr_cmd_destroy_obj(dest_list[i].ext_reformat); 2437 } 2438 simple_free(dest_list); 2439 return NULL; 2440 } 2441 2442 struct mlx5dr_action * 2443 mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx, 2444 uint16_t priority, 2445 uint32_t flags) 2446 { 2447 struct mlx5dv_steering_anchor_attr attr = {0}; 2448 struct mlx5dv_steering_anchor *sa; 2449 struct mlx5dr_action *action; 2450 int ret; 2451 2452 if (mlx5dr_action_is_root_flags(flags)) { 2453 DR_LOG(ERR, "Action flags must be only non root (HWS)"); 2454 rte_errno = ENOTSUP; 2455 return NULL; 2456 } 2457 2458 if (mlx5dr_context_shared_gvmi_used(ctx)) { 2459 DR_LOG(ERR, "Cannot use this action in shared GVMI context"); 2460 rte_errno = ENOTSUP; 2461 return NULL; 2462 } 2463 2464 if (mlx5dr_action_conv_flags_to_ft_type(flags, &attr.ft_type)) 2465 return NULL; 2466 2467 attr.priority = priority; 2468 2469 sa = mlx5_glue->create_steering_anchor(ctx->ibv_ctx, &attr); 2470 if (!sa) { 2471 DR_LOG(ERR, "Creation of steering anchor failed"); 2472 return NULL; 2473 } 2474 2475 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ROOT); 2476 if (!action) 2477 goto free_steering_anchor; 2478 2479 action->root_tbl.sa = sa; 2480 2481 ret = mlx5dr_action_create_stcs(action, NULL); 2482 if (ret) 2483 goto free_action; 2484 2485 return action; 2486 2487 free_action: 2488 simple_free(action); 2489 free_steering_anchor: 2490 mlx5_glue->destroy_steering_anchor(sa); 2491 return NULL; 2492 } 2493 2494 static struct mlx5dr_action * 2495 mlx5dr_action_create_insert_header_reparse(struct mlx5dr_context *ctx, 2496 uint8_t num_of_hdrs, 2497 struct mlx5dr_action_insert_header *hdrs, 2498 uint32_t log_bulk_size, 2499 uint32_t flags, uint32_t reparse) 2500 { 2501 struct mlx5dr_action_reformat_header *reformat_hdrs; 2502 struct mlx5dr_action *action; 2503 int i, ret; 2504 2505 if (!num_of_hdrs) { 2506 DR_LOG(ERR, "Reformat num_of_hdrs cannot be zero"); 2507 rte_errno = EINVAL; 2508 return NULL; 2509 } 2510 2511 if (mlx5dr_action_is_root_flags(flags)) { 2512 DR_LOG(ERR, "Dynamic reformat action not supported over root"); 2513 rte_errno = ENOTSUP; 2514 return NULL; 2515 } 2516 2517 if (!mlx5dr_action_is_hws_flags(flags) || 2518 ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1))) { 2519 DR_LOG(ERR, "Reformat flags don't fit HWS (flags: 0x%x)", flags); 2520 rte_errno = EINVAL; 2521 return NULL; 2522 } 2523 2524 action = mlx5dr_action_create_generic_bulk(ctx, flags, 2525 MLX5DR_ACTION_TYP_INSERT_HEADER, 2526 num_of_hdrs); 2527 if (!action) 2528 return NULL; 2529 2530 reformat_hdrs = simple_calloc(num_of_hdrs, sizeof(*reformat_hdrs)); 2531 if (!reformat_hdrs) { 2532 DR_LOG(ERR, "Failed to allocate memory for reformat_hdrs"); 2533 rte_errno = ENOMEM; 2534 goto free_action; 2535 } 2536 2537 for (i = 0; i < num_of_hdrs; i++) { 2538 if (hdrs[i].offset % W_SIZE != 0) { 2539 DR_LOG(ERR, "Header offset should be in WORD granularity"); 2540 rte_errno = EINVAL; 2541 goto free_reformat_hdrs; 2542 } 2543 2544 action[i].reformat.anchor = hdrs[i].anchor; 2545 action[i].reformat.encap = hdrs[i].encap; 2546 action[i].reformat.push_esp = hdrs[i].push_esp; 2547 action[i].reformat.offset = hdrs[i].offset; 2548 reformat_hdrs[i].sz = hdrs[i].hdr.sz; 2549 reformat_hdrs[i].data = hdrs[i].hdr.data; 2550 } 2551 2552 ret = mlx5dr_action_handle_insert_with_ptr(action, num_of_hdrs, 2553 reformat_hdrs, 
log_bulk_size, 2554 reparse); 2555 if (ret) { 2556 DR_LOG(ERR, "Failed to create HWS reformat action"); 2557 goto free_reformat_hdrs; 2558 } 2559 2560 simple_free(reformat_hdrs); 2561 2562 return action; 2563 2564 free_reformat_hdrs: 2565 simple_free(reformat_hdrs); 2566 free_action: 2567 simple_free(action); 2568 return NULL; 2569 } 2570 2571 struct mlx5dr_action * 2572 mlx5dr_action_create_insert_header(struct mlx5dr_context *ctx, 2573 uint8_t num_of_hdrs, 2574 struct mlx5dr_action_insert_header *hdrs, 2575 uint32_t log_bulk_size, 2576 uint32_t flags) 2577 { 2578 return mlx5dr_action_create_insert_header_reparse(ctx, num_of_hdrs, hdrs, 2579 log_bulk_size, flags, 2580 MLX5DR_ACTION_STC_REPARSE_DEFAULT); 2581 } 2582 2583 struct mlx5dr_action * 2584 mlx5dr_action_create_remove_header(struct mlx5dr_context *ctx, 2585 struct mlx5dr_action_remove_header_attr *attr, 2586 uint32_t flags) 2587 { 2588 struct mlx5dr_action *action; 2589 2590 if (mlx5dr_action_is_root_flags(flags)) { 2591 DR_LOG(ERR, "Remove header action not supported over root"); 2592 rte_errno = ENOTSUP; 2593 return NULL; 2594 } 2595 2596 action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_REMOVE_HEADER); 2597 if (!action) 2598 return NULL; 2599 2600 switch (attr->type) { 2601 case MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER: 2602 action->remove_header.type = MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER; 2603 action->remove_header.start_anchor = attr->by_anchor.start_anchor; 2604 action->remove_header.end_anchor = attr->by_anchor.end_anchor; 2605 action->remove_header.decap = attr->by_anchor.decap; 2606 break; 2607 case MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_OFFSET: 2608 if (attr->by_offset.size % W_SIZE != 0) { 2609 DR_LOG(ERR, "Invalid size, HW supports header remove in WORD granularity"); 2610 rte_errno = EINVAL; 2611 goto free_action; 2612 } 2613 2614 if (attr->by_offset.size > MLX5DR_ACTION_REMOVE_HEADER_MAX_SIZE) { 2615 DR_LOG(ERR, "Header removal size limited to %u bytes", 2616 MLX5DR_ACTION_REMOVE_HEADER_MAX_SIZE); 2617 rte_errno = EINVAL; 2618 goto free_action; 2619 } 2620 2621 action->remove_header.type = MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_OFFSET; 2622 action->remove_header.start_anchor = attr->by_offset.start_anchor; 2623 action->remove_header.num_of_words = attr->by_offset.size / W_SIZE; 2624 break; 2625 default: 2626 DR_LOG(ERR, "Unsupported remove header type %u", attr->type); 2627 rte_errno = ENOTSUP; 2628 goto free_action; 2629 } 2630 2631 if (mlx5dr_action_create_stcs(action, NULL)) 2632 goto free_action; 2633 2634 return action; 2635 2636 free_action: 2637 simple_free(action); 2638 return NULL; 2639 } 2640 2641 static void * 2642 mlx5dr_action_create_pop_ipv6_route_ext_mhdr1(struct mlx5dr_action *action) 2643 { 2644 struct mlx5dr_action_mh_pattern pattern; 2645 __be64 cmd[3] = {0}; 2646 uint16_t mod_id; 2647 2648 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0); 2649 if (!mod_id) { 2650 rte_errno = EINVAL; 2651 return NULL; 2652 } 2653 2654 /* 2655 * Backup ipv6_route_ext.next_hdr to ipv6_route_ext.seg_left. 2656 * Next_hdr will be copied to ipv6.protocol after pop done. 
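	 * The pattern below is: COPY to save next_hdr into the flex parser
	 * field, a NOP to split two consecutive operations on the same
	 * modify field id, then SET to clear next_hdr for a correct checksum.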
2657 */ 2658 MLX5_SET(copy_action_in, &cmd[0], action_type, MLX5_MODIFICATION_TYPE_COPY); 2659 MLX5_SET(copy_action_in, &cmd[0], length, 8); 2660 MLX5_SET(copy_action_in, &cmd[0], src_offset, 24); 2661 MLX5_SET(copy_action_in, &cmd[0], src_field, mod_id); 2662 MLX5_SET(copy_action_in, &cmd[0], dst_field, mod_id); 2663 2664 /* Add nop between the continuous same modify field id */ 2665 MLX5_SET(copy_action_in, &cmd[1], action_type, MLX5_MODIFICATION_TYPE_NOP); 2666 2667 /* Clear next_hdr for right checksum */ 2668 MLX5_SET(set_action_in, &cmd[2], action_type, MLX5_MODIFICATION_TYPE_SET); 2669 MLX5_SET(set_action_in, &cmd[2], length, 8); 2670 MLX5_SET(set_action_in, &cmd[2], offset, 24); 2671 MLX5_SET(set_action_in, &cmd[2], field, mod_id); 2672 2673 pattern.data = cmd; 2674 pattern.sz = sizeof(cmd); 2675 2676 return mlx5dr_action_create_modify_header_reparse(action->ctx, 1, &pattern, 0, 2677 action->flags, 2678 MLX5DR_ACTION_STC_REPARSE_ON); 2679 } 2680 2681 static void * 2682 mlx5dr_action_create_pop_ipv6_route_ext_mhdr2(struct mlx5dr_action *action) 2683 { 2684 enum mlx5_modification_field field[MLX5_ST_SZ_DW(definer_hl_ipv6_addr)] = { 2685 MLX5_MODI_OUT_DIPV6_127_96, 2686 MLX5_MODI_OUT_DIPV6_95_64, 2687 MLX5_MODI_OUT_DIPV6_63_32, 2688 MLX5_MODI_OUT_DIPV6_31_0 2689 }; 2690 struct mlx5dr_action_mh_pattern pattern; 2691 __be64 cmd[5] = {0}; 2692 uint16_t mod_id; 2693 uint32_t i; 2694 2695 /* Copy ipv6_route_ext[first_segment].dst_addr by flex parser to ipv6.dst_addr */ 2696 for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) { 2697 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, i + 1); 2698 if (!mod_id) { 2699 rte_errno = EINVAL; 2700 return NULL; 2701 } 2702 2703 MLX5_SET(copy_action_in, &cmd[i], action_type, MLX5_MODIFICATION_TYPE_COPY); 2704 MLX5_SET(copy_action_in, &cmd[i], dst_field, field[i]); 2705 MLX5_SET(copy_action_in, &cmd[i], src_field, mod_id); 2706 } 2707 2708 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0); 2709 if (!mod_id) { 2710 rte_errno = EINVAL; 2711 return NULL; 2712 } 2713 2714 /* Restore next_hdr from seg_left for flex parser identifying */ 2715 MLX5_SET(copy_action_in, &cmd[4], action_type, MLX5_MODIFICATION_TYPE_COPY); 2716 MLX5_SET(copy_action_in, &cmd[4], length, 8); 2717 MLX5_SET(copy_action_in, &cmd[4], dst_offset, 24); 2718 MLX5_SET(copy_action_in, &cmd[4], src_field, mod_id); 2719 MLX5_SET(copy_action_in, &cmd[4], dst_field, mod_id); 2720 2721 pattern.data = cmd; 2722 pattern.sz = sizeof(cmd); 2723 2724 return mlx5dr_action_create_modify_header_reparse(action->ctx, 1, &pattern, 0, 2725 action->flags, 2726 MLX5DR_ACTION_STC_REPARSE_ON); 2727 } 2728 2729 static void * 2730 mlx5dr_action_create_pop_ipv6_route_ext_mhdr3(struct mlx5dr_action *action) 2731 { 2732 uint8_t cmd[MLX5DR_MODIFY_ACTION_SIZE] = {0}; 2733 struct mlx5dr_action_mh_pattern pattern; 2734 uint16_t mod_id; 2735 2736 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0); 2737 if (!mod_id) { 2738 rte_errno = EINVAL; 2739 return NULL; 2740 } 2741 2742 /* Copy ipv6_route_ext.next_hdr to ipv6.protocol */ 2743 MLX5_SET(copy_action_in, cmd, action_type, MLX5_MODIFICATION_TYPE_COPY); 2744 MLX5_SET(copy_action_in, cmd, length, 8); 2745 MLX5_SET(copy_action_in, cmd, src_offset, 24); 2746 MLX5_SET(copy_action_in, cmd, src_field, mod_id); 2747 MLX5_SET(copy_action_in, cmd, dst_field, MLX5_MODI_OUT_IP_PROTOCOL); 2748 2749 pattern.data = (__be64 *)cmd; 2750 pattern.sz = sizeof(cmd); 2751 2752 return 
mlx5dr_action_create_modify_header_reparse(action->ctx, 1, &pattern, 0, 2753 action->flags, 2754 MLX5DR_ACTION_STC_REPARSE_OFF); 2755 } 2756 2757 static int 2758 mlx5dr_action_create_pop_ipv6_route_ext(struct mlx5dr_action *action) 2759 { 2760 uint8_t anchor_id = flow_hw_get_ipv6_route_ext_anchor_from_ctx(action->ctx); 2761 struct mlx5dr_action_remove_header_attr hdr_attr; 2762 uint32_t i; 2763 2764 if (!anchor_id) { 2765 rte_errno = EINVAL; 2766 return rte_errno; 2767 } 2768 2769 action->ipv6_route_ext.action[0] = 2770 mlx5dr_action_create_pop_ipv6_route_ext_mhdr1(action); 2771 action->ipv6_route_ext.action[1] = 2772 mlx5dr_action_create_pop_ipv6_route_ext_mhdr2(action); 2773 action->ipv6_route_ext.action[2] = 2774 mlx5dr_action_create_pop_ipv6_route_ext_mhdr3(action); 2775 2776 hdr_attr.by_anchor.decap = 1; 2777 hdr_attr.by_anchor.start_anchor = anchor_id; 2778 hdr_attr.by_anchor.end_anchor = MLX5_HEADER_ANCHOR_TCP_UDP; 2779 hdr_attr.type = MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER; 2780 action->ipv6_route_ext.action[3] = 2781 mlx5dr_action_create_remove_header(action->ctx, &hdr_attr, action->flags); 2782 2783 if (!action->ipv6_route_ext.action[0] || !action->ipv6_route_ext.action[1] || 2784 !action->ipv6_route_ext.action[2] || !action->ipv6_route_ext.action[3]) { 2785 DR_LOG(ERR, "Failed to create ipv6_route_ext pop subaction"); 2786 goto err; 2787 } 2788 2789 return 0; 2790 2791 err: 2792 for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++) 2793 if (action->ipv6_route_ext.action[i]) 2794 mlx5dr_action_destroy(action->ipv6_route_ext.action[i]); 2795 2796 return rte_errno; 2797 } 2798 2799 static void * 2800 mlx5dr_action_create_push_ipv6_route_ext_mhdr1(struct mlx5dr_action *action) 2801 { 2802 uint8_t cmd[MLX5DR_MODIFY_ACTION_SIZE] = {0}; 2803 struct mlx5dr_action_mh_pattern pattern; 2804 2805 /* Set ipv6.protocol to IPPROTO_ROUTING */ 2806 MLX5_SET(set_action_in, cmd, action_type, MLX5_MODIFICATION_TYPE_SET); 2807 MLX5_SET(set_action_in, cmd, length, 8); 2808 MLX5_SET(set_action_in, cmd, field, MLX5_MODI_OUT_IP_PROTOCOL); 2809 MLX5_SET(set_action_in, cmd, data, IPPROTO_ROUTING); 2810 2811 pattern.data = (__be64 *)cmd; 2812 pattern.sz = sizeof(cmd); 2813 2814 return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern, 0, 2815 action->flags | MLX5DR_ACTION_FLAG_SHARED); 2816 } 2817 2818 static void * 2819 mlx5dr_action_create_push_ipv6_route_ext_mhdr2(struct mlx5dr_action *action, 2820 uint32_t bulk_size, 2821 uint8_t *data) 2822 { 2823 enum mlx5_modification_field field[MLX5_ST_SZ_DW(definer_hl_ipv6_addr)] = { 2824 MLX5_MODI_OUT_DIPV6_127_96, 2825 MLX5_MODI_OUT_DIPV6_95_64, 2826 MLX5_MODI_OUT_DIPV6_63_32, 2827 MLX5_MODI_OUT_DIPV6_31_0 2828 }; 2829 struct mlx5dr_action_mh_pattern pattern; 2830 uint32_t *ipv6_dst_addr = NULL; 2831 uint8_t seg_left, next_hdr; 2832 __be64 cmd[5] = {0}; 2833 uint16_t mod_id; 2834 uint32_t i; 2835 2836 /* Fetch the last IPv6 address in the segment list */ 2837 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) { 2838 seg_left = MLX5_GET(header_ipv6_routing_ext, data, segments_left) - 1; 2839 ipv6_dst_addr = (uint32_t *)data + MLX5_ST_SZ_DW(header_ipv6_routing_ext) + 2840 seg_left * MLX5_ST_SZ_DW(definer_hl_ipv6_addr); 2841 } 2842 2843 /* Copy IPv6 destination address from ipv6_route_ext.last_segment */ 2844 for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) { 2845 MLX5_SET(set_action_in, &cmd[i], action_type, MLX5_MODIFICATION_TYPE_SET); 2846 MLX5_SET(set_action_in, &cmd[i], field, field[i]); 2847 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) 
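			/* For a shared action the destination address is embedded
			 * in the pattern now; otherwise it is taken from the
			 * per-rule header data when the rule is created.
			 */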
2848 MLX5_SET(set_action_in, &cmd[i], data, be32toh(*ipv6_dst_addr++)); 2849 } 2850 2851 mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0); 2852 if (!mod_id) { 2853 rte_errno = EINVAL; 2854 return NULL; 2855 } 2856 2857 /* Set ipv6_route_ext.next_hdr since initially pushed as 0 for right checksum */ 2858 MLX5_SET(set_action_in, &cmd[4], action_type, MLX5_MODIFICATION_TYPE_SET); 2859 MLX5_SET(set_action_in, &cmd[4], length, 8); 2860 MLX5_SET(set_action_in, &cmd[4], offset, 24); 2861 MLX5_SET(set_action_in, &cmd[4], field, mod_id); 2862 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) { 2863 next_hdr = MLX5_GET(header_ipv6_routing_ext, data, next_hdr); 2864 MLX5_SET(set_action_in, &cmd[4], data, next_hdr); 2865 } 2866 2867 pattern.data = cmd; 2868 pattern.sz = sizeof(cmd); 2869 2870 return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern, 2871 bulk_size, action->flags); 2872 } 2873 2874 static int 2875 mlx5dr_action_create_push_ipv6_route_ext(struct mlx5dr_action *action, 2876 struct mlx5dr_action_reformat_header *hdr, 2877 uint32_t bulk_size) 2878 { 2879 struct mlx5dr_action_insert_header insert_hdr = { {0} }; 2880 uint8_t header[MLX5_PUSH_MAX_LEN]; 2881 uint32_t i; 2882 2883 if (!hdr || !hdr->sz || hdr->sz > MLX5_PUSH_MAX_LEN || 2884 ((action->flags & MLX5DR_ACTION_FLAG_SHARED) && !hdr->data)) { 2885 DR_LOG(ERR, "Invalid ipv6_route_ext header"); 2886 rte_errno = EINVAL; 2887 return rte_errno; 2888 } 2889 2890 if (action->flags & MLX5DR_ACTION_FLAG_SHARED) { 2891 memcpy(header, hdr->data, hdr->sz); 2892 /* Clear ipv6_route_ext.next_hdr for right checksum */ 2893 MLX5_SET(header_ipv6_routing_ext, header, next_hdr, 0); 2894 } 2895 2896 insert_hdr.anchor = MLX5_HEADER_ANCHOR_TCP_UDP; 2897 insert_hdr.encap = 1; 2898 insert_hdr.hdr.sz = hdr->sz; 2899 insert_hdr.hdr.data = header; 2900 action->ipv6_route_ext.action[0] = 2901 mlx5dr_action_create_insert_header_reparse(action->ctx, 1, &insert_hdr, 2902 bulk_size, action->flags, 2903 MLX5DR_ACTION_STC_REPARSE_OFF); 2904 action->ipv6_route_ext.action[1] = 2905 mlx5dr_action_create_push_ipv6_route_ext_mhdr1(action); 2906 action->ipv6_route_ext.action[2] = 2907 mlx5dr_action_create_push_ipv6_route_ext_mhdr2(action, bulk_size, hdr->data); 2908 2909 if (!action->ipv6_route_ext.action[0] || 2910 !action->ipv6_route_ext.action[1] || 2911 !action->ipv6_route_ext.action[2]) { 2912 DR_LOG(ERR, "Failed to create ipv6_route_ext push subaction"); 2913 goto err; 2914 } 2915 2916 return 0; 2917 2918 err: 2919 for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++) 2920 if (action->ipv6_route_ext.action[i]) 2921 mlx5dr_action_destroy(action->ipv6_route_ext.action[i]); 2922 2923 return rte_errno; 2924 } 2925 2926 struct mlx5dr_action * 2927 mlx5dr_action_create_reformat_ipv6_ext(struct mlx5dr_context *ctx, 2928 enum mlx5dr_action_type action_type, 2929 struct mlx5dr_action_reformat_header *hdr, 2930 uint32_t log_bulk_size, 2931 uint32_t flags) 2932 { 2933 struct mlx5dr_action *action; 2934 int ret; 2935 2936 if (!mlx5dr_action_is_hws_flags(flags) || 2937 ((flags & MLX5DR_ACTION_FLAG_SHARED) && log_bulk_size)) { 2938 DR_LOG(ERR, "IPv6 extension flags don't fit HWS (flags: 0x%x)", flags); 2939 rte_errno = EINVAL; 2940 return NULL; 2941 } 2942 2943 action = mlx5dr_action_create_generic(ctx, flags, action_type); 2944 if (!action) { 2945 rte_errno = ENOMEM; 2946 return NULL; 2947 } 2948 2949 switch (action_type) { 2950 case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT: 2951 if (!(flags & MLX5DR_ACTION_FLAG_SHARED)) { 2952 DR_LOG(ERR, "Pop ipv6_route_ext 
must be shared");
2953 			rte_errno = EINVAL;
2954 			goto free_action;
2955 		}
2956 
2957 		ret = mlx5dr_action_create_pop_ipv6_route_ext(action);
2958 		break;
2959 	case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
2960 		if (!mlx5dr_context_cap_dynamic_reparse(ctx)) {
2961 			DR_LOG(ERR, "IPv6 routing extension push action is not supported");
2962 			rte_errno = ENOTSUP;
2963 			goto free_action;
2964 		}
2965 
2966 		ret = mlx5dr_action_create_push_ipv6_route_ext(action, hdr, log_bulk_size);
2967 		break;
2968 	default:
2969 		DR_LOG(ERR, "Unsupported action type %d", action_type);
2970 		rte_errno = ENOTSUP;
2971 		goto free_action;
2972 	}
2973 
2974 	if (ret) {
2975 		DR_LOG(ERR, "Failed to create IPv6 extension reformat action");
2976 		goto free_action;
2977 	}
2978 
2979 	return action;
2980 
2981 free_action:
2982 	simple_free(action);
2983 	return NULL;
2984 }
2985 
2986 static bool
2987 mlx5dr_action_nat64_validate_param(struct mlx5dr_action_nat64_attr *attr,
2988 				   uint32_t flags)
2989 {
2990 	if (mlx5dr_action_is_root_flags(flags)) {
2991 		DR_LOG(ERR, "Nat64 action not supported for root");
2992 		rte_errno = ENOTSUP;
2993 		return false;
2994 	}
2995 
2996 	if (!(flags & MLX5DR_ACTION_FLAG_SHARED)) {
2997 		DR_LOG(ERR, "Nat64 action must be created with the SHARED flag");
2998 		rte_errno = EINVAL;
2999 		return false;
3000 	}
3001 
3002 	if (attr->num_of_registers > MLX5DR_ACTION_NAT64_REG_MAX) {
3003 		DR_LOG(ERR, "Nat64 action doesn't support more than %d registers",
3004 		       MLX5DR_ACTION_NAT64_REG_MAX);
3005 		rte_errno = EINVAL;
3006 		return false;
3007 	}
3008 
3009 	if (attr->flags & MLX5DR_ACTION_NAT64_BACKUP_ADDR &&
3010 	    attr->num_of_registers != MLX5DR_ACTION_NAT64_REG_MAX) {
3011 		DR_LOG(ERR, "Nat64 backup addr requires %d registers",
3012 		       MLX5DR_ACTION_NAT64_REG_MAX);
3013 		rte_errno = EINVAL;
3014 		return false;
3015 	}
3016 
3017 	if (!(attr->flags & MLX5DR_ACTION_NAT64_V4_TO_V6 ||
3018 	      attr->flags & MLX5DR_ACTION_NAT64_V6_TO_V4)) {
3019 		DR_LOG(ERR, "Nat64 requires at least one mode: V4_TO_V6 or V6_TO_V4");
3020 		rte_errno = EINVAL;
3021 		return false;
3022 	}
3023 
3024 	return true;
3025 }
3026 
3027 struct mlx5dr_action *
3028 mlx5dr_action_create_nat64(struct mlx5dr_context *ctx,
3029 			   struct mlx5dr_action_nat64_attr *attr,
3030 			   uint32_t flags)
3031 {
3032 	struct mlx5dr_action *action;
3033 
3034 	if (!mlx5dr_action_nat64_validate_param(attr, flags))
3035 		return NULL;
3036 
3037 	action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_NAT64);
3038 	if (!action)
3039 		return NULL;
3040 
3041 	action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY] =
3042 		mlx5dr_action_create_nat64_copy_state(ctx, attr, flags);
3043 	if (!action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY]) {
3044 		DR_LOG(ERR, "Nat64 failed creating copy state");
3045 		goto free_action;
3046 	}
3047 
3048 	action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_REPLACE] =
3049 		mlx5dr_action_create_nat64_repalce_state(ctx, attr, flags);
3050 	if (!action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_REPLACE]) {
3051 		DR_LOG(ERR, "Nat64 failed creating replace state");
3052 		goto free_copy;
3053 	}
3054 	action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY_PROTOCOL] =
3055 		mlx5dr_action_create_nat64_copy_proto_state(ctx, attr, flags);
3056 	if (!action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY_PROTOCOL]) {
3057 		DR_LOG(ERR, "Nat64 failed creating copy protocol state");
3058 		goto free_replace;
3059 	}
3060 
3061 	action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPYBACK] =
3062 		mlx5dr_action_create_nat64_copy_back_state(ctx, attr, flags);
3063 	if (!action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPYBACK]) {
3064 		DR_LOG(ERR, "Nat64 failed
creating copyback state"); 3065 goto free_copy_proto; 3066 } 3067 3068 return action; 3069 3070 free_copy_proto: 3071 mlx5dr_action_destroy(action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY_PROTOCOL]); 3072 free_replace: 3073 mlx5dr_action_destroy(action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_REPLACE]); 3074 free_copy: 3075 mlx5dr_action_destroy(action->nat64.stages[MLX5DR_ACTION_NAT64_STAGE_COPY]); 3076 free_action: 3077 simple_free(action); 3078 return NULL; 3079 } 3080 3081 static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) 3082 { 3083 struct mlx5dr_devx_obj *obj = NULL; 3084 uint32_t i; 3085 3086 switch (action->type) { 3087 case MLX5DR_ACTION_TYP_TIR: 3088 mlx5dr_action_destroy_stcs(action); 3089 if (mlx5dr_context_shared_gvmi_used(action->ctx)) 3090 mlx5dr_cmd_destroy_obj(action->alias.devx_obj); 3091 break; 3092 case MLX5DR_ACTION_TYP_MISS: 3093 case MLX5DR_ACTION_TYP_TAG: 3094 case MLX5DR_ACTION_TYP_DROP: 3095 case MLX5DR_ACTION_TYP_CTR: 3096 case MLX5DR_ACTION_TYP_TBL: 3097 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 3098 case MLX5DR_ACTION_TYP_ASO_METER: 3099 case MLX5DR_ACTION_TYP_ASO_CT: 3100 case MLX5DR_ACTION_TYP_PUSH_VLAN: 3101 case MLX5DR_ACTION_TYP_REMOVE_HEADER: 3102 case MLX5DR_ACTION_TYP_VPORT: 3103 mlx5dr_action_destroy_stcs(action); 3104 break; 3105 case MLX5DR_ACTION_TYP_DEST_ROOT: 3106 mlx5dr_action_destroy_stcs(action); 3107 mlx5_glue->destroy_steering_anchor(action->root_tbl.sa); 3108 break; 3109 case MLX5DR_ACTION_TYP_POP_VLAN: 3110 mlx5dr_action_destroy_stcs(action); 3111 mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP); 3112 break; 3113 case MLX5DR_ACTION_TYP_DEST_ARRAY: 3114 mlx5dr_action_destroy_stcs(action); 3115 mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island); 3116 for (i = 0; i < action->dest_array.num_dest; i++) { 3117 if (action->dest_array.dest_list[i].ext_reformat) 3118 mlx5dr_cmd_destroy_obj 3119 (action->dest_array.dest_list[i].ext_reformat); 3120 } 3121 simple_free(action->dest_array.dest_list); 3122 break; 3123 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 3124 case MLX5DR_ACTION_TYP_MODIFY_HDR: 3125 for (i = 0; i < action->modify_header.num_of_patterns; i++) { 3126 mlx5dr_action_destroy_stcs(&action[i]); 3127 if (action[i].modify_header.num_of_actions > 1) { 3128 mlx5dr_pat_put_pattern(action[i].ctx, 3129 action[i].modify_header.pat_obj); 3130 /* Save shared arg object if was used to free */ 3131 if (action[i].modify_header.arg_obj) 3132 obj = action[i].modify_header.arg_obj; 3133 } 3134 } 3135 if (obj) 3136 mlx5dr_cmd_destroy_obj(obj); 3137 break; 3138 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 3139 mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP_L3); 3140 for (i = 0; i < action->reformat.num_of_hdrs; i++) 3141 mlx5dr_action_destroy_stcs(&action[i]); 3142 mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); 3143 break; 3144 case MLX5DR_ACTION_TYP_INSERT_HEADER: 3145 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 3146 for (i = 0; i < action->reformat.num_of_hdrs; i++) 3147 mlx5dr_action_destroy_stcs(&action[i]); 3148 mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); 3149 break; 3150 case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT: 3151 case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT: 3152 for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++) 3153 if (action->ipv6_route_ext.action[i]) 3154 mlx5dr_action_destroy(action->ipv6_route_ext.action[i]); 3155 break; 3156 case MLX5DR_ACTION_TYP_NAT64: 3157 for (i = 0; i < MLX5DR_ACTION_NAT64_STAGES; i++) 3158 
mlx5dr_action_destroy(action->nat64.stages[i]); 3159 break; 3160 case MLX5DR_ACTION_TYP_LAST: 3161 break; 3162 default: 3163 DR_LOG(ERR, "Not supported action type: %d", action->type); 3164 assert(false); 3165 } 3166 } 3167 3168 static void mlx5dr_action_destroy_root(struct mlx5dr_action *action) 3169 { 3170 switch (action->type) { 3171 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: 3172 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: 3173 case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: 3174 case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: 3175 case MLX5DR_ACTION_TYP_MODIFY_HDR: 3176 ibv_destroy_flow_action(action->flow_action); 3177 break; 3178 } 3179 } 3180 3181 int mlx5dr_action_destroy(struct mlx5dr_action *action) 3182 { 3183 if (mlx5dr_action_is_root_flags(action->flags)) 3184 mlx5dr_action_destroy_root(action); 3185 else 3186 mlx5dr_action_destroy_hws(action); 3187 3188 simple_free(action); 3189 return 0; 3190 } 3191 3192 /* Called under pthread_spin_lock(&ctx->ctrl_lock) */ 3193 int mlx5dr_action_get_default_stc(struct mlx5dr_context *ctx, 3194 uint8_t tbl_type) 3195 { 3196 struct mlx5dr_cmd_stc_modify_attr stc_attr = {0}; 3197 struct mlx5dr_action_default_stc *default_stc; 3198 int ret; 3199 3200 if (ctx->common_res[tbl_type].default_stc) { 3201 ctx->common_res[tbl_type].default_stc->refcount++; 3202 return 0; 3203 } 3204 3205 default_stc = simple_calloc(1, sizeof(*default_stc)); 3206 if (!default_stc) { 3207 DR_LOG(ERR, "Failed to allocate memory for default STCs"); 3208 rte_errno = ENOMEM; 3209 return rte_errno; 3210 } 3211 3212 stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP; 3213 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW0; 3214 stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; 3215 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3216 &default_stc->nop_ctr); 3217 if (ret) { 3218 DR_LOG(ERR, "Failed to allocate default counter STC"); 3219 goto free_default_stc; 3220 } 3221 3222 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5; 3223 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3224 &default_stc->nop_dw5); 3225 if (ret) { 3226 DR_LOG(ERR, "Failed to allocate default NOP DW5 STC"); 3227 goto free_nop_ctr; 3228 } 3229 3230 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW6; 3231 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3232 &default_stc->nop_dw6); 3233 if (ret) { 3234 DR_LOG(ERR, "Failed to allocate default NOP DW6 STC"); 3235 goto free_nop_dw5; 3236 } 3237 3238 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW7; 3239 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3240 &default_stc->nop_dw7); 3241 if (ret) { 3242 DR_LOG(ERR, "Failed to allocate default NOP DW7 STC"); 3243 goto free_nop_dw6; 3244 } 3245 3246 stc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT; 3247 if (!mlx5dr_context_shared_gvmi_used(ctx)) { 3248 stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW; 3249 } else { 3250 /* On shared gvmi the default hit behavior is jump to alias end ft */ 3251 stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT; 3252 stc_attr.dest_table_id = ctx->gvmi_res[tbl_type].aliased_end_ft->id; 3253 } 3254 3255 ret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type, 3256 &default_stc->default_hit); 3257 if (ret) { 3258 DR_LOG(ERR, "Failed to allocate default allow STC"); 3259 goto free_nop_dw7; 3260 } 3261 3262 ctx->common_res[tbl_type].default_stc = default_stc; 3263 ctx->common_res[tbl_type].default_stc->refcount++; 3264 3265 return 0; 3266 3267 free_nop_dw7: 3268 mlx5dr_action_free_single_stc(ctx, 
tbl_type, &default_stc->nop_dw7);
3269 free_nop_dw6:
3270 	mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
3271 free_nop_dw5:
3272 	mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
3273 free_nop_ctr:
3274 	mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
3275 free_default_stc:
3276 	simple_free(default_stc);
3277 	return rte_errno;
3278 }
3279 
3280 void mlx5dr_action_put_default_stc(struct mlx5dr_context *ctx,
3281 				   uint8_t tbl_type)
3282 {
3283 	struct mlx5dr_action_default_stc *default_stc;
3284 
3285 	default_stc = ctx->common_res[tbl_type].default_stc;
3286 
3287 
3288 	if (--default_stc->refcount)
3289 		return;
3290 
3291 	mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
3292 	mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
3293 	mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
3294 	mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
3295 	mlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
3296 	simple_free(default_stc);
3297 	ctx->common_res[tbl_type].default_stc = NULL;
3298 }
3299 
3300 static void mlx5dr_action_modify_write(struct mlx5dr_send_engine *queue,
3301 				       uint32_t arg_idx,
3302 				       uint8_t *arg_data,
3303 				       uint16_t num_of_actions)
3304 {
3305 	mlx5dr_arg_write(queue, NULL, arg_idx, arg_data,
3306 			 num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);
3307 }
3308 
3309 void
3310 mlx5dr_action_prepare_decap_l3_data(uint8_t *src, uint8_t *dst,
3311 				    uint16_t num_of_actions)
3312 {
3313 	uint8_t *e_src;
3314 	int i;
3315 
3316 	/* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes.
3317 	 * Copy from the end of src to the start of dst;
3318 	 * the extra 2 bytes are the leftover from the 14B or 18B header.
3319 	 */
3320 	if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
3321 		e_src = src + MLX5DR_ACTION_HDR_LEN_L2;
3322 	else
3323 		e_src = src + MLX5DR_ACTION_HDR_LEN_L2_W_VLAN;
3324 
3325 	/* Move dst over the first remove action + zero data */
3326 	dst += MLX5DR_ACTION_DOUBLE_SIZE;
3327 	/* Move dst over the first insert ctrl action */
3328 	dst += MLX5DR_ACTION_DOUBLE_SIZE / 2;
3329 	/* Actions:
3330 	 * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
3331 	 * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
3332 	 * the loop is without the last insertion.
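	 * The final copy below places the last 2 data bytes after a 2-byte gap
	 * which the trailing remove-words action strips from the packet.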
3333 	 */
3334 	for (i = 0; i < num_of_actions - 3; i++) {
3335 		e_src -= MLX5DR_ACTION_INLINE_DATA_SIZE;
3336 		memcpy(dst, e_src, MLX5DR_ACTION_INLINE_DATA_SIZE); /* data */
3337 		dst += MLX5DR_ACTION_DOUBLE_SIZE;
3338 	}
3339 	/* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
3340 	e_src -= MLX5DR_ACTION_INLINE_DATA_SIZE / 2;
3341 	dst += MLX5DR_ACTION_INLINE_DATA_SIZE / 2;
3342 	memcpy(dst, e_src, 2);
3343 }
3344 
3345 static int mlx5dr_action_get_shared_stc_offset(struct mlx5dr_context_common_res *common_res,
3346 					       enum mlx5dr_context_shared_stc_type stc_type)
3347 {
3348 	return common_res->shared_stc[stc_type]->remove_header.offset;
3349 }
3350 
3351 static struct mlx5dr_actions_wqe_setter *
3352 mlx5dr_action_setter_find_first(struct mlx5dr_actions_wqe_setter *setter,
3353 				uint8_t req_flags)
3354 {
3355 	/* Use a new setter if requested flags are taken */
3356 	while (setter->flags & req_flags)
3357 		setter++;
3358 
3359 	/* Use the current setter if the required flags are not used */
3360 	return setter;
3361 }
3362 
3363 static void
3364 mlx5dr_action_apply_stc(struct mlx5dr_actions_apply_data *apply,
3365 			enum mlx5dr_action_stc_idx stc_idx,
3366 			uint8_t action_idx)
3367 {
3368 	struct mlx5dr_action *action = apply->rule_action[action_idx].action;
3369 
3370 	apply->wqe_ctrl->stc_ix[stc_idx] =
3371 		htobe32(action->stc[apply->tbl_type].offset);
3372 }
3373 
3374 static void
3375 mlx5dr_action_setter_push_vlan(struct mlx5dr_actions_apply_data *apply,
3376 			       struct mlx5dr_actions_wqe_setter *setter)
3377 {
3378 	struct mlx5dr_rule_action *rule_action;
3379 
3380 	rule_action = &apply->rule_action[setter->idx_double];
3381 	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;
3382 	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
3383 
3384 	mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double);
3385 	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;
3386 }
3387 
3388 static void
3389 mlx5dr_action_setter_modify_header(struct mlx5dr_actions_apply_data *apply,
3390 				   struct mlx5dr_actions_wqe_setter *setter)
3391 {
3392 	struct mlx5dr_rule_action *rule_action;
3393 	uint32_t stc_idx, arg_sz, arg_idx;
3394 	struct mlx5dr_action *action;
3395 	uint8_t *single_action;
3396 
3397 	rule_action = &apply->rule_action[setter->idx_double];
3398 	action = rule_action->action + rule_action->modify_header.pattern_idx;
3399 
3400 	stc_idx = htobe32(action->stc[apply->tbl_type].offset);
3401 	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx;
3402 	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;
3403 
3404 	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;
3405 
3406 	if (action->modify_header.num_of_actions == 1) {
3407 		if (action->modify_header.single_action_type ==
3408 		    MLX5_MODIFICATION_TYPE_COPY ||
3409 		    action->modify_header.single_action_type ==
3410 		    MLX5_MODIFICATION_TYPE_ADD_FIELD) {
3411 			apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0;
3412 			return;
3413 		}
3414 
3415 		if (action->flags & MLX5DR_ACTION_FLAG_SHARED)
3416 			single_action = (uint8_t *)&action->modify_header.single_action;
3417 		else
3418 			single_action = rule_action->modify_header.data;
3419 
3420 		apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] =
3421 			*(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
3422 	} else {
3423 		/* Argument offset is multiplied by the arg size required for these actions */
3424 		arg_sz = mlx5dr_arg_get_arg_size(action->modify_header.max_num_of_actions);
3425 		arg_idx = rule_action->modify_header.offset * arg_sz;
3426 
3427 		apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx);
3428 
3429
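		/* For non-shared modify header actions the per-rule argument data
		 * is written to the ARG object below; require_dep is set since an
		 * additional argument write is issued for this rule.
		 */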
if (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) { 3430 apply->require_dep = 1; 3431 mlx5dr_action_modify_write(apply->queue, 3432 action->modify_header.arg_obj->id + arg_idx, 3433 rule_action->modify_header.data, 3434 action->modify_header.num_of_actions); 3435 } 3436 } 3437 } 3438 3439 static void 3440 mlx5dr_action_setter_nat64(struct mlx5dr_actions_apply_data *apply, 3441 struct mlx5dr_actions_wqe_setter *setter) 3442 { 3443 struct mlx5dr_rule_action *rule_action; 3444 struct mlx5dr_action *cur_stage_action; 3445 struct mlx5dr_action *action; 3446 uint32_t stc_idx; 3447 3448 rule_action = &apply->rule_action[setter->idx_double]; 3449 action = rule_action->action; 3450 cur_stage_action = action->nat64.stages[setter->stage_idx]; 3451 3452 stc_idx = htobe32(cur_stage_action->stc[apply->tbl_type].offset); 3453 3454 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; 3455 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3456 3457 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; 3458 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0; 3459 } 3460 3461 static void 3462 mlx5dr_action_setter_insert_ptr(struct mlx5dr_actions_apply_data *apply, 3463 struct mlx5dr_actions_wqe_setter *setter) 3464 { 3465 struct mlx5dr_rule_action *rule_action; 3466 uint32_t stc_idx, arg_idx, arg_sz; 3467 struct mlx5dr_action *action; 3468 3469 rule_action = &apply->rule_action[setter->idx_double]; 3470 action = rule_action->action + rule_action->reformat.hdr_idx; 3471 3472 /* Argument offset multiple on args required for header size */ 3473 arg_sz = mlx5dr_arg_data_size_to_arg_size(action->reformat.max_hdr_sz); 3474 arg_idx = rule_action->reformat.offset * arg_sz; 3475 3476 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; 3477 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx); 3478 3479 stc_idx = htobe32(action->stc[apply->tbl_type].offset); 3480 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; 3481 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3482 3483 if (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) { 3484 apply->require_dep = 1; 3485 mlx5dr_arg_write(apply->queue, NULL, 3486 action->reformat.arg_obj->id + arg_idx, 3487 rule_action->reformat.data, 3488 action->reformat.header_size); 3489 } 3490 } 3491 3492 static void 3493 mlx5dr_action_setter_tnl_l3_to_l2(struct mlx5dr_actions_apply_data *apply, 3494 struct mlx5dr_actions_wqe_setter *setter) 3495 { 3496 struct mlx5dr_rule_action *rule_action; 3497 uint32_t stc_idx, arg_sz, arg_idx; 3498 struct mlx5dr_action *action; 3499 3500 rule_action = &apply->rule_action[setter->idx_double]; 3501 action = rule_action->action + rule_action->reformat.hdr_idx; 3502 3503 /* Argument offset multiple on args required for num of actions */ 3504 arg_sz = mlx5dr_arg_get_arg_size(action->modify_header.max_num_of_actions); 3505 arg_idx = rule_action->reformat.offset * arg_sz; 3506 3507 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; 3508 apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx); 3509 3510 stc_idx = htobe32(action->stc[apply->tbl_type].offset); 3511 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; 3512 apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; 3513 3514 if (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) { 3515 apply->require_dep = 1; 3516 mlx5dr_arg_decapl3_write(apply->queue, 3517 action->modify_header.arg_obj->id + arg_idx, 3518 rule_action->reformat.data, 3519 action->modify_header.num_of_actions); 3520 } 3521 } 3522 3523 static void 3524 mlx5dr_action_setter_aso(struct 
static void
mlx5dr_action_setter_aso(struct mlx5dr_actions_apply_data *apply,
			 struct mlx5dr_actions_wqe_setter *setter)
{
	struct mlx5dr_rule_action *rule_action;
	uint32_t exe_aso_ctrl;
	uint32_t offset;

	rule_action = &apply->rule_action[setter->idx_double];

	switch (rule_action->action->type) {
	case MLX5DR_ACTION_TYP_ASO_METER:
		/* exe_aso_ctrl format:
		 * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
		 */
		offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;
		exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;
		exe_aso_ctrl |= rule_action->aso_meter.init_color <<
				MLX5DR_ACTION_METER_INIT_COLOR_OFFSET;
		break;
	case MLX5DR_ACTION_TYP_ASO_CT:
		/* exe_aso_ctrl CT format:
		 * [STC only and reserved bits 31b][direction 1b]
		 */
		offset = rule_action->aso_ct.offset / MLX5_ASO_CT_NUM_PER_OBJ;
		exe_aso_ctrl = rule_action->aso_ct.direction;
		break;
	default:
		DR_LOG(ERR, "Unsupported ASO action type: %d", rule_action->action->type);
		rte_errno = ENOTSUP;
		return;
	}

	/* aso_object_offset format: [24B] */
	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = htobe32(offset);
	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(exe_aso_ctrl);

	mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double);
	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;
}
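/* Worked example (documentation only) of the exe_aso_ctrl packing done above
 * for a meter action, assuming aso_meter.offset = 5, an object that holds
 * MLX5_ASO_METER_NUM_PER_OBJ = 2 meters and init_color = 1. These values are
 * assumptions made only for the example.
 *
 *	offset	     = 5 / 2;	// ASO object index -> 2, written to DW6
 *	exe_aso_ctrl = 5 % 2;	// meter index inside the object -> 1 (meter_id bit)
 *	exe_aso_ctrl |= 1 << MLX5DR_ACTION_METER_INIT_COLOR_OFFSET;
 *				// init_color occupies the next 2 bits
 *	// DW7 carries htobe32(exe_aso_ctrl)
 */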
static void
mlx5dr_action_setter_tag(struct mlx5dr_actions_apply_data *apply,
			 struct mlx5dr_actions_wqe_setter *setter)
{
	struct mlx5dr_rule_action *rule_action;

	rule_action = &apply->rule_action[setter->idx_single];
	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = htobe32(rule_action->tag.value);
	mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW5, setter->idx_single);
}

static void
mlx5dr_action_setter_ctrl_ctr(struct mlx5dr_actions_apply_data *apply,
			      struct mlx5dr_actions_wqe_setter *setter)
{
	struct mlx5dr_rule_action *rule_action;

	rule_action = &apply->rule_action[setter->idx_ctr];
	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW0] = htobe32(rule_action->counter.offset);
	mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_CTRL, setter->idx_ctr);
}

static void
mlx5dr_action_setter_single(struct mlx5dr_actions_apply_data *apply,
			    struct mlx5dr_actions_wqe_setter *setter)
{
	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;
	mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW5, setter->idx_single);
}

static void
mlx5dr_action_setter_single_double_pop(struct mlx5dr_actions_apply_data *apply,
				       __rte_unused struct mlx5dr_actions_wqe_setter *setter)
{
	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;
	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] =
		htobe32(mlx5dr_action_get_shared_stc_offset(apply->common_res,
							    MLX5DR_CONTEXT_SHARED_STC_DOUBLE_POP));
}

static void
mlx5dr_action_setter_hit(struct mlx5dr_actions_apply_data *apply,
			 struct mlx5dr_actions_wqe_setter *setter)
{
	apply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = 0;
	mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_HIT, setter->idx_hit);
}

static void
mlx5dr_action_setter_default_hit(struct mlx5dr_actions_apply_data *apply,
				 __rte_unused struct mlx5dr_actions_wqe_setter *setter)
{
	apply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = 0;
	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_HIT] =
		htobe32(apply->common_res->default_stc->default_hit.offset);
}

static void
mlx5dr_action_setter_hit_next_action(struct mlx5dr_actions_apply_data *apply,
				     __rte_unused struct mlx5dr_actions_wqe_setter *setter)
{
	apply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = htobe32(apply->next_direct_idx << 6);
	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_HIT] = htobe32(apply->jump_to_action_stc);
}

static void
mlx5dr_action_setter_common_decap(struct mlx5dr_actions_apply_data *apply,
				  __rte_unused struct mlx5dr_actions_wqe_setter *setter)
{
	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;
	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] =
		htobe32(mlx5dr_action_get_shared_stc_offset(apply->common_res,
							    MLX5DR_CONTEXT_SHARED_STC_DECAP_L3));
}

static void
mlx5dr_action_setter_ipv6_route_ext_gen_push_mhdr(uint8_t *data, void *mh_data)
{
	uint8_t *action_ptr = mh_data;
	uint32_t *ipv6_dst_addr;
	uint8_t seg_left;
	uint32_t i;

	/* Fetch the last IPv6 address in the segment list, which is the next hop */
	seg_left = MLX5_GET(header_ipv6_routing_ext, data, segments_left) - 1;
	ipv6_dst_addr = (uint32_t *)data + MLX5_ST_SZ_DW(header_ipv6_routing_ext) +
			seg_left * MLX5_ST_SZ_DW(definer_hl_ipv6_addr);

	/* Load the next hop IPv6 address in reverse order into ipv6.dst_address */
	for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) {
		MLX5_SET(set_action_in, action_ptr, data, be32toh(*ipv6_dst_addr++));
		action_ptr += MLX5DR_MODIFY_ACTION_SIZE;
	}

	/* Set ipv6_route_ext.next_hdr per user input */
	MLX5_SET(set_action_in, action_ptr, data, *data);
}
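/* Sketch (documentation only) of the argument data generated above for an
 * SRv6 push: only the "data" dword of each set_action_in slot is filled here,
 * while the target fields are defined by the modify-header pattern created
 * with the ipv6_route_ext action.
 *
 *	cmd[0..3] <- next hop address taken from seg_list[segments_left - 1],
 *		     one dword per slot, loaded in reverse order
 *	cmd[4]	  <- the user supplied ipv6_route_ext.next_hdr value
 */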
static void
mlx5dr_action_setter_ipv6_route_ext_mhdr(struct mlx5dr_actions_apply_data *apply,
					 struct mlx5dr_actions_wqe_setter *setter)
{
	struct mlx5dr_rule_action *rule_action = apply->rule_action;
	struct mlx5dr_actions_wqe_setter tmp_setter = {0};
	struct mlx5dr_rule_action tmp_rule_action;
	__be64 cmd[MLX5_SRV6_SAMPLE_NUM] = {0};
	struct mlx5dr_action *ipv6_ext_action;
	uint8_t *header;

	header = rule_action[setter->idx_double].ipv6_ext.header;
	ipv6_ext_action = rule_action[setter->idx_double].action;
	tmp_rule_action.action = ipv6_ext_action->ipv6_route_ext.action[setter->extra_data];

	if (tmp_rule_action.action->flags & MLX5DR_ACTION_FLAG_SHARED) {
		tmp_rule_action.modify_header.offset = 0;
		tmp_rule_action.modify_header.pattern_idx = 0;
		tmp_rule_action.modify_header.data = NULL;
	} else {
		/*
		 * Copy ipv6_dst from ipv6_route_ext.last_seg.
		 * Set ipv6_route_ext.next_hdr.
		 */
		mlx5dr_action_setter_ipv6_route_ext_gen_push_mhdr(header, cmd);
		tmp_rule_action.modify_header.data = (uint8_t *)cmd;
		tmp_rule_action.modify_header.pattern_idx = 0;
		tmp_rule_action.modify_header.offset =
			rule_action[setter->idx_double].ipv6_ext.offset;
	}

	apply->rule_action = &tmp_rule_action;

	/* Reuse the regular modify-header setter */
	mlx5dr_action_setter_modify_header(apply, &tmp_setter);

	/* Restore the original rule actions from the backup */
	apply->rule_action = rule_action;
}

static void
mlx5dr_action_setter_ipv6_route_ext_insert_ptr(struct mlx5dr_actions_apply_data *apply,
					       struct mlx5dr_actions_wqe_setter *setter)
{
	struct mlx5dr_rule_action *rule_action = apply->rule_action;
	struct mlx5dr_actions_wqe_setter tmp_setter = {0};
	struct mlx5dr_rule_action tmp_rule_action;
	struct mlx5dr_action *ipv6_ext_action;
	uint8_t header[MLX5_PUSH_MAX_LEN];

	ipv6_ext_action = rule_action[setter->idx_double].action;
	tmp_rule_action.action = ipv6_ext_action->ipv6_route_ext.action[setter->extra_data];

	if (tmp_rule_action.action->flags & MLX5DR_ACTION_FLAG_SHARED) {
		tmp_rule_action.reformat.offset = 0;
		tmp_rule_action.reformat.hdr_idx = 0;
		tmp_rule_action.reformat.data = NULL;
	} else {
		memcpy(header, rule_action[setter->idx_double].ipv6_ext.header,
		       tmp_rule_action.action->reformat.header_size);
		/* Clear ipv6_route_ext.next_hdr so the checksum stays correct */
		MLX5_SET(header_ipv6_routing_ext, header, next_hdr, 0);
		tmp_rule_action.reformat.data = header;
		tmp_rule_action.reformat.hdr_idx = 0;
		tmp_rule_action.reformat.offset =
			rule_action[setter->idx_double].ipv6_ext.offset;
	}

	apply->rule_action = &tmp_rule_action;

	/* Reuse the regular insert-with-pointer setter */
	mlx5dr_action_setter_insert_ptr(apply, &tmp_setter);

	/* Restore the original rule actions from the backup */
	apply->rule_action = rule_action;
}

static void
mlx5dr_action_setter_ipv6_route_ext_pop(struct mlx5dr_actions_apply_data *apply,
					struct mlx5dr_actions_wqe_setter *setter)
{
	struct mlx5dr_rule_action *rule_action = &apply->rule_action[setter->idx_single];
	uint8_t idx = MLX5DR_ACTION_IPV6_EXT_MAX_SA - 1;
	struct mlx5dr_action *action;

	/* Pop the ipv6_route_ext using the set_single logic */
	action = rule_action->action->ipv6_route_ext.action[idx];
	apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;
	apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] =
		htobe32(action->stc[apply->tbl_type].offset);
}
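/* Summary sketch of the per-STE slots that the setters above fill in; this is
 * a reading of the code in this file, kept only as documentation.
 *
 *	set_ctr	   -> wqe_data[MLX5DR_ACTION_OFFSET_DW0], stc_ix[CTRL]
 *	set_single -> wqe_data[MLX5DR_ACTION_OFFSET_DW5], stc_ix[DW5]
 *	set_double -> wqe_data[MLX5DR_ACTION_OFFSET_DW6/DW7], stc_ix[DW6]
 *	set_hit	   -> wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB], stc_ix[HIT]
 */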
int mlx5dr_action_template_process(struct mlx5dr_action_template *at)
{
	struct mlx5dr_actions_wqe_setter *start_setter = at->setters + 1;
	enum mlx5dr_action_type *action_type = at->action_type_arr;
	struct mlx5dr_actions_wqe_setter *setter = at->setters;
	struct mlx5dr_actions_wqe_setter *pop_setter = NULL;
	struct mlx5dr_actions_wqe_setter *last_setter;
	int i, j;

	/* Note: The given action combination must be valid */

	/* Check if the actions were already processed */
	if (at->num_of_action_stes)
		return 0;

	for (i = 0; i < MLX5DR_ACTION_MAX_STE; i++)
		setter[i].set_hit = &mlx5dr_action_setter_hit_next_action;

	/* The same action template setters can be used with either a jumbo or
	 * a match STE. To support both cases we reserve the first setter for
	 * the jumbo STE case, which allows jumping to the first action STE.
	 * This extra setter can be dropped in some cases at rule creation.
	 */
	setter = start_setter;
	last_setter = start_setter;

	for (i = 0; i < at->num_actions; i++) {
		switch (action_type[i]) {
		case MLX5DR_ACTION_TYP_DROP:
		case MLX5DR_ACTION_TYP_TIR:
		case MLX5DR_ACTION_TYP_TBL:
		case MLX5DR_ACTION_TYP_DEST_ROOT:
		case MLX5DR_ACTION_TYP_DEST_ARRAY:
		case MLX5DR_ACTION_TYP_VPORT:
		case MLX5DR_ACTION_TYP_MISS:
			/* Hit action */
			last_setter->flags |= ASF_HIT;
			last_setter->set_hit = &mlx5dr_action_setter_hit;
			last_setter->idx_hit = i;
			break;

		case MLX5DR_ACTION_TYP_POP_VLAN:
			/* Single remove header to header */
			if (pop_setter) {
				/* We have two pops, use the shared STC */
				pop_setter->set_single = &mlx5dr_action_setter_single_double_pop;
				break;
			}
			setter = mlx5dr_action_setter_find_first(last_setter,
								 ASF_SINGLE1 | ASF_MODIFY | ASF_INSERT);
			setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
			setter->set_single = &mlx5dr_action_setter_single;
			setter->idx_single = i;
			pop_setter = setter;
			break;

		case MLX5DR_ACTION_TYP_PUSH_VLAN:
			/* Double insert inline */
			setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
			setter->flags |= ASF_DOUBLE | ASF_INSERT;
			setter->set_double = &mlx5dr_action_setter_push_vlan;
			setter->idx_double = i;
			break;

		case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
			/*
			 * Backup ipv6_route_ext.next_hdr to ipv6_route_ext.seg_left.
			 * Set ipv6_route_ext.next_hdr to 0 due to the checksum bug.
			 */
			setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
			setter->flags |= ASF_DOUBLE | ASF_MODIFY;
			setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr;
			setter->idx_double = i;
			setter->extra_data = 0;
			setter++;

			/*
			 * Restore ipv6_route_ext.next_hdr from ipv6_route_ext.seg_left.
			 * Load the final destination address from flex parser sample 1->4.
			 */
			setter->flags |= ASF_DOUBLE | ASF_MODIFY;
			setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr;
			setter->idx_double = i;
			setter->extra_data = 1;
			setter++;

			/* Set the ipv6.protocol per ipv6_route_ext.next_hdr */
			setter->flags |= ASF_DOUBLE | ASF_MODIFY;
			setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr;
			setter->idx_double = i;
			setter->extra_data = 2;
			/* Pop ipv6_route_ext */
			setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
			setter->set_single = &mlx5dr_action_setter_ipv6_route_ext_pop;
			setter->idx_single = i;
			at->need_dep_write = true;
			break;

		case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
			/* Insert ipv6_route_ext with next_hdr set to 0 due to the checksum bug */
			setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
			setter->flags |= ASF_DOUBLE | ASF_INSERT;
			setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_insert_ptr;
			setter->idx_double = i;
			setter->extra_data = 0;
			setter++;

			/* Set ipv6.protocol to IPPROTO_ROUTING (0x2b) */
			setter->flags |= ASF_DOUBLE | ASF_MODIFY;
			setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr;
			setter->idx_double = i;
			setter->extra_data = 1;
			setter++;

			/*
			 * Load the right ipv6_route_ext.next_hdr per the user input buffer.
			 * Load the next dest_addr from ipv6_route_ext.seg_list[last].
			 */
			setter->flags |= ASF_DOUBLE | ASF_MODIFY;
			setter->set_double = &mlx5dr_action_setter_ipv6_route_ext_mhdr;
			setter->idx_double = i;
			setter->extra_data = 2;
			at->need_dep_write = true;
			break;

		case MLX5DR_ACTION_TYP_MODIFY_HDR:
			/* Double modify header list */
			setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
			setter->flags |= ASF_DOUBLE | ASF_MODIFY;
			setter->set_double = &mlx5dr_action_setter_modify_header;
			setter->idx_double = i;
			at->need_dep_write = true;
			break;

		case MLX5DR_ACTION_TYP_ASO_METER:
		case MLX5DR_ACTION_TYP_ASO_CT:
			setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE);
			setter->flags |= ASF_DOUBLE;
			setter->set_double = &mlx5dr_action_setter_aso;
			setter->idx_double = i;
			break;

		case MLX5DR_ACTION_TYP_REMOVE_HEADER:
		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
			/* Single remove header to header */
			setter = mlx5dr_action_setter_find_first(last_setter,
								 ASF_SINGLE1 | ASF_MODIFY | ASF_INSERT);
			setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
			setter->set_single = &mlx5dr_action_setter_single;
			setter->idx_single = i;
			break;

		case MLX5DR_ACTION_TYP_INSERT_HEADER:
		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
			/* Double insert header with pointer */
			setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
			setter->flags |= ASF_DOUBLE | ASF_INSERT;
			setter->set_double = &mlx5dr_action_setter_insert_ptr;
			setter->idx_double = i;
			at->need_dep_write = true;
			break;

		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
			/* Single remove + double insert header with pointer */
			setter = mlx5dr_action_setter_find_first(last_setter, ASF_SINGLE1 | ASF_DOUBLE);
			setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
			setter->set_double = &mlx5dr_action_setter_insert_ptr;
			setter->idx_double = i;
			setter->set_single = &mlx5dr_action_setter_common_decap;
			setter->idx_single = i;
			at->need_dep_write = true;
			break;

		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
			/* Double modify header list with remove and push inline */
			setter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
			setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
			setter->set_double = &mlx5dr_action_setter_tnl_l3_to_l2;
			setter->idx_double = i;
			at->need_dep_write = true;
			break;

		case MLX5DR_ACTION_TYP_TAG:
			/* Single TAG action, search for any room from the start */
			setter = mlx5dr_action_setter_find_first(start_setter, ASF_SINGLE1);
			setter->flags |= ASF_SINGLE1;
			setter->set_single = &mlx5dr_action_setter_tag;
			setter->idx_single = i;
			break;

		case MLX5DR_ACTION_TYP_CTR:
			/* Control counter action.
			 * TODO: Currently the counter is executed first. Support is needed
			 * for a single-action counter which is executed last.
			 * Example: Decap + CTR
			 */
			setter = mlx5dr_action_setter_find_first(start_setter, ASF_CTR);
			setter->flags |= ASF_CTR;
			setter->set_ctr = &mlx5dr_action_setter_ctrl_ctr;
			setter->idx_ctr = i;
			break;

		case MLX5DR_ACTION_TYP_NAT64:
			/* NAT64 requires 3 setters, each of which does a specific modify header */
			for (j = 0; j < MLX5DR_ACTION_NAT64_STAGES; j++) {
				setter = mlx5dr_action_setter_find_first(last_setter,
									 ASF_DOUBLE | ASF_REMOVE);
				setter->flags |= ASF_DOUBLE | ASF_MODIFY;
				setter->set_double = &mlx5dr_action_setter_nat64;
				setter->idx_double = i;
				/* The stage indicates which modify-header to push */
				setter->stage_idx = j;
			}
			break;

		default:
			DR_LOG(ERR, "Unsupported action type: %d", action_type[i]);
			rte_errno = ENOTSUP;
			assert(false);
			return rte_errno;
		}

		last_setter = RTE_MAX(setter, last_setter);
	}

	/* Set the default hit on the last STE if no hit action was provided */
	if (!(last_setter->flags & ASF_HIT))
		last_setter->set_hit = &mlx5dr_action_setter_default_hit;

	at->num_of_action_stes = last_setter - start_setter + 1;

	/* Check if the action template doesn't require any action DWs */
	at->only_term = (at->num_of_action_stes == 1) &&
			!(last_setter->flags & ~(ASF_CTR | ASF_HIT));

	return 0;
}

struct mlx5dr_action_template *
mlx5dr_action_template_create(const enum mlx5dr_action_type action_type[],
			      uint32_t flags)
{
	struct mlx5dr_action_template *at;
	uint8_t num_actions = 0;
	int i;

	if (flags > MLX5DR_ACTION_TEMPLATE_FLAG_RELAXED_ORDER) {
		DR_LOG(ERR, "Unsupported action template flag provided");
		rte_errno = EINVAL;
		return NULL;
	}

	at = simple_calloc(1, sizeof(*at));
	if (!at) {
		DR_LOG(ERR, "Failed to allocate action template");
		rte_errno = ENOMEM;
		return NULL;
	}

	at->flags = flags;

	while (action_type[num_actions++] != MLX5DR_ACTION_TYP_LAST)
		;

	at->num_actions = num_actions - 1;
	at->action_type_arr = simple_calloc(num_actions, sizeof(*action_type));
	if (!at->action_type_arr) {
		DR_LOG(ERR, "Failed to allocate action type array");
		rte_errno = ENOMEM;
		goto free_at;
	}

	for (i = 0; i < num_actions; i++)
		at->action_type_arr[i] = action_type[i];

	return at;

free_at:
	simple_free(at);
	return NULL;
}

int mlx5dr_action_template_destroy(struct mlx5dr_action_template *at)
{
	simple_free(at->action_type_arr);
	simple_free(at);
	return 0;
}
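/* Usage sketch (documentation only): the caller builds an action type array
 * terminated by MLX5DR_ACTION_TYP_LAST, creates the template and has the
 * setters computed by mlx5dr_action_template_process(). The action list and
 * error handling below are assumptions made only for the example.
 *
 *	enum mlx5dr_action_type types[] = {
 *		MLX5DR_ACTION_TYP_CTR,
 *		MLX5DR_ACTION_TYP_MODIFY_HDR,
 *		MLX5DR_ACTION_TYP_TBL,
 *		MLX5DR_ACTION_TYP_LAST,
 *	};
 *	struct mlx5dr_action_template *at;
 *
 *	at = mlx5dr_action_template_create(types, 0);
 *	if (!at)
 *		return -rte_errno;
 *
 *	if (mlx5dr_action_template_process(at)) {
 *		mlx5dr_action_template_destroy(at);
 *		return -rte_errno;
 *	}
 *
 *	// at->num_of_action_stes now holds the number of action STEs required,
 *	// and at->only_term is set when no action DWs are needed at all.
 *
 *	mlx5dr_action_template_destroy(at);
 */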