/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include <rte_fslmc.h>
#include <fsl_dpdmux.h>
#include <fsl_dpkg.h>

#include <dpaa2_ethdev.h>
#include <dpaa2_pmd_logs.h>

struct dpaa2_dpdmux_dev {
	TAILQ_ENTRY(dpaa2_dpdmux_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpdmux;	/**< Handle to DPDMUX portal object */
	uint16_t token;
	uint32_t dpdmux_id;		/**< HW ID for DPDMUX object */
	uint8_t num_ifs;		/**< Number of interfaces in DPDMUX */
};

struct rte_flow {
	struct dpdmux_rule_cfg rule;
};

TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
static struct dpdmux_dev_list dpdmux_dev_list =
	TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /*!< DPDMUX device list */

static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev = NULL;

	/* Get DPDMUX dev handle from list using index */
	TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) {
		if (dpdmux_dev->dpdmux_id == dpdmux_id)
			break;
	}

	return dpdmux_dev;
}

struct rte_flow *
rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
			      struct rte_flow_item *pattern[],
			      struct rte_flow_action *actions[])
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpkg_profile_cfg kg_cfg;
	const struct rte_flow_action_vf *vf_conf;
	struct dpdmux_cls_action dpdmux_action;
	struct rte_flow *flow = NULL;
	void *key_iova, *mask_iova, *key_cfg_iova = NULL;
	uint8_t key_size = 0;
	int ret;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return NULL;
	}

	key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
				   RTE_CACHE_LINE_SIZE);
	if (!key_cfg_iova) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return NULL;
	}
	/* The key and mask blobs live in the same allocation as the flow,
	 * right after the rule configuration.
	 */
	flow = rte_zmalloc(NULL, sizeof(struct rte_flow) +
			   (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
	if (!flow) {
		DPAA2_PMD_ERR("Memory allocation failure for rule configuration");
		goto creation_error;
	}
	key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));
	mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);

	/* Currently only single-field extracts (IP protocol, UDP destination
	 * port, Ethernet type) are supported. This can be extended to other
	 * fields using pattern->type; see the illustrative sketch at the end
	 * of this file.
	 */
	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));

	switch (pattern[0]->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
	{
		const struct rte_flow_item_ipv4 *spec, *mask;

		/* Extract and match on the IP protocol field */
		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO;
		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg_cfg.num_extracts = 1;

		spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec;
		mask = (const struct rte_flow_item_ipv4 *)pattern[0]->mask;
		memcpy(key_iova, &spec->hdr.next_proto_id, sizeof(uint8_t));
		/* Take the mask bits of the same field as the key */
		memcpy(mask_iova, &mask->hdr.next_proto_id, sizeof(uint8_t));
		key_size = sizeof(uint8_t);
	}
	break;

	case RTE_FLOW_ITEM_TYPE_UDP:
	{
		const struct rte_flow_item_udp *spec, *mask;
		uint16_t udp_dst_port, udp_dst_port_mask;

		/* Extract and match on the UDP destination port */
		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_UDP;
		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg_cfg.num_extracts = 1;

		spec = (const struct rte_flow_item_udp *)pattern[0]->spec;
		mask = (const struct rte_flow_item_udp *)pattern[0]->mask;
		udp_dst_port = rte_constant_bswap16(spec->hdr.dst_port);
		udp_dst_port_mask = rte_constant_bswap16(mask->hdr.dst_port);
		memcpy(key_iova, &udp_dst_port, sizeof(rte_be16_t));
		memcpy(mask_iova, &udp_dst_port_mask, sizeof(rte_be16_t));
		key_size = sizeof(uint16_t);
	}
	break;

	case RTE_FLOW_ITEM_TYPE_ETH:
	{
		const struct rte_flow_item_eth *spec, *mask;
		uint16_t eth_type, eth_type_mask;

		/* Extract and match on the Ethernet type field */
		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_ETH;
		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_ETH_TYPE;
		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg_cfg.num_extracts = 1;

		spec = (const struct rte_flow_item_eth *)pattern[0]->spec;
		mask = (const struct rte_flow_item_eth *)pattern[0]->mask;
		eth_type = rte_constant_bswap16(spec->type);
		eth_type_mask = rte_constant_bswap16(mask->type);
		memcpy(key_iova, &eth_type, sizeof(rte_be16_t));
		memcpy(mask_iova, &eth_type_mask, sizeof(rte_be16_t));
		key_size = sizeof(uint16_t);
	}
	break;

	default:
		DPAA2_PMD_ERR("Unsupported pattern type: %d",
			      pattern[0]->type);
		goto creation_error;
	}

	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova);
	if (ret) {
		DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
		goto creation_error;
	}

	ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				    dpdmux_dev->token,
				    (uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova)));
	if (ret) {
		DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)", ret);
		goto creation_error;
	}

	/* Now that the key extract parameters are set, configure the rule. */
	flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova));
	flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova));
	flow->rule.key_size = key_size;

	vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf);
	if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
		DPAA2_PMD_ERR("Invalid destination id");
		goto creation_error;
	}
	dpdmux_action.dest_if = vf_conf->id;

	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
					  dpdmux_dev->token, &flow->rule,
					  &dpdmux_action);
	if (ret) {
		DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)",
			      ret);
		goto creation_error;
	}

	return flow;

creation_error:
	rte_free(key_cfg_iova);
	rte_free(flow);
	return NULL;
}
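
/*
 * Illustrative usage sketch, not part of the driver: how an application
 * might call rte_pmd_dpaa2_mux_flow_create() to steer UDP traffic with a
 * given destination port to a DPDMUX interface. The dpdmux object id (0),
 * the port value (0x0d05) and the destination interface id (1) are
 * assumptions made purely for the example.
 */
static __rte_unused struct rte_flow *
dpaa2_mux_example_udp_flow(void)
{
	static struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(0x0d05),	/* assumed port */
	};
	static struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),	/* match all 16 bits */
	};
	static struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = &udp_spec,
		.mask = &udp_mask,
	};
	static struct rte_flow_action_vf vf = {
		.id = 1,	/* assumed destination DPDMUX interface */
	};
	static struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_VF,
		.conf = &vf,
	};
	struct rte_flow_item *pattern[] = { &item };
	struct rte_flow_action *actions[] = { &action };

	return rte_pmd_dpaa2_mux_flow_create(0, pattern, actions);
}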

static int
dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
			   struct vfio_device_info *obj_info __rte_unused,
			   int dpdmux_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpdmux_attr attr;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Allocate DPAA2 dpdmux handle */
	dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
		return -1;
	}

	/* Open the dpdmux object */
	dpdmux_dev->dpdmux.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id,
			  &dpdmux_dev->token);
	if (ret) {
		DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret);
		goto init_err;
	}

	ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				    dpdmux_dev->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret);
		goto init_err;
	}

	ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				    dpdmux_dev->token, 1);
	if (ret) {
		DPAA2_PMD_ERR("Setting default interface failed in %s",
			      __func__);
		goto init_err;
	}

	dpdmux_dev->dpdmux_id = dpdmux_id;
	dpdmux_dev->num_ifs = attr.num_ifs;

	TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next);

	return 0;

init_err:
	rte_free(dpdmux_dev);

	return -1;
}

static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = {
	.dev_type = DPAA2_MUX,
	.create = dpaa2_create_dpdmux_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj);
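
/*
 * Sketch of how the extract switch in rte_pmd_dpaa2_mux_flow_create()
 * could be extended to a further header field, as the comment there
 * suggests. This hypothetical helper fills a key-generation profile for
 * the TCP destination port; treating NET_PROT_TCP and NH_FLD_TCP_PORT_DST
 * from fsl_net.h as the right selectors for that field is an assumption
 * of this sketch.
 */
static __rte_unused void
dpaa2_mux_sketch_tcp_dst_extract(struct dpkg_profile_cfg *kg_cfg)
{
	memset(kg_cfg, 0, sizeof(*kg_cfg));
	/* Single full-field extract: TCP destination port */
	kg_cfg->extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg->extracts[0].extract.from_hdr.prot = NET_PROT_TCP;
	kg_cfg->extracts[0].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
	kg_cfg->extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg->num_extracts = 1;
}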