xref: /dpdk/drivers/net/dpaa2/dpaa2_mux.c (revision b7b78a089c454d42eb654360eeecb1e2f15e6cd8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2021 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <rte_fslmc.h>
20 #include <fsl_dpdmux.h>
21 #include <fsl_dpkg.h>
22 
23 #include <dpaa2_ethdev.h>
24 #include <dpaa2_pmd_logs.h>
25 
/* Per-DPDMUX-object context; one instance per DPDMUX object found on the
 * fslmc bus, linked into the global dpdmux_dev_list.
 */
struct dpaa2_dpdmux_dev {
	TAILQ_ENTRY(dpaa2_dpdmux_dev) next;
		/**< Pointer to Next device instance */
	struct fsl_mc_io dpdmux;  /**< Handle to DPDMUX portal object */
	uint16_t token;     /* Authentication token returned by dpdmux_open() */
	uint32_t dpdmux_id; /* HW ID for DPDMUX object */
	uint8_t num_ifs;   /* Number of interfaces in DPDMUX */
};
34 
/* A classification rule handed back to the application by
 * rte_pmd_dpaa2_mux_flow_create(); wraps the HW rule configuration
 * (key/mask IOVAs, key size, table entry index).
 */
struct rte_flow {
	struct dpdmux_rule_cfg rule;
};

/* Global list of all DPDMUX devices discovered on the fslmc bus */
TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
static struct dpdmux_dev_list dpdmux_dev_list =
	TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /*!< DPDMUX device list */
42 
43 static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id)
44 {
45 	struct dpaa2_dpdmux_dev *dpdmux_dev = NULL;
46 
47 	/* Get DPBP dev handle from list using index */
48 	TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) {
49 		if (dpdmux_dev->dpdmux_id == dpdmux_id)
50 			break;
51 	}
52 
53 	return dpdmux_dev;
54 }
55 
56 struct rte_flow *
57 rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
58 			      struct rte_flow_item *pattern[],
59 			      struct rte_flow_action *actions[])
60 {
61 	struct dpaa2_dpdmux_dev *dpdmux_dev;
62 	struct dpkg_profile_cfg kg_cfg;
63 	const struct rte_flow_action_vf *vf_conf;
64 	struct dpdmux_cls_action dpdmux_action;
65 	struct rte_flow *flow = NULL;
66 	void *key_iova, *mask_iova, *key_cfg_iova = NULL;
67 	uint8_t key_size = 0;
68 	int ret;
69 	static int i;
70 
71 	/* Find the DPDMUX from dpdmux_id in our list */
72 	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
73 	if (!dpdmux_dev) {
74 		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
75 		return NULL;
76 	}
77 
78 	key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
79 				   RTE_CACHE_LINE_SIZE);
80 	if (!key_cfg_iova) {
81 		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
82 		return NULL;
83 	}
84 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow) +
85 			   (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
86 	if (!flow) {
87 		DPAA2_PMD_ERR(
88 			"Memory allocation failure for rule configuration\n");
89 		goto creation_error;
90 	}
91 	key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));
92 	mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);
93 
94 	/* Currently taking only IP protocol as an extract type.
95 	 * This can be exended to other fields using pattern->type.
96 	 */
97 	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
98 
99 	switch (pattern[0]->type) {
100 	case RTE_FLOW_ITEM_TYPE_IPV4:
101 	{
102 		const struct rte_flow_item_ipv4 *spec;
103 
104 		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
105 		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO;
106 		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
107 		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
108 		kg_cfg.num_extracts = 1;
109 
110 		spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec;
111 		memcpy(key_iova, (const void *)(&spec->hdr.next_proto_id),
112 			sizeof(uint8_t));
113 		memcpy(mask_iova, pattern[0]->mask, sizeof(uint8_t));
114 		key_size = sizeof(uint8_t);
115 	}
116 	break;
117 
118 	case RTE_FLOW_ITEM_TYPE_UDP:
119 	{
120 		const struct rte_flow_item_udp *spec;
121 		uint16_t udp_dst_port;
122 
123 		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_UDP;
124 		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
125 		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
126 		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
127 		kg_cfg.num_extracts = 1;
128 
129 		spec = (const struct rte_flow_item_udp *)pattern[0]->spec;
130 		udp_dst_port = rte_constant_bswap16(spec->hdr.dst_port);
131 		memcpy((void *)key_iova, (const void *)&udp_dst_port,
132 							sizeof(rte_be16_t));
133 		memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
134 		key_size = sizeof(uint16_t);
135 	}
136 	break;
137 
138 	case RTE_FLOW_ITEM_TYPE_ETH:
139 	{
140 		const struct rte_flow_item_eth *spec;
141 		uint16_t eth_type;
142 
143 		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_ETH;
144 		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_ETH_TYPE;
145 		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
146 		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
147 		kg_cfg.num_extracts = 1;
148 
149 		spec = (const struct rte_flow_item_eth *)pattern[0]->spec;
150 		eth_type = rte_constant_bswap16(spec->type);
151 		memcpy((void *)key_iova, (const void *)&eth_type,
152 							sizeof(rte_be16_t));
153 		memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
154 		key_size = sizeof(uint16_t);
155 	}
156 	break;
157 
158 	case RTE_FLOW_ITEM_TYPE_RAW:
159 	{
160 		const struct rte_flow_item_raw *spec;
161 
162 		spec = (const struct rte_flow_item_raw *)pattern[0]->spec;
163 		kg_cfg.extracts[0].extract.from_data.offset = spec->offset;
164 		kg_cfg.extracts[0].extract.from_data.size = spec->length;
165 		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
166 		kg_cfg.num_extracts = 1;
167 		memcpy((void *)key_iova, (const void *)spec->pattern,
168 							spec->length);
169 		memcpy(mask_iova, pattern[0]->mask, spec->length);
170 
171 		key_size = spec->length;
172 	}
173 	break;
174 
175 	default:
176 		DPAA2_PMD_ERR("Not supported pattern type: %d",
177 				pattern[0]->type);
178 		goto creation_error;
179 	}
180 
181 	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova);
182 	if (ret) {
183 		DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
184 		goto creation_error;
185 	}
186 
187 	/* Multiple rules with same DPKG extracts (kg_cfg.extracts) like same
188 	 * offset and length values in raw is supported right now. Different
189 	 * values of kg_cfg may not work.
190 	 */
191 	if (i == 0) {
192 		ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
193 					    dpdmux_dev->token,
194 				(uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova)));
195 		if (ret) {
196 			DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)",
197 					ret);
198 			goto creation_error;
199 		}
200 	}
201 	/* As now our key extract parameters are set, let us configure
202 	 * the rule.
203 	 */
204 	flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova));
205 	flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova));
206 	flow->rule.key_size = key_size;
207 	flow->rule.entry_index = i++;
208 
209 	vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf);
210 	if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
211 		DPAA2_PMD_ERR("Invalid destination id\n");
212 		goto creation_error;
213 	}
214 	dpdmux_action.dest_if = vf_conf->id;
215 
216 	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
217 					  dpdmux_dev->token, &flow->rule,
218 					  &dpdmux_action);
219 	if (ret) {
220 		DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)",
221 			      ret);
222 		goto creation_error;
223 	}
224 
225 	return flow;
226 
227 creation_error:
228 	rte_free((void *)key_cfg_iova);
229 	rte_free((void *)flow);
230 	return NULL;
231 }
232 
233 int
234 rte_pmd_dpaa2_mux_rx_frame_len(uint32_t dpdmux_id, uint16_t max_rx_frame_len)
235 {
236 	struct dpaa2_dpdmux_dev *dpdmux_dev;
237 	int ret;
238 
239 	/* Find the DPDMUX from dpdmux_id in our list */
240 	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
241 	if (!dpdmux_dev) {
242 		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
243 		return -1;
244 	}
245 
246 	ret = dpdmux_set_max_frame_length(&dpdmux_dev->dpdmux,
247 			CMD_PRI_LOW, dpdmux_dev->token, max_rx_frame_len);
248 	if (ret) {
249 		DPAA2_PMD_ERR("DPDMUX:Unable to set mtu. check config %d", ret);
250 		return ret;
251 	}
252 
253 	DPAA2_PMD_INFO("dpdmux mtu set as %u",
254 			DPAA2_MAX_RX_PKT_LEN - RTE_ETHER_CRC_LEN);
255 
256 	return ret;
257 }
258 
259 static int
260 dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
261 			   struct vfio_device_info *obj_info __rte_unused,
262 			   int dpdmux_id)
263 {
264 	struct dpaa2_dpdmux_dev *dpdmux_dev;
265 	struct dpdmux_attr attr;
266 	int ret;
267 	uint16_t maj_ver;
268 	uint16_t min_ver;
269 
270 	PMD_INIT_FUNC_TRACE();
271 
272 	/* Allocate DPAA2 dpdmux handle */
273 	dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0);
274 	if (!dpdmux_dev) {
275 		DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
276 		return -1;
277 	}
278 
279 	/* Open the dpdmux object */
280 	dpdmux_dev->dpdmux.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
281 	ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id,
282 			  &dpdmux_dev->token);
283 	if (ret) {
284 		DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret);
285 		goto init_err;
286 	}
287 
288 	ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
289 				    dpdmux_dev->token, &attr);
290 	if (ret) {
291 		DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret);
292 		goto init_err;
293 	}
294 
295 	ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
296 				    dpdmux_dev->token, 1);
297 	if (ret) {
298 		DPAA2_PMD_ERR("setting default interface failed in %s",
299 			      __func__);
300 		goto init_err;
301 	}
302 
303 	ret = dpdmux_get_api_version(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
304 					&maj_ver, &min_ver);
305 	if (ret) {
306 		DPAA2_PMD_ERR("setting version failed in %s",
307 				__func__);
308 		goto init_err;
309 	}
310 
311 	/* The new dpdmux_set/get_resetable() API are available starting with
312 	 * DPDMUX_VER_MAJOR==6 and DPDMUX_VER_MINOR==6
313 	 */
314 	if (maj_ver >= 6 && min_ver >= 6) {
315 		ret = dpdmux_set_resetable(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
316 				dpdmux_dev->token,
317 				DPDMUX_SKIP_DEFAULT_INTERFACE |
318 				DPDMUX_SKIP_UNICAST_RULES |
319 				DPDMUX_SKIP_MULTICAST_RULES);
320 		if (ret) {
321 			DPAA2_PMD_ERR("setting default interface failed in %s",
322 				      __func__);
323 			goto init_err;
324 		}
325 	}
326 
327 	if (maj_ver >= 6 && min_ver >= 9) {
328 		struct dpdmux_error_cfg mux_err_cfg;
329 
330 		memset(&mux_err_cfg, 0, sizeof(mux_err_cfg));
331 		mux_err_cfg.error_action = DPDMUX_ERROR_ACTION_CONTINUE;
332 		mux_err_cfg.errors = DPDMUX_ERROR_DISC;
333 
334 		ret = dpdmux_if_set_errors_behavior(&dpdmux_dev->dpdmux,
335 				CMD_PRI_LOW,
336 				dpdmux_dev->token, dpdmux_id,
337 				&mux_err_cfg);
338 		if (ret) {
339 			DPAA2_PMD_ERR("dpdmux_if_set_errors_behavior %s err %d",
340 				      __func__, ret);
341 			goto init_err;
342 		}
343 	}
344 
345 	dpdmux_dev->dpdmux_id = dpdmux_id;
346 	dpdmux_dev->num_ifs = attr.num_ifs;
347 
348 	TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next);
349 
350 	return 0;
351 
352 init_err:
353 	if (dpdmux_dev)
354 		rte_free(dpdmux_dev);
355 
356 	return -1;
357 }
358 
/* fslmc bus object descriptor: dpaa2_create_dpdmux_device() is invoked for
 * each DPDMUX object found during bus scan.
 */
static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = {
	.dev_type = DPAA2_MUX,
	.create = dpaa2_create_dpdmux_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj);
365