xref: /dpdk/drivers/net/dpaa2/dpaa2_mux.c (revision e11bdd37745229bf26b557305c07d118c3dbaad7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <rte_fslmc.h>
20 #include <fsl_dpdmux.h>
21 #include <fsl_dpkg.h>
22 
23 #include <dpaa2_ethdev.h>
24 #include <dpaa2_pmd_logs.h>
25 
/* Per-DPDMUX-object state, one node per HW object found on the fslmc bus. */
struct dpaa2_dpdmux_dev {
	TAILQ_ENTRY(dpaa2_dpdmux_dev) next;
		/**< Pointer to Next device instance */
	struct fsl_mc_io dpdmux;  /* Handle to the DPDMUX MC portal object */
	uint16_t token;     /* Auth token returned by dpdmux_open() */
	uint32_t dpdmux_id; /* HW ID for DPDMUX object */
	uint8_t num_ifs;    /* Number of interfaces in DPDMUX */
};
34 
/* Driver-private rte_flow: holds only the classification rule that was
 * programmed into the DPDMUX (key/mask IOVAs and key size).
 */
struct rte_flow {
	struct dpdmux_rule_cfg rule; /* HW classification rule */
};
38 
/* Global list of all DPDMUX devices discovered during bus probing. */
TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
static struct dpdmux_dev_list dpdmux_dev_list =
	TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /*!< DPDMUX device list */
42 
43 static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id)
44 {
45 	struct dpaa2_dpdmux_dev *dpdmux_dev = NULL;
46 
47 	/* Get DPBP dev handle from list using index */
48 	TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) {
49 		if (dpdmux_dev->dpdmux_id == dpdmux_id)
50 			break;
51 	}
52 
53 	return dpdmux_dev;
54 }
55 
56 struct rte_flow *
57 rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
58 			      struct rte_flow_item *pattern[],
59 			      struct rte_flow_action *actions[])
60 {
61 	struct dpaa2_dpdmux_dev *dpdmux_dev;
62 	struct dpkg_profile_cfg kg_cfg;
63 	const struct rte_flow_action_vf *vf_conf;
64 	struct dpdmux_cls_action dpdmux_action;
65 	struct rte_flow *flow = NULL;
66 	void *key_iova, *mask_iova, *key_cfg_iova = NULL;
67 	uint8_t key_size = 0;
68 	int ret;
69 
70 	/* Find the DPDMUX from dpdmux_id in our list */
71 	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
72 	if (!dpdmux_dev) {
73 		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
74 		return NULL;
75 	}
76 
77 	key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
78 				   RTE_CACHE_LINE_SIZE);
79 	if (!key_cfg_iova) {
80 		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
81 		return NULL;
82 	}
83 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow) +
84 			   (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
85 	if (!flow) {
86 		DPAA2_PMD_ERR(
87 			"Memory allocation failure for rule configuration\n");
88 		goto creation_error;
89 	}
90 	key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));
91 	mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);
92 
93 	/* Currently taking only IP protocol as an extract type.
94 	 * This can be exended to other fields using pattern->type.
95 	 */
96 	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
97 
98 	switch (pattern[0]->type) {
99 	case RTE_FLOW_ITEM_TYPE_IPV4:
100 	{
101 		const struct rte_flow_item_ipv4 *spec;
102 		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
103 		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO;
104 		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
105 		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
106 		kg_cfg.num_extracts = 1;
107 
108 		spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec;
109 		memcpy(key_iova, (const void *)(&spec->hdr.next_proto_id),
110 			sizeof(uint8_t));
111 		memcpy(mask_iova, pattern[0]->mask, sizeof(uint8_t));
112 		key_size = sizeof(uint8_t);
113 	}
114 	break;
115 
116 	case RTE_FLOW_ITEM_TYPE_ETH:
117 	{
118 		const struct rte_flow_item_eth *spec;
119 		uint16_t eth_type;
120 		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_ETH;
121 		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_ETH_TYPE;
122 		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
123 		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
124 		kg_cfg.num_extracts = 1;
125 
126 		spec = (const struct rte_flow_item_eth *)pattern[0]->spec;
127 		eth_type = rte_constant_bswap16(spec->type);
128 		memcpy((void *)key_iova, (const void *)&eth_type,
129 							sizeof(rte_be16_t));
130 		memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
131 		key_size = sizeof(uint16_t);
132 	}
133 	break;
134 
135 	default:
136 		DPAA2_PMD_ERR("Not supported pattern type: %d",
137 				pattern[0]->type);
138 		goto creation_error;
139 	}
140 
141 	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova);
142 	if (ret) {
143 		DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
144 		goto creation_error;
145 	}
146 
147 	ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
148 				    dpdmux_dev->token,
149 			(uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova)));
150 	if (ret) {
151 		DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)", ret);
152 		goto creation_error;
153 	}
154 
155 	/* As now our key extract parameters are set, let us configure
156 	 * the rule.
157 	 */
158 	flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova));
159 	flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova));
160 	flow->rule.key_size = key_size;
161 
162 	vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf);
163 	if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
164 		DPAA2_PMD_ERR("Invalid destination id\n");
165 		goto creation_error;
166 	}
167 	dpdmux_action.dest_if = vf_conf->id;
168 
169 	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
170 					  dpdmux_dev->token, &flow->rule,
171 					  &dpdmux_action);
172 	if (ret) {
173 		DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)",
174 			      ret);
175 		goto creation_error;
176 	}
177 
178 	return flow;
179 
180 creation_error:
181 	rte_free((void *)key_cfg_iova);
182 	rte_free((void *)flow);
183 	return NULL;
184 }
185 
186 static int
187 dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
188 			   struct vfio_device_info *obj_info __rte_unused,
189 			   int dpdmux_id)
190 {
191 	struct dpaa2_dpdmux_dev *dpdmux_dev;
192 	struct dpdmux_attr attr;
193 	int ret;
194 
195 	PMD_INIT_FUNC_TRACE();
196 
197 	/* Allocate DPAA2 dpdmux handle */
198 	dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0);
199 	if (!dpdmux_dev) {
200 		DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
201 		return -1;
202 	}
203 
204 	/* Open the dpdmux object */
205 	dpdmux_dev->dpdmux.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
206 	ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id,
207 			  &dpdmux_dev->token);
208 	if (ret) {
209 		DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret);
210 		goto init_err;
211 	}
212 
213 	ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
214 				    dpdmux_dev->token, &attr);
215 	if (ret) {
216 		DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret);
217 		goto init_err;
218 	}
219 
220 	ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
221 				    dpdmux_dev->token, 1);
222 	if (ret) {
223 		DPAA2_PMD_ERR("setting default interface failed in %s",
224 			      __func__);
225 		goto init_err;
226 	}
227 
228 	dpdmux_dev->dpdmux_id = dpdmux_id;
229 	dpdmux_dev->num_ifs = attr.num_ifs;
230 
231 	TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next);
232 
233 	return 0;
234 
235 init_err:
236 	if (dpdmux_dev)
237 		rte_free(dpdmux_dev);
238 
239 	return -1;
240 }
241 
/* Register the DPDMUX object type with the fslmc bus:
 * dpaa2_create_dpdmux_device() is invoked for every DPDMUX object
 * discovered during VFIO scanning.
 */
static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = {
	.dev_type = DPAA2_MUX,
	.create = dpaa2_create_dpdmux_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj);
248