xref: /dpdk/drivers/net/dpaa2/dpaa2_mux.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 NXP
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_eth_ctrl.h>
#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include <rte_fslmc.h>
#include <fsl_dpdmux.h>
#include <fsl_dpkg.h>

#include <dpaa2_ethdev.h>
#include <dpaa2_pmd_logs.h>

struct dpaa2_dpdmux_dev {
	TAILQ_ENTRY(dpaa2_dpdmux_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpdmux;  /**< Handle to DPDMUX portal object */
	uint16_t token;
	uint32_t dpdmux_id; /**< HW ID for DPDMUX object */
	uint8_t num_ifs;   /**< Number of interfaces in DPDMUX */
};

struct rte_flow {
	struct dpdmux_rule_cfg rule;
};

TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
static struct dpdmux_dev_list dpdmux_dev_list =
	TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /*!< DPDMUX device list */

static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev = NULL;

	/* Get DPDMUX dev handle from list using index */
	TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) {
		if (dpdmux_dev->dpdmux_id == dpdmux_id)
			break;
	}

	return dpdmux_dev;
}

struct rte_flow *
rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
			      struct rte_flow_item *pattern[],
			      struct rte_flow_action *actions[])
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpkg_profile_cfg kg_cfg;
	const struct rte_flow_item_ipv4 *spec, *mask;
	const struct rte_flow_action_vf *vf_conf;
	struct dpdmux_cls_action dpdmux_action;
	struct rte_flow *flow = NULL;
	void *key_iova, *mask_iova, *key_cfg_iova = NULL;
	int ret;

	if (pattern[0]->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		DPAA2_PMD_ERR("Unsupported pattern type: %d",
			      pattern[0]->type);
		return NULL;
	}

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return NULL;
	}

	key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
				   RTE_CACHE_LINE_SIZE);
	if (!key_cfg_iova) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return NULL;
	}

	/* Currently taking only IP protocol as an extract type.
	 * This can be extended to other fields using pattern->type.
	 */
	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.num_extracts = 1;
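	/* A single DPKG_FULL_FIELD extract of NH_FLD_IP_PROTO yields a
	 * one-byte key (the IPv4 protocol field); the key/mask copies and
	 * rule.key_size below are sized to match.
	 */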

	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova);
	if (ret) {
		DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
		goto creation_error;
	}

	ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				    dpdmux_dev->token,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova)));
	if (ret) {
		DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)", ret);
		goto creation_error;
	}

	/* Now that the key extract parameters are set, configure the rule. */
	flow = rte_zmalloc(NULL, sizeof(struct rte_flow) +
			   (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
	if (!flow) {
		DPAA2_PMD_ERR(
			"Memory allocation failure for rule configuration");
		goto creation_error;
	}
	key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));
	mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);
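	/* The flow handle and the rule's key/mask buffers share one
	 * allocation, laid out as:
	 *   [struct rte_flow][key area][mask area]
	 * so freeing the flow also releases the rule buffers.
	 */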

	/* Both the key and the mask are the one-byte protocol field */
	spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec;
	mask = (const struct rte_flow_item_ipv4 *)pattern[0]->mask;
	memcpy(key_iova, &spec->hdr.next_proto_id, sizeof(uint8_t));
	memcpy(mask_iova, &mask->hdr.next_proto_id, sizeof(uint8_t));

	flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova));
	flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova));
	flow->rule.key_size = sizeof(uint8_t);

	vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf);
	if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
		DPAA2_PMD_ERR("Invalid destination id");
		goto creation_error;
	}
	dpdmux_action.dest_if = vf_conf->id;
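	/* dest_if selects the DPDMUX interface the matching traffic is
	 * steered to; ids outside 1..num_ifs were rejected above.
	 */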

	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
					  dpdmux_dev->token, &flow->rule,
					  &dpdmux_action);
	if (ret) {
		DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)",
			      ret);
		goto creation_error;
	}

	/* The key-extract config buffer is only read while the MC commands
	 * above execute; release it now that the rule is installed.
	 */
	rte_free(key_cfg_iova);

	return flow;

creation_error:
	rte_free(key_cfg_iova);
	rte_free(flow);
	return NULL;
}
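
/*
 * A minimal usage sketch (hypothetical, not part of this file): steer UDP
 * traffic arriving on dpdmux object 0 to its interface 1. The item/action
 * values are illustrative only.
 *
 *	struct rte_flow_item_ipv4 spec = { .hdr.next_proto_id = IPPROTO_UDP };
 *	struct rte_flow_item_ipv4 mask = { .hdr.next_proto_id = 0xff };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_VF,
 *		.conf = &vf,
 *	};
 *	struct rte_flow_item *pattern[] = { &item };
 *	struct rte_flow_action *actions[] = { &action };
 *
 *	struct rte_flow *flow = rte_pmd_dpaa2_mux_flow_create(0, pattern,
 *							      actions);
 */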

static int
dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
			   struct vfio_device_info *obj_info __rte_unused,
			   int dpdmux_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpdmux_attr attr;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Allocate DPAA2 dpdmux handle */
	dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
		return -1;
	}

	/* Open the dpdmux object */
	dpdmux_dev->dpdmux.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
	ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id,
			  &dpdmux_dev->token);
	if (ret) {
		DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret);
		goto init_err;
	}

	ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				    dpdmux_dev->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret);
		goto close_err;
	}

	ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				    dpdmux_dev->token, 1);
	if (ret) {
		DPAA2_PMD_ERR("Setting default interface failed in %s",
			      __func__);
		goto close_err;
	}

	dpdmux_dev->dpdmux_id = dpdmux_id;
	dpdmux_dev->num_ifs = attr.num_ifs;

	TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next);

	return 0;

close_err:
	/* The object was opened successfully; close it before bailing out */
	dpdmux_close(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_dev->token);
init_err:
	rte_free(dpdmux_dev);

	return -1;
}

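/* Register with the fslmc bus so that, during probe, the bus invokes
 * dpaa2_create_dpdmux_device() for each DPDMUX object found in the
 * container and the device is added to dpdmux_dev_list.
 */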
static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = {
	.dev_type = DPAA2_MUX,
	.create = dpaa2_create_dpdmux_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj);