/dpdk/drivers/net/failsafe/failsafe_flow.c (revision 813f085f168728573987967049a585ff23e1f0f5)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "failsafe_private.h"

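/*
 * Allocate a fail-safe flow and store a copy of the rule description in
 * it, so the rule can later be replayed on sub-devices that come up after
 * its creation. rte_flow_conv() is called twice: once with a NULL
 * destination to compute the space required by the converted rule, then
 * again to copy attributes, pattern and actions into the flexible "rule"
 * member at the end of the structure.
 */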
static struct rte_flow *
fs_flow_allocate(const struct rte_flow_attr *attr,
		 const struct rte_flow_item *items,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
	if (ret < 0) {
		ERROR("Unable to process flow rule (%s): %s",
		      error.message ? error.message : "unspecified",
		      strerror(rte_errno));
		return NULL;
	}
	flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + ret,
			   RTE_CACHE_LINE_SIZE);
	if (flow == NULL) {
		ERROR("Could not allocate new flow");
		return NULL;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
			    &error);
	if (ret < 0) {
		ERROR("Failed to copy flow rule (%s): %s",
		      error.message ? error.message : "unspecified",
		      strerror(rte_errno));
		rte_free(flow);
		return NULL;
	}
	return flow;
}

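/*
 * Release a flow obtained from fs_flow_allocate() and clear the caller's
 * pointer to avoid accidental use after free.
 */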
static void
fs_flow_release(struct rte_flow **flow)
{
	rte_free(*flow);
	*flow = NULL;
}

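/*
 * Validate a rule against every active sub-device. The rule is reported
 * valid only if all active sub-devices accept it; the first failure
 * aborts the loop and its error code is returned.
 */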
static int
fs_flow_validate(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item patterns[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_validate on sub_device %d", i);
		ret = rte_flow_validate(PORT_ID(sdev),
				attr, patterns, actions, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_validate failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

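/*
 * Create a rule on every active sub-device and keep a local copy in the
 * fail-safe flow list, indexed by sub-device. On failure, rules already
 * created on other sub-devices are destroyed before returning.
 */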
static struct rte_flow *
fs_flow_create(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item patterns[],
	       const struct rte_flow_action actions[],
	       struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	uint8_t i;

	if (fs_lock(dev, 0) != 0)
		return NULL;
	flow = fs_flow_allocate(attr, patterns, actions);
	if (flow == NULL) {
		/* fs_flow_allocate() already logged the failure. */
		fs_unlock(dev, 0);
		return NULL;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
				attr, patterns, actions, error);
		if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
			ERROR("Failed to create flow on sub_device %d", i);
			goto err;
		}
	}
	TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
	fs_unlock(dev, 0);
	return flow;
err:
	FOREACH_SUBDEV(sdev, i, dev) {
		if (flow->flows[i] != NULL)
			rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
	}
	fs_flow_release(&flow);
	fs_unlock(dev, 0);
	return NULL;
}

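/*
 * Destroy a rule on every active sub-device holding an instance of it.
 * Unlike creation, an error does not abort the loop: all sub-devices are
 * processed and the first error code encountered is returned once the
 * local flow has been released.
 */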
static int
fs_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (flow == NULL) {
		ERROR("Invalid flow");
		return -EINVAL;
	}
	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int local_ret;

		if (flow->flows[i] == NULL)
			continue;
		local_ret = rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
		if ((local_ret = fs_err(sdev, local_ret))) {
			ERROR("Failed to destroy flow on sub_device %d: %d",
					i, local_ret);
			if (ret == 0)
				ret = local_ret;
		}
	}
	TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
	fs_flow_release(&flow);
	fs_unlock(dev, 0);
	return ret;
}

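/*
 * Flush all rules from every active sub-device, then empty the fail-safe
 * flow list. A failure on any sub-device aborts the operation and leaves
 * the local list untouched.
 */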
static int
fs_flow_flush(struct rte_eth_dev *dev,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	void *tmp;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_flush on sub_device %d", i);
		ret = rte_flow_flush(PORT_ID(sdev), error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_flush failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	RTE_TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
		TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
		fs_flow_release(&flow);
	}
	fs_unlock(dev, 0);
	return 0;
}

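/*
 * Forward a flow query (e.g. for the COUNT action) to the sub-device
 * currently used for TX. Results from other sub-devices are not
 * aggregated.
 */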
static int
fs_flow_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      const struct rte_flow_action *action,
	      void *arg,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;

	if (fs_lock(dev, 0) != 0)
		return -1;
	sdev = TX_SUBDEV(dev);
	if (sdev != NULL) {
		int ret = rte_flow_query(PORT_ID(sdev),
					 flow->flows[SUB_ID(sdev)],
					 action, arg, error);

		if ((ret = fs_err(sdev, ret))) {
			fs_unlock(dev, 0);
			return ret;
		}
		/* Query succeeded on the TX sub-device. */
		fs_unlock(dev, 0);
		return 0;
	}
	fs_unlock(dev, 0);
	WARN("No active sub_device to query about its flow");
	return -1;
}

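/*
 * Apply flow isolation mode to every probed sub-device and record the
 * setting per sub-device and at the fail-safe level, so it can be
 * re-applied to sub-devices plugged in later. A warning is issued when a
 * sub-device is found out of sync with the fail-safe port.
 */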
static int
fs_flow_isolate(struct rte_eth_dev *dev,
		int set,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state < DEV_PROBED)
			continue;
		DEBUG("Calling rte_flow_isolate on sub_device %d", i);
		if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
			WARN("Flow isolation mode of sub_device %d is in an incoherent state.",
				i);
		ret = rte_flow_isolate(PORT_ID(sdev), set, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_isolate failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->flow_isolated = set;
	}
	PRIV(dev)->flow_isolated = set;
	fs_unlock(dev, 0);
	return 0;
}

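/*
 * Generic flow API operations exposed for the fail-safe port as a whole;
 * each callback fans out to the matching rte_flow operation on the
 * relevant sub-device(s).
 */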
const struct rte_flow_ops fs_flow_ops = {
	.validate = fs_flow_validate,
	.create = fs_flow_create,
	.destroy = fs_flow_destroy,
	.flush = fs_flow_flush,
	.query = fs_flow_query,
	.isolate = fs_flow_isolate,
};
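/*
 * Usage sketch (application side, not part of this driver): rules created
 * through the regular rte_flow API on a fail-safe port go through the
 * callbacks above and are mirrored on its sub-devices.
 * "failsafe_port_id", "attr", "pattern" and "actions" below are assumed
 * to be set up by the application beforehand:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *	int ret;
 *
 *	ret = rte_flow_validate(failsafe_port_id, &attr, pattern,
 *				actions, &err);
 *	if (ret == 0)
 *		f = rte_flow_create(failsafe_port_id, &attr, pattern,
 *				    actions, &err);
 *	...
 *	rte_flow_destroy(failsafe_port_id, f, &err);
 */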