/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "failsafe_private.h"

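/*
 * Allocate a fail-safe flow and store a private copy of the rule
 * (attributes, pattern items and actions) right after it. rte_flow_conv()
 * is called once to compute the required size and once to perform the copy.
 */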
static struct rte_flow *
fs_flow_allocate(const struct rte_flow_attr *attr,
		 const struct rte_flow_item *items,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
	if (ret < 0) {
		ERROR("Unable to process flow rule (%s): %s",
		      error.message ? error.message : "unspecified",
		      strerror(rte_errno));
		return NULL;
	}
	flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + ret,
			   RTE_CACHE_LINE_SIZE);
	if (flow == NULL) {
		ERROR("Could not allocate new flow");
		return NULL;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
			    &error);
	if (ret < 0) {
		ERROR("Failed to copy flow rule (%s): %s",
		      error.message ? error.message : "unspecified",
		      strerror(rte_errno));
		rte_free(flow);
		return NULL;
	}
	return flow;
}

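/* Free a fail-safe flow and clear the caller's pointer. */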
static void
fs_flow_release(struct rte_flow **flow)
{
	rte_free(*flow);
	*flow = NULL;
}

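/*
 * Validate the rule against every active sub-device, stopping at the first
 * one that rejects it.
 */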
static int
fs_flow_validate(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item patterns[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_validate on sub_device %d", i);
		ret = rte_flow_validate(PORT_ID(sdev),
				attr, patterns, actions, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_validate failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

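/*
 * Create the flow on every active sub-device, keeping the per-sub-device
 * handles in flow->flows[]. On failure, destroy whatever was already
 * created and release the fail-safe flow.
 */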
static struct rte_flow *
fs_flow_create(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item patterns[],
	       const struct rte_flow_action actions[],
	       struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	uint8_t i;

	fs_lock(dev, 0);
	flow = fs_flow_allocate(attr, patterns, actions);
	if (flow == NULL) {
		/* Allocation or rule copy failed, nothing to roll back. */
		fs_unlock(dev, 0);
		return NULL;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
				attr, patterns, actions, error);
		if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
			ERROR("Failed to create flow on sub_device %d",
				i);
			goto err;
		}
	}
	TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
	fs_unlock(dev, 0);
	return flow;
err:
	/* Roll back the sub-device flows created so far. */
	FOREACH_SUBDEV(sdev, i, dev) {
		if (flow->flows[i] != NULL)
			rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
	}
	fs_flow_release(&flow);
	fs_unlock(dev, 0);
	return NULL;
}

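/*
 * Destroy the flow on every active sub-device holding an instance of it.
 * The first error encountered is reported, but destruction still proceeds
 * on the remaining sub-devices before the fail-safe flow is released.
 */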
static int
fs_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (flow == NULL) {
		ERROR("Invalid flow");
		return -EINVAL;
	}
	ret = 0;
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int local_ret;

		if (flow->flows[i] == NULL)
			continue;
		local_ret = rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
		if ((local_ret = fs_err(sdev, local_ret))) {
			ERROR("Failed to destroy flow on sub_device %d: %d",
					i, local_ret);
			if (ret == 0)
				ret = local_ret;
		}
	}
	TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
	fs_flow_release(&flow);
	fs_unlock(dev, 0);
	return ret;
}

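/*
 * Flush all flows on every active sub-device, then release every flow
 * tracked in the fail-safe flow list.
 */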
static int
fs_flow_flush(struct rte_eth_dev *dev,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	void *tmp;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_flush on sub_device %d", i);
		ret = rte_flow_flush(PORT_ID(sdev), error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_flush failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	RTE_TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
		TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
		fs_flow_release(&flow);
	}
	fs_unlock(dev, 0);
	return 0;
}

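/*
 * Query a flow through the current TX sub-device only, using the matching
 * per-sub-device handle stored at creation time.
 */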
static int
fs_flow_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      const struct rte_flow_action *action,
	      void *arg,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev != NULL) {
		int ret = rte_flow_query(PORT_ID(sdev),
					 flow->flows[SUB_ID(sdev)],
					 action, arg, error);

		if ((ret = fs_err(sdev, ret))) {
			fs_unlock(dev, 0);
			return ret;
		}
		/* The query succeeded, do not fall through to the
		 * "no sub_device" failure path below.
		 */
		fs_unlock(dev, 0);
		return 0;
	}
	fs_unlock(dev, 0);
	WARN("No active sub_device to query about its flow");
	return -1;
}

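/*
 * Apply flow isolation mode to every probed sub-device, warning when a
 * sub-device is out of sync with the fail-safe port, and record the new
 * state both per sub-device and at the fail-safe level.
 */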
static int
fs_flow_isolate(struct rte_eth_dev *dev,
		int set,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state < DEV_PROBED)
			continue;
		DEBUG("Calling rte_flow_isolate on sub_device %d", i);
		if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
			WARN("flow isolation mode of sub_device %d in incoherent state.",
				i);
		ret = rte_flow_isolate(PORT_ID(sdev), set, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_isolate failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->flow_isolated = set;
	}
	PRIV(dev)->flow_isolated = set;
	fs_unlock(dev, 0);
	return 0;
}

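/* rte_flow operations exposed by the fail-safe PMD. */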
const struct rte_flow_ops fs_flow_ops = {
	.validate = fs_flow_validate,
	.create = fs_flow_create,
	.destroy = fs_flow_destroy,
	.flush = fs_flow_flush,
	.query = fs_flow_query,
	.isolate = fs_flow_isolate,
};