/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "failsafe_private.h"

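/*
 * Duplicate the caller's rule description into a failsafe-owned rte_flow.
 * rte_flow_conv() is called twice: first to compute the storage needed,
 * then to copy attributes, pattern and actions into the allocated object.
 */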
static struct rte_flow *
fs_flow_allocate(const struct rte_flow_attr *attr,
		 const struct rte_flow_item *items,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
	if (ret < 0) {
		ERROR("Unable to process flow rule (%s): %s",
		      error.message ? error.message : "unspecified",
		      strerror(rte_errno));
		return NULL;
	}
	flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + ret,
			   RTE_CACHE_LINE_SIZE);
	if (flow == NULL) {
		ERROR("Could not allocate new flow");
		return NULL;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
			    &error);
	if (ret < 0) {
		ERROR("Failed to copy flow rule (%s): %s",
		      error.message ? error.message : "unspecified",
		      strerror(rte_errno));
		rte_free(flow);
		return NULL;
	}
	return flow;
}

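/* Release a failsafe flow object and reset the caller's pointer. */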
static void
fs_flow_release(struct rte_flow **flow)
{
	rte_free(*flow);
	*flow = NULL;
}

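/*
 * Validate the rule on every active sub-device; stop at the first error
 * that fs_err() does not filter out.
 */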
static int
fs_flow_validate(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item patterns[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_validate on sub_device %d", i);
		ret = rte_flow_validate(PORT_ID(sdev),
				attr, patterns, actions, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_validate failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

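/*
 * Create the rule on every active sub-device, keeping each sub-flow handle
 * in flow->flows[]. On failure, the sub-flows created so far are destroyed
 * and the failsafe flow is released.
 */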
static struct rte_flow *
fs_flow_create(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item patterns[],
	       const struct rte_flow_action actions[],
	       struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	uint8_t i;

	if (fs_lock(dev, 0) != 0)
		return NULL;
	flow = fs_flow_allocate(attr, patterns, actions);
	if (flow == NULL) {
		/* fs_flow_allocate() already logged the failure. */
		fs_unlock(dev, 0);
		return NULL;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
				attr, patterns, actions, error);
		if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
			ERROR("Failed to create flow on sub_device %d",
				i);
			goto err;
		}
	}
	TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
	fs_unlock(dev, 0);
	return flow;
err:
	FOREACH_SUBDEV(sdev, i, dev) {
		if (flow->flows[i] != NULL)
			rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
	}
	fs_flow_release(&flow);
	fs_unlock(dev, 0);
	return NULL;
}

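/*
 * Destroy the rule on every active sub-device, then drop it from the
 * failsafe flow list. Every sub-flow is attempted; the first error that
 * fs_err() does not filter out is returned.
 */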
static int
fs_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (flow == NULL) {
		ERROR("Invalid flow");
		return -EINVAL;
	}
	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int local_ret;

		if (flow->flows[i] == NULL)
			continue;
		local_ret = rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
		if ((local_ret = fs_err(sdev, local_ret))) {
			ERROR("Failed to destroy flow on sub_device %d: %d",
					i, local_ret);
			if (ret == 0)
				ret = local_ret;
		}
	}
	TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
	fs_flow_release(&flow);
	fs_unlock(dev, 0);
	return ret;
}

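/*
 * Flush all rules on every active sub-device, then release every flow
 * object tracked in the failsafe flow list.
 */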
static int
fs_flow_flush(struct rte_eth_dev *dev,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	void *tmp;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_flush on sub_device %d", i);
		ret = rte_flow_flush(PORT_ID(sdev), error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_flush failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	RTE_TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
		TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
		fs_flow_release(&flow);
	}
	fs_unlock(dev, 0);
	return 0;
}

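/* Flow queries are served by the current TX sub-device only. */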
static int
fs_flow_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      const struct rte_flow_action *action,
	      void *arg,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;

	if (fs_lock(dev, 0) != 0)
		return -1;
	sdev = TX_SUBDEV(dev);
	if (sdev != NULL) {
		int ret = rte_flow_query(PORT_ID(sdev),
					 flow->flows[SUB_ID(sdev)],
					 action, arg, error);

		if ((ret = fs_err(sdev, ret))) {
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	WARN("No active sub_device to query about its flow");
	return -1;
}

static int
fs_flow_isolate(struct rte_eth_dev *dev,
		int set,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state < DEV_PROBED)
			continue;
		DEBUG("Calling rte_flow_isolate on sub_device %d", i);
		if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
			WARN("flow isolation mode of sub_device %d in incoherent state.",
				i);
		ret = rte_flow_isolate(PORT_ID(sdev), set, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_isolate failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->flow_isolated = set;
	}
	PRIV(dev)->flow_isolated = set;
	fs_unlock(dev, 0);
	return 0;
}

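/* rte_flow operations exposed by the failsafe PMD. */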
const struct rte_flow_ops fs_flow_ops = {
	.validate = fs_flow_validate,
	.create = fs_flow_create,
	.destroy = fs_flow_destroy,
	.flush = fs_flow_flush,
	.query = fs_flow_query,
	.isolate = fs_flow_isolate,
};