/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox.
 */

#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "failsafe_private.h"

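/*
 * Allocate a fail-safe flow object and store a private copy of the flow
 * description (attributes, pattern items and actions) right after it, so
 * that the rule description remains available and can be re-applied on
 * sub-devices later.
 */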
static struct rte_flow *
fs_flow_allocate(const struct rte_flow_attr *attr,
		 const struct rte_flow_item *items,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	size_t fdsz;

	fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
	flow = rte_zmalloc(NULL,
			   sizeof(struct rte_flow) + fdsz,
			   RTE_CACHE_LINE_SIZE);
	if (flow == NULL) {
		ERROR("Could not allocate new flow");
		return NULL;
	}
	flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
	if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
		ERROR("Failed to copy flow description");
		rte_free(flow);
		return NULL;
	}
	return flow;
}

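/* Free a fail-safe flow object and clear the caller's reference. */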
static void
fs_flow_release(struct rte_flow **flow)
{
	rte_free(*flow);
	*flow = NULL;
}

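/*
 * Validate the flow rule against every active sub-device; fail on the
 * first sub-device that rejects it.
 */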
static int
fs_flow_validate(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item patterns[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_validate on sub_device %d", i);
		ret = rte_flow_validate(PORT_ID(sdev),
				attr, patterns, actions, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_validate failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

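/*
 * Create the flow rule on every active sub-device. On failure, destroy
 * whatever was already created so that no sub-device keeps a partial rule.
 */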
static struct rte_flow *
fs_flow_create(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item patterns[],
	       const struct rte_flow_action actions[],
	       struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	uint8_t i;

	flow = fs_flow_allocate(attr, patterns, actions);
	if (flow == NULL)
		return NULL;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
				attr, patterns, actions, error);
		if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
			ERROR("Failed to create flow on sub_device %d", i);
			goto err;
		}
	}
	TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
	return flow;
err:
	FOREACH_SUBDEV(sdev, i, dev) {
		if (flow->flows[i] != NULL)
			rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
	}
	fs_flow_release(&flow);
	return NULL;
}

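/*
 * Destroy the flow rule on every active sub-device. Errors are reported
 * but do not stop the loop; the first error code encountered is returned.
 */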
static int
fs_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (flow == NULL) {
		ERROR("Invalid flow");
		return -EINVAL;
	}
	ret = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int local_ret;

		if (flow->flows[i] == NULL)
			continue;
		local_ret = rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
		if ((local_ret = fs_err(sdev, local_ret))) {
			ERROR("Failed to destroy flow on sub_device %d: %d",
					i, local_ret);
			if (ret == 0)
				ret = local_ret;
		}
	}
	TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
	fs_flow_release(&flow);
	return ret;
}

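/*
 * Flush all flow rules on every active sub-device, then release every
 * entry of the local flow list.
 */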
static int
fs_flow_flush(struct rte_eth_dev *dev,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	void *tmp;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_flush on sub_device %d", i);
		ret = rte_flow_flush(PORT_ID(sdev), error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_flush failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
		TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
		fs_flow_release(&flow);
	}
	return 0;
}

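/*
 * Query a flow rule (e.g. a COUNT action) on the sub-device currently
 * used for Tx only.
 */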
static int
fs_flow_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      enum rte_flow_action_type type,
	      void *arg,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev != NULL) {
		int ret = rte_flow_query(PORT_ID(sdev),
					 flow->flows[SUB_ID(sdev)],
					 type, arg, error);

		if ((ret = fs_err(sdev, ret)))
			return ret;
		return 0;
	}
	WARN("No active sub_device to query about its flow");
	return -1;
}

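/*
 * Set flow isolation mode on every probed sub-device and record it in the
 * private data, so that it can also be applied to sub-devices probed later.
 */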
static int
fs_flow_isolate(struct rte_eth_dev *dev,
		int set,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state < DEV_PROBED)
			continue;
		DEBUG("Calling rte_flow_isolate on sub_device %d", i);
		if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
			WARN("flow isolation mode of sub_device %d in incoherent state.",
				i);
		ret = rte_flow_isolate(PORT_ID(sdev), set, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_isolate failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
		sdev->flow_isolated = set;
	}
	PRIV(dev)->flow_isolated = set;
	return 0;
}

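/*
 * Flow API operations exposed by the fail-safe PMD (typically handed to
 * applications through the driver's generic filter control path).
 */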
const struct rte_flow_ops fs_flow_ops = {
	.validate = fs_flow_validate,
	.create = fs_flow_create,
	.destroy = fs_flow_destroy,
	.flush = fs_flow_flush,
	.query = fs_flow_query,
	.isolate = fs_flow_isolate,
};