/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox.
 */

#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "failsafe_private.h"

/* Allocate a flow handle along with a private copy of its description,
 * so the rule can be replayed on sub-devices plugged in later. */
static struct rte_flow *
fs_flow_allocate(const struct rte_flow_attr *attr,
		 const struct rte_flow_item *items,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	size_t fdsz;

	fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
	flow = rte_zmalloc(NULL,
			   sizeof(struct rte_flow) + fdsz,
			   RTE_CACHE_LINE_SIZE);
	if (flow == NULL) {
		ERROR("Could not allocate new flow");
		return NULL;
	}
	flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
	if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
		ERROR("Failed to copy flow description");
		rte_free(flow);
		return NULL;
	}
	return flow;
}

static void
fs_flow_release(struct rte_flow **flow)
{
	rte_free(*flow);
	*flow = NULL;
}

/* Validate the rule on every active sub-device; fail on the first error. */
static int
fs_flow_validate(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item patterns[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_validate on sub_device %d", i);
		ret = rte_flow_validate(PORT_ID(sdev),
					attr, patterns, actions, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_validate failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

/* Create the rule on every active sub-device, rolling back on failure. */
static struct rte_flow *
fs_flow_create(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item patterns[],
	       const struct rte_flow_action actions[],
	       struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	uint8_t i;

	fs_lock(dev, 0);
	flow = fs_flow_allocate(attr, patterns, actions);
	if (flow == NULL) {
		fs_unlock(dev, 0);
		return NULL;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
						 attr, patterns, actions, error);
		if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
			ERROR("Failed to create flow on sub_device %d", i);
			goto err;
		}
	}
	TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
	fs_unlock(dev, 0);
	return flow;
err:
	/* Remove the rules already installed on other sub-devices. */
	FOREACH_SUBDEV(sdev, i, dev) {
		if (flow->flows[i] != NULL)
			rte_flow_destroy(PORT_ID(sdev),
					 flow->flows[i], error);
	}
	fs_flow_release(&flow);
	fs_unlock(dev, 0);
	return NULL;
}

/* Destroy the rule on every active sub-device; attempt all of them and
 * report the first error encountered. */
static int
fs_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (flow == NULL) {
		ERROR("Invalid flow");
		return -EINVAL;
	}
	ret = 0;
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int local_ret;

		if (flow->flows[i] == NULL)
			continue;
		local_ret = rte_flow_destroy(PORT_ID(sdev),
					     flow->flows[i], error);
		if ((local_ret = fs_err(sdev, local_ret))) {
			ERROR("Failed to destroy flow on sub_device %d: %d",
			      i, local_ret);
			if (ret == 0)
				ret = local_ret;
		}
	}
	TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
	fs_flow_release(&flow);
	fs_unlock(dev, 0);
	return ret;
}
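/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * drive the handlers above through the generic rte_flow API on the
 * fail-safe port, for example to install a drop-all rule. The
 * "failsafe_port_id" name is a hypothetical port id:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error flow_error;
 *	struct rte_flow *f = rte_flow_create(failsafe_port_id, &attr,
 *					     pattern, actions, &flow_error);
 *
 * fs_flow_create() then fans the request out to every active sub-device
 * and keeps a private copy of the rule for later replay.
 */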
/* Flush all rules on every active sub-device, then drop the local list. */
static int
fs_flow_flush(struct rte_eth_dev *dev,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	void *tmp;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_flush on sub_device %d", i);
		ret = rte_flow_flush(PORT_ID(sdev), error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_flush failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
		TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
		fs_flow_release(&flow);
	}
	fs_unlock(dev, 0);
	return 0;
}

/* Flow queries are served by the current TX sub-device only. */
static int
fs_flow_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      enum rte_flow_action_type type,
	      void *arg,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev != NULL) {
		int ret = rte_flow_query(PORT_ID(sdev),
					 flow->flows[SUB_ID(sdev)],
					 type, arg, error);

		if ((ret = fs_err(sdev, ret))) {
			fs_unlock(dev, 0);
			return ret;
		}
		fs_unlock(dev, 0);
		return 0;
	}
	fs_unlock(dev, 0);
	WARN("No active sub_device to query about its flow");
	return -1;
}

/* Apply isolation mode to every probed sub-device and record it, so it
 * can be restored when a sub-device is plugged in. */
static int
fs_flow_isolate(struct rte_eth_dev *dev,
		int set,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state < DEV_PROBED)
			continue;
		DEBUG("Calling rte_flow_isolate on sub_device %d", i);
		if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
			WARN("flow isolation mode of sub_device %d in incoherent state.",
			     i);
		ret = rte_flow_isolate(PORT_ID(sdev), set, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_isolate failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->flow_isolated = set;
	}
	PRIV(dev)->flow_isolated = set;
	fs_unlock(dev, 0);
	return 0;
}

/* Generic flow operations exposed through the fail-safe ethdev. */
const struct rte_flow_ops fs_flow_ops = {
	.validate = fs_flow_validate,
	.create = fs_flow_create,
	.destroy = fs_flow_destroy,
	.flush = fs_flow_flush,
	.query = fs_flow_query,
	.isolate = fs_flow_isolate,
};
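/*
 * Exposure sketch (illustrative only): in this DPDK generation, a driver
 * hands its rte_flow_ops to applications through the filter_ctrl ethdev
 * callback. A minimal handler wired into the fail-safe dev_ops would look
 * roughly like the following; the actual wiring lives outside this file:
 *
 *	static int
 *	fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
 *		       enum rte_filter_type type,
 *		       enum rte_filter_op op,
 *		       void *arg)
 *	{
 *		if (type == RTE_ETH_FILTER_GENERIC &&
 *		    op == RTE_ETH_FILTER_GET) {
 *			*(const void **)arg = &fs_flow_ops;
 *			return 0;
 *		}
 *		return -ENOTSUP;
 *	}
 */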