/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "failsafe_private.h"

static struct rte_flow *
fs_flow_allocate(const struct rte_flow_attr *attr,
		 const struct rte_flow_item *items,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	size_t fdsz;

	/* First pass: compute the size needed for the flow description. */
	fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
	flow = rte_zmalloc(NULL,
			   sizeof(struct rte_flow) + fdsz,
			   RTE_CACHE_LINE_SIZE);
	if (flow == NULL) {
		ERROR("Could not allocate new flow");
		return NULL;
	}
	/* The description lives in the same allocation, right after the flow. */
	flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
	if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
		ERROR("Failed to copy flow description");
		rte_free(flow);
		return NULL;
	}
	return flow;
}

static void
fs_flow_release(struct rte_flow **flow)
{
	rte_free(*flow);
	*flow = NULL;
}

static int
fs_flow_validate(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item patterns[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_validate on sub_device %d", i);
		ret = rte_flow_validate(PORT_ID(sdev),
				attr, patterns, actions, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_validate failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
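/*
 * Create a flow on the fail-safe port by replicating it on every active
 * sub-device. If creation fails on any sub-device, the flows already
 * created on the others are destroyed so no partial state is left behind.
 */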
static struct rte_flow *
fs_flow_create(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item patterns[],
	       const struct rte_flow_action actions[],
	       struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	uint8_t i;

	flow = fs_flow_allocate(attr, patterns, actions);
	if (flow == NULL)
		return NULL;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
				attr, patterns, actions, error);
		if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
			ERROR("Failed to create flow on sub_device %d",
			      i);
			goto err;
		}
	}
	TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
	return flow;
err:
	/* Roll back the flows already created on other sub-devices. */
	FOREACH_SUBDEV(sdev, i, dev) {
		if (flow->flows[i] != NULL)
			rte_flow_destroy(PORT_ID(sdev),
					 flow->flows[i], error);
	}
	fs_flow_release(&flow);
	return NULL;
}

static int
fs_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (flow == NULL) {
		ERROR("Invalid flow");
		return -EINVAL;
	}
	ret = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int local_ret;

		if (flow->flows[i] == NULL)
			continue;
		local_ret = rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
		if ((local_ret = fs_err(sdev, local_ret))) {
			ERROR("Failed to destroy flow on sub_device %d: %d",
			      i, local_ret);
			/* Report the first error but keep destroying. */
			if (ret == 0)
				ret = local_ret;
		}
	}
	TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
	fs_flow_release(&flow);
	return ret;
}

static int
fs_flow_flush(struct rte_eth_dev *dev,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	void *tmp;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_flush on sub_device %d", i);
		ret = rte_flow_flush(PORT_ID(sdev), error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_flush failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
		TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
		fs_flow_release(&flow);
	}
	return 0;
}

static int
fs_flow_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      enum rte_flow_action_type type,
	      void *arg,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev != NULL) {
		int ret = rte_flow_query(PORT_ID(sdev),
					 flow->flows[SUB_ID(sdev)],
					 type, arg, error);

		if ((ret = fs_err(sdev, ret)))
			return ret;
		return 0;
	}
	WARN("No active sub_device to query about its flow");
	return -1;
}

static int
fs_flow_isolate(struct rte_eth_dev *dev,
		int set,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state < DEV_PROBED)
			continue;
		DEBUG("Calling rte_flow_isolate on sub_device %d", i);
		if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
			WARN("flow isolation mode of sub_device %d is in an incoherent state.",
				i);
		ret = rte_flow_isolate(PORT_ID(sdev), set, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_isolate failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
		sdev->flow_isolated = set;
	}
	PRIV(dev)->flow_isolated = set;
	return 0;
}
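/*
 * Generic flow operations exported by the fail-safe driver. Each callback
 * fans the request out to the active sub-devices, except query, which is
 * served by the preferred (TX) sub-device only.
 */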
const struct rte_flow_ops fs_flow_ops = {
	.validate = fs_flow_validate,
	.create = fs_flow_create,
	.destroy = fs_flow_destroy,
	.flush = fs_flow_flush,
	.query = fs_flow_query,
	.isolate = fs_flow_isolate,
};
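/*
 * Illustrative usage sketch (not part of the driver): an application
 * creates flows through the generic rte_flow API on the fail-safe port;
 * the request reaches fs_flow_create() above and is replicated on every
 * active sub-device. "failsafe_port_id", "attr", "pattern" and "actions"
 * below are placeholders for application-defined values.
 *
 *	struct rte_flow_error flow_error;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(failsafe_port_id, &attr, pattern, actions,
 *			    &flow_error);
 *	if (f == NULL)
 *		printf("flow creation failed: %s\n", flow_error.message);
 */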