/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox.
 */

#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "failsafe_private.h"

static struct rte_flow *
fs_flow_allocate(const struct rte_flow_attr *attr,
		 const struct rte_flow_item *items,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	size_t fdsz;

	fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
	flow = rte_zmalloc(NULL,
			   sizeof(struct rte_flow) + fdsz,
			   RTE_CACHE_LINE_SIZE);
	if (flow == NULL) {
		ERROR("Could not allocate new flow");
		return NULL;
	}
	flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
	if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
		ERROR("Failed to copy flow description");
		rte_free(flow);
		return NULL;
	}
	return flow;
}

static void
fs_flow_release(struct rte_flow **flow)
{
	rte_free(*flow);
	*flow = NULL;
}

static int
fs_flow_validate(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item patterns[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_validate on sub_device %d", i);
		ret = rte_flow_validate(PORT_ID(sdev),
				attr, patterns, actions, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_validate failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static struct rte_flow *
fs_flow_create(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item patterns[],
	       const struct rte_flow_action actions[],
	       struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	uint8_t i;

	flow = fs_flow_allocate(attr, patterns, actions);
	if (flow == NULL)
		return NULL;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
				attr, patterns, actions, error);
		if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
			ERROR("Failed to create flow on sub_device %d",
				i);
			goto err;
		}
	}
	TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
	return flow;
err:
	/* Roll back any sub-device flows created before the failure. */
	FOREACH_SUBDEV(sdev, i, dev) {
		if (flow->flows[i] != NULL)
			rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
	}
	fs_flow_release(&flow);
	return NULL;
}

static int
fs_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (flow == NULL) {
		ERROR("Invalid flow");
		return -EINVAL;
	}
	ret = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int local_ret;

		if (flow->flows[i] == NULL)
			continue;
		local_ret = rte_flow_destroy(PORT_ID(sdev),
				flow->flows[i], error);
		if ((local_ret = fs_err(sdev, local_ret))) {
			ERROR("Failed to destroy flow on sub_device %d: %d",
					i, local_ret);
			/* Remember the first error but keep destroying. */
			if (ret == 0)
				ret = local_ret;
		}
	}
	TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
	fs_flow_release(&flow);
	return ret;
}

static int
fs_flow_flush(struct rte_eth_dev *dev,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;
	struct rte_flow *flow;
	void *tmp;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_flow_flush on sub_device %d", i);
		ret = rte_flow_flush(PORT_ID(sdev), error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_flush failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
		TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
		fs_flow_release(&flow);
	}
	return 0;
}

static int
fs_flow_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      enum rte_flow_action_type type,
	      void *arg,
	      struct rte_flow_error *error)
{
	struct sub_device *sdev;

	/* The query is only forwarded to the active TX sub-device. */
	sdev = TX_SUBDEV(dev);
	if (sdev != NULL) {
		int ret = rte_flow_query(PORT_ID(sdev),
					 flow->flows[SUB_ID(sdev)],
					 type, arg, error);

		if ((ret = fs_err(sdev, ret)))
			return ret;
		return 0;
	}
	WARN("No active sub_device to query about its flow");
	return -1;
}

static int
fs_flow_isolate(struct rte_eth_dev *dev,
		int set,
		struct rte_flow_error *error)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state < DEV_PROBED)
			continue;
		DEBUG("Calling rte_flow_isolate on sub_device %d", i);
		if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
			WARN("flow isolation mode of sub_device %d in incoherent state.",
				i);
		ret = rte_flow_isolate(PORT_ID(sdev), set, error);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_flow_isolate failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
		sdev->flow_isolated = set;
	}
	PRIV(dev)->flow_isolated = set;
	return 0;
}

const struct rte_flow_ops fs_flow_ops = {
	.validate = fs_flow_validate,
	.create = fs_flow_create,
	.destroy = fs_flow_destroy,
	.flush = fs_flow_flush,
	.query = fs_flow_query,
	.isolate = fs_flow_isolate,
};