xref: /dpdk/drivers/net/cnxk/cnxk_ethdev_cman.c (revision 9fd66d79d3e27dfb28e04d10e0ee63dfc64820c9)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Marvell International Ltd.
 */

#include "cnxk_ethdev.h"

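/*
 * Default RED (Random Early Discard) thresholds, interpreted as a
 * percentage (0-100) of Rx queue/mempool usage.
 */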
#define CNXK_NIX_CMAN_RED_MIN_THRESH 75
#define CNXK_NIX_CMAN_RED_MAX_THRESH 95

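/* Report congestion management capabilities: RED on Rx queues and on their backing mempools. */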
int
cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info)
{
	RTE_SET_USED(dev);

	info->modes_supported = RTE_CMAN_RED;
	info->objs_supported = RTE_ETH_CMAN_OBJ_RX_QUEUE | RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL;

	return 0;
}

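/* Fill in the driver default configuration: RED on the Rx queue with the default thresholds above. */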
int
cnxk_nix_cman_config_init(struct rte_eth_dev *dev, struct rte_eth_cman_config *config)
{
	RTE_SET_USED(dev);

	memset(config, 0, sizeof(struct rte_eth_cman_config));

	config->obj = RTE_ETH_CMAN_OBJ_RX_QUEUE;
	config->mode = RTE_CMAN_RED;
	config->mode_param.red.min_th = CNXK_NIX_CMAN_RED_MIN_THRESH;
	config->mode_param.red.max_th = CNXK_NIX_CMAN_RED_MAX_THRESH;
	return 0;
}

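/* Sanity-check a user supplied configuration against the advertised capabilities and RED threshold limits. */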
static int
nix_cman_config_validate(struct rte_eth_dev *eth_dev, const struct rte_eth_cman_config *config)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_cman_info info;

	memset(&info, 0, sizeof(struct rte_eth_cman_info));
	cnxk_nix_cman_info_get(eth_dev, &info);

	if (!(config->obj & info.objs_supported)) {
		plt_err("Invalid object");
		return -EINVAL;
	}

	if (!(config->mode & info.modes_supported)) {
		plt_err("Invalid mode");
		return -EINVAL;
	}

	if (config->obj_param.rx_queue >= dev->nb_rxq) {
		plt_err("Invalid queue ID. Queue = %u", config->obj_param.rx_queue);
		return -EINVAL;
	}

	if (config->mode_param.red.min_th > CNXK_NIX_CMAN_RED_MAX_THRESH) {
		plt_err("Invalid RED minimum threshold. min_th = %u",
			config->mode_param.red.min_th);
		return -EINVAL;
	}

	if (config->mode_param.red.max_th > CNXK_NIX_CMAN_RED_MAX_THRESH) {
		plt_err("Invalid RED maximum threshold. max_th = %u",
			config->mode_param.red.max_th);
		return -EINVAL;
	}

	if (config->mode_param.red.min_th > config->mode_param.red.max_th) {
		plt_err("RED minimum threshold must be less than or equal to the maximum threshold");
		return -EINVAL;
	}

	return 0;
}

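/* Validate the requested configuration, translate it into per-RQ RED levels and program it via the ROC layer. */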
int
cnxk_nix_cman_config_set(struct rte_eth_dev *eth_dev, const struct rte_eth_cman_config *config)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint8_t drop, pass, shift;
	uint8_t min_th, max_th;
	struct roc_nix_cq *cq;
	struct roc_nix_rq *rq;
	bool is_mempool;
	uint64_t buf_cnt;
	int rc;

	rc = nix_cman_config_validate(eth_dev, config);
	if (rc)
		return rc;

	cq = &dev->cqs[config->obj_param.rx_queue];
	rq = &dev->rqs[config->obj_param.rx_queue];
	is_mempool = config->obj & RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL ? true : false;
	min_th = config->mode_param.red.min_th;
	max_th = config->mode_param.red.max_th;

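	/*
	 * Convert the percentage thresholds into the 8-bit pass/drop levels
	 * expected by hardware: based on mempool (aura) occupancy when the
	 * mempool object is targeted, otherwise on CQ descriptor usage.
	 */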
	if (is_mempool) {
		buf_cnt = roc_npa_aura_op_limit_get(rq->aura_handle);
		shift = plt_log2_u32(buf_cnt);
		shift = shift < 8 ? 0 : shift - 8;
		pass = (buf_cnt >> shift) - ((buf_cnt * min_th / 100) >> shift);
		drop = (buf_cnt >> shift) - ((buf_cnt * max_th / 100) >> shift);
		rq->red_pass = pass;
		rq->red_drop = drop;

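		/* Apply the same thresholds to the small packet buffer (SPB) aura when it is in use. */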
		if (rq->spb_ena) {
			buf_cnt = roc_npa_aura_op_limit_get(rq->spb_aura_handle);
			shift = plt_log2_u32(buf_cnt);
			shift = shift < 8 ? 0 : shift - 8;
			pass = (buf_cnt >> shift) - ((buf_cnt * min_th / 100) >> shift);
			drop = (buf_cnt >> shift) - ((buf_cnt * max_th / 100) >> shift);
			rq->spb_red_pass = pass;
			rq->spb_red_drop = drop;
		}
	} else {
		shift = plt_log2_u32(cq->nb_desc);
		shift = shift < 8 ? 0 : shift - 8;
		pass = 256 - ((cq->nb_desc * min_th / 100) >> shift);
		drop = 256 - ((cq->nb_desc * max_th / 100) >> shift);

		rq->xqe_red_pass = pass;
		rq->xqe_red_drop = drop;
	}

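	/* Program the updated RED levels for this RQ through the ROC layer. */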
	rc = roc_nix_rq_cman_config(nix, rq);
	if (rc)
		return rc;

	memcpy(&dev->cman_cfg, config, sizeof(struct rte_eth_cman_config));
	return 0;
}

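/* Return the last configuration successfully applied via cnxk_nix_cman_config_set(). */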
int
cnxk_nix_cman_config_get(struct rte_eth_dev *eth_dev, struct rte_eth_cman_config *config)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	memcpy(config, &dev->cman_cfg, sizeof(struct rte_eth_cman_config));
	return 0;
}