/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include "ifpga_feature_dev.h"

#define PERF_OBJ_ROOT_ID	0xff

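/* Read the AFU interface clock counter of the global DPERF block. */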
static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
{
	struct feature_fme_dperf *dperf;
	struct feature_fme_dfpmon_clk_ctr clk;

	dperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_DPERF);
	clk.afu_interf_clock = readq(&dperf->clk);

	*clock = clk.afu_interf_clock;
	return 0;
}

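/* Report the revision field from the global DPERF feature header. */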
static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_fme_dperf *dperf;
	struct feature_header header;

	dperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_DPERF);
	header.csr = readq(&dperf->header);
	*revision = header.revision;

	return 0;
}

#define DPERF_TIMEOUT	30

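/*
 * Check whether fabric counters are collecting for the given port object:
 * with the port filter disabled only the root object matches, otherwise
 * only the port selected by the filter matches.
 */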
static bool fabric_pobj_is_enabled(int port_id,
				   struct feature_fme_dperf *dperf)
{
	struct feature_fme_dfpmon_fab_ctl ctl;

	ctl.csr = readq(&dperf->fab_ctl);

	if (ctl.port_filter == FAB_DISABLE_FILTER)
		return port_id == PERF_OBJ_ROOT_ID;

	return port_id == ctl.port_id;
}

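/*
 * Select a fabric event in the control register, wait until the counter
 * register reports the same event code, then return the counter value.
 * A disabled port object always reads back as zero.
 */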
static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
			       enum dperf_fab_events fab_event)
{
	struct feature_fme_dfpmon_fab_ctl ctl;
	struct feature_fme_dfpmon_fab_ctr ctr;
	struct feature_fme_dperf *dperf;
	u64 counter = 0;

	spinlock_lock(&fme->lock);
	dperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_DPERF);

	/* if the port object is disabled, force the counter to return zero. */
	if (!fabric_pobj_is_enabled(port_id, dperf))
		goto exit;

	ctl.csr = readq(&dperf->fab_ctl);
	ctl.fab_evtcode = fab_event;
	writeq(ctl.csr, &dperf->fab_ctl);

	ctr.event_code = fab_event;

	if (fpga_wait_register_field(event_code, ctr,
				     &dperf->fab_ctr, DPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched fabric event code in counter registers.\n");
		spinlock_unlock(&fme->lock);
		return -ETIMEDOUT;
	}

	ctr.csr = readq(&dperf->fab_ctr);
	counter = ctr.fab_cnt;
exit:
	spinlock_unlock(&fme->lock);
	return counter;
}

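/* Generate a getter that reads one fabric event counter for a port. */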
#define FAB_PORT_SHOW(name, event)					\
static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme,	\
					 u8 port_id, u64 *counter)	\
{									\
	*counter = read_fabric_counter(fme, port_id, event);		\
	return 0;							\
}

FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);

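/* Report whether fabric counters are enabled for the given port. */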
static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
					 u8 port_id, u64 *enable)
{
	struct feature_fme_dperf *dperf;
	int status;

	dperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_DPERF);

	status = fabric_pobj_is_enabled(port_id, dperf);
	*enable = (u64)status;

	return 0;
}

/*
 * Enabling the event counter for one port (or for all ports) in the fabric
 * automatically disables any other fabric event counter that was previously
 * enabled.
 */
static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
					 u8 port_id, u64 enable)
{
	struct feature_fme_dfpmon_fab_ctl ctl;
	struct feature_fme_dperf *dperf;
	bool state;

	state = !!enable;

	if (!state)
		return -EINVAL;

	dperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_DPERF);

	/* nothing to do if the port is already enabled. */
	if (fabric_pobj_is_enabled(port_id, dperf))
		return 0;

	spinlock_lock(&fme->lock);
	ctl.csr = readq(&dperf->fab_ctl);
	if (port_id == PERF_OBJ_ROOT_ID) {
		ctl.port_filter = FAB_DISABLE_FILTER;
	} else {
		ctl.port_filter = FAB_ENABLE_FILTER;
		ctl.port_id = port_id;
	}

	writeq(ctl.csr, &dperf->fab_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}

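/* Read the freeze bit from the fabric counter control register. */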
static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
	struct feature_fme_dperf *dperf;
	struct feature_fme_dfpmon_fab_ctl ctl;

	dperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_DPERF);
	ctl.csr = readq(&dperf->fab_ctl);
	*freeze = (u64)ctl.freeze;

	return 0;
}

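/* Set or clear the freeze bit in the fabric counter control register. */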
static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
	struct feature_fme_dperf *dperf;
	struct feature_fme_dfpmon_fab_ctl ctl;
	bool state;

	state = !!freeze;

	spinlock_lock(&fme->lock);
	dperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_DPERF);
	ctl.csr = readq(&dperf->fab_ctl);
	ctl.freeze = state;
	writeq(ctl.csr, &dperf->fab_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}

#define PERF_MAX_PORT_NUM	1

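/* The global DPERF feature needs no hardware setup or teardown. */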
static int fme_global_dperf_init(struct ifpga_feature *feature)
{
	UNUSED(feature);

	dev_info(NULL, "FME global_dperf Init.\n");

	return 0;
}

static void fme_global_dperf_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);

	dev_info(NULL, "FME global_dperf UInit.\n");
}

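/* get_prop dispatch for fabric properties: FREEZE, counters and ENABLE. */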
static int fme_dperf_fab_get_prop(struct ifpga_feature *feature,
				  struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x1: /* FREEZE */
		return fme_dperf_get_fab_freeze(fme, &prop->data);
	case 0x2: /* PCIE0_READ */
		return fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);
	case 0x3: /* PCIE0_WRITE */
		return fme_dperf_get_fab_port_pcie0_write(fme, sub,
							  &prop->data);
	case 0x4: /* MMIO_READ */
		return fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);
	case 0x5: /* MMIO_WRITE */
		return fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);
	case 0x6: /* ENABLE */
		return fme_dperf_get_fab_port_enable(fme, sub, &prop->data);
	}

	return -ENOENT;
}

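/* get_prop dispatch for root properties: CLOCK and REVISION. */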
static int fme_dperf_root_get_prop(struct ifpga_feature *feature,
				   struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub != PERF_PROP_SUB_UNUSED)
		return -ENOENT;

	switch (id) {
	case 0x1: /* CLOCK */
		return fme_dperf_get_clock(fme, &prop->data);
	case 0x2: /* REVISION */
		return fme_dperf_get_revision(fme, &prop->data);
	}

	return -ENOENT;
}

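/* Route get_prop to the fabric or root handler based on the TOP field. */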
static int fme_global_dperf_get_prop(struct ifpga_feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);

	switch (top) {
	case PERF_PROP_TOP_FAB:
		return fme_dperf_fab_get_prop(feature, prop);
	case PERF_PROP_TOP_UNUSED:
		return fme_dperf_root_get_prop(feature, prop);
	}

	return -ENOENT;
}

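/* set_prop dispatch for fabric properties: FREEZE (root only) and ENABLE. */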
static int fme_dperf_fab_set_prop(struct ifpga_feature *feature,
				  struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x1: /* FREEZE - a fab root-only property */
		if (sub != PERF_PROP_SUB_UNUSED)
			return -ENOENT;
		return fme_dperf_set_fab_freeze(fme, prop->data);
	case 0x6: /* ENABLE - valid for both fab root and sub objects */
		return fme_dperf_set_fab_port_enable(fme, sub, prop->data);
	}

	return -ENOENT;
}

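/* Route set_prop to the fabric handler; only TOP_FAB properties are writable. */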
static int fme_global_dperf_set_prop(struct ifpga_feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);

	switch (top) {
	case PERF_PROP_TOP_FAB:
		return fme_dperf_fab_set_prop(feature, prop);
	}

	return -ENOENT;
}

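/* Feature callbacks for the FME global DPERF private feature. */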
struct ifpga_feature_ops fme_global_dperf_ops = {
	.init = fme_global_dperf_init,
	.uinit = fme_global_dperf_uinit,
	.get_prop = fme_global_dperf_get_prop,
	.set_prop = fme_global_dperf_set_prop,
};