/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include "ifpga_feature_dev.h"

/* ID addressing the root (all-ports) perf object rather than a single port */
#define PERF_OBJ_ROOT_ID	0xff

static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_clk_ctr clk;

	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	clk.afu_interf_clock = readq(&iperf->clk);

	*clock = clk.afu_interf_clock;
	return 0;
}

static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_fme_iperf *iperf;
	struct feature_header header;

	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	header.csr = readq(&iperf->header);
	*revision = header.revision;

	return 0;
}

static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_ch_ctl ctl;

	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->ch_ctl);
	*freeze = (u64)ctl.freeze;
	return 0;
}

static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_ch_ctl ctl;
	bool state;

	state = !!freeze;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->ch_ctl);
	ctl.freeze = state;
	writeq(ctl.csr, &iperf->ch_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}
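
/*
 * A minimal usage sketch (not part of this driver): a caller would typically
 * freeze the cache counters, sample related counters, then unfreeze, so that
 * all values come from one consistent snapshot. "hw" is a hypothetical,
 * already-initialized FME handle.
 *
 *	u64 hits, misses;
 *
 *	fme_iperf_set_cache_freeze(hw, 1);
 *	fme_iperf_get_cache_read_hit(hw, &hits);
 *	fme_iperf_get_cache_read_miss(hw, &misses);
 *	fme_iperf_set_cache_freeze(hw, 0);
 */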

#define IPERF_TIMEOUT	30

static u64 read_cache_counter(struct ifpga_fme_hw *fme,
			      u8 channel, enum iperf_cache_events event)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_ch_ctl ctl;
	struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
	u64 counter;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);

	/* set channel access type and cache event code. */
	ctl.csr = readq(&iperf->ch_ctl);
	ctl.cci_chsel = channel;
	ctl.cache_event = event;
	writeq(ctl.csr, &iperf->ch_ctl);

	/* wait until the counter registers report the requested event type */
	ctr0.event_code = event;

	if (fpga_wait_register_field(event_code, ctr0,
				     &iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
		spinlock_unlock(&fme->lock);
		return -ETIMEDOUT;
	}

	ctr0.csr = readq(&iperf->ch_ctr0);
	ctr1.csr = readq(&iperf->ch_ctr1);
	counter = ctr0.cache_counter + ctr1.cache_counter;
	spinlock_unlock(&fme->lock);

	return counter;
}
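
/*
 * Note on the pattern above (shared by the other read_*_counter helpers):
 * the event code is written to the control register, then
 * fpga_wait_register_field() appears to poll the counter register until its
 * event_code field matches the requested event, giving up after
 * IPERF_TIMEOUT. On timeout these helpers return -ETIMEDOUT through a u64
 * return type, so a caller sees a very large counter value rather than a
 * negative errno.
 */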

#define CACHE_SHOW(name, type, event)					\
static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme,		\
					u64 *counter)			\
{									\
	*counter = read_cache_counter(fme, type, event);		\
	return 0;							\
}
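
/*
 * For illustration, CACHE_SHOW(read_hit, CACHE_CHANNEL_RD,
 * IPERF_CACHE_RD_HIT) below expands to:
 *
 *	static int fme_iperf_get_cache_read_hit(struct ifpga_fme_hw *fme,
 *						u64 *counter)
 *	{
 *		*counter = read_cache_counter(fme, CACHE_CHANNEL_RD,
 *					      IPERF_CACHE_RD_HIT);
 *		return 0;
 *	}
 *
 * The VTD_SIP_SHOW, VTD_PORT_SHOW and FAB_PORT_SHOW macros further down
 * follow the same pattern for their respective counter families.
 */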

CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
	   IPERF_CACHE_DATA_WR_PORT_CONTEN);
CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
	   IPERF_CACHE_TAG_WR_PORT_CONTEN);

static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
	struct feature_fme_ifpmon_vtd_ctl ctl;
	struct feature_fme_iperf *iperf;

	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->vtd_ctl);
	*freeze = (u64)ctl.freeze;

	return 0;
}

static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
	struct feature_fme_ifpmon_vtd_ctl ctl;
	struct feature_fme_iperf *iperf;
	bool state;

	state = !!freeze;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->vtd_ctl);
	ctl.freeze = state;
	writeq(ctl.csr, &iperf->vtd_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}

static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
				  enum iperf_vtd_sip_events event)
{
	struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
	struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
	struct feature_fme_iperf *iperf;
	u64 counter;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
	sip_ctl.vtd_evtcode = event;
	writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);

	sip_ctr.event_code = event;

	if (fpga_wait_register_field(event_code, sip_ctr,
				     &iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
		spinlock_unlock(&fme->lock);
		return -ETIMEDOUT;
	}

	sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
	counter = sip_ctr.vtd_counter;
	spinlock_unlock(&fme->lock);

	return counter;
}

#define VTD_SIP_SHOW(name, event)					\
static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme,	\
						u64 *counter)		\
{									\
	*counter = read_iommu_sip_counter(fme, event);			\
	return 0;							\
}

VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);

static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
			      enum iperf_vtd_events base_event)
{
	struct feature_fme_ifpmon_vtd_ctl ctl;
	struct feature_fme_ifpmon_vtd_ctr ctr;
	struct feature_fme_iperf *iperf;
	enum iperf_vtd_events event = base_event + port_id;
	u64 counter;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->vtd_ctl);
	ctl.vtd_evtcode = event;
	writeq(ctl.csr, &iperf->vtd_ctl);

	ctr.event_code = event;

	if (fpga_wait_register_field(event_code, ctr,
				     &iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
		spinlock_unlock(&fme->lock);
		return -ETIMEDOUT;
	}

	ctr.csr = readq(&iperf->vtd_ctr);
	counter = ctr.vtd_counter;
	spinlock_unlock(&fme->lock);

	return counter;
}

#define VTD_PORT_SHOW(name, base_event)					\
static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme,	\
				u8 port_id, u64 *counter)		\
{									\
	*counter = read_iommu_counter(fme, port_id, base_event);	\
	return 0;							\
}

VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);

/*
 * A fabric perf object is "enabled" when it currently owns the counters:
 * with the port filter disabled that is only the root (all-ports) object,
 * otherwise only the port selected in the control register.
 */
static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
{
	struct feature_fme_ifpmon_fab_ctl ctl;

	ctl.csr = readq(&iperf->fab_ctl);

	if (ctl.port_filter == FAB_DISABLE_FILTER)
		return port_id == PERF_OBJ_ROOT_ID;

	return port_id == ctl.port_id;
}

static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
			       enum iperf_fab_events fab_event)
{
	struct feature_fme_ifpmon_fab_ctl ctl;
	struct feature_fme_ifpmon_fab_ctr ctr;
	struct feature_fme_iperf *iperf;
	u64 counter = 0;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);

	/* if the object is disabled, force the counter to return zero. */
	if (!fabric_pobj_is_enabled(port_id, iperf))
		goto exit;

	ctl.csr = readq(&iperf->fab_ctl);
	ctl.fab_evtcode = fab_event;
	writeq(ctl.csr, &iperf->fab_ctl);

	ctr.event_code = fab_event;

	if (fpga_wait_register_field(event_code, ctr,
				     &iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched fabric event type in counter registers.\n");
		spinlock_unlock(&fme->lock);
		return -ETIMEDOUT;
	}

	ctr.csr = readq(&iperf->fab_ctr);
	counter = ctr.fab_cnt;
exit:
	spinlock_unlock(&fme->lock);
	return counter;
}

#define FAB_PORT_SHOW(name, event)					\
static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme,	\
				u8 port_id, u64 *counter)		\
{									\
	*counter = read_fabric_counter(fme, port_id, event);		\
	return 0;							\
}

FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);

static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
					 u8 port_id, u64 *enable)
{
	struct feature_fme_iperf *iperf;
	int status;

	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);

	status = fabric_pobj_is_enabled(port_id, iperf);
	*enable = (u64)status;

	return 0;
}

/*
 * Enabling the event counter for one port (or for all ports via the root
 * object) automatically disables any other fabric event counter that was
 * previously enabled.
 */
static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
					 u8 port_id, u64 enable)
{
	struct feature_fme_ifpmon_fab_ctl ctl;
	struct feature_fme_iperf *iperf;
	bool state;

	state = !!enable;

	if (!state)
		return -EINVAL;

	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);

	/* nothing to do if the object is already enabled. */
	if (fabric_pobj_is_enabled(port_id, iperf))
		return 0;

	spinlock_lock(&fme->lock);
	ctl.csr = readq(&iperf->fab_ctl);
	if (port_id == PERF_OBJ_ROOT_ID) {
		ctl.port_filter = FAB_DISABLE_FILTER;
	} else {
		ctl.port_filter = FAB_ENABLE_FILTER;
		ctl.port_id = port_id;
	}

	writeq(ctl.csr, &iperf->fab_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}
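
/*
 * A minimal sketch of switching the fabric counters between scopes (again
 * assuming a hypothetical initialized handle "hw"): selecting port 0 routes
 * the fabric counters to that port only, while PERF_OBJ_ROOT_ID disables
 * the port filter so the counters aggregate across all ports.
 *
 *	u64 rd;
 *
 *	fme_iperf_set_fab_port_enable(hw, 0, 1);
 *	fme_iperf_get_fab_port_pcie0_read(hw, 0, &rd);
 *
 *	fme_iperf_set_fab_port_enable(hw, PERF_OBJ_ROOT_ID, 1);
 *	fme_iperf_get_fab_port_pcie0_read(hw, PERF_OBJ_ROOT_ID, &rd);
 */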

static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_fab_ctl ctl;

	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->fab_ctl);
	*freeze = (u64)ctl.freeze;

	return 0;
}

static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_fab_ctl ctl;
	bool state;

	state = !!freeze;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->fab_ctl);
	ctl.freeze = state;
	writeq(ctl.csr, &iperf->fab_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}

#define PERF_MAX_PORT_NUM	1
#define FME_IPERF_CAP_IOMMU	0x1

static int fme_global_iperf_init(struct ifpga_feature *feature)
{
	struct ifpga_fme_hw *fme;
	struct feature_fme_header *fme_hdr;
	struct feature_fme_capability fme_capability;

	dev_info(NULL, "FME global_iperf Init.\n");

	fme = (struct ifpga_fme_hw *)feature->parent;
	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	/* check whether the IOMMU is supported on this device. */
	fme_capability.csr = readq(&fme_hdr->capability);
	dev_info(NULL, "FME HEAD fme_capability %llx.\n",
		 (unsigned long long)fme_hdr->capability.csr);

	if (fme_capability.iommu_support)
		feature->cap |= FME_IPERF_CAP_IOMMU;

	return 0;
}

static void fme_global_iperf_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);

	dev_info(NULL, "FME global_iperf UInit.\n");
}

static int fme_iperf_root_get_prop(struct ifpga_feature *feature,
				   struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub != PERF_PROP_SUB_UNUSED)
		return -ENOENT;

	switch (id) {
	case 0x1: /* CLOCK */
		return fme_iperf_get_clock(fme, &prop->data);
	case 0x2: /* REVISION */
		return fme_iperf_get_revision(fme, &prop->data);
	}

	return -ENOENT;
}

static int fme_iperf_cache_get_prop(struct ifpga_feature *feature,
				    struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub != PERF_PROP_SUB_UNUSED)
		return -ENOENT;

	switch (id) {
	case 0x1: /* FREEZE */
		return fme_iperf_get_cache_freeze(fme, &prop->data);
	case 0x2: /* READ_HIT */
		return fme_iperf_get_cache_read_hit(fme, &prop->data);
	case 0x3: /* READ_MISS */
		return fme_iperf_get_cache_read_miss(fme, &prop->data);
	case 0x4: /* WRITE_HIT */
		return fme_iperf_get_cache_write_hit(fme, &prop->data);
	case 0x5: /* WRITE_MISS */
		return fme_iperf_get_cache_write_miss(fme, &prop->data);
	case 0x6: /* HOLD_REQUEST */
		return fme_iperf_get_cache_hold_request(fme, &prop->data);
	case 0x7: /* TX_REQ_STALL */
		return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
	case 0x8: /* RX_REQ_STALL */
		return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
	case 0x9: /* RX_EVICTION */
		return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
	case 0xa: /* DATA_WRITE_PORT_CONTENTION */
		return fme_iperf_get_cache_data_write_port_contention(fme,
							&prop->data);
	case 0xb: /* TAG_WRITE_PORT_CONTENTION */
		return fme_iperf_get_cache_tag_write_port_contention(fme,
							&prop->data);
	}

	return -ENOENT;
}

static int fme_iperf_vtd_root_get_prop(struct ifpga_feature *feature,
				       struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x1: /* FREEZE */
		return fme_iperf_get_vtd_freeze(fme, &prop->data);
	case 0x2: /* IOTLB_4K_HIT */
		return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
	case 0x3: /* IOTLB_2M_HIT */
		return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
	case 0x4: /* IOTLB_1G_HIT */
		return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
	case 0x5: /* SLPWC_L3_HIT */
		return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
	case 0x6: /* SLPWC_L4_HIT */
		return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
	case 0x7: /* RCC_HIT */
		return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
	case 0x8: /* IOTLB_4K_MISS */
		return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
	case 0x9: /* IOTLB_2M_MISS */
		return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
	case 0xa: /* IOTLB_1G_MISS */
		return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
	case 0xb: /* SLPWC_L3_MISS */
		return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
	case 0xc: /* SLPWC_L4_MISS */
		return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
	case 0xd: /* RCC_MISS */
		return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
	}

	return -ENOENT;
}

static int fme_iperf_vtd_sub_get_prop(struct ifpga_feature *feature,
				      struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

	if (sub > PERF_MAX_PORT_NUM)
		return -ENOENT;

	switch (id) {
	case 0xe: /* READ_TRANSACTION */
		return fme_iperf_get_vtd_port_read_transaction(fme, sub,
							       &prop->data);
	case 0xf: /* WRITE_TRANSACTION */
		return fme_iperf_get_vtd_port_write_transaction(fme, sub,
								&prop->data);
	case 0x10: /* DEVTLB_READ_HIT */
		return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
							      &prop->data);
	case 0x11: /* DEVTLB_WRITE_HIT */
		return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
							       &prop->data);
	case 0x12: /* DEVTLB_4K_FILL */
		return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
							     &prop->data);
	case 0x13: /* DEVTLB_2M_FILL */
		return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
							     &prop->data);
	case 0x14: /* DEVTLB_1G_FILL */
		return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
							     &prop->data);
	}

	return -ENOENT;
}

static int fme_iperf_vtd_get_prop(struct ifpga_feature *feature,
				  struct feature_prop *prop)
{
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

	if (sub == PERF_PROP_SUB_UNUSED)
		return fme_iperf_vtd_root_get_prop(feature, prop);

	return fme_iperf_vtd_sub_get_prop(feature, prop);
}

static int fme_iperf_fab_get_prop(struct ifpga_feature *feature,
				  struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	/* FREEZE is top-level only; the other properties exist at both the
	 * top and sub levels.
	 */
	switch (id) {
	case 0x1: /* FREEZE */
		if (sub != PERF_PROP_SUB_UNUSED)
			return -ENOENT;
		return fme_iperf_get_fab_freeze(fme, &prop->data);
	case 0x2: /* PCIE0_READ */
		return fme_iperf_get_fab_port_pcie0_read(fme, sub,
							 &prop->data);
	case 0x3: /* PCIE0_WRITE */
		return fme_iperf_get_fab_port_pcie0_write(fme, sub,
							  &prop->data);
	case 0x4: /* PCIE1_READ */
		return fme_iperf_get_fab_port_pcie1_read(fme, sub,
							 &prop->data);
	case 0x5: /* PCIE1_WRITE */
		return fme_iperf_get_fab_port_pcie1_write(fme, sub,
							  &prop->data);
	case 0x6: /* UPI_READ */
		return fme_iperf_get_fab_port_upi_read(fme, sub,
						       &prop->data);
	case 0x7: /* UPI_WRITE */
		return fme_iperf_get_fab_port_upi_write(fme, sub,
							&prop->data);
	case 0x8: /* MMIO_READ */
		return fme_iperf_get_fab_port_mmio_read(fme, sub,
							&prop->data);
	case 0x9: /* MMIO_WRITE */
		return fme_iperf_get_fab_port_mmio_write(fme, sub,
							 &prop->data);
	case 0xa: /* ENABLE */
		return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
	}

	return -ENOENT;
}

static int fme_global_iperf_get_prop(struct ifpga_feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);

	switch (top) {
	case PERF_PROP_TOP_CACHE:
		return fme_iperf_cache_get_prop(feature, prop);
	case PERF_PROP_TOP_VTD:
		return fme_iperf_vtd_get_prop(feature, prop);
	case PERF_PROP_TOP_FAB:
		return fme_iperf_fab_get_prop(feature, prop);
	case PERF_PROP_TOP_UNUSED:
		return fme_iperf_root_get_prop(feature, prop);
	}

	return -ENOENT;
}

static int fme_iperf_cache_set_prop(struct ifpga_feature *feature,
				    struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
		return fme_iperf_set_cache_freeze(fme, prop->data);

	return -ENOENT;
}

static int fme_iperf_vtd_set_prop(struct ifpga_feature *feature,
				  struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
		return fme_iperf_set_vtd_freeze(fme, prop->data);

	return -ENOENT;
}

static int fme_iperf_fab_set_prop(struct ifpga_feature *feature,
				  struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x1: /* FREEZE */
		if (sub != PERF_PROP_SUB_UNUSED)
			return -ENOENT;
		return fme_iperf_set_fab_freeze(fme, prop->data);
	case 0xa: /* ENABLE */
		return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
	}

	return -ENOENT;
}

static int fme_global_iperf_set_prop(struct ifpga_feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);

	switch (top) {
	case PERF_PROP_TOP_CACHE:
		return fme_iperf_cache_set_prop(feature, prop);
	case PERF_PROP_TOP_VTD:
		return fme_iperf_vtd_set_prop(feature, prop);
	case PERF_PROP_TOP_FAB:
		return fme_iperf_fab_set_prop(feature, prop);
	}

	return -ENOENT;
}

struct ifpga_feature_ops fme_global_iperf_ops = {
	.init = fme_global_iperf_init,
	.uinit = fme_global_iperf_uinit,
	.get_prop = fme_global_iperf_get_prop,
	.set_prop = fme_global_iperf_set_prop,
};
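
/*
 * Informational sketch of how these ops are meant to be driven by a caller.
 * A prop_id packs three fields, extracted with GET_FIELD() above: PROP_TOP
 * selects the group (cache, VT-d, fabric, or root), PROP_SUB a
 * sub-object/port (PERF_PROP_SUB_UNUSED at top level), and PROP_ID the
 * counter within the group. Assuming "feature" is this feature instance and
 * "prop_id" has been packed accordingly (the field layout is defined
 * elsewhere in this base code), reading a counter looks like:
 *
 *	struct feature_prop prop;
 *
 *	prop.prop_id = prop_id;
 *	if (!fme_global_iperf_ops.get_prop(feature, &prop))
 *		use(prop.data);
 *
 * e.g. PROP_TOP = PERF_PROP_TOP_CACHE with PROP_ID = 0x2 reaches
 * fme_iperf_get_cache_read_hit() and returns the counter in prop.data.
 */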