xref: /dpdk/drivers/raw/ifpga/base/ifpga_fme.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 
5 #include "ifpga_feature_dev.h"
6 #include "opae_spi.h"
7 #include "opae_intel_max10.h"
8 #include "opae_i2c.h"
9 #include "opae_at24_eeprom.h"
10 #include "ifpga_sec_mgr.h"
11 
12 #define PWR_THRESHOLD_MAX       0x7F
13 
14 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
15 {
16 	struct ifpga_feature *feature;
17 
18 	if (!fme)
19 		return -ENOENT;
20 
21 	feature = get_fme_feature_by_id(fme, prop->feature_id);
22 
23 	if (feature && feature->ops && feature->ops->get_prop)
24 		return feature->ops->get_prop(feature, prop);
25 
26 	return -ENOENT;
27 }
28 
29 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
30 {
31 	struct ifpga_feature *feature;
32 
33 	if (!fme)
34 		return -ENOENT;
35 
36 	feature = get_fme_feature_by_id(fme, prop->feature_id);
37 
38 	if (feature && feature->ops && feature->ops->set_prop)
39 		return feature->ops->set_prop(feature, prop);
40 
41 	return -ENOENT;
42 }
43 
44 int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
45 {
46 	struct ifpga_feature *feature;
47 
48 	if (!fme)
49 		return -ENOENT;
50 
51 	feature = get_fme_feature_by_id(fme, feature_id);
52 
53 	if (feature && feature->ops && feature->ops->set_irq)
54 		return feature->ops->set_irq(feature, irq_set);
55 
56 	return -ENOENT;
57 }
58 
59 /* fme private feature head */
60 static int fme_hdr_init(struct ifpga_feature *feature)
61 {
62 	struct feature_fme_header *fme_hdr;
63 
64 	fme_hdr = (struct feature_fme_header *)feature->addr;
65 
66 	dev_info(NULL, "FME HDR Init.\n");
67 	dev_info(NULL, "FME cap %llx.\n",
68 		 (unsigned long long)fme_hdr->capability.csr);
69 
70 	return 0;
71 }
72 
73 static void fme_hdr_uinit(struct ifpga_feature *feature)
74 {
75 	UNUSED(feature);
76 
77 	dev_info(NULL, "FME HDR UInit.\n");
78 }
79 
80 static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
81 {
82 	struct feature_fme_header *fme_hdr
83 		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
84 	struct feature_header header;
85 
86 	header.csr = readq(&fme_hdr->header);
87 	*revision = header.revision;
88 
89 	return 0;
90 }
91 
92 static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
93 {
94 	struct feature_fme_header *fme_hdr
95 		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
96 	struct feature_fme_capability fme_capability;
97 
98 	fme_capability.csr = readq(&fme_hdr->capability);
99 	*ports_num = fme_capability.num_ports;
100 
101 	return 0;
102 }
103 
104 static int fme_hdr_get_port_type(struct ifpga_fme_hw *fme, u64 *port_type)
105 {
106 	struct feature_fme_header *fme_hdr
107 		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
108 	struct feature_fme_port pt;
109 	u32 port = (u32)((*port_type >> 32) & 0xffffffff);
110 
111 	pt.csr = readq(&fme_hdr->port[port]);
112 	if (!pt.port_implemented)
113 		return -ENODEV;
114 	if (pt.afu_access_control)
115 		*port_type |= 0x1;
116 	else
117 		*port_type &= ~0x1;
118 
119 	return 0;
120 }
121 
122 static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
123 {
124 	struct feature_fme_header *fme_hdr
125 		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
126 	struct feature_fme_capability fme_capability;
127 
128 	fme_capability.csr = readq(&fme_hdr->capability);
129 	*cache_size = fme_capability.cache_size;
130 
131 	return 0;
132 }
133 
134 static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
135 {
136 	struct feature_fme_header *fme_hdr
137 		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
138 	struct feature_fme_capability fme_capability;
139 
140 	fme_capability.csr = readq(&fme_hdr->capability);
141 	*version = fme_capability.fabric_verid;
142 
143 	return 0;
144 }
145 
146 static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
147 {
148 	struct feature_fme_header *fme_hdr
149 		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
150 	struct feature_fme_capability fme_capability;
151 
152 	fme_capability.csr = readq(&fme_hdr->capability);
153 	*socket_id = fme_capability.socket_id;
154 
155 	return 0;
156 }
157 
158 static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
159 				    u64 *bitstream_id)
160 {
161 	struct feature_fme_header *fme_hdr
162 		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
163 
164 	*bitstream_id = readq(&fme_hdr->bitstream_id);
165 
166 	return 0;
167 }
168 
169 static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
170 					  u64 *bitstream_metadata)
171 {
172 	struct feature_fme_header *fme_hdr
173 		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
174 
175 	*bitstream_metadata = readq(&fme_hdr->bitstream_md);
176 
177 	return 0;
178 }
179 
180 static int
181 fme_hdr_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
182 {
183 	struct ifpga_fme_hw *fme = feature->parent;
184 
185 	switch (prop->prop_id) {
186 	case FME_HDR_PROP_REVISION:
187 		return fme_hdr_get_revision(fme, &prop->data);
188 	case FME_HDR_PROP_PORTS_NUM:
189 		return fme_hdr_get_ports_num(fme, &prop->data);
190 	case FME_HDR_PROP_CACHE_SIZE:
191 		return fme_hdr_get_cache_size(fme, &prop->data);
192 	case FME_HDR_PROP_VERSION:
193 		return fme_hdr_get_version(fme, &prop->data);
194 	case FME_HDR_PROP_SOCKET_ID:
195 		return fme_hdr_get_socket_id(fme, &prop->data);
196 	case FME_HDR_PROP_BITSTREAM_ID:
197 		return fme_hdr_get_bitstream_id(fme, &prop->data);
198 	case FME_HDR_PROP_BITSTREAM_METADATA:
199 		return fme_hdr_get_bitstream_metadata(fme, &prop->data);
200 	case FME_HDR_PROP_PORT_TYPE:
201 		return fme_hdr_get_port_type(fme, &prop->data);
202 	}
203 
204 	return -ENOENT;
205 }
206 
/* Feature callbacks for the FME header block (read-only properties). */
struct ifpga_feature_ops fme_hdr_ops = {
	.init = fme_hdr_init,
	.uinit = fme_hdr_uinit,
	.get_prop = fme_hdr_get_prop,
};
212 
213 /* thermal management */
214 static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
215 {
216 	struct feature_fme_thermal *thermal;
217 	struct feature_fme_tmp_threshold temp_threshold;
218 
219 	thermal = get_fme_feature_ioaddr_by_index(fme,
220 						  FME_FEATURE_ID_THERMAL_MGMT);
221 
222 	temp_threshold.csr = readq(&thermal->threshold);
223 	*thres1 = temp_threshold.tmp_thshold1;
224 
225 	return 0;
226 }
227 
228 static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
229 {
230 	struct feature_fme_thermal *thermal;
231 	struct feature_fme_header *fme_hdr;
232 	struct feature_fme_tmp_threshold tmp_threshold;
233 	struct feature_fme_capability fme_capability;
234 
235 	thermal = get_fme_feature_ioaddr_by_index(fme,
236 						  FME_FEATURE_ID_THERMAL_MGMT);
237 	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
238 
239 	spinlock_lock(&fme->lock);
240 	tmp_threshold.csr = readq(&thermal->threshold);
241 	fme_capability.csr = readq(&fme_hdr->capability);
242 
243 	if (fme_capability.lock_bit == 1) {
244 		spinlock_unlock(&fme->lock);
245 		return -EBUSY;
246 	} else if (thres1 > 100) {
247 		spinlock_unlock(&fme->lock);
248 		return -EINVAL;
249 	} else if (thres1 == 0) {
250 		tmp_threshold.tmp_thshold1_enable = 0;
251 		tmp_threshold.tmp_thshold1 = thres1;
252 	} else {
253 		tmp_threshold.tmp_thshold1_enable = 1;
254 		tmp_threshold.tmp_thshold1 = thres1;
255 	}
256 
257 	writeq(tmp_threshold.csr, &thermal->threshold);
258 	spinlock_unlock(&fme->lock);
259 
260 	return 0;
261 }
262 
263 static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
264 {
265 	struct feature_fme_thermal *thermal;
266 	struct feature_fme_tmp_threshold temp_threshold;
267 
268 	thermal = get_fme_feature_ioaddr_by_index(fme,
269 						  FME_FEATURE_ID_THERMAL_MGMT);
270 
271 	temp_threshold.csr = readq(&thermal->threshold);
272 	*thres2 = temp_threshold.tmp_thshold2;
273 
274 	return 0;
275 }
276 
277 static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
278 {
279 	struct feature_fme_thermal *thermal;
280 	struct feature_fme_header *fme_hdr;
281 	struct feature_fme_tmp_threshold tmp_threshold;
282 	struct feature_fme_capability fme_capability;
283 
284 	thermal = get_fme_feature_ioaddr_by_index(fme,
285 						  FME_FEATURE_ID_THERMAL_MGMT);
286 	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
287 
288 	spinlock_lock(&fme->lock);
289 	tmp_threshold.csr = readq(&thermal->threshold);
290 	fme_capability.csr = readq(&fme_hdr->capability);
291 
292 	if (fme_capability.lock_bit == 1) {
293 		spinlock_unlock(&fme->lock);
294 		return -EBUSY;
295 	} else if (thres2 > 100) {
296 		spinlock_unlock(&fme->lock);
297 		return -EINVAL;
298 	} else if (thres2 == 0) {
299 		tmp_threshold.tmp_thshold2_enable = 0;
300 		tmp_threshold.tmp_thshold2 = thres2;
301 	} else {
302 		tmp_threshold.tmp_thshold2_enable = 1;
303 		tmp_threshold.tmp_thshold2 = thres2;
304 	}
305 
306 	writeq(tmp_threshold.csr, &thermal->threshold);
307 	spinlock_unlock(&fme->lock);
308 
309 	return 0;
310 }
311 
312 static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
313 					  u64 *thres_trip)
314 {
315 	struct feature_fme_thermal *thermal;
316 	struct feature_fme_tmp_threshold temp_threshold;
317 
318 	thermal = get_fme_feature_ioaddr_by_index(fme,
319 						  FME_FEATURE_ID_THERMAL_MGMT);
320 
321 	temp_threshold.csr = readq(&thermal->threshold);
322 	*thres_trip = temp_threshold.therm_trip_thshold;
323 
324 	return 0;
325 }
326 
327 static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
328 					      u64 *thres1_reached)
329 {
330 	struct feature_fme_thermal *thermal;
331 	struct feature_fme_tmp_threshold temp_threshold;
332 
333 	thermal = get_fme_feature_ioaddr_by_index(fme,
334 						  FME_FEATURE_ID_THERMAL_MGMT);
335 
336 	temp_threshold.csr = readq(&thermal->threshold);
337 	*thres1_reached = temp_threshold.thshold1_status;
338 
339 	return 0;
340 }
341 
342 static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
343 					      u64 *thres1_reached)
344 {
345 	struct feature_fme_thermal *thermal;
346 	struct feature_fme_tmp_threshold temp_threshold;
347 
348 	thermal = get_fme_feature_ioaddr_by_index(fme,
349 						  FME_FEATURE_ID_THERMAL_MGMT);
350 
351 	temp_threshold.csr = readq(&thermal->threshold);
352 	*thres1_reached = temp_threshold.thshold2_status;
353 
354 	return 0;
355 }
356 
357 static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
358 					     u64 *thres1_policy)
359 {
360 	struct feature_fme_thermal *thermal;
361 	struct feature_fme_tmp_threshold temp_threshold;
362 
363 	thermal = get_fme_feature_ioaddr_by_index(fme,
364 						  FME_FEATURE_ID_THERMAL_MGMT);
365 
366 	temp_threshold.csr = readq(&thermal->threshold);
367 	*thres1_policy = temp_threshold.thshold_policy;
368 
369 	return 0;
370 }
371 
372 static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
373 					     u64 thres1_policy)
374 {
375 	struct feature_fme_thermal *thermal;
376 	struct feature_fme_tmp_threshold tmp_threshold;
377 
378 	thermal = get_fme_feature_ioaddr_by_index(fme,
379 						  FME_FEATURE_ID_THERMAL_MGMT);
380 
381 	spinlock_lock(&fme->lock);
382 	tmp_threshold.csr = readq(&thermal->threshold);
383 
384 	if (thres1_policy == 0) {
385 		tmp_threshold.thshold_policy = 0;
386 	} else if (thres1_policy == 1) {
387 		tmp_threshold.thshold_policy = 1;
388 	} else {
389 		spinlock_unlock(&fme->lock);
390 		return -EINVAL;
391 	}
392 
393 	writeq(tmp_threshold.csr, &thermal->threshold);
394 	spinlock_unlock(&fme->lock);
395 
396 	return 0;
397 }
398 
399 static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
400 {
401 	struct feature_fme_thermal *thermal;
402 	struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
403 
404 	thermal = get_fme_feature_ioaddr_by_index(fme,
405 						  FME_FEATURE_ID_THERMAL_MGMT);
406 
407 	temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
408 	*temp = temp_rdsensor_fmt1.fpga_temp;
409 
410 	return 0;
411 }
412 
413 static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
414 {
415 	struct feature_fme_thermal *fme_thermal
416 		= get_fme_feature_ioaddr_by_index(fme,
417 						  FME_FEATURE_ID_THERMAL_MGMT);
418 	struct feature_header header;
419 
420 	header.csr = readq(&fme_thermal->header);
421 	*revision = header.revision;
422 
423 	return 0;
424 }
425 
426 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD	0x1
427 
428 static int fme_thermal_mgmt_init(struct ifpga_feature *feature)
429 {
430 	struct feature_fme_thermal *fme_thermal;
431 	struct feature_fme_tmp_threshold_cap thermal_cap;
432 
433 	UNUSED(feature);
434 
435 	dev_info(NULL, "FME thermal mgmt Init.\n");
436 
437 	fme_thermal = (struct feature_fme_thermal *)feature->addr;
438 	thermal_cap.csr = readq(&fme_thermal->threshold_cap);
439 
440 	dev_info(NULL, "FME thermal cap %llx.\n",
441 		 (unsigned long long)fme_thermal->threshold_cap.csr);
442 
443 	if (thermal_cap.tmp_thshold_disabled)
444 		feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
445 
446 	return 0;
447 }
448 
449 static void fme_thermal_mgmt_uinit(struct ifpga_feature *feature)
450 {
451 	UNUSED(feature);
452 
453 	dev_info(NULL, "FME thermal mgmt UInit.\n");
454 }
455 
456 static int
457 fme_thermal_set_prop(struct ifpga_feature *feature, struct feature_prop *prop)
458 {
459 	struct ifpga_fme_hw *fme = feature->parent;
460 
461 	if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
462 		return -ENOENT;
463 
464 	switch (prop->prop_id) {
465 	case FME_THERMAL_PROP_THRESHOLD1:
466 		return fme_thermal_set_threshold1(fme, prop->data);
467 	case FME_THERMAL_PROP_THRESHOLD2:
468 		return fme_thermal_set_threshold2(fme, prop->data);
469 	case FME_THERMAL_PROP_THRESHOLD1_POLICY:
470 		return fme_thermal_set_threshold1_policy(fme, prop->data);
471 	}
472 
473 	return -ENOENT;
474 }
475 
476 static int
477 fme_thermal_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
478 {
479 	struct ifpga_fme_hw *fme = feature->parent;
480 
481 	if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
482 	    prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
483 	    prop->prop_id != FME_THERMAL_PROP_REVISION)
484 		return -ENOENT;
485 
486 	switch (prop->prop_id) {
487 	case FME_THERMAL_PROP_THRESHOLD1:
488 		return fme_thermal_get_threshold1(fme, &prop->data);
489 	case FME_THERMAL_PROP_THRESHOLD2:
490 		return fme_thermal_get_threshold2(fme, &prop->data);
491 	case FME_THERMAL_PROP_THRESHOLD_TRIP:
492 		return fme_thermal_get_threshold_trip(fme, &prop->data);
493 	case FME_THERMAL_PROP_THRESHOLD1_REACHED:
494 		return fme_thermal_get_threshold1_reached(fme, &prop->data);
495 	case FME_THERMAL_PROP_THRESHOLD2_REACHED:
496 		return fme_thermal_get_threshold2_reached(fme, &prop->data);
497 	case FME_THERMAL_PROP_THRESHOLD1_POLICY:
498 		return fme_thermal_get_threshold1_policy(fme, &prop->data);
499 	case FME_THERMAL_PROP_TEMPERATURE:
500 		return fme_thermal_get_temperature(fme, &prop->data);
501 	case FME_THERMAL_PROP_REVISION:
502 		return fme_thermal_get_revision(fme, &prop->data);
503 	}
504 
505 	return -ENOENT;
506 }
507 
/* Feature callbacks for FME thermal management. */
struct ifpga_feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
	.uinit = fme_thermal_mgmt_uinit,
	.get_prop = fme_thermal_get_prop,
	.set_prop = fme_thermal_set_prop,
};
514 
515 static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
516 {
517 	struct feature_fme_power *fme_power
518 		= get_fme_feature_ioaddr_by_index(fme,
519 				FME_FEATURE_ID_POWER_MGMT);
520 	struct feature_fme_pm_status pm_status;
521 
522 	pm_status.csr = readq(&fme_power->status);
523 
524 	*consumed = pm_status.pwr_consumed;
525 
526 	return 0;
527 }
528 
529 static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
530 {
531 	struct feature_fme_power *fme_power
532 		= get_fme_feature_ioaddr_by_index(fme,
533 				FME_FEATURE_ID_POWER_MGMT);
534 	struct feature_fme_pm_ap_threshold pm_ap_threshold;
535 
536 	pm_ap_threshold.csr = readq(&fme_power->threshold);
537 
538 	*threshold = pm_ap_threshold.threshold1;
539 
540 	return 0;
541 }
542 
543 static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
544 {
545 	struct feature_fme_power *fme_power
546 		= get_fme_feature_ioaddr_by_index(fme,
547 				FME_FEATURE_ID_POWER_MGMT);
548 	struct feature_fme_pm_ap_threshold pm_ap_threshold;
549 
550 	spinlock_lock(&fme->lock);
551 	pm_ap_threshold.csr = readq(&fme_power->threshold);
552 
553 	if (threshold <= PWR_THRESHOLD_MAX) {
554 		pm_ap_threshold.threshold1 = threshold;
555 	} else {
556 		spinlock_unlock(&fme->lock);
557 		return -EINVAL;
558 	}
559 
560 	writeq(pm_ap_threshold.csr, &fme_power->threshold);
561 	spinlock_unlock(&fme->lock);
562 
563 	return 0;
564 }
565 
566 static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
567 {
568 	struct feature_fme_power *fme_power
569 		= get_fme_feature_ioaddr_by_index(fme,
570 				FME_FEATURE_ID_POWER_MGMT);
571 	struct feature_fme_pm_ap_threshold pm_ap_threshold;
572 
573 	pm_ap_threshold.csr = readq(&fme_power->threshold);
574 
575 	*threshold = pm_ap_threshold.threshold2;
576 
577 	return 0;
578 }
579 
580 static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
581 {
582 	struct feature_fme_power *fme_power
583 		= get_fme_feature_ioaddr_by_index(fme,
584 				FME_FEATURE_ID_POWER_MGMT);
585 	struct feature_fme_pm_ap_threshold pm_ap_threshold;
586 
587 	spinlock_lock(&fme->lock);
588 	pm_ap_threshold.csr = readq(&fme_power->threshold);
589 
590 	if (threshold <= PWR_THRESHOLD_MAX) {
591 		pm_ap_threshold.threshold2 = threshold;
592 	} else {
593 		spinlock_unlock(&fme->lock);
594 		return -EINVAL;
595 	}
596 
597 	writeq(pm_ap_threshold.csr, &fme_power->threshold);
598 	spinlock_unlock(&fme->lock);
599 
600 	return 0;
601 }
602 
603 static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
604 					 u64 *threshold_status)
605 {
606 	struct feature_fme_power *fme_power
607 		= get_fme_feature_ioaddr_by_index(fme,
608 				FME_FEATURE_ID_POWER_MGMT);
609 	struct feature_fme_pm_ap_threshold pm_ap_threshold;
610 
611 	pm_ap_threshold.csr = readq(&fme_power->threshold);
612 
613 	*threshold_status = pm_ap_threshold.threshold1_status;
614 
615 	return 0;
616 }
617 
618 static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
619 					 u64 *threshold_status)
620 {
621 	struct feature_fme_power *fme_power
622 		= get_fme_feature_ioaddr_by_index(fme,
623 				FME_FEATURE_ID_POWER_MGMT);
624 	struct feature_fme_pm_ap_threshold pm_ap_threshold;
625 
626 	pm_ap_threshold.csr = readq(&fme_power->threshold);
627 
628 	*threshold_status = pm_ap_threshold.threshold2_status;
629 
630 	return 0;
631 }
632 
633 static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
634 {
635 	struct feature_fme_power *fme_power
636 		= get_fme_feature_ioaddr_by_index(fme,
637 				FME_FEATURE_ID_POWER_MGMT);
638 	struct feature_fme_pm_status pm_status;
639 
640 	pm_status.csr = readq(&fme_power->status);
641 
642 	*rtl = pm_status.fpga_latency_report;
643 
644 	return 0;
645 }
646 
647 static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
648 {
649 	struct feature_fme_power *fme_power
650 		= get_fme_feature_ioaddr_by_index(fme,
651 				FME_FEATURE_ID_POWER_MGMT);
652 	struct feature_fme_pm_xeon_limit xeon_limit;
653 
654 	xeon_limit.csr = readq(&fme_power->xeon_limit);
655 
656 	if (!xeon_limit.enable)
657 		xeon_limit.pwr_limit = 0;
658 
659 	*limit = xeon_limit.pwr_limit;
660 
661 	return 0;
662 }
663 
664 static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
665 {
666 	struct feature_fme_power *fme_power
667 		= get_fme_feature_ioaddr_by_index(fme,
668 				FME_FEATURE_ID_POWER_MGMT);
669 	struct feature_fme_pm_fpga_limit fpga_limit;
670 
671 	fpga_limit.csr = readq(&fme_power->fpga_limit);
672 
673 	if (!fpga_limit.enable)
674 		fpga_limit.pwr_limit = 0;
675 
676 	*limit = fpga_limit.pwr_limit;
677 
678 	return 0;
679 }
680 
681 static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
682 {
683 	struct feature_fme_power *fme_power
684 		= get_fme_feature_ioaddr_by_index(fme,
685 						  FME_FEATURE_ID_POWER_MGMT);
686 	struct feature_header header;
687 
688 	header.csr = readq(&fme_power->header);
689 	*revision = header.revision;
690 
691 	return 0;
692 }
693 
694 static int fme_power_mgmt_init(struct ifpga_feature *feature)
695 {
696 	UNUSED(feature);
697 
698 	dev_info(NULL, "FME power mgmt Init.\n");
699 
700 	return 0;
701 }
702 
703 static void fme_power_mgmt_uinit(struct ifpga_feature *feature)
704 {
705 	UNUSED(feature);
706 
707 	dev_info(NULL, "FME power mgmt UInit.\n");
708 }
709 
710 static int fme_power_mgmt_get_prop(struct ifpga_feature *feature,
711 				   struct feature_prop *prop)
712 {
713 	struct ifpga_fme_hw *fme = feature->parent;
714 
715 	switch (prop->prop_id) {
716 	case FME_PWR_PROP_CONSUMED:
717 		return fme_pwr_get_consumed(fme, &prop->data);
718 	case FME_PWR_PROP_THRESHOLD1:
719 		return fme_pwr_get_threshold1(fme, &prop->data);
720 	case FME_PWR_PROP_THRESHOLD2:
721 		return fme_pwr_get_threshold2(fme, &prop->data);
722 	case FME_PWR_PROP_THRESHOLD1_STATUS:
723 		return fme_pwr_get_threshold1_status(fme, &prop->data);
724 	case FME_PWR_PROP_THRESHOLD2_STATUS:
725 		return fme_pwr_get_threshold2_status(fme, &prop->data);
726 	case FME_PWR_PROP_RTL:
727 		return fme_pwr_get_rtl(fme, &prop->data);
728 	case FME_PWR_PROP_XEON_LIMIT:
729 		return fme_pwr_get_xeon_limit(fme, &prop->data);
730 	case FME_PWR_PROP_FPGA_LIMIT:
731 		return fme_pwr_get_fpga_limit(fme, &prop->data);
732 	case FME_PWR_PROP_REVISION:
733 		return fme_pwr_get_revision(fme, &prop->data);
734 	}
735 
736 	return -ENOENT;
737 }
738 
739 static int fme_power_mgmt_set_prop(struct ifpga_feature *feature,
740 				   struct feature_prop *prop)
741 {
742 	struct ifpga_fme_hw *fme = feature->parent;
743 
744 	switch (prop->prop_id) {
745 	case FME_PWR_PROP_THRESHOLD1:
746 		return fme_pwr_set_threshold1(fme, prop->data);
747 	case FME_PWR_PROP_THRESHOLD2:
748 		return fme_pwr_set_threshold2(fme, prop->data);
749 	}
750 
751 	return -ENOENT;
752 }
753 
/* Feature callbacks for FME power management. */
struct ifpga_feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
	.uinit = fme_power_mgmt_uinit,
	.get_prop = fme_power_mgmt_get_prop,
	.set_prop = fme_power_mgmt_set_prop,
};
760 
/* HSSI Ethernet feature has no state to set up. */
static int fme_hssi_eth_init(struct ifpga_feature *feature)
{
	UNUSED(feature);

	return 0;
}
766 
/* HSSI Ethernet feature has no state to tear down. */
static void fme_hssi_eth_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
771 
/* Feature callbacks for HSSI Ethernet (placeholder init/uinit only). */
struct ifpga_feature_ops fme_hssi_eth_ops = {
	.init = fme_hssi_eth_init,
	.uinit = fme_hssi_eth_uinit,
};
776 
/* External memory interface feature has no state to set up. */
static int fme_emif_init(struct ifpga_feature *feature)
{
	UNUSED(feature);

	return 0;
}
782 
/* External memory interface feature has no state to tear down. */
static void fme_emif_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
787 
/* Feature callbacks for the external memory interface (placeholder). */
struct ifpga_feature_ops fme_emif_ops = {
	.init = fme_emif_init,
	.uinit = fme_emif_uinit,
};
792 
793 static const char *board_type_to_string(u32 type)
794 {
795 	switch (type) {
796 	case VC_8_10G:
797 		return "VC_8x10G";
798 	case VC_4_25G:
799 		return "VC_4x25G";
800 	case VC_2_1_25:
801 		return "VC_2x1x25G";
802 	case VC_4_25G_2_25G:
803 		return "VC_4x25G+2x25G";
804 	case VC_2_2_25G:
805 		return "VC_2x2x25G";
806 	}
807 
808 	return "unknown";
809 }
810 
811 static const char *board_major_to_string(u32 major)
812 {
813 	switch (major) {
814 	case VISTA_CREEK:
815 		return "VISTA_CREEK";
816 	case RUSH_CREEK:
817 		return "RUSH_CREEK";
818 	case DARBY_CREEK:
819 		return "DARBY_CREEK";
820 	}
821 
822 	return "unknown";
823 }
824 
/* Fill retimer/FVL topology counts for a known board interface type.
 * Returns -EINVAL for unrecognized types.
 * NOTE(review): VC_4_25G_2_25G is handled by board_type_to_string() but
 * has no case here, so it falls into the error path — confirm intended.
 */
static int board_type_to_info(u32 type,
		struct opae_board_info *info)
{
	switch (type) {
	case VC_8_10G:
		info->nums_of_retimer = 2;
		info->ports_per_retimer = 4;
		info->nums_of_fvl = 2;
		info->ports_per_fvl = 4;
		break;
	case VC_4_25G:
		info->nums_of_retimer = 1;
		info->ports_per_retimer = 4;
		info->nums_of_fvl = 2;
		info->ports_per_fvl = 2;
		break;
	case VC_2_1_25:
		info->nums_of_retimer = 2;
		info->ports_per_retimer = 1;
		info->nums_of_fvl = 1;
		info->ports_per_fvl = 2;
		break;
	case VC_2_2_25G:
		info->nums_of_retimer = 2;
		info->ports_per_retimer = 2;
		info->nums_of_fvl = 2;
		info->ports_per_fvl = 2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
859 
/* Decode the bitstream ID into board_info, log the supported features,
 * then read boot page, MAX10 build version and NIOS firmware version
 * from the MAX10 device. Returns -ENODEV without a parent hw, -EINVAL
 * on any read/decode failure.
 */
static int fme_get_board_interface(struct ifpga_fme_hw *fme)
{
	struct fme_bitstream_id id;
	struct ifpga_hw *hw;
	u32 val;

	hw = fme->parent;
	if (!hw)
		return -ENODEV;

	if (fme_hdr_get_bitstream_id(fme, &id.id))
		return -EINVAL;

	/* The bitstream ID bitfields carry the board description. */
	fme->board_info.major = id.major;
	fme->board_info.minor = id.minor;
	fme->board_info.type = id.interface;
	fme->board_info.fvl_bypass = id.fvl_bypass;
	fme->board_info.mac_lightweight = id.mac_lightweight;
	fme->board_info.lightweight = id.lightweiht;
	fme->board_info.disaggregate = id.disagregate;
	fme->board_info.seu = id.seu;
	fme->board_info.ptp = id.ptp;

	dev_info(fme, "found: PCI dev: %02x:%02x:%x board: %s type: %s\n",
			hw->pci_data->bus,
			hw->pci_data->devid,
			hw->pci_data->function,
			board_major_to_string(fme->board_info.major),
			board_type_to_string(fme->board_info.type));

	dev_info(fme, "support feature:\n"
			"fvl_bypass:%s\n"
			"mac_lightweight:%s\n"
			"lightweight:%s\n"
			"disaggregate:%s\n"
			"seu:%s\n"
			"ptp1588:%s\n",
			check_support(fme->board_info.fvl_bypass),
			check_support(fme->board_info.mac_lightweight),
			check_support(fme->board_info.lightweight),
			check_support(fme->board_info.disaggregate),
			check_support(fme->board_info.seu),
			check_support(fme->board_info.ptp));


	if (board_type_to_info(fme->board_info.type, &fme->board_info))
		return -EINVAL;

	dev_info(fme, "get board info: nums_retimers %d ports_per_retimer %d nums_fvl %d ports_per_fvl %d\n",
			fme->board_info.nums_of_retimer,
			fme->board_info.ports_per_retimer,
			fme->board_info.nums_of_fvl,
			fme->board_info.ports_per_fvl);

	/* Low 3 bits of FPGA_PAGE_INFO select the boot page. */
	if (max10_sys_read(fme->max10_dev, FPGA_PAGE_INFO, &val))
		return -EINVAL;
	fme->board_info.boot_page = val & 0x7;

	if (max10_sys_read(fme->max10_dev, MAX10_BUILD_VER, &val))
		return -EINVAL;
	fme->board_info.max10_version = val;

	if (max10_sys_read(fme->max10_dev, NIOS2_FW_VERSION, &val))
		return -EINVAL;
	fme->board_info.nios_fw_version = val;

	dev_info(fme, "max10 version 0x%x, nios fw version 0x%x\n",
		fme->board_info.max10_version,
		fme->board_info.nios_fw_version);

	return 0;
}
932 
933 static int spi_self_checking(struct intel_max10_device *dev)
934 {
935 	u32 val;
936 	int ret;
937 
938 	ret = max10_sys_read(dev, MAX10_TEST_REG, &val);
939 	if (ret)
940 		return -EIO;
941 
942 	dev_info(NULL, "Read MAX10 test register 0x%x\n", val);
943 
944 	return 0;
945 }
946 
947 static void init_spi_share_data(struct ifpga_fme_hw *fme,
948 				struct altera_spi_device *spi)
949 {
950 	struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
951 	opae_share_data *sd = NULL;
952 
953 	if (hw && hw->adapter && hw->adapter->shm.ptr) {
954 		dev_info(NULL, "transfer share data to spi\n");
955 		sd = (opae_share_data *)hw->adapter->shm.ptr;
956 		spi->mutex = &sd->spi_mutex;
957 		spi->dtb_sz_ptr = &sd->dtb_size;
958 		spi->dtb = sd->dtb;
959 	} else {
960 		spi->mutex = NULL;
961 		spi->dtb_sz_ptr = NULL;
962 		spi->dtb = NULL;
963 	}
964 }
965 
/* Bring up the SPI master and probe the attached MAX10 device, then run
 * a SPI self test. On failure the partially-created resources are torn
 * down in reverse order (goto cleanup chain).
 */
static int fme_spi_init(struct ifpga_feature *feature)
{
	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
	struct altera_spi_device *spi_master;
	struct intel_max10_device *max10;
	int ret = 0;

	dev_info(fme, "FME SPI Master (Max10) Init.\n");
	dev_debug(fme, "FME SPI base addr %p.\n",
			feature->addr);
	dev_debug(fme, "spi param=0x%llx\n",
			(unsigned long long)opae_readq(feature->addr + 0x8));

	spi_master = altera_spi_alloc(feature->addr, TYPE_SPI);
	if (!spi_master)
		return -ENODEV;
	init_spi_share_data(fme, spi_master);

	altera_spi_init(spi_master);

	max10 = intel_max10_device_probe(spi_master, 0);
	if (!max10) {
		ret = -ENODEV;
		dev_err(fme, "max10 init fail\n");
		goto spi_fail;
	}

	/* Record the handle; fme_spi_uinit releases it later. */
	fme->max10_dev = max10;

	/* SPI self test */
	if (spi_self_checking(max10)) {
		ret = -EIO;
		goto max10_fail;
	}

	return ret;

max10_fail:
	intel_max10_device_remove(fme->max10_dev);
spi_fail:
	altera_spi_release(spi_master);
	return ret;
}
1009 
1010 static void fme_spi_uinit(struct ifpga_feature *feature)
1011 {
1012 	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1013 
1014 	if (fme->max10_dev)
1015 		intel_max10_device_remove(fme->max10_dev);
1016 }
1017 
/* Feature callbacks for the FME SPI master (MAX10 BMC access). */
struct ifpga_feature_ops fme_spi_master_ops = {
	.init = fme_spi_init,
	.uinit = fme_spi_uinit,
};
1022 
/* Wait (up to 10 s) for the A10 NIOS firmware to finish initialization.
 * For firmware major version >= 3, kick off PKVL init with the default
 * FEC mode first if it has not been started. Afterwards read back and
 * log the negotiated FEC mode. Returns -EIO on SPI register access
 * failure, -ETIMEDOUT when NIOS_INIT_DONE never appears.
 */
static int nios_spi_wait_init_done(struct altera_spi_device *dev)
{
	u32 val = 0;
	/* Deadline: 10 seconds from now, in timer cycles. */
	unsigned long timeout = rte_get_timer_cycles() +
			msecs_to_timer_cycles(10000);
	unsigned long ticks;
	int major_version;
	int fecmode = FEC_MODE_NO;

	if (spi_reg_read(dev, NIOS_VERSION, &val))
		return -EIO;

	major_version =
		(val & NIOS_VERSION_MAJOR) >> NIOS_VERSION_MAJOR_SHIFT;
	dev_info(dev, "A10 NIOS FW version %d\n", major_version);

	if (major_version >= 3) {
		/* read NIOS_INIT to check if PKVL INIT done or not */
		if (spi_reg_read(dev, NIOS_INIT, &val))
			return -EIO;

		dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);

		/* check if PKVLs are initialized already */
		if (val & NIOS_INIT_DONE || val & NIOS_INIT_START)
			goto nios_init_done;

		/* start to config the default FEC mode */
		val = fecmode | NIOS_INIT_START;

		if (spi_reg_write(dev, NIOS_INIT, val))
			return -EIO;
	}

nios_init_done:
	/* Poll every 100 ms until DONE is set or the deadline passes. */
	do {
		if (spi_reg_read(dev, NIOS_INIT, &val))
			return -EIO;
		if (val & NIOS_INIT_DONE)
			break;

		ticks = rte_get_timer_cycles();
		if (time_after(ticks, timeout))
			return -ETIMEDOUT;
		msleep(100);
	} while (1);

	/* get the fecmode */
	if (spi_reg_read(dev, NIOS_INIT, &val))
		return -EIO;
	dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);
	fecmode = (val & REQ_FEC_MODE) >> REQ_FEC_MODE_SHIFT;
	dev_info(dev, "fecmode: 0x%x, %s\n", fecmode,
			(fecmode == FEC_MODE_KR) ? "kr" :
			((fecmode == FEC_MODE_RS) ? "rs" : "no"));

	return 0;
}
1081 
1082 static int nios_spi_check_error(struct altera_spi_device *dev)
1083 {
1084 	u32 value = 0;
1085 
1086 	if (spi_reg_read(dev, PKVL_A_MODE_STS, &value))
1087 		return -EIO;
1088 
1089 	dev_debug(dev, "PKVL A Mode Status 0x%x\n", value);
1090 
1091 	if (value >= 0x100)
1092 		return -EINVAL;
1093 
1094 	if (spi_reg_read(dev, PKVL_B_MODE_STS, &value))
1095 		return -EIO;
1096 
1097 	dev_debug(dev, "PKVL B Mode Status 0x%x\n", value);
1098 
1099 	if (value >= 0x100)
1100 		return -EINVAL;
1101 
1102 	return 0;
1103 }
1104 
1105 static int fme_nios_spi_init(struct ifpga_feature *feature)
1106 {
1107 	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1108 	struct altera_spi_device *spi_master;
1109 	struct intel_max10_device *max10;
1110 	struct ifpga_hw *hw;
1111 	struct opae_manager *mgr;
1112 	int ret = 0;
1113 
1114 	hw = fme->parent;
1115 	if (!hw)
1116 		return -ENODEV;
1117 
1118 	mgr = hw->adapter->mgr;
1119 	if (!mgr)
1120 		return -ENODEV;
1121 
1122 	dev_info(fme, "FME SPI Master (NIOS) Init.\n");
1123 	dev_debug(fme, "FME SPI base addr %p.\n",
1124 			feature->addr);
1125 	dev_debug(fme, "spi param=0x%llx\n",
1126 			(unsigned long long)opae_readq(feature->addr + 0x8));
1127 
1128 	spi_master = altera_spi_alloc(feature->addr, TYPE_NIOS_SPI);
1129 	if (!spi_master)
1130 		return -ENODEV;
1131 	init_spi_share_data(fme, spi_master);
1132 
1133 	/**
1134 	 * 1. wait A10 NIOS initial finished and
1135 	 * release the SPI master to Host
1136 	 */
1137 	if (spi_master->mutex)
1138 		pthread_mutex_lock(spi_master->mutex);
1139 
1140 	ret = nios_spi_wait_init_done(spi_master);
1141 	if (ret != 0) {
1142 		dev_err(fme, "FME NIOS_SPI init fail\n");
1143 		if (spi_master->mutex)
1144 			pthread_mutex_unlock(spi_master->mutex);
1145 		goto release_dev;
1146 	}
1147 
1148 	dev_info(fme, "FME NIOS_SPI initial done\n");
1149 
1150 	/* 2. check if error occur? */
1151 	if (nios_spi_check_error(spi_master))
1152 		dev_info(fme, "NIOS_SPI INIT done, but found some error\n");
1153 
1154 	if (spi_master->mutex)
1155 		pthread_mutex_unlock(spi_master->mutex);
1156 
1157 	/* 3. init the spi master*/
1158 	altera_spi_init(spi_master);
1159 
1160 	/* init the max10 device */
1161 	max10 = intel_max10_device_probe(spi_master, 0);
1162 	if (!max10) {
1163 		ret = -ENODEV;
1164 		dev_err(fme, "max10 init fail\n");
1165 		goto release_dev;
1166 	}
1167 
1168 	fme->max10_dev = max10;
1169 
1170 	max10->bus = hw->pci_data->bus;
1171 
1172 	fme_get_board_interface(fme);
1173 
1174 	mgr->sensor_list = &max10->opae_sensor_list;
1175 
1176 	/* SPI self test */
1177 	if (spi_self_checking(max10))
1178 		goto spi_fail;
1179 
1180 	ret = init_sec_mgr(fme);
1181 	if (ret) {
1182 		dev_err(fme, "security manager init fail\n");
1183 		goto spi_fail;
1184 	}
1185 
1186 	return ret;
1187 
1188 spi_fail:
1189 	intel_max10_device_remove(fme->max10_dev);
1190 release_dev:
1191 	altera_spi_release(spi_master);
1192 	return -ENODEV;
1193 }
1194 
1195 static void fme_nios_spi_uinit(struct ifpga_feature *feature)
1196 {
1197 	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1198 
1199 	release_sec_mgr(fme);
1200 	if (fme->max10_dev)
1201 		intel_max10_device_remove(fme->max10_dev);
1202 }
1203 
/* init/uinit hooks for the FME NIOS SPI master private feature. */
struct ifpga_feature_ops fme_nios_spi_master_ops = {
	.init = fme_nios_spi_init,
	.uinit = fme_nios_spi_uinit,
};
1208 
1209 static int i2c_mac_rom_test(struct altera_i2c_dev *dev)
1210 {
1211 	char buf[20];
1212 	int ret;
1213 	char read_buf[20] = {0,};
1214 	const char *string = "1a2b3c4d5e";
1215 
1216 	opae_memcpy(buf, string, strlen(string));
1217 
1218 	ret = at24_eeprom_write(dev, AT24512_SLAVE_ADDR, 0,
1219 			(u8 *)buf, strlen(string));
1220 	if (ret < 0) {
1221 		dev_err(NULL, "write i2c error:%d\n", ret);
1222 		return ret;
1223 	}
1224 
1225 	ret = at24_eeprom_read(dev, AT24512_SLAVE_ADDR, 0,
1226 			(u8 *)read_buf, strlen(string));
1227 	if (ret < 0) {
1228 		dev_err(NULL, "read i2c error:%d\n", ret);
1229 		return ret;
1230 	}
1231 
1232 	if (memcmp(buf, read_buf, strlen(string))) {
1233 		dev_err(NULL, "%s test fail!\n", __func__);
1234 		return -EFAULT;
1235 	}
1236 
1237 	dev_info(NULL, "%s test successful\n", __func__);
1238 
1239 	return 0;
1240 }
1241 
1242 static void init_i2c_mutex(struct ifpga_fme_hw *fme)
1243 {
1244 	struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
1245 	struct altera_i2c_dev *i2c_dev;
1246 	opae_share_data *sd = NULL;
1247 
1248 	if (fme->i2c_master) {
1249 		i2c_dev = (struct altera_i2c_dev *)fme->i2c_master;
1250 		if (hw && hw->adapter && hw->adapter->shm.ptr) {
1251 			dev_info(NULL, "use multi-process mutex in i2c\n");
1252 			sd = (opae_share_data *)hw->adapter->shm.ptr;
1253 			i2c_dev->mutex = &sd->i2c_mutex;
1254 		} else {
1255 			dev_info(NULL, "use multi-thread mutex in i2c\n");
1256 			i2c_dev->mutex = &i2c_dev->lock;
1257 		}
1258 	}
1259 }
1260 
1261 static int fme_i2c_init(struct ifpga_feature *feature)
1262 {
1263 	struct feature_fme_i2c *i2c;
1264 	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1265 
1266 	i2c = (struct feature_fme_i2c *)feature->addr;
1267 
1268 	dev_info(NULL, "FME I2C Master Init.\n");
1269 
1270 	fme->i2c_master = altera_i2c_probe(i2c);
1271 	if (!fme->i2c_master)
1272 		return -ENODEV;
1273 
1274 	init_i2c_mutex(fme);
1275 
1276 	/* MAC ROM self test */
1277 	i2c_mac_rom_test(fme->i2c_master);
1278 
1279 	return 0;
1280 }
1281 
1282 static void fme_i2c_uninit(struct ifpga_feature *feature)
1283 {
1284 	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1285 
1286 	altera_i2c_remove(fme->i2c_master);
1287 }
1288 
/* init/uinit hooks for the FME I2C master private feature. */
struct ifpga_feature_ops fme_i2c_master_ops = {
	.init = fme_i2c_init,
	.uinit = fme_i2c_uninit,
};
1293 
1294 static int fme_eth_group_init(struct ifpga_feature *feature)
1295 {
1296 	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1297 	struct eth_group_device *dev;
1298 
1299 	dev = (struct eth_group_device *)eth_group_probe(feature->addr);
1300 	if (!dev)
1301 		return -ENODEV;
1302 
1303 	fme->eth_dev[dev->group_id] = dev;
1304 
1305 	fme->eth_group_region[dev->group_id].addr =
1306 		feature->addr;
1307 	fme->eth_group_region[dev->group_id].phys_addr =
1308 		feature->phys_addr;
1309 	fme->eth_group_region[dev->group_id].len =
1310 		feature->size;
1311 
1312 	fme->nums_eth_dev++;
1313 
1314 	dev_info(NULL, "FME PHY Group %d Init.\n", dev->group_id);
1315 	dev_info(NULL, "found %d eth group, addr %p phys_addr 0x%llx len %u\n",
1316 			dev->group_id, feature->addr,
1317 			(unsigned long long)feature->phys_addr,
1318 			feature->size);
1319 
1320 	return 0;
1321 }
1322 
/* Teardown hook for the eth group feature -- intentionally empty.
 * NOTE(review): the devices probed in fme_eth_group_init() are not
 * released here; confirm teardown happens elsewhere or this leaks.
 */
static void fme_eth_group_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
1327 
/* init/uinit hooks for the FME eth group private feature. */
struct ifpga_feature_ops fme_eth_group_ops = {
	.init = fme_eth_group_init,
	.uinit = fme_eth_group_uinit,
};
1332 
1333 int fme_mgr_read_mac_rom(struct ifpga_fme_hw *fme, int offset,
1334 		void *buf, int size)
1335 {
1336 	struct altera_i2c_dev *dev;
1337 
1338 	dev = fme->i2c_master;
1339 	if (!dev)
1340 		return -ENODEV;
1341 
1342 	return at24_eeprom_read(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1343 }
1344 
1345 int fme_mgr_write_mac_rom(struct ifpga_fme_hw *fme, int offset,
1346 		void *buf, int size)
1347 {
1348 	struct altera_i2c_dev *dev;
1349 
1350 	dev = fme->i2c_master;
1351 	if (!dev)
1352 		return -ENODEV;
1353 
1354 	return at24_eeprom_write(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1355 }
1356 
1357 static struct eth_group_device *get_eth_group_dev(struct ifpga_fme_hw *fme,
1358 		u8 group_id)
1359 {
1360 	struct eth_group_device *dev;
1361 
1362 	if (group_id > (MAX_ETH_GROUP_DEVICES - 1))
1363 		return NULL;
1364 
1365 	dev = (struct eth_group_device *)fme->eth_dev[group_id];
1366 	if (!dev)
1367 		return NULL;
1368 
1369 	if (dev->status != ETH_GROUP_DEV_ATTACHED)
1370 		return NULL;
1371 
1372 	return dev;
1373 }
1374 
/* Return the number of eth group devices probed on this FME. */
int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw *fme)
{
	return fme->nums_eth_dev;
}
1379 
1380 int fme_mgr_get_eth_group_info(struct ifpga_fme_hw *fme,
1381 		u8 group_id, struct opae_eth_group_info *info)
1382 {
1383 	struct eth_group_device *dev;
1384 
1385 	dev = get_eth_group_dev(fme, group_id);
1386 	if (!dev)
1387 		return -ENODEV;
1388 
1389 	info->group_id = group_id;
1390 	info->speed = dev->speed;
1391 	info->nums_of_mac = dev->mac_num;
1392 	info->nums_of_phy = dev->phy_num;
1393 
1394 	return 0;
1395 }
1396 
1397 int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw *fme, u8 group_id,
1398 		u8 type, u8 index, u16 addr, u32 *data)
1399 {
1400 	struct eth_group_device *dev;
1401 
1402 	dev = get_eth_group_dev(fme, group_id);
1403 	if (!dev)
1404 		return -ENODEV;
1405 
1406 	return eth_group_read_reg(dev, type, index, addr, data);
1407 }
1408 
1409 int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw *fme, u8 group_id,
1410 		u8 type, u8 index, u16 addr, u32 data)
1411 {
1412 	struct eth_group_device *dev;
1413 
1414 	dev = get_eth_group_dev(fme, group_id);
1415 	if (!dev)
1416 		return -ENODEV;
1417 
1418 	return eth_group_write_reg(dev, type, index, addr, data);
1419 }
1420 
1421 static int fme_get_eth_group_speed(struct ifpga_fme_hw *fme,
1422 		u8 group_id)
1423 {
1424 	struct eth_group_device *dev;
1425 
1426 	dev = get_eth_group_dev(fme, group_id);
1427 	if (!dev)
1428 		return -ENODEV;
1429 
1430 	return dev->speed;
1431 }
1432 
1433 int fme_mgr_get_retimer_info(struct ifpga_fme_hw *fme,
1434 		struct opae_retimer_info *info)
1435 {
1436 	struct intel_max10_device *dev;
1437 
1438 	dev = (struct intel_max10_device *)fme->max10_dev;
1439 	if (!dev)
1440 		return -ENODEV;
1441 
1442 	info->nums_retimer = fme->board_info.nums_of_retimer;
1443 	info->ports_per_retimer = fme->board_info.ports_per_retimer;
1444 	info->nums_fvl = fme->board_info.nums_of_fvl;
1445 	info->ports_per_fvl = fme->board_info.ports_per_fvl;
1446 
1447 	/* The speed of PKVL is identical the eth group's speed */
1448 	info->support_speed = fme_get_eth_group_speed(fme,
1449 			LINE_SIDE_GROUP_ID);
1450 
1451 	return 0;
1452 }
1453 
1454 int fme_mgr_get_retimer_status(struct ifpga_fme_hw *fme,
1455 		struct opae_retimer_status *status)
1456 {
1457 	struct intel_max10_device *dev;
1458 	unsigned int val;
1459 
1460 	dev = (struct intel_max10_device *)fme->max10_dev;
1461 	if (!dev)
1462 		return -ENODEV;
1463 
1464 	if (max10_sys_read(dev, PKVL_LINK_STATUS, &val)) {
1465 		dev_err(dev, "%s: read pkvl status fail\n", __func__);
1466 		return -EINVAL;
1467 	}
1468 
1469 	/* The speed of PKVL is identical the eth group's speed */
1470 	status->speed = fme_get_eth_group_speed(fme,
1471 			LINE_SIDE_GROUP_ID);
1472 
1473 	status->line_link_bitmap = val;
1474 
1475 	dev_debug(dev, "get retimer status: speed:%d. line_link_bitmap:0x%x\n",
1476 			status->speed,
1477 			status->line_link_bitmap);
1478 
1479 	return 0;
1480 }
1481 
1482 int fme_mgr_get_sensor_value(struct ifpga_fme_hw *fme,
1483 		struct opae_sensor_info *sensor,
1484 		unsigned int *value)
1485 {
1486 	struct intel_max10_device *dev;
1487 
1488 	dev = (struct intel_max10_device *)fme->max10_dev;
1489 	if (!dev)
1490 		return -ENODEV;
1491 
1492 	if (max10_sys_read(dev, sensor->value_reg, value)) {
1493 		dev_err(dev, "%s: read sensor value register 0x%x fail\n",
1494 				__func__, sensor->value_reg);
1495 		return -EINVAL;
1496 	}
1497 
1498 	*value *= sensor->multiplier;
1499 
1500 	return 0;
1501 }
1502