1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
3 */
4
5 #include "ifpga_feature_dev.h"
6 #include "opae_spi.h"
7 #include "opae_intel_max10.h"
8 #include "opae_i2c.h"
9 #include "opae_at24_eeprom.h"
10 #include "ifpga_sec_mgr.h"
11
12 #define PWR_THRESHOLD_MAX 0x7F
13
fme_get_prop(struct ifpga_fme_hw * fme,struct feature_prop * prop)14 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
15 {
16 struct ifpga_feature *feature;
17
18 if (!fme)
19 return -ENOENT;
20
21 feature = get_fme_feature_by_id(fme, prop->feature_id);
22
23 if (feature && feature->ops && feature->ops->get_prop)
24 return feature->ops->get_prop(feature, prop);
25
26 return -ENOENT;
27 }
28
fme_set_prop(struct ifpga_fme_hw * fme,struct feature_prop * prop)29 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
30 {
31 struct ifpga_feature *feature;
32
33 if (!fme)
34 return -ENOENT;
35
36 feature = get_fme_feature_by_id(fme, prop->feature_id);
37
38 if (feature && feature->ops && feature->ops->set_prop)
39 return feature->ops->set_prop(feature, prop);
40
41 return -ENOENT;
42 }
43
/**
 * Configure interrupts for an FME private feature.
 *
 * @irq_set is an opaque, feature-specific descriptor passed through
 * to the feature's set_irq op.  Returns -ENOENT when the FME, the
 * feature, or the op is unavailable.
 */
int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
{
	struct ifpga_feature *f;

	if (!fme)
		return -ENOENT;

	f = get_fme_feature_by_id(fme, feature_id);
	if (!f || !f->ops || !f->ops->set_irq)
		return -ENOENT;

	return f->ops->set_irq(f, irq_set);
}
58
59 /* fme private feature head */
/* Init of the FME header private feature: only logs the capability CSR. */
static int fme_hdr_init(struct ifpga_feature *feature)
{
	struct feature_fme_header *fme_hdr;

	/* feature->addr is the MMIO base of the FME header region. */
	fme_hdr = (struct feature_fme_header *)feature->addr;

	dev_info(NULL, "FME HDR Init.\n");
	dev_info(NULL, "FME cap %llx.\n",
		 (unsigned long long)fme_hdr->capability.csr);

	return 0;
}
72
/* Teardown of the FME header feature: nothing to release, log only. */
static void fme_hdr_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);

	dev_info(NULL, "FME HDR UInit.\n");
}
79
/* Read the FME revision from the common feature header CSR. */
static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_header hdr;
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	hdr.csr = readq(&base->header);
	*revision = hdr.revision;

	return 0;
}
91
/* Report the number of ports advertised by the FME capability CSR. */
static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
{
	struct feature_fme_capability cap;
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	cap.csr = readq(&base->capability);
	*ports_num = cap.num_ports;

	return 0;
}
103
/*
 * Query the access-control type of one port.
 *
 * In/out protocol: the caller packs the port index into bits [63:32]
 * of *port_type; on success bit 0 of *port_type is set to mirror the
 * hardware's afu_access_control flag (other low bits are preserved).
 * Returns -ENODEV if the selected port is not implemented.
 */
static int fme_hdr_get_port_type(struct ifpga_fme_hw *fme, u64 *port_type)
{
	struct feature_fme_header *fme_hdr
		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
	struct feature_fme_port pt;
	/* Extract the requested port index from the upper half. */
	u32 port = (u32)((*port_type >> 32) & 0xffffffff);

	pt.csr = readq(&fme_hdr->port[port]);
	if (!pt.port_implemented)
		return -ENODEV;
	/* NOTE(review): bit 0 presumably distinguishes VF/PF AFU access
	 * control — confirm against the FME hardware spec. */
	if (pt.afu_access_control)
		*port_type |= 0x1;
	else
		*port_type &= ~0x1;

	return 0;
}
121
/* Report the cache size field from the FME capability CSR. */
static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
{
	struct feature_fme_capability cap;
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	cap.csr = readq(&base->capability);
	*cache_size = cap.cache_size;

	return 0;
}
133
/* Report the fabric version ID from the FME capability CSR. */
static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
{
	struct feature_fme_capability cap;
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	cap.csr = readq(&base->capability);
	*version = cap.fabric_verid;

	return 0;
}
145
/* Report the socket ID from the FME capability CSR. */
static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
{
	struct feature_fme_capability cap;
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	cap.csr = readq(&base->capability);
	*socket_id = cap.socket_id;

	return 0;
}
157
/* Read the 64-bit bitstream ID register verbatim. */
static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
		u64 *bitstream_id)
{
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	*bitstream_id = readq(&base->bitstream_id);

	return 0;
}
168
/* Read the 64-bit bitstream metadata register verbatim. */
static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
		u64 *bitstream_metadata)
{
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	*bitstream_metadata = readq(&base->bitstream_md);

	return 0;
}
179
180 static int
fme_hdr_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)181 fme_hdr_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
182 {
183 struct ifpga_fme_hw *fme = feature->parent;
184
185 switch (prop->prop_id) {
186 case FME_HDR_PROP_REVISION:
187 return fme_hdr_get_revision(fme, &prop->data);
188 case FME_HDR_PROP_PORTS_NUM:
189 return fme_hdr_get_ports_num(fme, &prop->data);
190 case FME_HDR_PROP_CACHE_SIZE:
191 return fme_hdr_get_cache_size(fme, &prop->data);
192 case FME_HDR_PROP_VERSION:
193 return fme_hdr_get_version(fme, &prop->data);
194 case FME_HDR_PROP_SOCKET_ID:
195 return fme_hdr_get_socket_id(fme, &prop->data);
196 case FME_HDR_PROP_BITSTREAM_ID:
197 return fme_hdr_get_bitstream_id(fme, &prop->data);
198 case FME_HDR_PROP_BITSTREAM_METADATA:
199 return fme_hdr_get_bitstream_metadata(fme, &prop->data);
200 case FME_HDR_PROP_PORT_TYPE:
201 return fme_hdr_get_port_type(fme, &prop->data);
202 }
203
204 return -ENOENT;
205 }
206
/* Ops table for the FME header feature; properties are read-only,
 * so no set_prop/set_irq handlers are provided. */
struct ifpga_feature_ops fme_hdr_ops = {
	.init = fme_hdr_init,
	.uinit = fme_hdr_uinit,
	.get_prop = fme_hdr_get_prop,
};
212
213 /* thermal management */
/* Report the currently programmed temperature threshold #1. */
static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres1 = th.tmp_thshold1;

	return 0;
}
227
/*
 * Program temperature threshold #1 (degrees, 0..100).
 *
 * A value of 0 disables the threshold.  Fails with -EBUSY when the
 * FME capability lock bit is set and -EINVAL for out-of-range values.
 */
static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
{
	struct feature_fme_thermal *thermal;
	struct feature_fme_header *fme_hdr;
	struct feature_fme_tmp_threshold th;
	struct feature_fme_capability cap;
	int ret = 0;

	thermal = get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_THERMAL_MGMT);
	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	spinlock_lock(&fme->lock);
	th.csr = readq(&thermal->threshold);
	cap.csr = readq(&fme_hdr->capability);

	if (cap.lock_bit == 1) {
		ret = -EBUSY;
		goto unlock;
	}
	if (thres1 > 100) {
		ret = -EINVAL;
		goto unlock;
	}

	/* 0 means "disable"; any other value enables the threshold. */
	th.tmp_thshold1 = thres1;
	th.tmp_thshold1_enable = (thres1 != 0);
	writeq(th.csr, &thermal->threshold);

unlock:
	spinlock_unlock(&fme->lock);
	return ret;
}
262
/* Report the currently programmed temperature threshold #2. */
static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres2 = th.tmp_thshold2;

	return 0;
}
276
/*
 * Program temperature threshold #2 (degrees, 0..100).
 *
 * Same contract as threshold #1: 0 disables, -EBUSY when the FME
 * capability lock bit is set, -EINVAL for values above 100.
 */
static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
{
	struct feature_fme_thermal *thermal;
	struct feature_fme_header *fme_hdr;
	struct feature_fme_tmp_threshold th;
	struct feature_fme_capability cap;
	int ret = 0;

	thermal = get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_THERMAL_MGMT);
	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	spinlock_lock(&fme->lock);
	th.csr = readq(&thermal->threshold);
	cap.csr = readq(&fme_hdr->capability);

	if (cap.lock_bit == 1) {
		ret = -EBUSY;
		goto unlock;
	}
	if (thres2 > 100) {
		ret = -EINVAL;
		goto unlock;
	}

	/* 0 means "disable"; any other value enables the threshold. */
	th.tmp_thshold2 = thres2;
	th.tmp_thshold2_enable = (thres2 != 0);
	writeq(th.csr, &thermal->threshold);

unlock:
	spinlock_unlock(&fme->lock);
	return ret;
}
311
/* Report the hardware thermal-trip threshold. */
static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
		u64 *thres_trip)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres_trip = th.therm_trip_thshold;

	return 0;
}
326
/* Report whether temperature threshold #1 has been reached. */
static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
		u64 *thres1_reached)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres1_reached = th.thshold1_status;

	return 0;
}
341
/* Report whether temperature threshold #2 has been reached. */
static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
		u64 *thres2_reached)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres2_reached = th.thshold2_status;

	return 0;
}
356
/* Report the threshold #1 policy bit from the threshold CSR. */
static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
		u64 *thres1_policy)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres1_policy = th.thshold_policy;

	return 0;
}
371
/*
 * Set the threshold #1 policy bit.
 *
 * Only the values 0 and 1 are accepted; anything else yields -EINVAL
 * without touching the hardware.
 */
static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
		u64 thres1_policy)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	spinlock_lock(&fme->lock);
	th.csr = readq(&thermal->threshold);

	if (thres1_policy > 1) {
		spinlock_unlock(&fme->lock);
		return -EINVAL;
	}

	th.thshold_policy = thres1_policy;
	writeq(th.csr, &thermal->threshold);
	spinlock_unlock(&fme->lock);

	return 0;
}
398
/* Read the current FPGA temperature from the format-1 sensor CSR. */
static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
{
	struct feature_fme_temp_rdsensor_fmt1 sensor;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	sensor.csr = readq(&thermal->rdsensor_fm1);
	*temp = sensor.fpga_temp;

	return 0;
}
412
/* Report the thermal feature's revision from its common header. */
static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_header hdr;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_THERMAL_MGMT);

	hdr.csr = readq(&thermal->header);
	*revision = hdr.revision;

	return 0;
}
425
426 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
427
fme_thermal_mgmt_init(struct ifpga_feature * feature)428 static int fme_thermal_mgmt_init(struct ifpga_feature *feature)
429 {
430 struct feature_fme_thermal *fme_thermal;
431 struct feature_fme_tmp_threshold_cap thermal_cap;
432
433 UNUSED(feature);
434
435 dev_info(NULL, "FME thermal mgmt Init.\n");
436
437 fme_thermal = (struct feature_fme_thermal *)feature->addr;
438 thermal_cap.csr = readq(&fme_thermal->threshold_cap);
439
440 dev_info(NULL, "FME thermal cap %llx.\n",
441 (unsigned long long)fme_thermal->threshold_cap.csr);
442
443 if (thermal_cap.tmp_thshold_disabled)
444 feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
445
446 return 0;
447 }
448
/* Teardown of the thermal feature: nothing to release, log only. */
static void fme_thermal_mgmt_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);

	dev_info(NULL, "FME thermal mgmt UInit.\n");
}
455
456 static int
fme_thermal_set_prop(struct ifpga_feature * feature,struct feature_prop * prop)457 fme_thermal_set_prop(struct ifpga_feature *feature, struct feature_prop *prop)
458 {
459 struct ifpga_fme_hw *fme = feature->parent;
460
461 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
462 return -ENOENT;
463
464 switch (prop->prop_id) {
465 case FME_THERMAL_PROP_THRESHOLD1:
466 return fme_thermal_set_threshold1(fme, prop->data);
467 case FME_THERMAL_PROP_THRESHOLD2:
468 return fme_thermal_set_threshold2(fme, prop->data);
469 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
470 return fme_thermal_set_threshold1_policy(fme, prop->data);
471 }
472
473 return -ENOENT;
474 }
475
476 static int
fme_thermal_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)477 fme_thermal_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
478 {
479 struct ifpga_fme_hw *fme = feature->parent;
480
481 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
482 prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
483 prop->prop_id != FME_THERMAL_PROP_REVISION)
484 return -ENOENT;
485
486 switch (prop->prop_id) {
487 case FME_THERMAL_PROP_THRESHOLD1:
488 return fme_thermal_get_threshold1(fme, &prop->data);
489 case FME_THERMAL_PROP_THRESHOLD2:
490 return fme_thermal_get_threshold2(fme, &prop->data);
491 case FME_THERMAL_PROP_THRESHOLD_TRIP:
492 return fme_thermal_get_threshold_trip(fme, &prop->data);
493 case FME_THERMAL_PROP_THRESHOLD1_REACHED:
494 return fme_thermal_get_threshold1_reached(fme, &prop->data);
495 case FME_THERMAL_PROP_THRESHOLD2_REACHED:
496 return fme_thermal_get_threshold2_reached(fme, &prop->data);
497 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
498 return fme_thermal_get_threshold1_policy(fme, &prop->data);
499 case FME_THERMAL_PROP_TEMPERATURE:
500 return fme_thermal_get_temperature(fme, &prop->data);
501 case FME_THERMAL_PROP_REVISION:
502 return fme_thermal_get_revision(fme, &prop->data);
503 }
504
505 return -ENOENT;
506 }
507
/* Ops table for the thermal management feature. */
struct ifpga_feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
	.uinit = fme_thermal_mgmt_uinit,
	.get_prop = fme_thermal_get_prop,
	.set_prop = fme_thermal_set_prop,
};
514
/* Report the instantaneous power consumption from the PM status CSR. */
static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
{
	struct feature_fme_pm_status st;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	st.csr = readq(&pwr->status);
	*consumed = st.pwr_consumed;

	return 0;
}
528
/* Report AP power threshold #1. */
static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	th.csr = readq(&pwr->threshold);
	*threshold = th.threshold1;

	return 0;
}
542
/*
 * Program AP power threshold #1.
 *
 * Values above PWR_THRESHOLD_MAX (0x7F) are rejected with -EINVAL
 * and the hardware is left untouched.
 */
static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	spinlock_lock(&fme->lock);
	th.csr = readq(&pwr->threshold);

	if (threshold > PWR_THRESHOLD_MAX) {
		spinlock_unlock(&fme->lock);
		return -EINVAL;
	}

	th.threshold1 = threshold;
	writeq(th.csr, &pwr->threshold);
	spinlock_unlock(&fme->lock);

	return 0;
}
565
/* Report AP power threshold #2. */
static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	th.csr = readq(&pwr->threshold);
	*threshold = th.threshold2;

	return 0;
}
579
/*
 * Program AP power threshold #2.
 *
 * Same contract as threshold #1: values above PWR_THRESHOLD_MAX are
 * rejected with -EINVAL without touching the hardware.
 */
static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	spinlock_lock(&fme->lock);
	th.csr = readq(&pwr->threshold);

	if (threshold > PWR_THRESHOLD_MAX) {
		spinlock_unlock(&fme->lock);
		return -EINVAL;
	}

	th.threshold2 = threshold;
	writeq(th.csr, &pwr->threshold);
	spinlock_unlock(&fme->lock);

	return 0;
}
602
/* Report whether AP power threshold #1 is currently exceeded. */
static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
		u64 *threshold_status)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	th.csr = readq(&pwr->threshold);
	*threshold_status = th.threshold1_status;

	return 0;
}
617
/* Report whether AP power threshold #2 is currently exceeded. */
static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
		u64 *threshold_status)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	th.csr = readq(&pwr->threshold);
	*threshold_status = th.threshold2_status;

	return 0;
}
632
/* Report the FPGA latency report bit from the PM status CSR. */
static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
{
	struct feature_fme_pm_status st;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	st.csr = readq(&pwr->status);
	*rtl = st.fpga_latency_report;

	return 0;
}
646
/* Report the Xeon power limit; a disabled limit reads back as zero. */
static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
{
	struct feature_fme_pm_xeon_limit xl;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	xl.csr = readq(&pwr->xeon_limit);
	*limit = xl.enable ? xl.pwr_limit : 0;

	return 0;
}
663
/* Report the FPGA power limit; a disabled limit reads back as zero. */
static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
{
	struct feature_fme_pm_fpga_limit fl;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	fl.csr = readq(&pwr->fpga_limit);
	*limit = fl.enable ? fl.pwr_limit : 0;

	return 0;
}
680
/* Report the power feature's revision from its common header. */
static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_header hdr;
	struct feature_fme_power *pwr =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_POWER_MGMT);

	hdr.csr = readq(&pwr->header);
	*revision = hdr.revision;

	return 0;
}
693
/* Init of the power management feature: no hardware setup, log only. */
static int fme_power_mgmt_init(struct ifpga_feature *feature)
{
	UNUSED(feature);

	dev_info(NULL, "FME power mgmt Init.\n");

	return 0;
}
702
/* Teardown of the power feature: nothing to release, log only. */
static void fme_power_mgmt_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);

	dev_info(NULL, "FME power mgmt UInit.\n");
}
709
fme_power_mgmt_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)710 static int fme_power_mgmt_get_prop(struct ifpga_feature *feature,
711 struct feature_prop *prop)
712 {
713 struct ifpga_fme_hw *fme = feature->parent;
714
715 switch (prop->prop_id) {
716 case FME_PWR_PROP_CONSUMED:
717 return fme_pwr_get_consumed(fme, &prop->data);
718 case FME_PWR_PROP_THRESHOLD1:
719 return fme_pwr_get_threshold1(fme, &prop->data);
720 case FME_PWR_PROP_THRESHOLD2:
721 return fme_pwr_get_threshold2(fme, &prop->data);
722 case FME_PWR_PROP_THRESHOLD1_STATUS:
723 return fme_pwr_get_threshold1_status(fme, &prop->data);
724 case FME_PWR_PROP_THRESHOLD2_STATUS:
725 return fme_pwr_get_threshold2_status(fme, &prop->data);
726 case FME_PWR_PROP_RTL:
727 return fme_pwr_get_rtl(fme, &prop->data);
728 case FME_PWR_PROP_XEON_LIMIT:
729 return fme_pwr_get_xeon_limit(fme, &prop->data);
730 case FME_PWR_PROP_FPGA_LIMIT:
731 return fme_pwr_get_fpga_limit(fme, &prop->data);
732 case FME_PWR_PROP_REVISION:
733 return fme_pwr_get_revision(fme, &prop->data);
734 }
735
736 return -ENOENT;
737 }
738
fme_power_mgmt_set_prop(struct ifpga_feature * feature,struct feature_prop * prop)739 static int fme_power_mgmt_set_prop(struct ifpga_feature *feature,
740 struct feature_prop *prop)
741 {
742 struct ifpga_fme_hw *fme = feature->parent;
743
744 switch (prop->prop_id) {
745 case FME_PWR_PROP_THRESHOLD1:
746 return fme_pwr_set_threshold1(fme, prop->data);
747 case FME_PWR_PROP_THRESHOLD2:
748 return fme_pwr_set_threshold2(fme, prop->data);
749 }
750
751 return -ENOENT;
752 }
753
/* Ops table for the power management feature. */
struct ifpga_feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
	.uinit = fme_power_mgmt_uinit,
	.get_prop = fme_power_mgmt_get_prop,
	.set_prop = fme_power_mgmt_set_prop,
};
760
/* HSSI Ethernet feature needs no runtime initialization. */
static int fme_hssi_eth_init(struct ifpga_feature *feature)
{
	UNUSED(feature);
	return 0;
}
766
/* HSSI Ethernet feature needs no teardown. */
static void fme_hssi_eth_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
771
/* Ops table for the HSSI Ethernet feature (no prop/irq handlers). */
struct ifpga_feature_ops fme_hssi_eth_ops = {
	.init = fme_hssi_eth_init,
	.uinit = fme_hssi_eth_uinit,
};
776
/* External memory interface feature needs no runtime initialization. */
static int fme_emif_init(struct ifpga_feature *feature)
{
	UNUSED(feature);
	return 0;
}
782
/* External memory interface feature needs no teardown. */
static void fme_emif_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
787
/* Ops table for the external memory interface feature. */
struct ifpga_feature_ops fme_emif_ops = {
	.init = fme_emif_init,
	.uinit = fme_emif_uinit,
};
792
/*
 * Map a (board, interface-type) pair to a human-readable string.
 *
 * Vista Creek boards use the VC_* type space; all other boards use
 * the FIMA_* type space.  Unrecognized types yield "unknown".
 *
 * Fix: removed the unreachable `break` statements that followed
 * `return` in both switches (dead code) and made the fall-through to
 * "unknown" explicit with `default` labels.
 */
static const char *board_type_to_string(u32 board, u32 type)
{
	if (board == VISTA_CREEK) {
		switch (type) {
		case VC_8_10G:
			return "8x10G";
		case VC_4_25G:
			return "4x25G";
		case VC_2_1_25:
			return "2x1x25G";
		case VC_4_25G_2_25G:
			return "4x25G+2x25G";
		case VC_2_2_25G:
			return "2x2x25G";
		default:
			break;
		}
	} else {
		switch (type) {
		case FIMA_10G_ADP:
			return "2x4x10G";
		case FIMA_25G_ADP:
			return "2x2x25G";
		case FIMA_100G_ADP:
			return "2x100G";
		default:
			break;
		}
	}

	return "unknown";
}
823
/* Map a board major ID to its code name, "unknown" otherwise. */
static const char *board_major_to_string(u32 major)
{
	if (major == VISTA_CREEK)
		return "VISTA_CREEK";
	if (major == RUSH_CREEK)
		return "RUSH_CREEK";
	if (major == DARBY_CREEK)
		return "DARBY_CREEK";
	if (major == LIGHTNING_CREEK)
		return "LIGHTNING_CREEK";
	if (major == ARROW_CREEK)
		return "ARROW_CREEK";

	return "unknown";
}
843
/*
 * Fill in retimer/FVL topology counts for a Vista Creek board type.
 *
 * Returns -EINVAL for types with no known topology; @info is only
 * written on success.
 */
static int board_type_to_info(u32 type,
		struct opae_board_info *info)
{
	u32 retimers, ports_per_retimer, fvls, ports_per_fvl;

	switch (type) {
	case VC_8_10G:
		retimers = 2;
		ports_per_retimer = 4;
		fvls = 2;
		ports_per_fvl = 4;
		break;
	case VC_4_25G:
		retimers = 1;
		ports_per_retimer = 4;
		fvls = 2;
		ports_per_fvl = 2;
		break;
	case VC_2_1_25:
		retimers = 2;
		ports_per_retimer = 1;
		fvls = 1;
		ports_per_fvl = 2;
		break;
	case VC_2_2_25G:
		retimers = 2;
		ports_per_retimer = 2;
		fvls = 2;
		ports_per_fvl = 2;
		break;
	default:
		return -EINVAL;
	}

	info->nums_of_retimer = retimers;
	info->ports_per_retimer = ports_per_retimer;
	info->nums_of_fvl = fvls;
	info->ports_per_fvl = ports_per_fvl;

	return 0;
}
878
fme_get_board_interface(struct ifpga_fme_hw * fme)879 static int fme_get_board_interface(struct ifpga_fme_hw *fme)
880 {
881 struct feature_fme_bitstream_id id;
882 struct ifpga_hw *hw;
883 u32 val;
884 const char *type = NULL;
885 int ret;
886
887 hw = fme->parent;
888 if (!hw)
889 return -ENODEV;
890
891 if (fme_hdr_get_bitstream_id(fme, &id.csr))
892 return -EINVAL;
893
894 if (id.v1.major == ARROW_CREEK) {
895 fme->board_info.major = id.v2.bs_vermajor;
896 fme->board_info.minor = id.v2.bs_verminor;
897 fme->board_info.n6000_fim_type = id.v2.fim_type;
898 fme->board_info.n6000_hssi_id = id.v2.hssi_id;
899 type = board_type_to_string(fme->board_info.major,
900 fme->board_info.n6000_fim_type);
901 } else {
902 fme->board_info.major = id.v1.major;
903 fme->board_info.minor = id.v1.minor;
904 fme->board_info.type = id.v1.interface;
905 fme->board_info.fvl_bypass = id.v1.fvl_bypass;
906 fme->board_info.mac_lightweight = id.v1.mac_lightweight;
907 fme->board_info.lightweight = id.v1.lightweiht;
908 fme->board_info.disaggregate = id.v1.disagregate;
909 fme->board_info.seu = id.v1.seu;
910 fme->board_info.ptp = id.v1.ptp;
911 type = board_type_to_string(fme->board_info.major,
912 fme->board_info.type);
913 }
914
915 dev_info(fme, "found: PCI dev: %02x:%02x:%x board: %s type: %s\n",
916 hw->pci_data->bus,
917 hw->pci_data->devid,
918 hw->pci_data->function,
919 board_major_to_string(fme->board_info.major),
920 type);
921
922 ret = max10_get_fpga_load_info(fme->max10_dev, &val);
923 if (ret)
924 return ret;
925 fme->board_info.boot_page = val;
926
927 if (fme->board_info.major == VISTA_CREEK) {
928 dev_info(dev, "FPGA loaded from %s Image\n",
929 val ? "User" : "Factory");
930 dev_info(fme, "support feature:\n"
931 "fvl_bypass:%s\n"
932 "mac_lightweight:%s\n"
933 "lightweight:%s\n"
934 "disaggregate:%s\n"
935 "seu:%s\n"
936 "ptp1588:%s\n",
937 check_support(fme->board_info.fvl_bypass),
938 check_support(fme->board_info.mac_lightweight),
939 check_support(fme->board_info.lightweight),
940 check_support(fme->board_info.disaggregate),
941 check_support(fme->board_info.seu),
942 check_support(fme->board_info.ptp));
943
944 if (board_type_to_info(fme->board_info.type, &fme->board_info))
945 return -EINVAL;
946
947 dev_info(fme, "get board info: nums_retimers %d "
948 "ports_per_retimer %d nums_fvl %d "
949 "ports_per_fvl %d\n",
950 fme->board_info.nums_of_retimer,
951 fme->board_info.ports_per_retimer,
952 fme->board_info.nums_of_fvl,
953 fme->board_info.ports_per_fvl);
954 } else {
955 dev_info(dev, "FPGA loaded from %s Image\n",
956 val ? (val == 1 ? "User1" : "User2") : "Factory");
957 }
958
959 ret = max10_get_bmc_version(fme->max10_dev, &val);
960 if (ret)
961 return ret;
962 fme->board_info.max10_version = val;
963
964 ret = max10_get_bmcfw_version(fme->max10_dev, &val);
965 if (ret)
966 return ret;
967 fme->board_info.nios_fw_version = val;
968
969 dev_info(fme, "max10 version 0x%x, nios fw version 0x%x\n",
970 fme->board_info.max10_version,
971 fme->board_info.nios_fw_version);
972
973 return 0;
974 }
975
spi_self_checking(struct intel_max10_device * dev)976 static int spi_self_checking(struct intel_max10_device *dev)
977 {
978 u32 val;
979 int ret;
980
981 ret = max10_sys_read(dev, MAX10_TEST_REG, &val);
982 if (ret)
983 return -EIO;
984
985 dev_info(NULL, "Read MAX10 test register 0x%x\n", val);
986
987 return 0;
988 }
989
init_spi_share_data(struct ifpga_fme_hw * fme,struct altera_spi_device * spi)990 static void init_spi_share_data(struct ifpga_fme_hw *fme,
991 struct altera_spi_device *spi)
992 {
993 struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
994 opae_share_data *sd = NULL;
995
996 if (hw && hw->adapter && hw->adapter->shm.ptr) {
997 dev_info(NULL, "transfer share data to spi\n");
998 sd = (opae_share_data *)hw->adapter->shm.ptr;
999 spi->mutex = &sd->spi_mutex;
1000 spi->dtb_sz_ptr = &sd->dtb_size;
1001 spi->dtb = sd->dtb;
1002 } else {
1003 spi->mutex = NULL;
1004 spi->dtb_sz_ptr = NULL;
1005 spi->dtb = NULL;
1006 }
1007 }
1008
/*
 * Init of the SPI master feature: bring up the Altera SPI master,
 * create a SPI transaction device, initialize the MAX10 BMC behind
 * it, and run a SPI read self-test.  On success fme->max10_dev holds
 * the probed device; on failure everything is torn down in reverse.
 */
static int fme_spi_init(struct ifpga_feature *feature)
{
	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
	struct altera_spi_device *spi_master;
	struct intel_max10_device *max10;
	int ret = 0;

	dev_info(fme, "FME SPI Master (Max10) Init.\n");
	dev_debug(fme, "FME SPI base addr %p.\n",
			feature->addr);
	dev_debug(fme, "spi param=0x%llx\n",
			(unsigned long long)opae_readq(feature->addr + 0x8));

	spi_master = altera_spi_alloc(feature->addr, TYPE_SPI);
	if (!spi_master)
		return -ENODEV;
	/* Wire up shared-memory mutex/DTB pointers before first use. */
	init_spi_share_data(fme, spi_master);

	altera_spi_init(spi_master);

	max10 = opae_zmalloc(sizeof(*max10));
	if (!max10)
		goto release_dev;

	max10->spi_master = spi_master;
	max10->type = M10_N3000;

	/* Chip select 0: the MAX10 BMC sits on the first SPI slave. */
	max10->spi_tran_dev = spi_transaction_init(spi_master, 0);
	if (!max10->spi_tran_dev) {
		dev_err(fme, "%s spi tran init fail\n", __func__);
		goto free_max10;
	}

	/* init the max10 device */
	ret = intel_max10_device_init(max10);
	if (ret) {
		ret = -ENODEV;
		dev_err(fme, "max10 init fail\n");
		goto release_spi_tran_dev;
	}

	fme->max10_dev = max10;

	/* SPI self test */
	if (spi_self_checking(max10)) {
		ret = -EIO;
		goto max10_fail;
	}

	return ret;

	/* Unwind in reverse acquisition order.  NOTE(review): the
	 * max10_fail path falls through into release_spi_tran_dev —
	 * presumably intel_max10_device_remove() does not remove the
	 * transaction device itself; confirm to rule out a double
	 * remove.  Also, all failure paths return -ENODEV even when
	 * ret was set to -EIO — confirm this is intended. */
max10_fail:
	intel_max10_device_remove(fme->max10_dev);
release_spi_tran_dev:
	if (max10->spi_tran_dev)
		spi_transaction_remove(max10->spi_tran_dev);
free_max10:
	opae_free(max10);
release_dev:
	altera_spi_release(spi_master);
	return -ENODEV;
}
1071
fme_spi_uinit(struct ifpga_feature * feature)1072 static void fme_spi_uinit(struct ifpga_feature *feature)
1073 {
1074 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1075
1076 if (fme->max10_dev) {
1077 intel_max10_device_remove(fme->max10_dev);
1078 opae_free(fme->max10_dev);
1079 }
1080 }
1081
/* Feature ops for the directly SPI-attached MAX10 BMC (N3000). */
struct ifpga_feature_ops fme_spi_master_ops = {
	.init = fme_spi_init,
	.uinit = fme_spi_uinit,
};
1086
/**
 * nios_spi_wait_init_done - wait for the A10 NIOS firmware to finish
 * PKVL (retimer) initialization and report the configured FEC mode.
 * @dev: SPI device through which the NIOS registers are accessed.
 *
 * For firmware major version >= 3, kicks off PKVL init with the default
 * FEC mode if it has not been started yet. Then polls NIOS_INIT until
 * NIOS_INIT_DONE is set, with a 10-second overall timeout.
 *
 * Return: 0 on success, -EIO on register access failure,
 * -ETIMEDOUT if init does not complete in time.
 */
static int nios_spi_wait_init_done(struct altera_spi_device *dev)
{
	u32 val = 0;
	/* absolute deadline: now + 10 s, in timer cycles */
	unsigned long timeout = rte_get_timer_cycles() +
			msecs_to_timer_cycles(10000);
	unsigned long ticks;
	int major_version;
	int fecmode = FEC_MODE_NO;

	if (spi_reg_read(dev, NIOS_VERSION, &val))
		return -EIO;

	major_version =
		(val & NIOS_VERSION_MAJOR) >> NIOS_VERSION_MAJOR_SHIFT;
	dev_info(dev, "A10 NIOS FW version %d\n", major_version);

	if (major_version >= 3) {
		/* read NIOS_INIT to check if PKVL INIT done or not */
		if (spi_reg_read(dev, NIOS_INIT, &val))
			return -EIO;

		dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);

		/* check if PKVLs are initialized already */
		if (val & NIOS_INIT_DONE || val & NIOS_INIT_START)
			goto nios_init_done;

		/* start to config the default FEC mode */
		val = fecmode | NIOS_INIT_START;

		if (spi_reg_write(dev, NIOS_INIT, val))
			return -EIO;
	}

nios_init_done:
	/* poll for completion; sleep 100 ms between reads */
	do {
		if (spi_reg_read(dev, NIOS_INIT, &val))
			return -EIO;
		if (val & NIOS_INIT_DONE)
			break;

		ticks = rte_get_timer_cycles();
		if (time_after(ticks, timeout))
			return -ETIMEDOUT;
		msleep(100);
	} while (1);

	/* get the fecmode */
	if (spi_reg_read(dev, NIOS_INIT, &val))
		return -EIO;
	dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);
	fecmode = (val & REQ_FEC_MODE) >> REQ_FEC_MODE_SHIFT;
	dev_info(dev, "fecmode: 0x%x, %s\n", fecmode,
			(fecmode == FEC_MODE_KR) ? "kr" :
			((fecmode == FEC_MODE_RS) ? "rs" : "no"));

	return 0;
}
1145
nios_spi_check_error(struct altera_spi_device * dev)1146 static int nios_spi_check_error(struct altera_spi_device *dev)
1147 {
1148 u32 value = 0;
1149
1150 if (spi_reg_read(dev, PKVL_A_MODE_STS, &value))
1151 return -EIO;
1152
1153 dev_debug(dev, "PKVL A Mode Status 0x%x\n", value);
1154
1155 if (value >= 0x100)
1156 return -EINVAL;
1157
1158 if (spi_reg_read(dev, PKVL_B_MODE_STS, &value))
1159 return -EIO;
1160
1161 dev_debug(dev, "PKVL B Mode Status 0x%x\n", value);
1162
1163 if (value >= 0x100)
1164 return -EINVAL;
1165
1166 return 0;
1167 }
1168
fme_nios_spi_init(struct ifpga_feature * feature)1169 static int fme_nios_spi_init(struct ifpga_feature *feature)
1170 {
1171 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1172 struct altera_spi_device *spi_master;
1173 struct intel_max10_device *max10;
1174 struct ifpga_hw *hw;
1175 struct opae_manager *mgr;
1176 int ret = 0;
1177
1178 hw = fme->parent;
1179 if (!hw)
1180 return -ENODEV;
1181
1182 mgr = hw->adapter->mgr;
1183 if (!mgr)
1184 return -ENODEV;
1185
1186 dev_info(fme, "FME SPI Master (NIOS) Init.\n");
1187 dev_debug(fme, "FME SPI base addr %p.\n",
1188 feature->addr);
1189 dev_debug(fme, "spi param=0x%llx\n",
1190 (unsigned long long)opae_readq(feature->addr + 0x8));
1191
1192 spi_master = altera_spi_alloc(feature->addr, TYPE_NIOS_SPI);
1193 if (!spi_master)
1194 return -ENODEV;
1195 init_spi_share_data(fme, spi_master);
1196
1197 /**
1198 * 1. wait A10 NIOS initial finished and
1199 * release the SPI master to Host
1200 */
1201 if (spi_master->mutex)
1202 pthread_mutex_lock(spi_master->mutex);
1203
1204 ret = nios_spi_wait_init_done(spi_master);
1205 if (ret != 0) {
1206 dev_err(fme, "FME NIOS_SPI init fail\n");
1207 if (spi_master->mutex)
1208 pthread_mutex_unlock(spi_master->mutex);
1209 goto release_dev;
1210 }
1211
1212 dev_info(fme, "FME NIOS_SPI initial done\n");
1213
1214 /* 2. check if error occur? */
1215 if (nios_spi_check_error(spi_master))
1216 dev_info(fme, "NIOS_SPI INIT done, but found some error\n");
1217
1218 if (spi_master->mutex)
1219 pthread_mutex_unlock(spi_master->mutex);
1220
1221 /* 3. init the spi master*/
1222 altera_spi_init(spi_master);
1223
1224 max10 = opae_zmalloc(sizeof(*max10));
1225 if (!max10)
1226 goto release_dev;
1227
1228 max10->spi_master = spi_master;
1229 max10->type = M10_N3000;
1230
1231 max10->spi_tran_dev = spi_transaction_init(spi_master, 0);
1232 if (!max10->spi_tran_dev) {
1233 dev_err(fme, "%s spi tran init fail\n", __func__);
1234 goto free_max10;
1235 }
1236
1237 /* init the max10 device */
1238 ret = intel_max10_device_init(max10);
1239 if (ret) {
1240 ret = -ENODEV;
1241 dev_err(fme, "max10 init fail\n");
1242 goto release_spi_tran_dev;
1243 }
1244
1245 fme->max10_dev = max10;
1246 max10->bus = hw->pci_data->bus;
1247 fme_get_board_interface(fme);
1248 mgr->sensor_list = &max10->opae_sensor_list;
1249
1250 /* SPI self test */
1251 if (spi_self_checking(max10))
1252 goto spi_fail;
1253
1254 ret = init_sec_mgr(fme);
1255 if (ret) {
1256 dev_err(fme, "security manager init fail\n");
1257 goto spi_fail;
1258 }
1259
1260 return ret;
1261
1262 spi_fail:
1263 intel_max10_device_remove(fme->max10_dev);
1264 release_spi_tran_dev:
1265 if (max10->spi_tran_dev)
1266 spi_transaction_remove(max10->spi_tran_dev);
1267 free_max10:
1268 opae_free(max10);
1269 release_dev:
1270 altera_spi_release(spi_master);
1271 return -ENODEV;
1272 }
1273
fme_nios_spi_uinit(struct ifpga_feature * feature)1274 static void fme_nios_spi_uinit(struct ifpga_feature *feature)
1275 {
1276 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1277
1278 release_sec_mgr(fme);
1279 if (fme->max10_dev) {
1280 intel_max10_device_remove(fme->max10_dev);
1281 opae_free(fme->max10_dev);
1282 }
1283 }
1284
/* Feature ops for the MAX10 BMC reached via the NIOS-owned SPI master. */
struct ifpga_feature_ops fme_nios_spi_master_ops = {
	.init = fme_nios_spi_init,
	.uinit = fme_nios_spi_uinit,
};
1289
i2c_mac_rom_test(struct altera_i2c_dev * dev)1290 static int i2c_mac_rom_test(struct altera_i2c_dev *dev)
1291 {
1292 char buf[20];
1293 int ret;
1294 char read_buf[20] = {0,};
1295 const char *string = "1a2b3c4d5e";
1296
1297 opae_memcpy(buf, string, strlen(string));
1298
1299 ret = at24_eeprom_write(dev, AT24512_SLAVE_ADDR, 0,
1300 (u8 *)buf, strlen(string));
1301 if (ret < 0) {
1302 dev_err(NULL, "write i2c error:%d\n", ret);
1303 return ret;
1304 }
1305
1306 ret = at24_eeprom_read(dev, AT24512_SLAVE_ADDR, 0,
1307 (u8 *)read_buf, strlen(string));
1308 if (ret < 0) {
1309 dev_err(NULL, "read i2c error:%d\n", ret);
1310 return ret;
1311 }
1312
1313 if (memcmp(buf, read_buf, strlen(string))) {
1314 dev_info(NULL, "%s test fail!\n", __func__);
1315 return -EFAULT;
1316 }
1317
1318 dev_info(NULL, "%s test successful\n", __func__);
1319
1320 return 0;
1321 }
1322
init_i2c_mutex(struct ifpga_fme_hw * fme)1323 static void init_i2c_mutex(struct ifpga_fme_hw *fme)
1324 {
1325 struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
1326 struct altera_i2c_dev *i2c_dev;
1327 opae_share_data *sd = NULL;
1328
1329 if (fme->i2c_master) {
1330 i2c_dev = (struct altera_i2c_dev *)fme->i2c_master;
1331 if (hw && hw->adapter && hw->adapter->shm.ptr) {
1332 dev_info(NULL, "use multi-process mutex in i2c\n");
1333 sd = (opae_share_data *)hw->adapter->shm.ptr;
1334 i2c_dev->mutex = &sd->i2c_mutex;
1335 } else {
1336 dev_info(NULL, "use multi-thread mutex in i2c\n");
1337 i2c_dev->mutex = &i2c_dev->lock;
1338 }
1339 }
1340 }
1341
fme_i2c_init(struct ifpga_feature * feature)1342 static int fme_i2c_init(struct ifpga_feature *feature)
1343 {
1344 struct feature_fme_i2c *i2c;
1345 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1346
1347 i2c = (struct feature_fme_i2c *)feature->addr;
1348
1349 dev_info(NULL, "FME I2C Master Init.\n");
1350
1351 fme->i2c_master = altera_i2c_probe(i2c);
1352 if (!fme->i2c_master)
1353 return -ENODEV;
1354
1355 init_i2c_mutex(fme);
1356
1357 /* MAC ROM self test */
1358 i2c_mac_rom_test(fme->i2c_master);
1359
1360 return 0;
1361 }
1362
/* Tear down the FME I2C master created by fme_i2c_init(). */
static void fme_i2c_uninit(struct ifpga_feature *feature)
{
	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;

	altera_i2c_remove(fme->i2c_master);
}
1369
/* Feature ops for the FME I2C master (MAC ROM access). */
struct ifpga_feature_ops fme_i2c_master_ops = {
	.init = fme_i2c_init,
	.uinit = fme_i2c_uninit,
};
1374
fme_eth_group_init(struct ifpga_feature * feature)1375 static int fme_eth_group_init(struct ifpga_feature *feature)
1376 {
1377 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1378 struct eth_group_device *dev;
1379
1380 dev = (struct eth_group_device *)eth_group_probe(feature->addr);
1381 if (!dev)
1382 return -ENODEV;
1383
1384 fme->eth_dev[dev->group_id] = dev;
1385
1386 fme->eth_group_region[dev->group_id].addr =
1387 feature->addr;
1388 fme->eth_group_region[dev->group_id].phys_addr =
1389 feature->phys_addr;
1390 fme->eth_group_region[dev->group_id].len =
1391 feature->size;
1392
1393 fme->nums_eth_dev++;
1394
1395 dev_info(NULL, "FME PHY Group %d Init.\n", dev->group_id);
1396 dev_info(NULL, "found %d eth group, addr %p phys_addr 0x%llx len %u\n",
1397 dev->group_id, feature->addr,
1398 (unsigned long long)feature->phys_addr,
1399 feature->size);
1400
1401 return 0;
1402 }
1403
/* Nothing to tear down here; eth group devices are released elsewhere. */
static void fme_eth_group_uinit(struct ifpga_feature *feature)
{
	(void)feature;
}
1408
/* Feature ops for the FME eth (PHY) group feature. */
struct ifpga_feature_ops fme_eth_group_ops = {
	.init = fme_eth_group_init,
	.uinit = fme_eth_group_uinit,
};
1413
fme_mgr_read_mac_rom(struct ifpga_fme_hw * fme,int offset,void * buf,int size)1414 int fme_mgr_read_mac_rom(struct ifpga_fme_hw *fme, int offset,
1415 void *buf, int size)
1416 {
1417 struct altera_i2c_dev *dev;
1418
1419 dev = fme->i2c_master;
1420 if (!dev)
1421 return -ENODEV;
1422
1423 return at24_eeprom_read(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1424 }
1425
fme_mgr_write_mac_rom(struct ifpga_fme_hw * fme,int offset,void * buf,int size)1426 int fme_mgr_write_mac_rom(struct ifpga_fme_hw *fme, int offset,
1427 void *buf, int size)
1428 {
1429 struct altera_i2c_dev *dev;
1430
1431 dev = fme->i2c_master;
1432 if (!dev)
1433 return -ENODEV;
1434
1435 return at24_eeprom_write(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1436 }
1437
/* Look up an eth group device by id. Returns NULL when the id is out
 * of range, no device is registered at that slot, or the device is not
 * in the attached state.
 */
static struct eth_group_device *get_eth_group_dev(struct ifpga_fme_hw *fme,
		u8 group_id)
{
	struct eth_group_device *dev;

	if (group_id >= MAX_ETH_GROUP_DEVICES)
		return NULL;

	dev = (struct eth_group_device *)fme->eth_dev[group_id];
	if (dev && dev->status == ETH_GROUP_DEV_ATTACHED)
		return dev;

	return NULL;
}
1455
/* Return the number of eth group devices registered during FME init. */
int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw *fme)
{
	return fme->nums_eth_dev;
}
1460
/* Fill @info with the speed and MAC/PHY counts of eth group @group_id.
 * Returns 0 on success, -ENODEV when the group is absent or detached.
 */
int fme_mgr_get_eth_group_info(struct ifpga_fme_hw *fme,
		u8 group_id, struct opae_eth_group_info *info)
{
	struct eth_group_device *dev = get_eth_group_dev(fme, group_id);

	if (!dev)
		return -ENODEV;

	info->group_id = group_id;
	info->speed = dev->speed;
	info->nums_of_mac = dev->mac_num;
	info->nums_of_phy = dev->phy_num;

	return 0;
}
1477
/* Read a register of eth group @group_id into @data.
 * Returns -ENODEV when the group is absent or detached, otherwise the
 * eth_group_read_reg() result.
 */
int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw *fme, u8 group_id,
		u8 type, u8 index, u16 addr, u32 *data)
{
	struct eth_group_device *dev = get_eth_group_dev(fme, group_id);

	if (!dev)
		return -ENODEV;

	return eth_group_read_reg(dev, type, index, addr, data);
}
1489
/* Write @data to a register of eth group @group_id.
 * Returns -ENODEV when the group is absent or detached, otherwise the
 * eth_group_write_reg() result.
 */
int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw *fme, u8 group_id,
		u8 type, u8 index, u16 addr, u32 data)
{
	struct eth_group_device *dev = get_eth_group_dev(fme, group_id);

	if (!dev)
		return -ENODEV;

	return eth_group_write_reg(dev, type, index, addr, data);
}
1501
/* Return the link speed of eth group @group_id, or -ENODEV when the
 * group is absent or detached.
 */
static int fme_get_eth_group_speed(struct ifpga_fme_hw *fme,
		u8 group_id)
{
	struct eth_group_device *dev = get_eth_group_dev(fme, group_id);

	return dev ? dev->speed : -ENODEV;
}
1513
/* Fill @info with the retimer topology cached in fme->board_info plus
 * the line-side eth group speed. Returns -ENODEV when no MAX10 device
 * is attached, 0 otherwise.
 */
int fme_mgr_get_retimer_info(struct ifpga_fme_hw *fme,
		struct opae_retimer_info *info)
{
	struct intel_max10_device *dev;

	dev = (struct intel_max10_device *)fme->max10_dev;
	if (!dev)
		return -ENODEV;

	info->nums_retimer = fme->board_info.nums_of_retimer;
	info->ports_per_retimer = fme->board_info.ports_per_retimer;
	info->nums_fvl = fme->board_info.nums_of_fvl;
	info->ports_per_fvl = fme->board_info.ports_per_fvl;

	/* The speed of PKVL is identical the eth group's speed */
	/* NOTE(review): fme_get_eth_group_speed() can return -ENODEV, which
	 * would be stored into support_speed as-is — confirm callers expect
	 * that.
	 */
	info->support_speed = fme_get_eth_group_speed(fme,
			LINE_SIDE_GROUP_ID);

	return 0;
}
1534
/* Read the PKVL link-status bitmap from the MAX10 and report it together
 * with the line-side eth group speed. Returns -ENODEV when no MAX10
 * device is attached, -EINVAL on a failed status read, 0 on success.
 */
int fme_mgr_get_retimer_status(struct ifpga_fme_hw *fme,
		struct opae_retimer_status *status)
{
	struct intel_max10_device *dev;
	unsigned int val;

	dev = (struct intel_max10_device *)fme->max10_dev;
	if (!dev)
		return -ENODEV;

	if (max10_sys_read(dev, PKVL_LINK_STATUS, &val)) {
		dev_err(dev, "%s: read pkvl status fail\n", __func__);
		return -EINVAL;
	}

	/* The speed of PKVL is identical the eth group's speed */
	/* NOTE(review): may be -ENODEV if the line-side group is absent —
	 * confirm callers handle a negative speed.
	 */
	status->speed = fme_get_eth_group_speed(fme,
			LINE_SIDE_GROUP_ID);

	status->line_link_bitmap = val;

	dev_debug(dev, "get retimer status: speed:%d. line_link_bitmap:0x%x\n",
			status->speed,
			status->line_link_bitmap);

	return 0;
}
1562
fme_mgr_get_sensor_value(struct ifpga_fme_hw * fme,struct opae_sensor_info * sensor,unsigned int * value)1563 int fme_mgr_get_sensor_value(struct ifpga_fme_hw *fme,
1564 struct opae_sensor_info *sensor,
1565 unsigned int *value)
1566 {
1567 struct intel_max10_device *dev;
1568
1569 dev = (struct intel_max10_device *)fme->max10_dev;
1570 if (!dev)
1571 return -ENODEV;
1572
1573 if (max10_sys_read(dev, sensor->value_reg, value)) {
1574 dev_err(dev, "%s: read sensor value register 0x%x fail\n",
1575 __func__, sensor->value_reg);
1576 return -EINVAL;
1577 }
1578
1579 *value *= sensor->multiplier;
1580
1581 return 0;
1582 }
1583
fme_pmci_init(struct ifpga_feature * feature)1584 static int fme_pmci_init(struct ifpga_feature *feature)
1585 {
1586 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1587 struct intel_max10_device *max10;
1588 struct ifpga_hw *hw;
1589 struct opae_manager *mgr;
1590 opae_share_data *sd = NULL;
1591 int ret = 0;
1592
1593 hw = fme->parent;
1594 if (!hw || !hw->adapter)
1595 return -ENODEV;
1596
1597 mgr = hw->adapter->mgr;
1598 if (!mgr)
1599 return -ENODEV;
1600
1601 dev_info(fme, "FME PMCI Init.\n");
1602 dev_debug(fme, "FME PMCI base addr %p.\n",
1603 feature->addr);
1604
1605 max10 = opae_zmalloc(sizeof(*max10));
1606 if (!max10)
1607 return -ENOMEM;
1608
1609 max10->type = M10_N6000;
1610 max10->mmio = feature->addr;
1611 if (hw->adapter->shm.ptr) {
1612 sd = (opae_share_data *)hw->adapter->shm.ptr;
1613 max10->bmc_ops.mutex = &sd->spi_mutex;
1614 } else {
1615 max10->bmc_ops.mutex = NULL;
1616 }
1617
1618 /* init the max10 device */
1619 ret = intel_max10_device_init(max10);
1620 if (ret) {
1621 dev_err(fme, "max10 init fail\n");
1622 goto free_max10;
1623 }
1624
1625 fme->max10_dev = max10;
1626 max10->bus = hw->pci_data->bus;
1627 fme_get_board_interface(fme);
1628 mgr->sensor_list = &max10->opae_sensor_list;
1629
1630 ret = init_sec_mgr(fme);
1631 if (ret) {
1632 dev_err(fme, "security manager init fail\n");
1633 goto release_max10;
1634 }
1635
1636 return ret;
1637
1638 release_max10:
1639 intel_max10_device_remove(max10);
1640 free_max10:
1641 opae_free(max10);
1642
1643 return ret;
1644 }
1645
fme_pmci_uinit(struct ifpga_feature * feature)1646 static void fme_pmci_uinit(struct ifpga_feature *feature)
1647 {
1648 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1649
1650 release_sec_mgr(fme);
1651 if (fme->max10_dev) {
1652 intel_max10_device_remove(fme->max10_dev);
1653 opae_free(fme->max10_dev);
1654 }
1655 }
1656
/* Feature ops for the PMCI-attached MAX10 BMC (N6000). */
struct ifpga_feature_ops fme_pmci_ops = {
	.init = fme_pmci_init,
	.uinit = fme_pmci_uinit,
};
1661
/* Read @size bytes at @address from the MAX10 flash into @buf.
 *
 * Fix: guard against a NULL fme->max10_dev (the SPI/PMCI feature may
 * have failed to init), consistent with the other fme_mgr_* accessors.
 *
 * Return: -ENODEV when no MAX10 device is attached, otherwise the
 * opae_read_flash() result.
 */
int fme_mgr_read_flash(struct ifpga_fme_hw *fme, u32 address,
		u32 size, void *buf)
{
	struct intel_max10_device *max10 = fme->max10_dev;

	if (!max10)
		return -ENODEV;

	return opae_read_flash(max10, address, size, buf);
}
1669