xref: /dpdk/drivers/raw/ifpga/base/ifpga_fme_error.c (revision 7c4fe2ad3b1214a6704a373dbaec087a4386c4bb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 
5 #include "ifpga_feature_dev.h"
6 
/* Report the raw FME_ERROR0 CSR contents in @val.  Always returns 0. */
static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_error0 err0;

	err0.csr = readq(&err_regs->fme_err);
	*val = err0.csr;

	return 0;
}
19 
/*
 * Report the first recorded FME error in @val.  Only the err_reg_status
 * field of the CSR is returned, not the whole register.  Always returns 0.
 */
static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_first_error first;

	first.csr = readq(&err_regs->fme_first_err);
	*val = first.err_reg_status;

	return 0;
}
32 
/*
 * Report the next recorded FME error in @val.  As with the first-error
 * register, only the err_reg_status field is exposed.  Always returns 0.
 */
static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_next_error next;

	next.csr = readq(&err_regs->fme_next_err);
	*val = next.err_reg_status;

	return 0;
}
45 
/*
 * Write @val to the FME_ERROR0 register under the FME lock
 * (presumably a write-1-to-clear operation — confirm against the FME
 * register spec).  Always returns 0.
 */
static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);

	spinlock_lock(&fme->lock);
	writeq(val, &err_regs->fme_err);
	spinlock_unlock(&fme->lock);

	return 0;
}
60 
/* Report the global-error feature's revision (from its DFH header). */
static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_header hdr;

	hdr.csr = readq(&err_regs->header);
	*val = hdr.revision;

	return 0;
}
73 
/* Report the raw PCIe0 error CSR in @val.  Always returns 0. */
static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie0_error err;

	err.csr = readq(&err_regs->pcie0_err);
	*val = err.csr;

	return 0;
}
86 
/*
 * Clear the latched PCIe0 error bits.
 *
 * @val must equal the current PCIe0 error CSR value (the caller is expected
 * to have just read it via fme_err_get_pcie0_errors); if the live value has
 * changed in between, nothing is cleared and -EBUSY is returned.
 *
 * The whole sequence runs under the FME lock, with PCIe0 error reporting
 * masked while the clear is performed and unmasked again afterwards.
 * The write of (csr & FME_PCIE0_ERROR_MASK) back to the error register is
 * presumably write-1-to-clear — confirm against the FME register spec.
 */
static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie0_error pcie0_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	/* Mask PCIe0 error reporting while the sticky bits are cleared. */
	writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);

	pcie0_err.csr = readq(&fme_err->pcie0_err);
	if (val != pcie0_err.csr)
		ret = -EBUSY;	/* register changed since the caller read it */
	else
		writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
		       &fme_err->pcie0_err);

	/* Re-enable reporting before releasing the lock. */
	writeq(0UL, &fme_err->pcie0_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}
110 
/* Report the raw PCIe1 error CSR in @val.  Always returns 0. */
static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie1_error err;

	err.csr = readq(&err_regs->pcie1_err);
	*val = err.csr;

	return 0;
}
123 
/*
 * Clear the latched PCIe1 error bits.
 *
 * Mirrors fme_err_set_pcie0_errors for the second PCIe link: @val must
 * match the current PCIe1 error CSR value or -EBUSY is returned and nothing
 * is cleared.  Runs under the FME lock with PCIe1 error reporting masked
 * for the duration of the clear.  The write-back of
 * (csr & FME_PCIE1_ERROR_MASK) is presumably write-1-to-clear — confirm
 * against the FME register spec.
 */
static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie1_error pcie1_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	/* Mask PCIe1 error reporting while the sticky bits are cleared. */
	writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);

	pcie1_err.csr = readq(&fme_err->pcie1_err);
	if (val != pcie1_err.csr)
		ret = -EBUSY;	/* register changed since the caller read it */
	else
		writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
		       &fme_err->pcie1_err);

	/* Re-enable reporting before releasing the lock. */
	writeq(0UL, &fme_err->pcie1_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}
147 
/* Report the raw RAS non-fatal error CSR in @val.  Always returns 0. */
static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_nonfaterror nonfat;

	nonfat.csr = readq(&err_regs->ras_nonfaterr);
	*val = nonfat.csr;

	return 0;
}
160 
/* Report the raw RAS catastrophic/fatal error CSR in @val.  Always 0. */
static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_catfaterror catfat;

	catfat.csr = readq(&err_regs->ras_catfaterr);
	*val = catfat.csr;

	return 0;
}
173 
/*
 * Report the currently programmed error-injection bits in @val,
 * restricted to the valid field via FME_RAS_ERROR_INJ_MASK.
 */
static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
						FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_error_inj inj;

	inj.csr = readq(&err_regs->ras_error_inj);
	*val = inj.csr & FME_RAS_ERROR_INJ_MASK;

	return 0;
}
186 
/*
 * Program the RAS error-injection register with @val.
 *
 * @val must fit inside FME_RAS_ERROR_INJ_MASK; larger values are rejected
 * with -EINVAL before the lock is taken.  Returns 0 on success.
 *
 * Cleanup vs. the previous version: the old code performed a readq() of
 * ras_error_inj whose result was never used (it was either overwritten by
 * @val or discarded on the -EINVAL path), and it validated @val while
 * holding the lock, requiring a second unlock-and-return path.  Both are
 * removed here; the register is fully overwritten, so no read-modify-write
 * is needed.  (Reading this register elsewhere — see
 * fme_err_get_inject_errors — is side-effect free, so dropping the read
 * cannot change hardware state.)
 */
static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_error_inj ras_error_inj;

	/* Reject out-of-range injection masks before taking the lock. */
	if (val > FME_RAS_ERROR_INJ_MASK)
		return -EINVAL;

	ras_error_inj.csr = val;

	spinlock_lock(&fme->lock);
	writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
	spinlock_unlock(&fme->lock);

	return 0;
}
209 
/*
 * Program the default error-masking state for the global error feature:
 * FME_ERROR0 gets its default mask, and the PCIe0/PCIe1/RAS non-fatal/RAS
 * catastrophic mask registers are all written to 0 (presumably 0 = fully
 * unmasked, i.e. errors are reported — confirm against the FME register
 * spec).  Called once from fme_global_error_init().
 */
static void fme_error_enable(struct ifpga_fme_hw *fme)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);

	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
	writeq(0UL, &fme_err->pcie0_err_mask);
	writeq(0UL, &fme_err->pcie1_err_mask);
	writeq(0UL, &fme_err->ras_nonfat_mask);
	writeq(0UL, &fme_err->ras_catfat_mask);
}
222 
fme_global_error_init(struct ifpga_feature * feature)223 static int fme_global_error_init(struct ifpga_feature *feature)
224 {
225 	struct ifpga_fme_hw *fme = feature->parent;
226 
227 	dev_info(NULL, "FME error_module Init.\n");
228 
229 	fme_error_enable(fme);
230 
231 	if (feature->ctx_num)
232 		fme->capability |= FPGA_FME_CAP_ERR_IRQ;
233 
234 	return 0;
235 }
236 
/* Feature-ops uinit hook: nothing to tear down for the error feature. */
static void fme_global_error_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
241 
fme_err_check_seu(struct feature_fme_err * fme_err)242 static int fme_err_check_seu(struct feature_fme_err *fme_err)
243 {
244 	struct feature_fme_error_capability error_cap;
245 
246 	error_cap.csr = readq(&fme_err->fme_err_capability);
247 
248 	return error_cap.seu_support ? 1 : 0;
249 }
250 
/*
 * Read one half of the SEU error-mask register: the high 64 bits when
 * @high is true, the low 64 bits otherwise.  Returns -ENODEV when the
 * hardware does not support SEU, 0 on success.
 */
static int fme_err_get_seu_emr(struct ifpga_fme_hw *fme,
		u64 *val, bool high)
{
	struct feature_fme_err *err_regs =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);

	if (!fme_err_check_seu(err_regs))
		return -ENODEV;

	*val = readq(high ? &err_regs->seu_emr_h : &err_regs->seu_emr_l);

	return 0;
}
268 
fme_err_fme_err_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)269 static int fme_err_fme_err_get_prop(struct ifpga_feature *feature,
270 				    struct feature_prop *prop)
271 {
272 	struct ifpga_fme_hw *fme = feature->parent;
273 	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
274 
275 	switch (id) {
276 	case 0x1: /* ERRORS */
277 		return fme_err_get_errors(fme, &prop->data);
278 	case 0x2: /* FIRST_ERROR */
279 		return fme_err_get_first_error(fme, &prop->data);
280 	case 0x3: /* NEXT_ERROR */
281 		return fme_err_get_next_error(fme, &prop->data);
282 	case 0x5: /* SEU EMR LOW */
283 		return fme_err_get_seu_emr(fme, &prop->data, 0);
284 	case 0x6: /* SEU EMR HIGH */
285 		return fme_err_get_seu_emr(fme, &prop->data, 1);
286 	}
287 
288 	return -ENOENT;
289 }
290 
fme_err_root_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)291 static int fme_err_root_get_prop(struct ifpga_feature *feature,
292 				 struct feature_prop *prop)
293 {
294 	struct ifpga_fme_hw *fme = feature->parent;
295 	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
296 
297 	switch (id) {
298 	case 0x5: /* REVISION */
299 		return fme_err_get_revision(fme, &prop->data);
300 	case 0x6: /* PCIE0_ERRORS */
301 		return fme_err_get_pcie0_errors(fme, &prop->data);
302 	case 0x7: /* PCIE1_ERRORS */
303 		return fme_err_get_pcie1_errors(fme, &prop->data);
304 	case 0x8: /* NONFATAL_ERRORS */
305 		return fme_err_get_nonfatal_errors(fme, &prop->data);
306 	case 0x9: /* CATFATAL_ERRORS */
307 		return fme_err_get_catfatal_errors(fme, &prop->data);
308 	case 0xa: /* INJECT_ERRORS */
309 		return fme_err_get_inject_errors(fme, &prop->data);
310 	case 0xb: /* REVISION*/
311 		return fme_err_get_revision(fme, &prop->data);
312 	}
313 
314 	return -ENOENT;
315 }
316 
fme_global_error_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)317 static int fme_global_error_get_prop(struct ifpga_feature *feature,
318 				     struct feature_prop *prop)
319 {
320 	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
321 	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
322 
323 	/* PROP_SUB is never used */
324 	if (sub != PROP_SUB_UNUSED)
325 		return -ENOENT;
326 
327 	switch (top) {
328 	case ERR_PROP_TOP_FME_ERR:
329 		return fme_err_fme_err_get_prop(feature, prop);
330 	case ERR_PROP_TOP_UNUSED:
331 		return fme_err_root_get_prop(feature, prop);
332 	}
333 
334 	return -ENOENT;
335 }
336 
fme_err_fme_err_set_prop(struct ifpga_feature * feature,struct feature_prop * prop)337 static int fme_err_fme_err_set_prop(struct ifpga_feature *feature,
338 				    struct feature_prop *prop)
339 {
340 	struct ifpga_fme_hw *fme = feature->parent;
341 	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
342 
343 	switch (id) {
344 	case 0x4: /* CLEAR */
345 		return fme_err_set_clear(fme, prop->data);
346 	}
347 
348 	return -ENOENT;
349 }
350 
fme_err_root_set_prop(struct ifpga_feature * feature,struct feature_prop * prop)351 static int fme_err_root_set_prop(struct ifpga_feature *feature,
352 				 struct feature_prop *prop)
353 {
354 	struct ifpga_fme_hw *fme = feature->parent;
355 	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
356 
357 	switch (id) {
358 	case 0x6: /* PCIE0_ERRORS */
359 		return fme_err_set_pcie0_errors(fme, prop->data);
360 	case 0x7: /* PCIE1_ERRORS */
361 		return fme_err_set_pcie1_errors(fme, prop->data);
362 	case 0xa: /* INJECT_ERRORS */
363 		return fme_err_set_inject_errors(fme, prop->data);
364 	}
365 
366 	return -ENOENT;
367 }
368 
fme_global_error_set_prop(struct ifpga_feature * feature,struct feature_prop * prop)369 static int fme_global_error_set_prop(struct ifpga_feature *feature,
370 				     struct feature_prop *prop)
371 {
372 	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
373 	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
374 
375 	/* PROP_SUB is never used */
376 	if (sub != PROP_SUB_UNUSED)
377 		return -ENOENT;
378 
379 	switch (top) {
380 	case ERR_PROP_TOP_FME_ERR:
381 		return fme_err_fme_err_set_prop(feature, prop);
382 	case ERR_PROP_TOP_UNUSED:
383 		return fme_err_root_set_prop(feature, prop);
384 	}
385 
386 	return -ENOENT;
387 }
388 
/*
 * Feature-ops set_irq hook: bind the caller's eventfd to the error
 * interrupt (MSI-X vector 0 of this feature).
 *
 * @irq_set must point to a struct fpga_fme_err_irq_set.  Returns -ENODEV
 * when the init hook did not detect error-IRQ capability, otherwise the
 * result of fpga_msix_set_block().  The binding is serialized with other
 * FME register access via fme->lock.
 */
static int fme_global_err_set_irq(struct ifpga_feature *feature, void *irq_set)
{
	struct fpga_fme_err_irq_set *err_irq_set = irq_set;
	struct ifpga_fme_hw *fme;
	int ret;

	fme = (struct ifpga_fme_hw *)feature->parent;

	if (!(fme->capability & FPGA_FME_CAP_ERR_IRQ))
		return -ENODEV;

	spinlock_lock(&fme->lock);
	ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
	spinlock_unlock(&fme->lock);

	return ret;
}
406 
/* Feature ops table for the FME global-error private feature. */
struct ifpga_feature_ops fme_global_err_ops = {
	.init = fme_global_error_init,
	.uinit = fme_global_error_uinit,
	.get_prop = fme_global_error_get_prop,
	.set_prop = fme_global_error_set_prop,
	.set_irq = fme_global_err_set_irq,
};
414