/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include "ifpga_feature_dev.h"

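/* Read the raw FME_ERROR0 CSR. */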
static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_error0 fme_error0;

	fme_error0.csr = readq(&fme_err->fme_err);
	*val = fme_error0.csr;

	return 0;
}

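/* Return the ERR_REG_STATUS field of the FME first-error CSR. */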
static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_first_error fme_first_err;

	fme_first_err.csr = readq(&fme_err->fme_first_err);
	*val = fme_first_err.err_reg_status;

	return 0;
}

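/* Return the ERR_REG_STATUS field of the FME next-error CSR. */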
static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_next_error fme_next_err;

	fme_next_err.csr = readq(&fme_err->fme_next_err);
	*val = fme_next_err.err_reg_status;

	return 0;
}

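/*
 * Clear FME errors by writing @val back to the FME_ERROR0 CSR under the
 * FME lock (the register is presumably write-1-to-clear).
 */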
static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);

	spinlock_lock(&fme->lock);

	writeq(val, &fme_err->fme_err);

	spinlock_unlock(&fme->lock);

	return 0;
}

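/* Read the revision field from the global error feature header. */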
static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_header header;

	header.csr = readq(&fme_err->header);
	*val = header.revision;

	return 0;
}

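/* Read the raw PCIe0 error CSR. */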
static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie0_error pcie0_err;

	pcie0_err.csr = readq(&fme_err->pcie0_err);
	*val = pcie0_err.csr;

	return 0;
}

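/*
 * Clear PCIe0 errors: mask PCIe0 error reporting, compare the current
 * CSR against the caller's snapshot @val and fail with -EBUSY if new
 * errors arrived meanwhile, otherwise write the recorded errors back
 * (presumably write-1-to-clear), then drop the mask again.
 */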
static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie0_error pcie0_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);

	pcie0_err.csr = readq(&fme_err->pcie0_err);
	if (val != pcie0_err.csr)
		ret = -EBUSY;
	else
		writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
		       &fme_err->pcie0_err);

	writeq(0UL, &fme_err->pcie0_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}

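/* The PCIe1 accessors below mirror the PCIe0 pair above. */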
static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie1_error pcie1_err;

	pcie1_err.csr = readq(&fme_err->pcie1_err);
	*val = pcie1_err.csr;

	return 0;
}

static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie1_error pcie1_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);

	pcie1_err.csr = readq(&fme_err->pcie1_err);
	if (val != pcie1_err.csr)
		ret = -EBUSY;
	else
		writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
		       &fme_err->pcie1_err);

	writeq(0UL, &fme_err->pcie1_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}

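/* Raw reads of the RAS non-fatal and catastrophic/fatal error CSRs. */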
static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_nonfaterror ras_nonfaterr;

	ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
	*val = ras_nonfaterr.csr;

	return 0;
}

static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_catfaterror ras_catfaterr;

	ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
	*val = ras_catfaterr.csr;

	return 0;
}

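/* Read back the active error-injection bits, masked to the valid set. */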
static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_error_inj ras_error_inj;

	ras_error_inj.csr = readq(&fme_err->ras_error_inj);
	*val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;

	return 0;
}

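/*
 * Program error injection; values with bits outside
 * FME_RAS_ERROR_INJ_MASK are rejected with -EINVAL.
 */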
static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_error_inj ras_error_inj;

	spinlock_lock(&fme->lock);
	ras_error_inj.csr = readq(&fme_err->ras_error_inj);

	if (val <= FME_RAS_ERROR_INJ_MASK) {
		ras_error_inj.csr = val;
	} else {
		spinlock_unlock(&fme->lock);
		return -EINVAL;
	}

	writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
	spinlock_unlock(&fme->lock);

	return 0;
}

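/*
 * Unmask error reporting: FME_ERROR0 gets its default mask, all other
 * error sources are unmasked completely.
 */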
static void fme_error_enable(struct ifpga_fme_hw *fme)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);

	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
	writeq(0UL, &fme_err->pcie0_err_mask);
	writeq(0UL, &fme_err->pcie1_err_mask);
	writeq(0UL, &fme_err->ras_nonfat_mask);
	writeq(0UL, &fme_err->ras_catfat_mask);
}

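/*
 * Enable error reporting at feature init; advertise the error IRQ
 * capability when the feature owns interrupt contexts.
 */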
static int fme_global_error_init(struct ifpga_feature *feature)
{
	struct ifpga_fme_hw *fme = feature->parent;

	fme_error_enable(fme);

	if (feature->ctx_num)
		fme->capability |= FPGA_FME_CAP_ERR_IRQ;

	return 0;
}

static void fme_global_error_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}

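/*
 * Return 1 if the error capability CSR advertises SEU (single event
 * upset) support.
 */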
static int fme_err_check_seu(struct feature_fme_err *fme_err)
{
	struct feature_fme_error_capability error_cap;

	error_cap.csr = readq(&fme_err->fme_err_capability);

	return error_cap.seu_support ? 1 : 0;
}

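/* Read the high or low half of the SEU error message register (EMR). */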
static int fme_err_get_seu_emr(struct ifpga_fme_hw *fme,
		u64 *val, bool high)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);

	if (!fme_err_check_seu(fme_err))
		return -ENODEV;

	if (high)
		*val = readq(&fme_err->seu_emr_h);
	else
		*val = readq(&fme_err->seu_emr_l);

	return 0;
}

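/*
 * Property dispatch: prop->prop_id packs PROP_TOP, PROP_SUB and PROP_ID
 * fields. For this feature PROP_SUB must be PROP_SUB_UNUSED; PROP_TOP
 * selects the FME_ERR group or the root group, and PROP_ID picks the
 * register. E.g. PROP_TOP == ERR_PROP_TOP_FME_ERR with PROP_ID == 0x1
 * returns the raw FME_ERROR0 CSR via fme_err_get_errors().
 */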
static int fme_err_fme_err_get_prop(struct ifpga_feature *feature,
				    struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x1: /* ERRORS */
		return fme_err_get_errors(fme, &prop->data);
	case 0x2: /* FIRST_ERROR */
		return fme_err_get_first_error(fme, &prop->data);
	case 0x3: /* NEXT_ERROR */
		return fme_err_get_next_error(fme, &prop->data);
	case 0x5: /* SEU EMR LOW */
		return fme_err_get_seu_emr(fme, &prop->data, 0);
	case 0x6: /* SEU EMR HIGH */
		return fme_err_get_seu_emr(fme, &prop->data, 1);
	}

	return -ENOENT;
}

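/* Read-only properties of the root group (PROP_TOP unused). */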
static int fme_err_root_get_prop(struct ifpga_feature *feature,
				 struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x5: /* REVISION */
		return fme_err_get_revision(fme, &prop->data);
	case 0x6: /* PCIE0_ERRORS */
		return fme_err_get_pcie0_errors(fme, &prop->data);
	case 0x7: /* PCIE1_ERRORS */
		return fme_err_get_pcie1_errors(fme, &prop->data);
	case 0x8: /* NONFATAL_ERRORS */
		return fme_err_get_nonfatal_errors(fme, &prop->data);
	case 0x9: /* CATFATAL_ERRORS */
		return fme_err_get_catfatal_errors(fme, &prop->data);
	case 0xa: /* INJECT_ERRORS */
		return fme_err_get_inject_errors(fme, &prop->data);
	case 0xb: /* REVISION */
		return fme_err_get_revision(fme, &prop->data);
	}

	return -ENOENT;
}

static int fme_global_error_get_prop(struct ifpga_feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

	/* PROP_SUB is never used */
	if (sub != PROP_SUB_UNUSED)
		return -ENOENT;

	switch (top) {
	case ERR_PROP_TOP_FME_ERR:
		return fme_err_fme_err_get_prop(feature, prop);
	case ERR_PROP_TOP_UNUSED:
		return fme_err_root_get_prop(feature, prop);
	}

	return -ENOENT;
}

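/*
 * Write-side dispatch mirrors the read side; in the FME_ERR group only
 * CLEAR (0x4) is writable.
 */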
static int fme_err_fme_err_set_prop(struct ifpga_feature *feature,
				    struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x4: /* CLEAR */
		return fme_err_set_clear(fme, prop->data);
	}

	return -ENOENT;
}

static int fme_err_root_set_prop(struct ifpga_feature *feature,
				 struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x6: /* PCIE0_ERRORS */
		return fme_err_set_pcie0_errors(fme, prop->data);
	case 0x7: /* PCIE1_ERRORS */
		return fme_err_set_pcie1_errors(fme, prop->data);
	case 0xa: /* INJECT_ERRORS */
		return fme_err_set_inject_errors(fme, prop->data);
	}

	return -ENOENT;
}

static int fme_global_error_set_prop(struct ifpga_feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

	/* PROP_SUB is never used */
	if (sub != PROP_SUB_UNUSED)
		return -ENOENT;

	switch (top) {
	case ERR_PROP_TOP_FME_ERR:
		return fme_err_fme_err_set_prop(feature, prop);
	case ERR_PROP_TOP_UNUSED:
		return fme_err_root_set_prop(feature, prop);
	}

	return -ENOENT;
}

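/*
 * Bind an eventfd to the global error interrupt (MSI-X vector 0),
 * provided the error IRQ capability was detected at init time.
 */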
static int fme_global_err_set_irq(struct ifpga_feature *feature, void *irq_set)
{
	struct fpga_fme_err_irq_set *err_irq_set = irq_set;
	struct ifpga_fme_hw *fme;
	int ret;

	fme = (struct ifpga_fme_hw *)feature->parent;

	if (!(fme->capability & FPGA_FME_CAP_ERR_IRQ))
		return -ENODEV;

	spinlock_lock(&fme->lock);
	ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
	spinlock_unlock(&fme->lock);

	return ret;
}

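/* Global error feature ops, consumed by the ifpga feature framework. */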
struct ifpga_feature_ops fme_global_err_ops = {
	.init = fme_global_error_init,
	.uinit = fme_global_error_uinit,
	.get_prop = fme_global_error_get_prop,
	.set_prop = fme_global_error_set_prop,
	.set_irq = fme_global_err_set_irq,
};