xref: /dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.c (revision 089e5ed727a15da2729cfee9b63533dd120bd04c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 
5 #include <sys/ioctl.h>
6 
7 #include "ifpga_feature_dev.h"
8 
9 /*
10  * Enable Port by clear the port soft reset bit, which is set by default.
11  * The AFU is unable to respond to any MMIO access while in reset.
12  * __fpga_port_enable function should only be used after __fpga_port_disable
13  * function.
14  */
15 void __fpga_port_enable(struct ifpga_port_hw *port)
16 {
17 	struct feature_port_header *port_hdr;
18 	struct feature_port_control control;
19 
20 	WARN_ON(!port->disable_count);
21 
22 	if (--port->disable_count != 0)
23 		return;
24 
25 	port_hdr = get_port_feature_ioaddr_by_index(port,
26 						    PORT_FEATURE_ID_HEADER);
27 	WARN_ON(!port_hdr);
28 
29 	control.csr = readq(&port_hdr->control);
30 	control.port_sftrst = 0x0;
31 	writeq(control.csr, &port_hdr->control);
32 }
33 
/*
 * Disable Port by setting the port soft reset bit.
 * Reference counted: only the first disable actually asserts reset;
 * pair every call with __fpga_port_enable().
 *
 * Returns 0 on success, -ETIMEDOUT if HW never acknowledges the reset.
 */
int __fpga_port_disable(struct ifpga_port_hw *port)
{
	struct feature_port_header *port_hdr;
	struct feature_port_control control;

	/* Nested disable: reset is already asserted, nothing to do. */
	if (port->disable_count++ != 0)
		return 0;

	port_hdr = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_HEADER);
	WARN_ON(!port_hdr);

	/* Set port soft reset */
	control.csr = readq(&port_hdr->control);
	control.port_sftrst = 0x1;
	writeq(control.csr, &port_hdr->control);

	/*
	 * HW sets ack bit to 1 when all outstanding requests have been drained
	 * on this port and minimum soft reset pulse width has elapsed.
	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
	 */
	control.port_sftrst_ack = 1;

	/* Poll until HW matches the expected ack value or timeout expires. */
	if (fpga_wait_register_field(port_sftrst_ack, control,
				     &port_hdr->control, RST_POLL_TIMEOUT,
				     RST_POLL_INVL)) {
		dev_err(port, "timeout, fail to reset device\n");
		return -ETIMEDOUT;
	}

	return 0;
}
67 
68 int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
69 {
70 	struct feature_port_header *port_hdr;
71 	u64 guidl, guidh;
72 
73 	if (!uuid)
74 		return -EINVAL;
75 
76 	port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
77 
78 	spinlock_lock(&port->lock);
79 	guidl = readq(&port_hdr->afu_header.guid.b[0]);
80 	guidh = readq(&port_hdr->afu_header.guid.b[8]);
81 	spinlock_unlock(&port->lock);
82 
83 	opae_memcpy(uuid->b, &guidl, sizeof(u64));
84 	opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
85 
86 	return 0;
87 }
88 
89 /* Mask / Unmask Port Errors by the Error Mask register. */
90 void port_err_mask(struct ifpga_port_hw *port, bool mask)
91 {
92 	struct feature_port_error *port_err;
93 	struct feature_port_err_key err_mask;
94 
95 	port_err = get_port_feature_ioaddr_by_index(port,
96 						    PORT_FEATURE_ID_ERROR);
97 
98 	if (mask)
99 		err_mask.csr = PORT_ERR_MASK;
100 	else
101 		err_mask.csr = 0;
102 
103 	writeq(err_mask.csr, &port_err->error_mask);
104 }
105 
106 /* Clear All Port Errors. */
107 int port_err_clear(struct ifpga_port_hw *port, u64 err)
108 {
109 	struct feature_port_header *port_hdr;
110 	struct feature_port_error *port_err;
111 	struct feature_port_err_key mask;
112 	struct feature_port_first_err_key first;
113 	struct feature_port_status status;
114 	int ret = 0;
115 
116 	port_err = get_port_feature_ioaddr_by_index(port,
117 						    PORT_FEATURE_ID_ERROR);
118 	port_hdr = get_port_feature_ioaddr_by_index(port,
119 						    PORT_FEATURE_ID_HEADER);
120 
121 	/*
122 	 * Clear All Port Errors
123 	 *
124 	 * - Check for AP6 State
125 	 * - Halt Port by keeping Port in reset
126 	 * - Set PORT Error mask to all 1 to mask errors
127 	 * - Clear all errors
128 	 * - Set Port mask to all 0 to enable errors
129 	 * - All errors start capturing new errors
130 	 * - Enable Port by pulling the port out of reset
131 	 */
132 
133 	/* If device is still in AP6 state, can not clear any error.*/
134 	status.csr = readq(&port_hdr->status);
135 	if (status.power_state == PORT_POWER_STATE_AP6) {
136 		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
137 		return -EBUSY;
138 	}
139 
140 	/* Halt Port by keeping Port in reset */
141 	ret = __fpga_port_disable(port);
142 	if (ret)
143 		return ret;
144 
145 	/* Mask all errors */
146 	port_err_mask(port, true);
147 
148 	/* Clear errors if err input matches with current port errors.*/
149 	mask.csr = readq(&port_err->port_error);
150 
151 	if (mask.csr == err) {
152 		writeq(mask.csr, &port_err->port_error);
153 
154 		first.csr = readq(&port_err->port_first_error);
155 		writeq(first.csr, &port_err->port_first_error);
156 	} else {
157 		ret = -EBUSY;
158 	}
159 
160 	/* Clear mask */
161 	port_err_mask(port, false);
162 
163 	/* Enable the Port by clear the reset */
164 	__fpga_port_enable(port);
165 
166 	return ret;
167 }
168 
169 int port_clear_error(struct ifpga_port_hw *port)
170 {
171 	struct feature_port_error *port_err;
172 	struct feature_port_err_key error;
173 
174 	port_err = get_port_feature_ioaddr_by_index(port,
175 						    PORT_FEATURE_ID_ERROR);
176 	error.csr = readq(&port_err->port_error);
177 
178 	dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
179 
180 	return port_err_clear(port, error.csr);
181 }
182 
/* FME sub-feature drivers, matched against discovered features by id. */
static struct feature_driver fme_feature_drvs[] = {
	{FEATURE_DRV(FME_FEATURE_ID_HEADER, FME_FEATURE_HEADER,
			&fme_hdr_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_THERMAL_MGMT, FME_FEATURE_THERMAL_MGMT,
			&fme_thermal_mgmt_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_POWER_MGMT, FME_FEATURE_POWER_MGMT,
			&fme_power_mgmt_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_GLOBAL_ERR, FME_FEATURE_GLOBAL_ERR,
			&fme_global_err_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_PR_MGMT, FME_FEATURE_PR_MGMT,
			&fme_pr_mgmt_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_GLOBAL_DPERF, FME_FEATURE_GLOBAL_DPERF,
			&fme_global_dperf_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_HSSI_ETH, FME_FEATURE_HSSI_ETH,
	&fme_hssi_eth_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_EMIF_MGMT, FME_FEATURE_EMIF_MGMT,
	&fme_emif_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_MAX10_SPI, FME_FEATURE_MAX10_SPI,
	&fme_spi_master_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_NIOS_SPI, FME_FEATURE_NIOS_SPI,
	&fme_nios_spi_master_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_I2C_MASTER, FME_FEATURE_I2C_MASTER,
	&fme_i2c_master_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_ETH_GROUP, FME_FEATURE_ETH_GROUP,
	&fme_eth_group_ops),},
	{0, NULL, NULL}, /* end of array */
};
210 
/* Port sub-feature drivers, matched against discovered features by id. */
static struct feature_driver port_feature_drvs[] = {
	{FEATURE_DRV(PORT_FEATURE_ID_HEADER, PORT_FEATURE_HEADER,
			&ifpga_rawdev_port_hdr_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_ERROR, PORT_FEATURE_ERR,
			&ifpga_rawdev_port_error_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_UINT, PORT_FEATURE_UINT,
			&ifpga_rawdev_port_uint_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_STP, PORT_FEATURE_STP,
			&ifpga_rawdev_port_stp_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_UAFU, PORT_FEATURE_UAFU,
			&ifpga_rawdev_port_afu_ops)},
	{0, NULL, NULL}, /* end of array */
};
224 
225 const char *get_fme_feature_name(unsigned int id)
226 {
227 	struct feature_driver *drv = fme_feature_drvs;
228 
229 	while (drv->name) {
230 		if (drv->id == id)
231 			return drv->name;
232 
233 		drv++;
234 	}
235 
236 	return NULL;
237 }
238 
239 const char *get_port_feature_name(unsigned int id)
240 {
241 	struct feature_driver *drv = port_feature_drvs;
242 
243 	while (drv->name) {
244 		if (drv->id == id)
245 			return drv->name;
246 
247 		drv++;
248 	}
249 
250 	return NULL;
251 }
252 
253 static void feature_uinit(struct ifpga_feature_list *list)
254 {
255 	struct ifpga_feature *feature;
256 
257 	TAILQ_FOREACH(feature, list, next) {
258 		if (feature->state != IFPGA_FEATURE_ATTACHED)
259 			continue;
260 		if (feature->ops && feature->ops->uinit)
261 			feature->ops->uinit(feature);
262 	}
263 }
264 
265 static int feature_init(struct feature_driver *drv,
266 		struct ifpga_feature_list *list)
267 {
268 	struct ifpga_feature *feature;
269 	int ret;
270 
271 	while (drv->ops) {
272 		TAILQ_FOREACH(feature, list, next) {
273 			if (feature->state != IFPGA_FEATURE_ATTACHED)
274 				continue;
275 			if (feature->id == drv->id) {
276 				feature->ops = drv->ops;
277 				feature->name = drv->name;
278 				if (feature->ops->init) {
279 					ret = feature->ops->init(feature);
280 					if (ret)
281 						goto error;
282 				}
283 			}
284 		}
285 		drv++;
286 	}
287 
288 	return 0;
289 error:
290 	feature_uinit(list);
291 	return ret;
292 }
293 
294 int fme_hw_init(struct ifpga_fme_hw *fme)
295 {
296 	int ret;
297 
298 	if (fme->state != IFPGA_FME_IMPLEMENTED)
299 		return -ENODEV;
300 
301 	ret = feature_init(fme_feature_drvs, &fme->feature_list);
302 	if (ret)
303 		return ret;
304 
305 	return 0;
306 }
307 
/* Tear down all initialized FME sub-features. */
void fme_hw_uinit(struct ifpga_fme_hw *fme)
{
	feature_uinit(&fme->feature_list);
}
312 
/* Tear down all initialized port sub-features. */
void port_hw_uinit(struct ifpga_port_hw *port)
{
	feature_uinit(&port->feature_list);
}
317 
318 int port_hw_init(struct ifpga_port_hw *port)
319 {
320 	int ret;
321 
322 	if (port->state == IFPGA_PORT_UNUSED)
323 		return 0;
324 
325 	ret = feature_init(port_feature_drvs, &port->feature_list);
326 	if (ret)
327 		goto error;
328 
329 	return 0;
330 error:
331 	port_hw_uinit(port);
332 	return ret;
333 }
334