xref: /dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.c (revision ca6eb0f7c836bcdc8fda8522297776c772b86ca3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 
5 #include <sys/ioctl.h>
6 #include <rte_vfio.h>
7 
8 #include "ifpga_feature_dev.h"
9 
10 /*
11  * Enable Port by clear the port soft reset bit, which is set by default.
12  * The AFU is unable to respond to any MMIO access while in reset.
13  * __fpga_port_enable function should only be used after __fpga_port_disable
14  * function.
15  */
__fpga_port_enable(struct ifpga_port_hw * port)16 void __fpga_port_enable(struct ifpga_port_hw *port)
17 {
18 	struct feature_port_header *port_hdr;
19 	struct feature_port_control control;
20 
21 	WARN_ON(!port->disable_count);
22 
23 	if (--port->disable_count != 0)
24 		return;
25 
26 	port_hdr = get_port_feature_ioaddr_by_index(port,
27 						    PORT_FEATURE_ID_HEADER);
28 	WARN_ON(!port_hdr);
29 
30 	control.csr = readq(&port_hdr->control);
31 	control.port_sftrst = 0x0;
32 	writeq(control.csr, &port_hdr->control);
33 }
34 
/*
 * Disable the Port by asserting the port soft reset bit. Disables nest:
 * only the first call actually asserts reset; subsequent calls just bump
 * the count. Pair each call with __fpga_port_enable.
 *
 * Returns 0 on success, -ETIMEDOUT if HW never acknowledges the reset.
 */
int __fpga_port_disable(struct ifpga_port_hw *port)
{
	struct feature_port_header *port_hdr;
	struct feature_port_control control;

	/* Already disabled by an outer caller: nothing to do but count. */
	if (port->disable_count++ != 0)
		return 0;

	port_hdr = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_HEADER);
	WARN_ON(!port_hdr);

	/* Set port soft reset */
	control.csr = readq(&port_hdr->control);
	control.port_sftrst = 0x1;
	writeq(control.csr, &port_hdr->control);

	/*
	 * HW sets ack bit to 1 when all outstanding requests have been drained
	 * on this port and minimum soft reset pulse width has elapsed.
	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
	 */
	control.port_sftrst_ack = 1;

	if (fpga_wait_register_field(port_sftrst_ack, control,
				     &port_hdr->control, RST_POLL_TIMEOUT,
				     RST_POLL_INVL)) {
		dev_err(port, "timeout, fail to reset FIM port\n");
		return -ETIMEDOUT;
	}

	return 0;
}
68 
fpga_get_afu_uuid(struct ifpga_port_hw * port,struct uuid * uuid)69 int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
70 {
71 	struct feature_port_header *port_hdr;
72 	u64 guidl, guidh;
73 
74 	if (!uuid)
75 		return -EINVAL;
76 
77 	port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
78 
79 	spinlock_lock(&port->lock);
80 	guidl = readq(&port_hdr->afu_header.guid.b[0]);
81 	guidh = readq(&port_hdr->afu_header.guid.b[8]);
82 	spinlock_unlock(&port->lock);
83 
84 	opae_memcpy(uuid->b, &guidl, sizeof(u64));
85 	opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
86 
87 	return 0;
88 }
89 
fpga_get_pr_uuid(struct ifpga_fme_hw * fme,struct uuid * uuid)90 int fpga_get_pr_uuid(struct ifpga_fme_hw *fme, struct uuid *uuid)
91 {
92 	struct feature_fme_pr *fme_pr;
93 	u64 guidl, guidh;
94 
95 	if (!fme || !uuid)
96 		return -EINVAL;
97 
98 	fme_pr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_PR_MGMT);
99 
100 	spinlock_lock(&fme->lock);
101 	guidl = readq(&fme_pr->fme_pr_intfc_id_l);
102 	guidh = readq(&fme_pr->fme_pr_intfc_id_h);
103 	spinlock_unlock(&fme->lock);
104 
105 	opae_memcpy(uuid->b, &guidl, sizeof(u64));
106 	opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
107 
108 	return 0;
109 }
110 
111 /* Mask / Unmask Port Errors by the Error Mask register. */
port_err_mask(struct ifpga_port_hw * port,bool mask)112 void port_err_mask(struct ifpga_port_hw *port, bool mask)
113 {
114 	struct feature_port_error *port_err;
115 	struct feature_port_err_key err_mask;
116 
117 	port_err = get_port_feature_ioaddr_by_index(port,
118 						    PORT_FEATURE_ID_ERROR);
119 
120 	if (mask)
121 		err_mask.csr = PORT_ERR_MASK;
122 	else
123 		err_mask.csr = 0;
124 
125 	writeq(err_mask.csr, &port_err->error_mask);
126 }
127 
128 /* Clear All Port Errors. */
port_err_clear(struct ifpga_port_hw * port,u64 err)129 int port_err_clear(struct ifpga_port_hw *port, u64 err)
130 {
131 	struct feature_port_header *port_hdr;
132 	struct feature_port_error *port_err;
133 	struct feature_port_err_key mask;
134 	struct feature_port_first_err_key first;
135 	struct feature_port_status status;
136 	int ret = 0;
137 
138 	port_err = get_port_feature_ioaddr_by_index(port,
139 						    PORT_FEATURE_ID_ERROR);
140 	port_hdr = get_port_feature_ioaddr_by_index(port,
141 						    PORT_FEATURE_ID_HEADER);
142 
143 	/*
144 	 * Clear All Port Errors
145 	 *
146 	 * - Check for AP6 State
147 	 * - Halt Port by keeping Port in reset
148 	 * - Set PORT Error mask to all 1 to mask errors
149 	 * - Clear all errors
150 	 * - Set Port mask to all 0 to enable errors
151 	 * - All errors start capturing new errors
152 	 * - Enable Port by pulling the port out of reset
153 	 */
154 
155 	/* If device is still in AP6 state, can not clear any error.*/
156 	status.csr = readq(&port_hdr->status);
157 	if (status.power_state == PORT_POWER_STATE_AP6) {
158 		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
159 		return -EBUSY;
160 	}
161 
162 	/* Halt Port by keeping Port in reset */
163 	ret = __fpga_port_disable(port);
164 	if (ret)
165 		return ret;
166 
167 	/* Mask all errors */
168 	port_err_mask(port, true);
169 
170 	/* Clear errors if err input matches with current port errors.*/
171 	mask.csr = readq(&port_err->port_error);
172 
173 	if (mask.csr == err) {
174 		writeq(mask.csr, &port_err->port_error);
175 
176 		first.csr = readq(&port_err->port_first_error);
177 		writeq(first.csr, &port_err->port_first_error);
178 	} else {
179 		ret = -EBUSY;
180 	}
181 
182 	/* Clear mask */
183 	port_err_mask(port, false);
184 
185 	/* Enable the Port by clear the reset */
186 	__fpga_port_enable(port);
187 
188 	return ret;
189 }
190 
port_clear_error(struct ifpga_port_hw * port)191 int port_clear_error(struct ifpga_port_hw *port)
192 {
193 	struct feature_port_error *port_err;
194 	struct feature_port_err_key error;
195 
196 	port_err = get_port_feature_ioaddr_by_index(port,
197 						    PORT_FEATURE_ID_ERROR);
198 	error.csr = readq(&port_err->port_error);
199 
200 	dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
201 
202 	return port_err_clear(port, error.csr);
203 }
204 
205 static struct feature_driver fme_feature_drvs[] = {
206 	{FEATURE_DRV(FME_FEATURE_ID_HEADER, FME_FEATURE_HEADER,
207 			&fme_hdr_ops),},
208 	{FEATURE_DRV(FME_FEATURE_ID_THERMAL_MGMT, FME_FEATURE_THERMAL_MGMT,
209 			&fme_thermal_mgmt_ops),},
210 	{FEATURE_DRV(FME_FEATURE_ID_POWER_MGMT, FME_FEATURE_POWER_MGMT,
211 			&fme_power_mgmt_ops),},
212 	{FEATURE_DRV(FME_FEATURE_ID_GLOBAL_ERR, FME_FEATURE_GLOBAL_ERR,
213 			&fme_global_err_ops),},
214 	{FEATURE_DRV(FME_FEATURE_ID_PR_MGMT, FME_FEATURE_PR_MGMT,
215 			&fme_pr_mgmt_ops),},
216 	{FEATURE_DRV(FME_FEATURE_ID_GLOBAL_DPERF, FME_FEATURE_GLOBAL_DPERF,
217 			&fme_global_dperf_ops),},
218 	{FEATURE_DRV(FME_FEATURE_ID_HSSI_ETH, FME_FEATURE_HSSI_ETH,
219 	&fme_hssi_eth_ops),},
220 	{FEATURE_DRV(FME_FEATURE_ID_EMIF_MGMT, FME_FEATURE_EMIF_MGMT,
221 	&fme_emif_ops),},
222 	{FEATURE_DRV(FME_FEATURE_ID_MAX10_SPI, FME_FEATURE_MAX10_SPI,
223 	&fme_spi_master_ops),},
224 	{FEATURE_DRV(FME_FEATURE_ID_NIOS_SPI, FME_FEATURE_NIOS_SPI,
225 	&fme_nios_spi_master_ops),},
226 	{FEATURE_DRV(FME_FEATURE_ID_I2C_MASTER, FME_FEATURE_I2C_MASTER,
227 	&fme_i2c_master_ops),},
228 	{FEATURE_DRV(FME_FEATURE_ID_ETH_GROUP, FME_FEATURE_ETH_GROUP,
229 	&fme_eth_group_ops),},
230 	{FEATURE_DRV(FME_FEATURE_ID_PMCI, FME_FEATURE_PMCI,
231 	&fme_pmci_ops),},
232 	{0, NULL, NULL}, /* end of arrary */
233 };
234 
/* Feature drivers for Port private features, matched by feature ID. */
static struct feature_driver port_feature_drvs[] = {
	{FEATURE_DRV(PORT_FEATURE_ID_HEADER, PORT_FEATURE_HEADER,
			&ifpga_rawdev_port_hdr_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_ERROR, PORT_FEATURE_ERR,
			&ifpga_rawdev_port_error_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_UINT, PORT_FEATURE_UINT,
			&ifpga_rawdev_port_uint_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_STP, PORT_FEATURE_STP,
			&ifpga_rawdev_port_stp_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_UAFU, PORT_FEATURE_UAFU,
			&ifpga_rawdev_port_afu_ops)},
	{0, NULL, NULL}, /* end of array */
};
248 
get_fme_feature_name(unsigned int id)249 const char *get_fme_feature_name(unsigned int id)
250 {
251 	struct feature_driver *drv = fme_feature_drvs;
252 
253 	while (drv->name) {
254 		if (drv->id == id)
255 			return drv->name;
256 
257 		drv++;
258 	}
259 
260 	return NULL;
261 }
262 
get_port_feature_name(unsigned int id)263 const char *get_port_feature_name(unsigned int id)
264 {
265 	struct feature_driver *drv = port_feature_drvs;
266 
267 	while (drv->name) {
268 		if (drv->id == id)
269 			return drv->name;
270 
271 		drv++;
272 	}
273 
274 	return NULL;
275 }
276 
feature_uinit(struct ifpga_feature_list * list)277 static void feature_uinit(struct ifpga_feature_list *list)
278 {
279 	struct ifpga_feature *feature;
280 
281 	TAILQ_FOREACH(feature, list, next) {
282 		if (feature->state != IFPGA_FEATURE_INITED)
283 			continue;
284 		if (feature->ops && feature->ops->uinit)
285 			feature->ops->uinit(feature);
286 		feature->state = IFPGA_FEATURE_ATTACHED;
287 	}
288 }
289 
feature_init(struct feature_driver * drv,struct ifpga_feature_list * list)290 static int feature_init(struct feature_driver *drv,
291 		struct ifpga_feature_list *list)
292 {
293 	struct ifpga_feature *feature;
294 	int ret;
295 
296 	while (drv->ops) {
297 		TAILQ_FOREACH(feature, list, next) {
298 			if (feature->state != IFPGA_FEATURE_ATTACHED)
299 				continue;
300 			if (feature->id == drv->id) {
301 				feature->ops = drv->ops;
302 				feature->name = drv->name;
303 				if (feature->ops->init) {
304 					ret = feature->ops->init(feature);
305 					if (ret)
306 						goto error;
307 					else
308 						feature->state =
309 							IFPGA_FEATURE_INITED;
310 				}
311 			}
312 		}
313 		drv++;
314 	}
315 
316 	return 0;
317 error:
318 	feature_uinit(list);
319 	return ret;
320 }
321 
fme_hw_init(struct ifpga_fme_hw * fme)322 int fme_hw_init(struct ifpga_fme_hw *fme)
323 {
324 	if (fme->state == IFPGA_FME_IMPLEMENTED)
325 		return feature_init(fme_feature_drvs, &fme->feature_list);
326 
327 	return 0;
328 }
329 
/* Tear down all initialized FME sub-features. */
void fme_hw_uinit(struct ifpga_fme_hw *fme)
{
	feature_uinit(&fme->feature_list);
}
334 
/* Tear down all initialized Port sub-features. */
void port_hw_uinit(struct ifpga_port_hw *port)
{
	feature_uinit(&port->feature_list);
}
339 
port_hw_init(struct ifpga_port_hw * port)340 int port_hw_init(struct ifpga_port_hw *port)
341 {
342 	int ret;
343 
344 	if (port->state == IFPGA_PORT_UNUSED)
345 		return 0;
346 
347 	ret = feature_init(port_feature_drvs, &port->feature_list);
348 	if (ret)
349 		goto error;
350 
351 	return 0;
352 error:
353 	port_hw_uinit(port);
354 	return ret;
355 }
356 
357 #define FPGA_MAX_MSIX_VEC_COUNT	128
358 /* irq set buffer length for interrupt */
359 #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
360 				sizeof(int) * FPGA_MAX_MSIX_VEC_COUNT)
361 
/* Only MSI-X interrupts are supported for now. */
/*
 * Arm MSI-X vectors [vec_start, vec_start + count) with the eventfds in
 * @fds via VFIO_DEVICE_SET_IRQS. Returns the ioctl result (0 on success).
 */
static int vfio_msix_enable_block(s32 vfio_dev_fd, unsigned int vec_start,
				  unsigned int count, s32 *fds)
{
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/*
	 * Clamp 'count' BEFORE it is used: the original clamped only the
	 * ioctl header while the opae_memcpy below used the raw value,
	 * overflowing irq_set_buf for count > FPGA_MAX_MSIX_VEC_COUNT.
	 */
	if (count > FPGA_MAX_MSIX_VEC_COUNT)
		count = FPGA_MAX_MSIX_VEC_COUNT;

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = len;
	/* 0 < irq_set->count <= FPGA_MAX_MSIX_VEC_COUNT */
	irq_set->count = count ? count : 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
				VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = vec_start;

	/* Eventfds follow the header; copy only the clamped count. */
	fd_ptr = (int *)&irq_set->data;
	opae_memcpy(fd_ptr, fds, sizeof(int) * count);

	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		printf("Error enabling MSI-X interrupts\n");

	return ret;
}
393 
/*
 * Attach eventfds to a contiguous block of a feature's interrupt
 * contexts [start, start + count) and enable the corresponding MSI-X
 * vectors. Returns 0 on success, -EINVAL on out-of-range arguments,
 * or the VFIO ioctl error.
 */
int fpga_msix_set_block(struct ifpga_feature *feature, unsigned int start,
			unsigned int count, s32 *fds)
{
	struct feature_irq_ctx *ctx = feature->ctx;
	unsigned int i;
	int ret;

	/* Wrap-safe form of 'start + count > ctx_num'. */
	if (start >= feature->ctx_num || count > feature->ctx_num - start)
		return -EINVAL;

	/* assume that each feature has continuous vector space in msix*/
	ret = vfio_msix_enable_block(feature->vfio_dev_fd,
				     ctx[start].idx, count, fds);
	if (!ret) {
		/*
		 * Record eventfds against the contexts actually armed;
		 * the original indexed from 0 and ignored 'start'.
		 */
		for (i = 0; i < count; i++)
			ctx[start + i].eventfd = fds[i];
	}

	return ret;
}
414