xref: /spdk/module/accel/ioat/accel_ioat.c (revision 307b8c112ffd90a26d53dd15fad67bd9038ef526)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#include "accel_ioat.h"

#include "spdk/stdinc.h"

#include "spdk_internal/accel_module.h"
#include "spdk/log.h"

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/thread.h"
#include "spdk/ioat.h"

static bool g_ioat_enable = false;
static bool g_ioat_initialized = false;

struct ioat_device {
	struct spdk_ioat_chan *ioat;
	bool is_allocated;
	/** linked list pointer for device list */
	TAILQ_ENTRY(ioat_device) tailq;
};

struct pci_device {
	struct spdk_pci_device *pci_dev;
	TAILQ_ENTRY(pci_device) tailq;
};

static TAILQ_HEAD(, ioat_device) g_devices = TAILQ_HEAD_INITIALIZER(g_devices);
static pthread_mutex_t g_ioat_mutex = PTHREAD_MUTEX_INITIALIZER;

static TAILQ_HEAD(, pci_device) g_pci_devices = TAILQ_HEAD_INITIALIZER(g_pci_devices);

struct ioat_io_channel {
	struct spdk_ioat_chan		*ioat_ch;
	struct ioat_device		*ioat_dev;
	struct spdk_poller		*poller;
};

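/* Reserve an idle I/OAT channel from the global device list; returns NULL if none are free. */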
static struct ioat_device *
ioat_allocate_device(void)
{
	struct ioat_device *dev;

	pthread_mutex_lock(&g_ioat_mutex);
	TAILQ_FOREACH(dev, &g_devices, tailq) {
		if (!dev->is_allocated) {
			dev->is_allocated = true;
			pthread_mutex_unlock(&g_ioat_mutex);
			return dev;
		}
	}
	pthread_mutex_unlock(&g_ioat_mutex);

	return NULL;
}

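/* Return a previously reserved channel to the pool. */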
static void
ioat_free_device(struct ioat_device *dev)
{
	pthread_mutex_lock(&g_ioat_mutex);
	dev->is_allocated = false;
	pthread_mutex_unlock(&g_ioat_mutex);
}

static int accel_ioat_init(void);
static void accel_ioat_exit(void *ctx);
static bool ioat_supports_opcode(enum accel_opcode opc);
static struct spdk_io_channel *ioat_get_io_channel(void);
static int ioat_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task);

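/* Size of the per-task context the accel framework allocates for this module. */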
static size_t
accel_ioat_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static struct spdk_accel_module_if g_ioat_module = {
	.module_init		= accel_ioat_init,
	.module_fini		= accel_ioat_exit,
	.write_config_json	= NULL,
	.get_ctx_size		= accel_ioat_get_ctx_size,
	.name			= "ioat",
	.supports_opcode	= ioat_supports_opcode,
	.get_io_channel		= ioat_get_io_channel,
	.submit_tasks		= ioat_submit_tasks
};

SPDK_ACCEL_MODULE_REGISTER(ioat, &g_ioat_module)

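/* Per-descriptor completion callback: report the finished task back to the accel framework. */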
static void
ioat_done(void *cb_arg)
{
	struct spdk_accel_task *accel_task = cb_arg;

	spdk_accel_task_complete(accel_task, 0);
}

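/* Channel poller: process completions on the I/OAT channel. */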
static int
ioat_poll(void *arg)
{
	struct spdk_ioat_chan *chan = arg;

	return spdk_ioat_process_events(chan) != 0 ? SPDK_POLLER_BUSY :
	       SPDK_POLLER_IDLE;
}

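/* The DMA engine only accelerates copy and fill operations. */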
static bool
ioat_supports_opcode(enum accel_opcode opc)
{
	if (!g_ioat_initialized) {
		return false;
	}

	switch (opc) {
	case ACCEL_OPC_COPY:
	case ACCEL_OPC_FILL:
		return true;
	default:
		return false;
	}
}

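/*
 * Build a DMA descriptor for every task on the submitted chain, completing any task
 * whose descriptor cannot be built, then flush the batch to hardware.
 */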
static int
ioat_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	if (accel_task->flags == ACCEL_FLAG_PERSISTENT) {
		SPDK_ERRLOG("IOAT does not support durable destinations.\n");
		return -EINVAL;
	}

	do {
		switch (accel_task->op_code) {
		case ACCEL_OPC_FILL:
			rc = spdk_ioat_build_fill(ioat_ch->ioat_ch, accel_task, ioat_done,
						  accel_task->dst, accel_task->fill_pattern, accel_task->nbytes);
			break;
		case ACCEL_OPC_COPY:
			rc = spdk_ioat_build_copy(ioat_ch->ioat_ch, accel_task, ioat_done,
						  accel_task->dst, accel_task->src, accel_task->nbytes);
			break;
		default:
			assert(false);
			break;
		}

		tmp = TAILQ_NEXT(accel_task, link);

		/* Report any build errors via the callback now. */
		if (rc) {
			spdk_accel_task_complete(accel_task, rc);
		}

		accel_task = tmp;
	} while (accel_task);

	spdk_ioat_flush(ioat_ch->ioat_ch);

	return 0;
}

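/* io_channel create callback: bind a free I/OAT channel to this channel and start its poller. */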
static int
ioat_create_cb(void *io_device, void *ctx_buf)
{
	struct ioat_io_channel *ch = ctx_buf;
	struct ioat_device *ioat_dev;

	ioat_dev = ioat_allocate_device();
	if (ioat_dev == NULL) {
		return -1;
	}

	ch->ioat_dev = ioat_dev;
	ch->ioat_ch = ioat_dev->ioat;
	ch->poller = SPDK_POLLER_REGISTER(ioat_poll, ch->ioat_ch, 0);

	return 0;
}

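/* io_channel destroy callback: release the channel and stop its poller. */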
static void
ioat_destroy_cb(void *io_device, void *ctx_buf)
{
	struct ioat_io_channel *ch = ctx_buf;

	ioat_free_device(ch->ioat_dev);
	spdk_poller_unregister(&ch->poller);
}

static struct spdk_io_channel *
ioat_get_io_channel(void)
{
	return spdk_get_io_channel(&g_ioat_module);
}

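/* Probe callback: record each matching PCI device and claim it for this process. */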
static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);
	struct pci_device *pdev;

	SPDK_INFOLOG(accel_ioat,
		     " Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
		     pci_addr.domain,
		     pci_addr.bus,
		     pci_addr.dev,
		     pci_addr.func,
		     spdk_pci_device_get_vendor_id(pci_dev),
		     spdk_pci_device_get_device_id(pci_dev));

	pdev = calloc(1, sizeof(*pdev));
	if (pdev == NULL) {
		return false;
	}
	pdev->pci_dev = pci_dev;
	TAILQ_INSERT_TAIL(&g_pci_devices, pdev, tailq);

	/* Claim the device to avoid conflicts with other processes. */
	if (spdk_pci_device_claim(pci_dev) < 0) {
		return false;
	}

	return true;
}

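/* Attach callback: track each attached channel so io_channels can hand them out later. */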
static void
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_ioat_chan *ioat)
{
	struct ioat_device *dev;

	dev = calloc(1, sizeof(*dev));
	if (dev == NULL) {
		SPDK_ERRLOG("Failed to allocate device struct\n");
		return;
	}

	dev->ioat = ioat;
	TAILQ_INSERT_TAIL(&g_devices, dev, tailq);
}

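/* Opt in to I/OAT probing; must be called before module initialization runs. */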
void
accel_ioat_enable_probe(void)
{
	g_ioat_enable = true;
}

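/* Module init: probe for I/OAT channels and register the io_device if any were found. */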
static int
accel_ioat_init(void)
{
	if (!g_ioat_enable) {
		return 0;
	}

	if (spdk_ioat_probe(NULL, probe_cb, attach_cb) != 0) {
		SPDK_ERRLOG("spdk_ioat_probe() failed\n");
		return -1;
	}

	if (TAILQ_EMPTY(&g_devices)) {
		SPDK_NOTICELOG("No available ioat devices\n");
		return -1;
	}

	g_ioat_initialized = true;
	SPDK_NOTICELOG("Accel framework IOAT module initialized.\n");
	spdk_io_device_register(&g_ioat_module, ioat_create_cb, ioat_destroy_cb,
				sizeof(struct ioat_io_channel), "ioat_accel_module");
	return 0;
}

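/* Runs once the io_device is unregistered: detach every channel and PCI device, then finish the module. */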
static void
_device_unregister_cb(void *io_device)
{
	struct ioat_device *dev = io_device;
	struct pci_device *pci_dev;

	while (!TAILQ_EMPTY(&g_devices)) {
		dev = TAILQ_FIRST(&g_devices);
		TAILQ_REMOVE(&g_devices, dev, tailq);
		spdk_ioat_detach(dev->ioat);
		free(dev);
	}

	while (!TAILQ_EMPTY(&g_pci_devices)) {
		pci_dev = TAILQ_FIRST(&g_pci_devices);
		TAILQ_REMOVE(&g_pci_devices, pci_dev, tailq);
		spdk_pci_device_detach(pci_dev->pci_dev);
		free(pci_dev);
	}

	g_ioat_initialized = false;

	spdk_accel_module_finish();
}

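/* Module fini: tear down the io_device if it was registered, otherwise finish immediately. */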
static void
accel_ioat_exit(void *ctx)
{
	if (g_ioat_initialized) {
		spdk_io_device_unregister(&g_ioat_module, _device_unregister_cb);
	} else {
		spdk_accel_module_finish();
	}
}

SPDK_LOG_REGISTER_COMPONENT(accel_ioat)