xref: /spdk/lib/ioat/ioat.c (revision 510f4c134a21b45ff3a5add9ebc6c6cf7e49aeab)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "ioat_internal.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"

#include "spdk/log.h"

struct ioat_driver {
	pthread_mutex_t			lock;
	TAILQ_HEAD(, spdk_ioat_chan)	attached_chans;
};

static struct ioat_driver g_ioat_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.attached_chans = TAILQ_HEAD_INITIALIZER(g_ioat_driver.attached_chans),
};

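/*
 * Register access helpers. CHANSTS (channel status) and the CHANCMP/CHAINADDR
 * address registers are 64 bits wide and go through the 8-byte MMIO wrappers;
 * CHANCMD is a single byte and is written directly.
 */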
static uint64_t
ioat_get_chansts(struct spdk_ioat_chan *ioat)
{
	return spdk_mmio_read_8(&ioat->regs->chansts);
}

static void
ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chancmp, addr);
}

static void
ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
}

static inline void
ioat_suspend(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}

static inline void
ioat_reset(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}

static inline uint32_t
ioat_reset_pending(struct spdk_ioat_chan *ioat)
{
	uint8_t cmd;

	cmd = ioat->regs->chancmd;
	return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}

static int
ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
{
	int regs_bar, rc;
	void *addr;
	uint64_t phys_addr, size;

	regs_bar = 0;
	rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n",
			    rc);
		return -1;
	}

	ioat->regs = (volatile struct spdk_ioat_registers *)addr;

	return 0;
}
84 
85 static int
86 ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
87 {
88 	int rc = 0;
89 	void *addr = (void *)ioat->regs;
90 
91 	if (addr) {
92 		rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
93 	}
94 	return rc;
95 }
96 
97 
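/*
 * Descriptor ring accounting. The ring holds (1 << ring_size_order) entries;
 * head counts descriptors submitted by software and tail counts descriptors
 * that have completed. One slot is always left unused so a full ring can be
 * distinguished from an empty one.
 */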
static inline uint32_t
ioat_get_active(struct spdk_ioat_chan *ioat)
{
	return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}

static inline uint32_t
ioat_get_ring_space(struct spdk_ioat_chan *ioat)
{
	return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}

static uint32_t
ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return index & ((1 << ioat->ring_size_order) - 1);
}

static void
ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
		    struct ioat_descriptor **desc,
		    union spdk_ioat_hw_desc **hw_desc)
{
	uint32_t i = ioat_get_ring_index(ioat, index);

	*desc = &ioat->ring[i];
	*hw_desc = &ioat->hw_ring[i];
}

static void
ioat_submit_single(struct spdk_ioat_chan *ioat)
{
	ioat->head++;
}

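/*
 * Hand any newly built descriptors to the hardware: request a completion
 * address update on the most recently prepared descriptor, then write the
 * current head count to DMACOUNT so the channel starts processing.
 */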
void
spdk_ioat_flush(struct spdk_ioat_chan *ioat)
{
	uint32_t index = ioat_get_ring_index(ioat, ioat->head - 1);
	union spdk_ioat_hw_desc *hw_desc;

	hw_desc = &ioat->hw_ring[index];
	hw_desc->dma.u.control.completion_update = 1;
	ioat->regs->dmacount = (uint16_t)ioat->head;
}

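/*
 * Build a "null" descriptor (a copy operation with the null bit set) that
 * moves no data. It is used to start the channel and to complete zero-length
 * requests so their callbacks still fire.
 */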
static struct ioat_descriptor *
ioat_prep_null(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.null = 1;

	hw_desc->dma.size = 8;
	hw_desc->dma.src_addr = 0;
	hw_desc->dma.dest_addr = 0;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

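/*
 * Build a single hardware copy descriptor for a physically contiguous region
 * of at most max_xfer_size bytes. Returns NULL if the ring is full; the
 * caller attaches the completion callback to the last descriptor of a request.
 */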
static struct ioat_descriptor *
ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t src, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;

	hw_desc->dma.size = len;
	hw_desc->dma.src_addr = src;
	hw_desc->dma.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

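/*
 * Build a single hardware block-fill descriptor that replicates the 64-bit
 * fill_pattern across a physically contiguous region of at most
 * max_xfer_size bytes. Returns NULL if the ring is full.
 */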
static struct ioat_descriptor *
ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t fill_pattern, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->fill.u.control_raw = 0;
	hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;

	hw_desc->fill.size = len;
	hw_desc->fill.src_data = fill_pattern;
	hw_desc->fill.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

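/*
 * Quiesce and reset the channel: suspend it if it is active or idle, wait up
 * to 20 ms for the suspend to take effect, clear CHANERR (and, on hardware
 * older than version 3.3, the internal error register in PCI config space),
 * then issue a reset and wait up to another 20 ms for it to complete.
 */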
static int
ioat_reset_hw(struct spdk_ioat_chan *ioat)
{
	int timeout;
	uint64_t status;
	uint32_t chanerr;
	int rc;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_suspend(ioat);
	}

	timeout = 20; /* in milliseconds */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for suspend\n");
			return -1;
		}
		status = ioat_get_chansts(ioat);
	}

	/*
	 * Clear any outstanding errors.
	 * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
	 */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	if (ioat->regs->cbver < SPDK_IOAT_VER_3_3) {
		rc = spdk_pci_device_cfg_read32(ioat->device, &chanerr,
						SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
		if (rc) {
			SPDK_ERRLOG("failed to read the internal channel error register\n");
			return -1;
		}

		spdk_pci_device_cfg_write32(ioat->device, chanerr,
					    SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
	}

	ioat_reset(ioat);

	timeout = 20;
	while (ioat_reset_pending(ioat)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for reset\n");
			return -1;
		}
	}

	return 0;
}

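/*
 * Poll for completions. The hardware writes the channel status, including the
 * physical address of the last completed descriptor, to *comp_update; walk
 * the ring from tail, invoking callbacks, until that descriptor is reached.
 * Returns the number of completed descriptors, 0 if nothing new has
 * completed, or -1 if the channel has halted on an error.
 */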
static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	uint64_t status, completed_descriptor, hw_desc_phys_addr, events_count = 0;
	uint32_t tail;

	if (ioat->head == ioat->tail) {
		return 0;
	}

	status = *ioat->comp_update;
	completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (is_ioat_halted(status)) {
		SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
		return -1;
	}

	if (completed_descriptor == ioat->last_seen) {
		return 0;
	}

	do {
		tail = ioat_get_ring_index(ioat, ioat->tail);
		desc = &ioat->ring[tail];

		if (desc->callback_fn) {
			desc->callback_fn(desc->callback_arg);
		}

		hw_desc_phys_addr = desc->phys_addr;
		ioat->tail++;
		events_count++;
	} while (hw_desc_phys_addr != completed_descriptor);

	ioat->last_seen = hw_desc_phys_addr;

	return events_count;
}

static void
ioat_channel_destruct(struct spdk_ioat_chan *ioat)
{
	ioat_unmap_pci_bar(ioat);

	if (ioat->ring) {
		free(ioat->ring);
	}

	if (ioat->hw_ring) {
		spdk_free(ioat->hw_ring);
	}

	if (ioat->comp_update) {
		spdk_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
	}
}

uint32_t
spdk_ioat_get_max_descriptors(struct spdk_ioat_chan *ioat)
{
	return 1 << ioat->ring_size_order;
}

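/*
 * Bring up a channel: map BAR 0, verify the hardware is at least version 3.0,
 * record capabilities and the maximum transfer size, allocate the completion
 * update area and the descriptor rings, link the hardware descriptors into a
 * circular chain, reset the channel, program CHANCMP/CHAINADDR, and submit a
 * null descriptor, waiting up to 10 ms for the channel to go idle.
 */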
static int
ioat_channel_start(struct spdk_ioat_chan *ioat)
{
	uint8_t xfercap, version;
	uint64_t status;
	int i, num_descriptors;
	uint64_t comp_update_bus_addr = 0;
	uint64_t phys_addr;

	if (ioat_map_pci_bar(ioat) != 0) {
		SPDK_ERRLOG("ioat_map_pci_bar() failed\n");
		return -1;
	}

	version = ioat->regs->cbver;
	if (version < SPDK_IOAT_VER_3_0) {
		SPDK_ERRLOG("unsupported IOAT version %u.%u\n",
			    version >> 4, version & 0xF);
		return -1;
	}

	/* Always support DMA copy */
	ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
	if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
		ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
	}
	xfercap = ioat->regs->xfercap;

	/* Only bits [4:0] are valid. */
	xfercap &= 0x1f;
	if (xfercap == 0) {
		/* 0 means 4 GB max transfer size. */
		ioat->max_xfer_size = 1ULL << 32;
	} else if (xfercap < 12) {
		/* XFERCAP must be at least 12 (4 KB) according to the spec. */
		SPDK_ERRLOG("invalid XFERCAP value %u\n", xfercap);
		return -1;
	} else {
		ioat->max_xfer_size = 1U << xfercap;
	}

	ioat->comp_update = spdk_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
					 NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (ioat->comp_update == NULL) {
		return -1;
	}

	comp_update_bus_addr = spdk_vtophys((void *)ioat->comp_update, NULL);
	if (comp_update_bus_addr == SPDK_VTOPHYS_ERROR) {
		return -1;
	}

	ioat->ring_size_order = IOAT_DEFAULT_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
	if (!ioat->ring) {
		return -1;
	}

	ioat->hw_ring = spdk_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
				     NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!ioat->hw_ring) {
		return -1;
	}

	for (i = 0; i < num_descriptors; i++) {
		phys_addr = spdk_vtophys(&ioat->hw_ring[i], NULL);
		if (phys_addr == SPDK_VTOPHYS_ERROR) {
			SPDK_ERRLOG("Failed to translate descriptor %u to physical address\n", i);
			return -1;
		}

		ioat->ring[i].phys_addr = phys_addr;
		ioat->hw_ring[ioat_get_ring_index(ioat, i - 1)].generic.next = phys_addr;
	}

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	ioat_reset_hw(ioat);

	ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	ioat_write_chancmp(ioat, comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0].phys_addr);

	ioat_prep_null(ioat);
	spdk_ioat_flush(ioat);

	i = 100;
	while (i-- > 0) {
		spdk_delay_us(100);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status)) {
			break;
		}
	}

	if (is_ioat_idle(status)) {
		ioat_process_channel_events(ioat);
	} else {
		SPDK_ERRLOG("could not start channel: status = %p, error = %#x\n",
			    (void *)status, ioat->regs->chanerr);
		return -1;
	}

	return 0;
}

/*
 * Allocate and start a channel for the given PCI device.
 * Caller must hold g_ioat_driver.lock.
 */
static struct spdk_ioat_chan *
ioat_attach(struct spdk_pci_device *device)
{
	struct spdk_ioat_chan *ioat;
	uint32_t cmd_reg;

	ioat = calloc(1, sizeof(struct spdk_ioat_chan));
	if (ioat == NULL) {
		return NULL;
	}

	/* Enable PCI bus mastering (bit 2 of the command register at config offset 4). */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	ioat->device = device;

	if (ioat_channel_start(ioat) != 0) {
		ioat_channel_destruct(ioat);
		free(ioat);
		return NULL;
	}

	return ioat;
}

struct ioat_enum_ctx {
	spdk_ioat_probe_cb probe_cb;
	spdk_ioat_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_ioat_driver.lock */
static int
ioat_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct ioat_enum_ctx *enum_ctx = ctx;
	struct spdk_ioat_chan *ioat;

	/* Verify that this device is not already attached */
	TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
		/*
		 * NOTE: This assumes that the PCI abstraction layer will use the same device handle
		 *  across enumerations; we could compare by BDF instead if this is not true.
		 */
		if (pci_dev == ioat->device) {
			return 0;
		}
	}

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		/*
		 * Since I/OAT init is relatively quick, just perform the full init during probing.
		 *  If this turns out to be a bottleneck later, this can be changed to work like
		 *  NVMe with a list of devices to initialize in parallel.
		 */
		ioat = ioat_attach(pci_dev);
		if (ioat == NULL) {
			SPDK_ERRLOG("ioat_attach() failed\n");
			return -1;
		}

		TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);

		enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
	}

	return 0;
}

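/*
 * Enumerate I/OAT DMA channels: probe_cb is invoked for each unattached
 * device, and attach_cb for each device that is subsequently attached.
 * Illustrative usage sketch (the app_* callbacks and the completion flag are
 * hypothetical, not part of this API):
 *
 *   spdk_ioat_probe(app_ctx, app_probe_cb, app_attach_cb);
 *   ...
 *   spdk_ioat_submit_copy(chan, &done, app_copy_done_cb, dst, src, len);
 *   while (!done) {
 *           spdk_ioat_process_events(chan);
 *   }
 */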
int
spdk_ioat_probe(void *cb_ctx, spdk_ioat_probe_cb probe_cb, spdk_ioat_attach_cb attach_cb)
{
	int rc;
	struct ioat_enum_ctx enum_ctx;

	pthread_mutex_lock(&g_ioat_driver.lock);

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = spdk_pci_enumerate(spdk_pci_ioat_get_driver(), ioat_enum_cb, &enum_ctx);

	pthread_mutex_unlock(&g_ioat_driver.lock);

	return rc;
}

void
spdk_ioat_detach(struct spdk_ioat_chan *ioat)
{
	struct ioat_driver	*driver = &g_ioat_driver;

	/* The channel must no longer be registered to any thread
	 * when spdk_ioat_detach() is called.
	 */
	pthread_mutex_lock(&driver->lock);
	TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
	pthread_mutex_unlock(&driver->lock);

	ioat_channel_destruct(ioat);
	free(ioat);
}

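/*
 * Queue (but do not flush) a copy request. The virtual buffers are translated
 * with spdk_vtophys() and split wherever they stop being physically
 * contiguous or exceed max_xfer_size, producing one descriptor per chunk;
 * only the final descriptor carries the completion callback. Call
 * spdk_ioat_flush() (or use spdk_ioat_submit_copy()) to start the transfer.
 */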
int
spdk_ioat_build_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		     void *dst, const void *src, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc;
	uint64_t	remaining, op_size;
	uint64_t	vdst, vsrc;
	uint64_t	pdst_addr, psrc_addr, dst_len, src_len;
	uint32_t	orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	vsrc = (uint64_t)src;

	remaining = nbytes;
	while (remaining) {
		src_len = dst_len = remaining;

		psrc_addr = spdk_vtophys((void *)vsrc, &src_len);
		if (psrc_addr == SPDK_VTOPHYS_ERROR) {
			return -EINVAL;
		}
		pdst_addr = spdk_vtophys((void *)vdst, &dst_len);
		if (pdst_addr == SPDK_VTOPHYS_ERROR) {
			return -EINVAL;
		}

		op_size = spdk_min(dst_len, src_len);
		op_size = spdk_min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_copy(ioat, pdst_addr, psrc_addr, op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vsrc += op_size;
		vdst += op_size;
	}

	/* For a zero-length request, issue a null descriptor so the callback still fires. */
	if (nbytes == 0) {
		last_desc = ioat_prep_null(ioat);
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	return 0;
}

int
spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, const void *src, uint64_t nbytes)
{
	int rc;

	rc = spdk_ioat_build_copy(ioat, cb_arg, cb_fn, dst, src, nbytes);
	if (rc != 0) {
		return rc;
	}

	spdk_ioat_flush(ioat);
	return 0;
}

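/*
 * Queue (but do not flush) a fill request. Fails if the channel does not
 * advertise block-fill support. The destination is translated with
 * spdk_vtophys() and split into physically contiguous chunks, one descriptor
 * per chunk, with the completion callback attached to the final descriptor.
 */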
int
spdk_ioat_build_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		     void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc = NULL;
	uint64_t	remaining, op_size;
	uint64_t	vdst;
	uint64_t	pdst_addr, dst_len;
	uint32_t	orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
		SPDK_ERRLOG("Channel does not support memory fill\n");
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	remaining = nbytes;

	while (remaining) {
		dst_len = remaining;
		pdst_addr = spdk_vtophys((void *)vdst, &dst_len);
		if (pdst_addr == SPDK_VTOPHYS_ERROR) {
			return -EINVAL;
		}

		op_size = spdk_min(dst_len, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_fill(ioat, pdst_addr, fill_pattern, op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vdst += op_size;
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	return 0;
}

int
spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	int rc;

	rc = spdk_ioat_build_fill(ioat, cb_arg, cb_fn, dst, fill_pattern, nbytes);
	if (rc != 0) {
		return rc;
	}

	spdk_ioat_flush(ioat);
	return 0;
}

uint32_t
spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
{
	if (!ioat) {
		return 0;
	}
	return ioat->dma_capabilities;
}

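/*
 * Public completion poller. This is a polled-mode driver, so the application
 * must call this repeatedly to have completion callbacks invoked. Returns the
 * number of completed descriptors, or a negative value if the channel halted.
 */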
int
spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
{
	return ioat_process_channel_events(ioat);
}

SPDK_LOG_REGISTER_COMPONENT(ioat)