/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ioat_internal.h"

#include "spdk/env.h"

#include "spdk_internal/log.h"

#include <pthread.h>

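/* Global driver state: the list of attached channels, protected by lock. */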
struct ioat_driver {
	pthread_mutex_t			lock;
	TAILQ_HEAD(, spdk_ioat_chan)	attached_chans;
};

static struct ioat_driver g_ioat_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.attached_chans = TAILQ_HEAD_INITIALIZER(g_ioat_driver.attached_chans),
};

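/* MMIO accessors for the channel's memory-mapped registers. */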
static uint64_t
ioat_get_chansts(struct spdk_ioat_chan *ioat)
{
	return spdk_mmio_read_8(&ioat->regs->chansts);
}

static void
ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chancmp, addr);
}

static void
ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
}

static inline void
ioat_suspend(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}

static inline void
ioat_reset(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}

static inline uint32_t
ioat_reset_pending(struct spdk_ioat_chan *ioat)
{
	uint8_t cmd;

	cmd = ioat->regs->chancmd;
	return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}

static int
ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
{
	int regs_bar, rc;
	void *addr;
	uint64_t phys_addr, size;

	regs_bar = 0;
	rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar() failed with error code %d\n",
			    rc);
		return -1;
	}

	ioat->regs = (volatile struct spdk_ioat_registers *)addr;

	return 0;
}

static int
ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
{
	int rc = 0;
	void *addr = (void *)ioat->regs;

	if (addr) {
		rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
	}
	return rc;
}

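/*
 * Descriptor ring bookkeeping. The ring holds (1 << ring_size_order) entries;
 * one slot is always left unused so that a full ring can be distinguished
 * from an empty one (head == tail).
 */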
static inline uint32_t
ioat_get_active(struct spdk_ioat_chan *ioat)
{
	return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}

static inline uint32_t
ioat_get_ring_space(struct spdk_ioat_chan *ioat)
{
	return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}

static uint32_t
ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return index & ((1 << ioat->ring_size_order) - 1);
}

static void
ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
		    struct ioat_descriptor **desc,
		    union spdk_ioat_hw_desc **hw_desc)
{
	uint32_t i = ioat_get_ring_index(ioat, index);

	*desc = &ioat->ring[i];
	*hw_desc = &ioat->hw_ring[i];
}

static uint64_t
ioat_get_desc_phys_addr(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return ioat->hw_ring_phys_addr +
	       ioat_get_ring_index(ioat, index) * sizeof(union spdk_ioat_hw_desc);
}

static void
ioat_submit_single(struct spdk_ioat_chan *ioat)
{
	ioat->head++;
}

static void
ioat_flush(struct spdk_ioat_chan *ioat)
{
	ioat->regs->dmacount = (uint16_t)ioat->head;
}

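/*
 * Reserve the next ring slot and encode a null (no-op) descriptor with
 * completion update set; used during channel start to confirm that the
 * hardware can process descriptors.
 */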
static struct ioat_descriptor *
ioat_prep_null(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.null = 1;
	hw_desc->dma.u.control.completion_update = 1;

	hw_desc->dma.size = 8;
	hw_desc->dma.src_addr = 0;
	hw_desc->dma.dest_addr = 0;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

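/*
 * Reserve the next ring slot and encode a single memory-copy descriptor.
 * src and dst are physical (bus) addresses; len must not exceed max_xfer_size.
 */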
static struct ioat_descriptor *
ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t src, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.completion_update = 1;

	hw_desc->dma.size = len;
	hw_desc->dma.src_addr = src;
	hw_desc->dma.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

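/*
 * Reserve the next ring slot and encode a block-fill descriptor that writes
 * the 8-byte fill_pattern repeatedly across len bytes starting at the
 * physical address dst.
 */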
static struct ioat_descriptor *
ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t fill_pattern, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->fill.u.control_raw = 0;
	hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;
	hw_desc->fill.u.control.completion_update = 1;

	hw_desc->fill.size = len;
	hw_desc->fill.src_data = fill_pattern;
	hw_desc->fill.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

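/*
 * Quiesce and reset the channel: suspend it if it is active, wait for the
 * suspend to take effect, clear any latched CHANERR bits, then issue a reset
 * and wait for the reset bit to clear.
 */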
static int
ioat_reset_hw(struct spdk_ioat_chan *ioat)
{
	int timeout;
	uint64_t status;
	uint32_t chanerr;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_suspend(ioat);
	}

	timeout = 20; /* in milliseconds */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for suspend\n");
			return -1;
		}
		status = ioat_get_chansts(ioat);
	}

	/*
	 * Clear any outstanding errors.
	 * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
	 */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	ioat_reset(ioat);

	timeout = 20;
	while (ioat_reset_pending(ioat)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for reset\n");
			return -1;
		}
	}

	return 0;
}

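/*
 * Poll the completion-update address written by the hardware and retire all
 * descriptors up to (and including) the last completed one, invoking each
 * descriptor's callback along the way.
 */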
static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	uint64_t status, completed_descriptor, hw_desc_phys_addr;
	uint32_t tail;

	if (ioat->head == ioat->tail) {
		return 0;
	}

	status = *ioat->comp_update;
	completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (is_ioat_halted(status)) {
		SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
		return -1;
	}

	if (completed_descriptor == ioat->last_seen) {
		return 0;
	}

	do {
		tail = ioat_get_ring_index(ioat, ioat->tail);
		desc = &ioat->ring[tail];

		if (desc->callback_fn) {
			desc->callback_fn(desc->callback_arg);
		}

		hw_desc_phys_addr = ioat_get_desc_phys_addr(ioat, ioat->tail);
		ioat->tail++;
	} while (hw_desc_phys_addr != completed_descriptor);

	ioat->last_seen = hw_desc_phys_addr;
	return 0;
}

static int
ioat_channel_destruct(struct spdk_ioat_chan *ioat)
{
	ioat_unmap_pci_bar(ioat);

	if (ioat->ring) {
		free(ioat->ring);
	}

	if (ioat->hw_ring) {
		spdk_free(ioat->hw_ring);
	}

	if (ioat->comp_update) {
		spdk_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
	}

	return 0;
}

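/*
 * One-time channel bring-up: map BAR 0, check the hardware version and
 * XFERCAP, allocate the descriptor ring and the completion-update area,
 * reset the hardware, and submit a null descriptor to verify that the
 * channel reaches the idle state.
 */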
static int
ioat_channel_start(struct spdk_ioat_chan *ioat)
{
	uint8_t xfercap, version;
	uint64_t status;
	int i, num_descriptors;
	uint64_t comp_update_bus_addr = 0;

	if (ioat_map_pci_bar(ioat) != 0) {
		SPDK_ERRLOG("ioat_map_pci_bar() failed\n");
		return -1;
	}

	version = ioat->regs->cbver;
	if (version < SPDK_IOAT_VER_3_0) {
		SPDK_ERRLOG("unsupported IOAT version %u.%u\n",
			    version >> 4, version & 0xF);
		return -1;
	}

	/* Always support DMA copy */
	ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
	if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
		ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
	}
	xfercap = ioat->regs->xfercap;

	/* Only bits [4:0] are valid. */
	xfercap &= 0x1f;
	if (xfercap == 0) {
		/* 0 means 4 GB max transfer size. */
		ioat->max_xfer_size = 1ULL << 32;
	} else if (xfercap < 12) {
		/* XFERCAP must be at least 12 (4 KB) according to the spec. */
		SPDK_ERRLOG("invalid XFERCAP value %u\n", xfercap);
		return -1;
	} else {
		ioat->max_xfer_size = 1U << xfercap;
	}

	ioat->comp_update = spdk_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
					 &comp_update_bus_addr);
	if (ioat->comp_update == NULL) {
		return -1;
	}

	ioat->ring_size_order = IOAT_DEFAULT_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
	if (!ioat->ring) {
		return -1;
	}

	ioat->hw_ring = spdk_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
				     &ioat->hw_ring_phys_addr);
	if (!ioat->hw_ring) {
		return -1;
	}

	for (i = 0; i < num_descriptors; i++) {
		ioat->hw_ring[i].generic.next = ioat_get_desc_phys_addr(ioat, i + 1);
	}

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	ioat_reset_hw(ioat);

	ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	ioat_write_chancmp(ioat, comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->hw_ring_phys_addr);

	ioat_prep_null(ioat);
	ioat_flush(ioat);

	i = 100;
	while (i-- > 0) {
		spdk_delay_us(100);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status)) {
			break;
		}
	}

	if (is_ioat_idle(status)) {
		ioat_process_channel_events(ioat);
	} else {
		SPDK_ERRLOG("could not start channel: status = %p, error = %#x\n",
			    (void *)status, ioat->regs->chanerr);
		return -1;
	}

	return 0;
}

/* Caller must hold g_ioat_driver.lock */
static struct spdk_ioat_chan *
ioat_attach(void *device)
{
	struct spdk_ioat_chan *ioat;
	uint32_t cmd_reg;

	ioat = calloc(1, sizeof(struct spdk_ioat_chan));
	if (ioat == NULL) {
		return NULL;
	}

	/* Enable PCI busmaster. */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	ioat->device = device;

	if (ioat_channel_start(ioat) != 0) {
		ioat_channel_destruct(ioat);
		free(ioat);
		return NULL;
	}

	return ioat;
}

struct ioat_enum_ctx {
	spdk_ioat_probe_cb probe_cb;
	spdk_ioat_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_ioat_driver.lock */
static int
ioat_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct ioat_enum_ctx *enum_ctx = ctx;
	struct spdk_ioat_chan *ioat;

	/* Verify that this device is not already attached */
	TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
		/*
		 * NOTE: This assumes that the PCI abstraction layer will use the same device handle
		 *  across enumerations; we could compare by BDF instead if this is not true.
		 */
		if (pci_dev == ioat->device) {
			return 0;
		}
	}

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		/*
		 * Since I/OAT init is relatively quick, just perform the full init during probing.
		 *  If this turns out to be a bottleneck later, this can be changed to work like
		 *  NVMe with a list of devices to initialize in parallel.
		 */
		ioat = ioat_attach(pci_dev);
		if (ioat == NULL) {
			SPDK_ERRLOG("ioat_attach() failed\n");
			return -1;
		}

		TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);

		enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
	}

	return 0;
}

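/*
 * Typical usage (sketch only; the callback typedefs are declared in
 * include/spdk/ioat.h, and the callback names below are hypothetical):
 *
 *   static bool my_probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
 *   {
 *           return true;   // claim every I/OAT channel that is found
 *   }
 *
 *   static void my_attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev,
 *                            struct spdk_ioat_chan *ioat)
 *   {
 *           // save the attached channel for later submit/process calls
 *   }
 *
 *   spdk_ioat_probe(NULL, my_probe_cb, my_attach_cb);
 */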
int
spdk_ioat_probe(void *cb_ctx, spdk_ioat_probe_cb probe_cb, spdk_ioat_attach_cb attach_cb)
{
	int rc;
	struct ioat_enum_ctx enum_ctx;

	pthread_mutex_lock(&g_ioat_driver.lock);

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = spdk_pci_enumerate(SPDK_PCI_DEVICE_IOAT, ioat_enum_cb, &enum_ctx);

	pthread_mutex_unlock(&g_ioat_driver.lock);

	return rc;
}

int
spdk_ioat_detach(struct spdk_ioat_chan *ioat)
{
	struct ioat_driver	*driver = &g_ioat_driver;

	/* The channel must no longer be in use by any thread
	 * when spdk_ioat_detach() is called.
	 */
	pthread_mutex_lock(&driver->lock);
	TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
	pthread_mutex_unlock(&driver->lock);

	ioat_channel_destruct(ioat);
	free(ioat);

	return 0;
}

#define min(a, b) (((a)<(b))?(a):(b))

#define _2MB_PAGE(ptr)		((ptr) & ~(0x200000 - 1))
#define _2MB_OFFSET(ptr)	((ptr) &  (0x200000 - 1))

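/*
 * Split the copy at 2 MB virtual-address boundaries (and at max_xfer_size),
 * translating each chunk with spdk_vtophys(). This relies on the buffers
 * being backed by memory that is physically contiguous within each 2 MB
 * hugepage, e.g. memory allocated with spdk_zmalloc().
 */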
int64_t
spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, const void *src, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc;
	uint64_t	remaining, op_size;
	uint64_t	vdst, vsrc;
	uint64_t	vdst_page, vsrc_page;
	uint64_t	pdst_page, psrc_page;
	uint32_t	orig_head;

	if (!ioat) {
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	vsrc = (uint64_t)src;
	vsrc_page = _2MB_PAGE(vsrc);
	vdst_page = _2MB_PAGE(vdst);
	psrc_page = spdk_vtophys((void *)vsrc_page);
	pdst_page = spdk_vtophys((void *)vdst_page);

	remaining = nbytes;

	while (remaining) {
		op_size = remaining;
		op_size = min(op_size, (0x200000 - _2MB_OFFSET(vsrc)));
		op_size = min(op_size, (0x200000 - _2MB_OFFSET(vdst)));
		op_size = min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_copy(ioat,
					   pdst_page + _2MB_OFFSET(vdst),
					   psrc_page + _2MB_OFFSET(vsrc),
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vsrc += op_size;
		vdst += op_size;

		if (_2MB_PAGE(vsrc) != vsrc_page) {
			vsrc_page = _2MB_PAGE(vsrc);
			psrc_page = spdk_vtophys((void *)vsrc_page);
		}

		if (_2MB_PAGE(vdst) != vdst_page) {
			vdst_page = _2MB_PAGE(vdst);
			pdst_page = spdk_vtophys((void *)vdst_page);
		}
	}

	/* Issue a null descriptor for a zero-length transfer. */
	if (nbytes == 0) {
		last_desc = ioat_prep_null(ioat);
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -1;
	}

	ioat_flush(ioat);
	return nbytes;
}

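/*
 * Issue one fill descriptor per max_xfer_size chunk, translating the
 * destination of each chunk with spdk_vtophys().
 */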
int64_t
spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc = NULL;
	uint64_t	remaining, op_size;
	uint64_t	vdst;
	uint32_t	orig_head;

	if (!ioat) {
		return -1;
	}

	if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
		SPDK_ERRLOG("Channel does not support memory fill\n");
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	remaining = nbytes;

	while (remaining) {
		op_size = remaining;
		op_size = min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_fill(ioat,
					   spdk_vtophys((void *)vdst),
					   fill_pattern,
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vdst += op_size;
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -1;
	}

	ioat_flush(ioat);
	return nbytes;
}

uint32_t
spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
{
	if (!ioat) {
		return 0;
	}
	return ioat->dma_capabilities;
}

int
spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
{
	return ioat_process_channel_events(ioat);
}

SPDK_LOG_REGISTER_TRACE_FLAG("ioat", SPDK_TRACE_IOAT)