/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ioat_internal.h"

#include "spdk/log.h"
#include "spdk/env.h"

#include <pthread.h>

struct ioat_driver {
	pthread_mutex_t			lock;
	TAILQ_HEAD(, spdk_ioat_chan)	attached_chans;
};

static struct ioat_driver g_ioat_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.attached_chans = TAILQ_HEAD_INITIALIZER(g_ioat_driver.attached_chans),
};

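/*
 * Channel register access helpers. The I/OAT channel registers are exposed
 * through a memory-mapped PCI BAR (see ioat_map_pci_bar() below). CHANSTS,
 * CHANCMP and CHAINADDR are 64-bit registers and go through the spdk_mmio_*
 * wrappers; CHANCMD is a single byte and is written directly.
 */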
static uint64_t
ioat_get_chansts(struct spdk_ioat_chan *ioat)
{
	return spdk_mmio_read_8(&ioat->regs->chansts);
}

static void
ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chancmp, addr);
}

static void
ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
}

static inline void
ioat_suspend(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}

static inline void
ioat_reset(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}

static inline uint32_t
ioat_reset_pending(struct spdk_ioat_chan *ioat)
{
	uint8_t cmd;

	cmd = ioat->regs->chancmd;
	return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}

static int
ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
{
	int regs_bar, rc;
	void *addr;
	uint64_t phys_addr, size;

	regs_bar = 0;
	rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar() failed with error code %d\n",
			    rc);
		return -1;
	}

	ioat->regs = (volatile struct spdk_ioat_registers *)addr;

	return 0;
}

static int
ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
{
	int rc = 0;
	void *addr = (void *)ioat->regs;

	if (addr) {
		rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
	}
	return rc;
}

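/*
 * Descriptor ring accounting. The ring holds (1 << ring_size_order) entries;
 * head and tail are free-running counters that are masked down to a ring
 * index when used, so (head - tail) is the number of descriptors submitted
 * but not yet completed. ioat_get_ring_space() reserves one slot, which keeps
 * a completely full ring distinguishable from an empty one.
 */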
static inline uint32_t
ioat_get_active(struct spdk_ioat_chan *ioat)
{
	return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}

static inline uint32_t
ioat_get_ring_space(struct spdk_ioat_chan *ioat)
{
	return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}

static uint32_t
ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return index & ((1 << ioat->ring_size_order) - 1);
}

static void
ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
		    struct ioat_descriptor **desc,
		    union spdk_ioat_hw_desc **hw_desc)
{
	uint32_t i = ioat_get_ring_index(ioat, index);

	*desc = &ioat->ring[i];
	*hw_desc = &ioat->hw_ring[i];
}

static uint64_t
ioat_get_desc_phys_addr(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return ioat->hw_ring_phys_addr +
	       ioat_get_ring_index(ioat, index) * sizeof(union spdk_ioat_hw_desc);
}

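/*
 * Submitting a descriptor is a two-step process: ioat_submit_single() only
 * advances the software head, and ioat_flush() makes the new descriptors
 * visible to the hardware by writing the (truncated 16-bit) head into the
 * DMACOUNT register. Callers batch several prep calls and then flush once.
 */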
static void
ioat_submit_single(struct spdk_ioat_chan *ioat)
{
	ioat->head++;
}

static void
ioat_flush(struct spdk_ioat_chan *ioat)
{
	ioat->regs->dmacount = (uint16_t)ioat->head;
}

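/*
 * A "null" descriptor performs no data transfer; ioat_channel_start() uses
 * it to kick the channel once and verify that it reaches the idle state
 * before real copy/fill work is submitted.
 */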
static struct ioat_descriptor *
ioat_prep_null(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.null = 1;
	hw_desc->dma.u.control.completion_update = 1;

	hw_desc->dma.size = 8;
	hw_desc->dma.src_addr = 0;
	hw_desc->dma.dest_addr = 0;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t src, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.completion_update = 1;

	hw_desc->dma.size = len;
	hw_desc->dma.src_addr = src;
	hw_desc->dma.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t fill_pattern, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->fill.u.control_raw = 0;
	hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;
	hw_desc->fill.u.control.completion_update = 1;

	hw_desc->fill.size = len;
	hw_desc->fill.src_data = fill_pattern;
	hw_desc->fill.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

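/*
 * Bring the channel into a known state: suspend it if it is currently active
 * or idle, wait (up to 20 ms) for the suspend to take effect, clear any
 * latched CHANERR bits, then issue a full channel reset and wait (again up
 * to 20 ms) for the reset to complete.
 */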
static int
ioat_reset_hw(struct spdk_ioat_chan *ioat)
{
	int timeout;
	uint64_t status;
	uint32_t chanerr;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_suspend(ioat);
	}

	timeout = 20; /* in milliseconds */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for suspend\n");
			return -1;
		}
		status = ioat_get_chansts(ioat);
	}

	/*
	 * Clear any outstanding errors.
	 * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
	 */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	ioat_reset(ioat);

	timeout = 20;
	while (ioat_reset_pending(ioat)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for reset\n");
			return -1;
		}
	}

	return 0;
}

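/*
 * Poll for completions. The hardware writes the bus address of the most
 * recently completed descriptor into the completion-update area (CHANCMP);
 * walk the ring from the software tail up to that descriptor, invoking each
 * descriptor's callback, and remember the last completion seen so the next
 * poll can return early when nothing new has finished.
 */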
static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	uint64_t status, completed_descriptor, hw_desc_phys_addr;
	uint32_t tail;

	if (ioat->head == ioat->tail) {
		return 0;
	}

	status = *ioat->comp_update;
	completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (is_ioat_halted(status)) {
		SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
		return -1;
	}

	if (completed_descriptor == ioat->last_seen) {
		return 0;
	}

	do {
		tail = ioat_get_ring_index(ioat, ioat->tail);
		desc = &ioat->ring[tail];

		if (desc->callback_fn) {
			desc->callback_fn(desc->callback_arg);
		}

		hw_desc_phys_addr = ioat_get_desc_phys_addr(ioat, ioat->tail);
		ioat->tail++;
	} while (hw_desc_phys_addr != completed_descriptor);

	ioat->last_seen = hw_desc_phys_addr;
	return 0;
}

static int
ioat_channel_destruct(struct spdk_ioat_chan *ioat)
{
	ioat_unmap_pci_bar(ioat);

	if (ioat->ring) {
		free(ioat->ring);
	}

	if (ioat->hw_ring) {
		spdk_free(ioat->hw_ring);
	}

	if (ioat->comp_update) {
		spdk_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
	}

	return 0;
}

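/*
 * One-time channel initialization: map the register BAR, check the device
 * version (3.0 or newer is required) and capabilities, allocate the
 * completion-update area and the software/hardware descriptor rings, link
 * the hardware descriptors into a circular chain, reset the channel, program
 * CHANCMP/CHAINADDR, and finally submit a null descriptor to confirm that
 * the channel goes idle.
 */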
static int
ioat_channel_start(struct spdk_ioat_chan *ioat)
{
	uint8_t xfercap, version;
	uint64_t status;
	int i, num_descriptors;
	uint64_t comp_update_bus_addr = 0;

	if (ioat_map_pci_bar(ioat) != 0) {
		SPDK_ERRLOG("ioat_map_pci_bar() failed\n");
		return -1;
	}

	version = ioat->regs->cbver;
	if (version < SPDK_IOAT_VER_3_0) {
		SPDK_ERRLOG("unsupported IOAT version %u.%u\n",
			    version >> 4, version & 0xF);
		return -1;
	}

	/* Always support DMA copy */
	ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
	if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
		ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
	}
	xfercap = ioat->regs->xfercap;

	/* Only bits [4:0] are valid. */
	xfercap &= 0x1f;
	if (xfercap == 0) {
		/* 0 means 4 GB max transfer size. */
		ioat->max_xfer_size = 1ULL << 32;
	} else if (xfercap < 12) {
		/* XFERCAP must be at least 12 (4 KB) according to the spec. */
		SPDK_ERRLOG("invalid XFERCAP value %u\n", xfercap);
		return -1;
	} else {
		ioat->max_xfer_size = 1U << xfercap;
	}

	ioat->comp_update = spdk_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
					 &comp_update_bus_addr);
	if (ioat->comp_update == NULL) {
		return -1;
	}

	ioat->ring_size_order = IOAT_DEFAULT_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
	if (!ioat->ring) {
		return -1;
	}

	ioat->hw_ring = spdk_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
				     &ioat->hw_ring_phys_addr);
	if (!ioat->hw_ring) {
		return -1;
	}

	for (i = 0; i < num_descriptors; i++) {
		ioat->hw_ring[i].generic.next = ioat_get_desc_phys_addr(ioat, i + 1);
	}

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	ioat_reset_hw(ioat);

	ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	ioat_write_chancmp(ioat, comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->hw_ring_phys_addr);

	ioat_prep_null(ioat);
	ioat_flush(ioat);

	i = 100;
	while (i-- > 0) {
		spdk_delay_us(100);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status)) {
			break;
		}
	}

	if (is_ioat_idle(status)) {
		ioat_process_channel_events(ioat);
	} else {
		SPDK_ERRLOG("could not start channel: status = %p, error = %#x\n",
			    (void *)status, ioat->regs->chanerr);
		return -1;
	}

	return 0;
}

/* Caller must hold g_ioat_driver.lock */
static struct spdk_ioat_chan *
ioat_attach(void *device)
{
	struct spdk_ioat_chan	*ioat;
	uint32_t cmd_reg;

	ioat = calloc(1, sizeof(struct spdk_ioat_chan));
	if (ioat == NULL) {
		return NULL;
	}

	/* Enable PCI bus mastering (set bit 2 of the PCI command register at config offset 4). */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	ioat->device = device;

	if (ioat_channel_start(ioat) != 0) {
		ioat_channel_destruct(ioat);
		free(ioat);
		return NULL;
	}

	return ioat;
}

struct ioat_enum_ctx {
	spdk_ioat_probe_cb probe_cb;
	spdk_ioat_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_ioat_driver.lock */
static int
ioat_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct ioat_enum_ctx *enum_ctx = ctx;
	struct spdk_ioat_chan *ioat;

	/* Verify that this device is not already attached */
	TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
		/*
		 * NOTE: This assumes that the PCI abstraction layer will use the same device handle
		 *  across enumerations; we could compare by BDF instead if this is not true.
		 */
		if (pci_dev == ioat->device) {
			return 0;
		}
	}

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		/*
		 * Since I/OAT init is relatively quick, just perform the full init during probing.
		 *  If this turns out to be a bottleneck later, this can be changed to work like
		 *  NVMe with a list of devices to initialize in parallel.
		 */
		ioat = ioat_attach(pci_dev);
		if (ioat == NULL) {
			SPDK_ERRLOG("ioat_attach() failed\n");
			return -1;
		}

		TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);

		enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
	}

	return 0;
}

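/*
 * spdk_ioat_probe() enumerates I/OAT PCI devices under the driver lock. For
 * each device, probe_cb decides whether to claim it; claimed devices are
 * fully initialized and then reported through attach_cb. An illustrative
 * caller sketch (the callback names below are hypothetical, not part of this
 * library):
 *
 *	static bool my_probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
 *	{
 *		return true;	// claim every I/OAT channel found
 *	}
 *
 *	static void my_attach_cb(void *cb_ctx, struct spdk_pci_device *dev,
 *				 struct spdk_ioat_chan *chan)
 *	{
 *		// remember chan and poll it later with spdk_ioat_process_events()
 *	}
 *
 *	spdk_ioat_probe(NULL, my_probe_cb, my_attach_cb);
 */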
int
spdk_ioat_probe(void *cb_ctx, spdk_ioat_probe_cb probe_cb, spdk_ioat_attach_cb attach_cb)
{
	int rc;
	struct ioat_enum_ctx enum_ctx;

	pthread_mutex_lock(&g_ioat_driver.lock);

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = spdk_pci_enumerate(SPDK_PCI_DEVICE_IOAT, ioat_enum_cb, &enum_ctx);

	pthread_mutex_unlock(&g_ioat_driver.lock);

	return rc;
}

int
spdk_ioat_detach(struct spdk_ioat_chan *ioat)
{
	struct ioat_driver	*driver = &g_ioat_driver;

	/* The channel must no longer be in use by any thread
	 * when spdk_ioat_detach() is called.
	 */
	pthread_mutex_lock(&driver->lock);
	TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
	pthread_mutex_unlock(&driver->lock);

	ioat_channel_destruct(ioat);
	free(ioat);

	return 0;
}

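/*
 * Buffers are carved up at 2 MB boundaries because physical contiguity is
 * only guaranteed within a single 2 MB hugepage, so each piece needs its own
 * spdk_vtophys() translation. These helpers compute the 2 MB-aligned page
 * base and the offset within that page for a virtual address.
 */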
#define min(a, b) (((a)<(b))?(a):(b))

#define _2MB_PAGE(ptr)		((ptr) & ~(0x200000 - 1))
#define _2MB_OFFSET(ptr)	((ptr) &  (0x200000 - 1))

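/*
 * Split a copy request into hardware descriptors: each descriptor stays
 * within one 2 MB page of both the source and destination buffers and within
 * the channel's max_xfer_size. The caller's callback is attached to the last
 * descriptor only, so it fires once the whole request has completed. Returns
 * the number of bytes submitted, or -1 if the ring ran out of descriptors
 * (in which case the head is rolled back and nothing is flushed).
 */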
int64_t
spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, const void *src, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc;
	uint64_t	remaining, op_size;
	uint64_t	vdst, vsrc;
	uint64_t	vdst_page, vsrc_page;
	uint64_t	pdst_page, psrc_page;
	uint32_t	orig_head;

	if (!ioat) {
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	vsrc = (uint64_t)src;
	vsrc_page = _2MB_PAGE(vsrc);
	vdst_page = _2MB_PAGE(vdst);
	psrc_page = spdk_vtophys((void *)vsrc_page);
	pdst_page = spdk_vtophys((void *)vdst_page);

	remaining = nbytes;

	while (remaining) {
		op_size = remaining;
		op_size = min(op_size, (0x200000 - _2MB_OFFSET(vsrc)));
		op_size = min(op_size, (0x200000 - _2MB_OFFSET(vdst)));
		op_size = min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_copy(ioat,
					   pdst_page + _2MB_OFFSET(vdst),
					   psrc_page + _2MB_OFFSET(vsrc),
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vsrc += op_size;
		vdst += op_size;

		if (_2MB_PAGE(vsrc) != vsrc_page) {
			vsrc_page = _2MB_PAGE(vsrc);
			psrc_page = spdk_vtophys((void *)vsrc_page);
		}

		if (_2MB_PAGE(vdst) != vdst_page) {
			vdst_page = _2MB_PAGE(vdst);
			pdst_page = spdk_vtophys((void *)vdst_page);
		}
	}
	/* Issue null descriptor for null transfer */
	if (nbytes == 0) {
		last_desc = ioat_prep_null(ioat);
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -1;
	}

	ioat_flush(ioat);
	return nbytes;
}

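/*
 * Queue a fill operation: repeat the 64-bit fill_pattern across dst. Requires
 * the block-fill capability (SPDK_IOAT_ENGINE_FILL_SUPPORTED). The request is
 * split only by max_xfer_size; as with copies, the callback is attached to
 * the final descriptor and the ring head is rolled back if the ring runs out
 * of space.
 */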
int64_t
spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc = NULL;
	uint64_t	remaining, op_size;
	uint64_t	vdst;
	uint32_t	orig_head;

	if (!ioat) {
		return -1;
	}

	if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
		SPDK_ERRLOG("Channel does not support memory fill\n");
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	remaining = nbytes;

	while (remaining) {
		op_size = remaining;
		op_size = min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_fill(ioat,
					   spdk_vtophys((void *)vdst),
					   fill_pattern,
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vdst += op_size;
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -1;
	}

	ioat_flush(ioat);
	return nbytes;
}

uint32_t
spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
{
	if (!ioat) {
		return 0;
	}
	return ioat->dma_capabilities;
}

int
spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
{
	return ioat_process_channel_events(ioat);
}

SPDK_LOG_REGISTER_TRACE_FLAG("ioat", SPDK_TRACE_IOAT)