xref: /spdk/lib/ioat/ioat.c (revision 2fac05e919e1940137e4502f01beabb81ebbef9c)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "ioat_internal.h"

#include "spdk/env.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

struct ioat_driver {
	pthread_mutex_t			lock;
	TAILQ_HEAD(, spdk_ioat_chan)	attached_chans;
};

static struct ioat_driver g_ioat_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.attached_chans = TAILQ_HEAD_INITIALIZER(g_ioat_driver.attached_chans),
};

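/*
 * Low-level accessors for the channel's memory-mapped registers.
 * CHANSTS, CHANCMP, and CHAINADDR are 64-bit registers and go through the
 * spdk_mmio_* helpers; CHANCMD is a single byte and is written directly.
 */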
static uint64_t
ioat_get_chansts(struct spdk_ioat_chan *ioat)
{
	return spdk_mmio_read_8(&ioat->regs->chansts);
}

static void
ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chancmp, addr);
}

static void
ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
}

static inline void
ioat_suspend(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}

static inline void
ioat_reset(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}

static inline uint32_t
ioat_reset_pending(struct spdk_ioat_chan *ioat)
{
	uint8_t cmd;

	cmd = ioat->regs->chancmd;
	return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}

static int
ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
{
	int regs_bar, rc;
	void *addr;
	uint64_t phys_addr, size;

	regs_bar = 0;
	rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar() failed with error code %d\n",
			    rc);
		return -1;
	}

	ioat->regs = (volatile struct spdk_ioat_registers *)addr;

	return 0;
}

static int
ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
{
	int rc = 0;
	void *addr = (void *)ioat->regs;

	if (addr) {
		rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
	}
	return rc;
}

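/*
 * Ring accounting: head and tail are free-running counters, so the number of
 * outstanding descriptors and the remaining free slots are derived by masking
 * with (ring size - 1). One slot is always left unused so that a full ring
 * can be distinguished from an empty one.
 */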
static inline uint32_t
ioat_get_active(struct spdk_ioat_chan *ioat)
{
	return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}

static inline uint32_t
ioat_get_ring_space(struct spdk_ioat_chan *ioat)
{
	return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}

static uint32_t
ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return index & ((1 << ioat->ring_size_order) - 1);
}

static void
ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
		    struct ioat_descriptor **desc,
		    union spdk_ioat_hw_desc **hw_desc)
{
	uint32_t i = ioat_get_ring_index(ioat, index);

	*desc = &ioat->ring[i];
	*hw_desc = &ioat->hw_ring[i];
}

static uint64_t
ioat_get_desc_phys_addr(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return ioat->hw_ring_phys_addr +
	       ioat_get_ring_index(ioat, index) * sizeof(union spdk_ioat_hw_desc);
}

static void
ioat_submit_single(struct spdk_ioat_chan *ioat)
{
	ioat->head++;
}

static void
ioat_flush(struct spdk_ioat_chan *ioat)
{
	ioat->regs->dmacount = (uint16_t)ioat->head;
}

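/*
 * Descriptor preparation helpers. Each one claims the slot at the current
 * head, fills in the hardware descriptor (null, copy, or fill), clears the
 * per-descriptor callback, and advances head. The new head is not visible to
 * the hardware until ioat_flush() writes it to DMACOUNT.
 */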
static struct ioat_descriptor *
ioat_prep_null(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.null = 1;
	hw_desc->dma.u.control.completion_update = 1;

	hw_desc->dma.size = 8;
	hw_desc->dma.src_addr = 0;
	hw_desc->dma.dest_addr = 0;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t src, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.completion_update = 1;

	hw_desc->dma.size = len;
	hw_desc->dma.src_addr = src;
	hw_desc->dma.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t fill_pattern, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->fill.u.control_raw = 0;
	hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;
	hw_desc->fill.u.control.completion_update = 1;

	hw_desc->fill.size = len;
	hw_desc->fill.src_data = fill_pattern;
	hw_desc->fill.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

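/*
 * Quiesce and reset the channel: suspend it if it is active or idle, wait up
 * to 20 ms for the suspend to take effect, clear any latched CHANERR bits,
 * then issue a reset and wait up to another 20 ms for it to complete.
 */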
static int
ioat_reset_hw(struct spdk_ioat_chan *ioat)
{
	int timeout;
	uint64_t status;
	uint32_t chanerr;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_suspend(ioat);
	}

	timeout = 20; /* in milliseconds */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for suspend\n");
			return -1;
		}
		status = ioat_get_chansts(ioat);
	}

	/*
	 * Clear any outstanding errors.
	 * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
	 */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	ioat_reset(ioat);

	timeout = 20;
	while (ioat_reset_pending(ioat)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for reset\n");
			return -1;
		}
	}

	return 0;
}

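/*
 * Reap completed descriptors. The hardware writes the physical address of the
 * last completed descriptor into the completion update area; walk the ring
 * from tail up to that descriptor, invoking each callback along the way.
 */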
static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	uint64_t status, completed_descriptor, hw_desc_phys_addr;
	uint32_t tail;

	if (ioat->head == ioat->tail) {
		return 0;
	}

	status = *ioat->comp_update;
	completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (is_ioat_halted(status)) {
		SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
		return -1;
	}

	if (completed_descriptor == ioat->last_seen) {
		return 0;
	}

	do {
		tail = ioat_get_ring_index(ioat, ioat->tail);
		desc = &ioat->ring[tail];

		if (desc->callback_fn) {
			desc->callback_fn(desc->callback_arg);
		}

		hw_desc_phys_addr = ioat_get_desc_phys_addr(ioat, ioat->tail);
		ioat->tail++;
	} while (hw_desc_phys_addr != completed_descriptor);

	ioat->last_seen = hw_desc_phys_addr;
	return 0;
}

static int
ioat_channel_destruct(struct spdk_ioat_chan *ioat)
{
	ioat_unmap_pci_bar(ioat);

	if (ioat->ring) {
		free(ioat->ring);
	}

	if (ioat->hw_ring) {
		spdk_dma_free(ioat->hw_ring);
	}

	if (ioat->comp_update) {
		spdk_dma_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
	}

	return 0;
}

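/*
 * Bring a channel up: map its register BAR, check the hardware version and
 * capabilities, allocate the completion update area and descriptor rings,
 * link the hardware descriptors into a circular chain, reset the channel,
 * and finally submit a null descriptor and wait for the channel to go idle
 * to verify that it is operational.
 */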
static int
ioat_channel_start(struct spdk_ioat_chan *ioat)
{
	uint8_t xfercap, version;
	uint64_t status;
	int i, num_descriptors;
	uint64_t comp_update_bus_addr = 0;

	if (ioat_map_pci_bar(ioat) != 0) {
		SPDK_ERRLOG("ioat_map_pci_bar() failed\n");
		return -1;
	}

	version = ioat->regs->cbver;
	if (version < SPDK_IOAT_VER_3_0) {
		SPDK_ERRLOG(" unsupported IOAT version %u.%u\n",
			    version >> 4, version & 0xF);
		return -1;
	}

	/* Always support DMA copy */
	ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
	if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
		ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
	}
	xfercap = ioat->regs->xfercap;

	/* Only bits [4:0] are valid. */
	xfercap &= 0x1f;
	if (xfercap == 0) {
		/* 0 means 4 GB max transfer size. */
		ioat->max_xfer_size = 1ULL << 32;
	} else if (xfercap < 12) {
		/* XFERCAP must be at least 12 (4 KB) according to the spec. */
		SPDK_ERRLOG("invalid XFERCAP value %u\n", xfercap);
		return -1;
	} else {
		ioat->max_xfer_size = 1U << xfercap;
	}

	ioat->comp_update = spdk_dma_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
					     &comp_update_bus_addr);
	if (ioat->comp_update == NULL) {
		return -1;
	}

	ioat->ring_size_order = IOAT_DEFAULT_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
	if (!ioat->ring) {
		return -1;
	}

	ioat->hw_ring = spdk_dma_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
					 &ioat->hw_ring_phys_addr);
	if (!ioat->hw_ring) {
		return -1;
	}

	for (i = 0; i < num_descriptors; i++) {
		ioat->hw_ring[i].generic.next = ioat_get_desc_phys_addr(ioat, i + 1);
	}

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	ioat_reset_hw(ioat);

	ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	ioat_write_chancmp(ioat, comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->hw_ring_phys_addr);

	ioat_prep_null(ioat);
	ioat_flush(ioat);

	i = 100;
	while (i-- > 0) {
		spdk_delay_us(100);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status)) {
			break;
		}
	}

	if (is_ioat_idle(status)) {
		ioat_process_channel_events(ioat);
	} else {
		SPDK_ERRLOG("could not start channel: status = %p, error = %#x\n",
			    (void *)status, ioat->regs->chanerr);
		return -1;
	}

	return 0;
}

/* Caller must hold g_ioat_driver.lock */
static struct spdk_ioat_chan *
ioat_attach(void *device)
{
	struct spdk_ioat_chan *ioat;
	uint32_t cmd_reg;

	ioat = calloc(1, sizeof(struct spdk_ioat_chan));
	if (ioat == NULL) {
		return NULL;
	}

	/* Enable PCI busmaster. */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	ioat->device = device;

	if (ioat_channel_start(ioat) != 0) {
		ioat_channel_destruct(ioat);
		free(ioat);
		return NULL;
	}

	return ioat;
}

struct ioat_enum_ctx {
	spdk_ioat_probe_cb probe_cb;
	spdk_ioat_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_ioat_driver.lock */
static int
ioat_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct ioat_enum_ctx *enum_ctx = ctx;
	struct spdk_ioat_chan *ioat;

	/* Verify that this device is not already attached */
	TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
		/*
		 * NOTE: This assumes that the PCI abstraction layer will use the same device handle
		 *  across enumerations; we could compare by BDF instead if this is not true.
		 */
		if (pci_dev == ioat->device) {
			return 0;
		}
	}

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		/*
		 * Since I/OAT init is relatively quick, just perform the full init during probing.
		 *  If this turns out to be a bottleneck later, this can be changed to work like
		 *  NVMe with a list of devices to initialize in parallel.
		 */
		ioat = ioat_attach(pci_dev);
		if (ioat == NULL) {
			SPDK_ERRLOG("ioat_attach() failed\n");
			return -1;
		}

		TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);

		enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
	}

	return 0;
}

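/*
 * Enumerate I/OAT channels on the PCI bus. For each unattached device, the
 * probe callback decides whether to claim it; claimed devices are fully
 * initialized and then handed to the attach callback.
 *
 * A minimal usage sketch (callback names are illustrative; see the
 * spdk_ioat_probe_cb/spdk_ioat_attach_cb typedefs in spdk/ioat.h for the
 * exact signatures):
 *
 *   // Claim every channel found and remember the last handle.
 *   static bool my_probe_cb(void *cb_ctx, struct spdk_pci_device *dev) { return true; }
 *   static void my_attach_cb(void *cb_ctx, struct spdk_pci_device *dev,
 *                            struct spdk_ioat_chan *chan)
 *   {
 *       *(struct spdk_ioat_chan **)cb_ctx = chan;
 *   }
 *
 *   struct spdk_ioat_chan *chan = NULL;
 *   if (spdk_ioat_probe(&chan, my_probe_cb, my_attach_cb) != 0 || chan == NULL) {
 *       // no usable channel found
 *   }
 */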
int
spdk_ioat_probe(void *cb_ctx, spdk_ioat_probe_cb probe_cb, spdk_ioat_attach_cb attach_cb)
{
	int rc;
	struct ioat_enum_ctx enum_ctx;

	pthread_mutex_lock(&g_ioat_driver.lock);

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = spdk_pci_ioat_enumerate(ioat_enum_cb, &enum_ctx);

	pthread_mutex_unlock(&g_ioat_driver.lock);

	return rc;
}

int
spdk_ioat_detach(struct spdk_ioat_chan *ioat)
{
	struct ioat_driver	*driver = &g_ioat_driver;

	/* ioat should be in the free list (not registered to a thread)
	 * when calling spdk_ioat_detach().
	 */
	pthread_mutex_lock(&driver->lock);
	TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
	pthread_mutex_unlock(&driver->lock);

	ioat_channel_destruct(ioat);
	free(ioat);

	return 0;
}

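/*
 * Address math for splitting transfers on 2 MB boundaries: buffers are
 * translated with spdk_vtophys() one 2 MB page at a time, since virtually
 * contiguous memory is only guaranteed to be physically contiguous within a
 * single 2 MB hugepage.
 */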
#define _2MB_PAGE(ptr)		((ptr) & ~(0x200000 - 1))
#define _2MB_OFFSET(ptr)	((ptr) &  (0x200000 - 1))

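/*
 * Queue a memory copy. The request is broken into one descriptor per 2 MB
 * page boundary (and per max_xfer_size), and the completion callback is
 * attached only to the last descriptor so it fires once the whole copy has
 * finished.
 */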
int
spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, const void *src, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc;
	uint64_t	remaining, op_size;
	uint64_t	vdst, vsrc;
	uint64_t	vdst_page, vsrc_page;
	uint64_t	pdst_page, psrc_page;
	uint32_t	orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	vsrc = (uint64_t)src;
	vdst_page = vsrc_page = 0;
	pdst_page = psrc_page = SPDK_VTOPHYS_ERROR;

	remaining = nbytes;
	while (remaining) {
		if (_2MB_PAGE(vsrc) != vsrc_page) {
			vsrc_page = _2MB_PAGE(vsrc);
			psrc_page = spdk_vtophys((void *)vsrc_page);
		}

		if (_2MB_PAGE(vdst) != vdst_page) {
			vdst_page = _2MB_PAGE(vdst);
			pdst_page = spdk_vtophys((void *)vdst_page);
		}
		op_size = remaining;
		op_size = spdk_min(op_size, (0x200000 - _2MB_OFFSET(vsrc)));
		op_size = spdk_min(op_size, (0x200000 - _2MB_OFFSET(vdst)));
		op_size = spdk_min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_copy(ioat,
					   pdst_page + _2MB_OFFSET(vdst),
					   psrc_page + _2MB_OFFSET(vsrc),
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vsrc += op_size;
		vdst += op_size;
	}

	/* Issue a null descriptor for a zero-length transfer. */
	if (nbytes == 0) {
		last_desc = ioat_prep_null(ioat);
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	ioat_flush(ioat);
	return 0;
}

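/*
 * Queue a memory fill with a 64-bit pattern. Only valid on channels that
 * report SPDK_IOAT_ENGINE_FILL_SUPPORTED; like the copy path, the request is
 * split on 2 MB boundaries and the callback is attached to the last
 * descriptor.
 */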
int
spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc = NULL;
	uint64_t	remaining, op_size;
	uint64_t	vdst;
	uint32_t	orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
		SPDK_ERRLOG("Channel does not support memory fill\n");
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	remaining = nbytes;

	while (remaining) {
		op_size = remaining;
		op_size = spdk_min(op_size, (0x200000 - _2MB_OFFSET(vdst)));
		op_size = spdk_min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_fill(ioat,
					   spdk_vtophys((void *)vdst),
					   fill_pattern,
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vdst += op_size;
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	ioat_flush(ioat);
	return 0;
}

uint32_t
spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
{
	if (!ioat) {
		return 0;
	}
	return ioat->dma_capabilities;
}

int
spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
{
	return ioat_process_channel_events(ioat);
}

SPDK_LOG_REGISTER_COMPONENT("ioat", SPDK_LOG_IOAT)