/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "ioat_internal.h"

#include "spdk/env.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"
#include "spdk_internal/memory.h"

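/* Global driver state: the list of attached I/OAT channels, protected by a mutex. */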
struct ioat_driver {
	pthread_mutex_t			lock;
	TAILQ_HEAD(, spdk_ioat_chan)	attached_chans;
};

static struct ioat_driver g_ioat_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.attached_chans = TAILQ_HEAD_INITIALIZER(g_ioat_driver.attached_chans),
};

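/* Helpers for accessing the channel's memory-mapped registers. */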
static uint64_t
ioat_get_chansts(struct spdk_ioat_chan *ioat)
{
	return spdk_mmio_read_8(&ioat->regs->chansts);
}

static void
ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chancmp, addr);
}

static void
ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
}

static inline void
ioat_suspend(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}

static inline void
ioat_reset(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}

static inline uint32_t
ioat_reset_pending(struct spdk_ioat_chan *ioat)
{
	uint8_t cmd;

	cmd = ioat->regs->chancmd;
	return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}

static int
ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
{
	int regs_bar, rc;
	void *addr;
	uint64_t phys_addr, size;

	regs_bar = 0;
	rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n",
			    rc);
		return -1;
	}

	ioat->regs = (volatile struct spdk_ioat_registers *)addr;

	return 0;
}

static int
ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
{
	int rc = 0;
	void *addr = (void *)ioat->regs;

	if (addr) {
		rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
	}
	return rc;
}

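/*
 * Descriptor ring bookkeeping. head and tail are free-running counters;
 * masking with (ring size - 1) converts them into ring indices. One slot is
 * always left unused so a full ring can be distinguished from an empty one.
 */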
static inline uint32_t
ioat_get_active(struct spdk_ioat_chan *ioat)
{
	return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}

static inline uint32_t
ioat_get_ring_space(struct spdk_ioat_chan *ioat)
{
	return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}

static uint32_t
ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return index & ((1 << ioat->ring_size_order) - 1);
}

static void
ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
		    struct ioat_descriptor **desc,
		    union spdk_ioat_hw_desc **hw_desc)
{
	uint32_t i = ioat_get_ring_index(ioat, index);

	*desc = &ioat->ring[i];
	*hw_desc = &ioat->hw_ring[i];
}

static void
ioat_submit_single(struct spdk_ioat_chan *ioat)
{
	ioat->head++;
}

static void
ioat_flush(struct spdk_ioat_chan *ioat)
{
	ioat->regs->dmacount = (uint16_t)ioat->head;
}

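/*
 * Descriptor preparation helpers. Each one claims the next ring slot, fills in
 * the hardware descriptor (null, copy, or fill), clears the software callback
 * fields, and advances head; the caller is responsible for calling ioat_flush().
 */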
static struct ioat_descriptor *
ioat_prep_null(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.null = 1;
	hw_desc->dma.u.control.completion_update = 1;

	hw_desc->dma.size = 8;
	hw_desc->dma.src_addr = 0;
	hw_desc->dma.dest_addr = 0;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t src, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.completion_update = 1;

	hw_desc->dma.size = len;
	hw_desc->dma.src_addr = src;
	hw_desc->dma.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t fill_pattern, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->fill.u.control_raw = 0;
	hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;
	hw_desc->fill.u.control.completion_update = 1;

	hw_desc->fill.size = len;
	hw_desc->fill.src_data = fill_pattern;
	hw_desc->fill.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

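/*
 * Quiesce and reset the channel: suspend it if it is active, wait for the
 * suspend to take effect, clear any latched CHANERR bits (and, prior to
 * I/OAT 3.3, the internal channel error register in PCI config space), then
 * issue a reset and wait for it to complete.
 */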
static int
ioat_reset_hw(struct spdk_ioat_chan *ioat)
{
	int timeout;
	uint64_t status;
	uint32_t chanerr;
	int rc;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_suspend(ioat);
	}

	timeout = 20; /* in milliseconds */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for suspend\n");
			return -1;
		}
		status = ioat_get_chansts(ioat);
	}

	/*
	 * Clear any outstanding errors.
	 * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
	 */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	if (ioat->regs->cbver < SPDK_IOAT_VER_3_3) {
		rc = spdk_pci_device_cfg_read32(ioat->device, &chanerr,
						SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
		if (rc) {
			SPDK_ERRLOG("failed to read the internal channel error register\n");
			return -1;
		}

		spdk_pci_device_cfg_write32(ioat->device, chanerr,
					    SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
	}

	ioat_reset(ioat);

	timeout = 20; /* in milliseconds */
	while (ioat_reset_pending(ioat)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for reset\n");
			return -1;
		}
	}

	return 0;
}

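/*
 * Poll for completions. The hardware writes the physical address of the last
 * completed descriptor into *comp_update; walk the ring from tail, invoking
 * each descriptor's callback, until that descriptor is reached.
 */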
static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	uint64_t status, completed_descriptor, hw_desc_phys_addr;
	uint32_t tail;

	if (ioat->head == ioat->tail) {
		return 0;
	}

	status = *ioat->comp_update;
	completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (is_ioat_halted(status)) {
		SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
		return -1;
	}

	if (completed_descriptor == ioat->last_seen) {
		return 0;
	}

	do {
		tail = ioat_get_ring_index(ioat, ioat->tail);
		desc = &ioat->ring[tail];

		if (desc->callback_fn) {
			desc->callback_fn(desc->callback_arg);
		}

		hw_desc_phys_addr = desc->phys_addr;
		ioat->tail++;
	} while (hw_desc_phys_addr != completed_descriptor);

	ioat->last_seen = hw_desc_phys_addr;
	return 0;
}

static void
ioat_channel_destruct(struct spdk_ioat_chan *ioat)
{
	ioat_unmap_pci_bar(ioat);

	if (ioat->ring) {
		free(ioat->ring);
	}

	if (ioat->hw_ring) {
		spdk_dma_free(ioat->hw_ring);
	}

	if (ioat->comp_update) {
		spdk_dma_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
	}
}

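/*
 * Bring a channel online: map BAR 0, validate the hardware version and
 * transfer-size capability, allocate the completion-update area and the
 * software/hardware descriptor rings, link the ring, reset the channel, and
 * confirm that it goes idle after a null descriptor is issued.
 */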
static int
ioat_channel_start(struct spdk_ioat_chan *ioat)
{
	uint8_t xfercap, version;
	uint64_t status;
	int i, num_descriptors;
	uint64_t comp_update_bus_addr = 0;
	uint64_t phys_addr;

	if (ioat_map_pci_bar(ioat) != 0) {
		SPDK_ERRLOG("ioat_map_pci_bar() failed\n");
		return -1;
	}

	version = ioat->regs->cbver;
	if (version < SPDK_IOAT_VER_3_0) {
		SPDK_ERRLOG("unsupported IOAT version %u.%u\n",
			    version >> 4, version & 0xF);
		return -1;
	}

	/* Always support DMA copy */
	ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
	if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
		ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
	}
	xfercap = ioat->regs->xfercap;

	/* Only bits [4:0] are valid. */
	xfercap &= 0x1f;
	if (xfercap == 0) {
		/* 0 means 4 GB max transfer size. */
		ioat->max_xfer_size = 1ULL << 32;
	} else if (xfercap < 12) {
		/* XFERCAP must be at least 12 (4 KB) according to the spec. */
		SPDK_ERRLOG("invalid XFERCAP value %u\n", xfercap);
		return -1;
	} else {
		ioat->max_xfer_size = 1U << xfercap;
	}

	ioat->comp_update = spdk_dma_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
					     &comp_update_bus_addr);
	if (ioat->comp_update == NULL) {
		return -1;
	}

	ioat->ring_size_order = IOAT_DEFAULT_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
	if (!ioat->ring) {
		return -1;
	}

	ioat->hw_ring = spdk_dma_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
					 NULL);
	if (!ioat->hw_ring) {
		return -1;
	}

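	/*
	 * Record the physical address of each hardware descriptor and chain the
	 * previous descriptor's next pointer to it; index -1 wraps to the last
	 * entry, so the ring is circular.
	 */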
	for (i = 0; i < num_descriptors; i++) {
		phys_addr = spdk_vtophys(&ioat->hw_ring[i], NULL);
		if (phys_addr == SPDK_VTOPHYS_ERROR) {
			SPDK_ERRLOG("Failed to translate descriptor %u to physical address\n", i);
			return -1;
		}

		ioat->ring[i].phys_addr = phys_addr;
		ioat->hw_ring[ioat_get_ring_index(ioat, i - 1)].generic.next = phys_addr;
	}

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	ioat_reset_hw(ioat);

	ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	ioat_write_chancmp(ioat, comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0].phys_addr);

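	/* Kick off a null descriptor so the idle check below can confirm the channel is operational. */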
	ioat_prep_null(ioat);
	ioat_flush(ioat);

	i = 100;
	while (i-- > 0) {
		spdk_delay_us(100);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status)) {
			break;
		}
	}

	if (is_ioat_idle(status)) {
		ioat_process_channel_events(ioat);
	} else {
		SPDK_ERRLOG("could not start channel: status = %p, error = %#x\n",
			    (void *)status, ioat->regs->chanerr);
		return -1;
	}

	return 0;
}

/* Caller must hold g_ioat_driver.lock */
static struct spdk_ioat_chan *
ioat_attach(struct spdk_pci_device *device)
{
	struct spdk_ioat_chan *ioat;
	uint32_t cmd_reg;

	ioat = calloc(1, sizeof(struct spdk_ioat_chan));
	if (ioat == NULL) {
		return NULL;
	}

	/* Enable PCI busmaster (Bus Master Enable bit in the command register at config offset 4). */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	ioat->device = device;

	if (ioat_channel_start(ioat) != 0) {
		ioat_channel_destruct(ioat);
		free(ioat);
		return NULL;
	}

	return ioat;
}

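/* Context handed to ioat_enum_cb for each device found during PCI enumeration. */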
struct ioat_enum_ctx {
	spdk_ioat_probe_cb probe_cb;
	spdk_ioat_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_ioat_driver.lock */
static int
ioat_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct ioat_enum_ctx *enum_ctx = ctx;
	struct spdk_ioat_chan *ioat;

	/* Verify that this device is not already attached */
	TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
		/*
		 * NOTE: This assumes that the PCI abstraction layer will use the same device handle
		 *  across enumerations; we could compare by BDF instead if this is not true.
		 */
		if (pci_dev == ioat->device) {
			return 0;
		}
	}

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		/*
		 * Since I/OAT init is relatively quick, just perform the full init during probing.
		 *  If this turns out to be a bottleneck later, this can be changed to work like
		 *  NVMe with a list of devices to initialize in parallel.
		 */
		ioat = ioat_attach(pci_dev);
		if (ioat == NULL) {
			SPDK_ERRLOG("ioat_attach() failed\n");
			return -1;
		}

		TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);

		enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
	}

	return 0;
}

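/*
 * Typical usage, as an illustrative sketch only (my_probe_cb, my_attach_cb and
 * the application code around them are hypothetical, not part of this library):
 *
 *	static bool my_probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
 *	{
 *		return true;	// claim every I/OAT channel found during enumeration
 *	}
 *
 *	static void my_attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev,
 *				 struct spdk_ioat_chan *ioat)
 *	{
 *		// remember the channel handle for later submissions
 *	}
 *
 *	...
 *	spdk_ioat_probe(NULL, my_probe_cb, my_attach_cb);
 *	// submit work with spdk_ioat_submit_copy()/spdk_ioat_submit_fill(), poll
 *	// spdk_ioat_process_events() until the per-request callbacks fire, and
 *	// call spdk_ioat_detach() on each channel when done.
 */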
int
spdk_ioat_probe(void *cb_ctx, spdk_ioat_probe_cb probe_cb, spdk_ioat_attach_cb attach_cb)
{
	int rc;
	struct ioat_enum_ctx enum_ctx;

	pthread_mutex_lock(&g_ioat_driver.lock);

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = spdk_pci_enumerate(spdk_pci_ioat_get_driver(), ioat_enum_cb, &enum_ctx);

	pthread_mutex_unlock(&g_ioat_driver.lock);

	return rc;
}

void
spdk_ioat_detach(struct spdk_ioat_chan *ioat)
{
	struct ioat_driver	*driver = &g_ioat_driver;

	/* The channel must no longer be in use (i.e. not assigned to any thread)
	 * when spdk_ioat_detach() is called.
	 */
	pthread_mutex_lock(&driver->lock);
	TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
	pthread_mutex_unlock(&driver->lock);

	ioat_channel_destruct(ioat);
	free(ioat);
}

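/*
 * Break the copy into hardware descriptors. The buffers are virtually
 * contiguous but may span multiple 2 MB hugepages that are not physically
 * contiguous, so each descriptor is limited to the remainder of the current
 * 2 MB page on both the source and destination side, and to max_xfer_size.
 */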
int
spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, const void *src, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc;
	uint64_t	remaining, op_size;
	uint64_t	vdst, vsrc;
	uint64_t	vdst_page, vsrc_page;
	uint64_t	pdst_page, psrc_page;
	uint32_t	orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	vsrc = (uint64_t)src;
	vdst_page = vsrc_page = 0;
	pdst_page = psrc_page = SPDK_VTOPHYS_ERROR;

	remaining = nbytes;
	while (remaining) {
		if (_2MB_PAGE(vsrc) != vsrc_page) {
			vsrc_page = _2MB_PAGE(vsrc);
			psrc_page = spdk_vtophys((void *)vsrc_page, NULL);
		}

		if (_2MB_PAGE(vdst) != vdst_page) {
			vdst_page = _2MB_PAGE(vdst);
			pdst_page = spdk_vtophys((void *)vdst_page, NULL);
		}
		op_size = remaining;
		op_size = spdk_min(op_size, (VALUE_2MB - _2MB_OFFSET(vsrc)));
		op_size = spdk_min(op_size, (VALUE_2MB - _2MB_OFFSET(vdst)));
		op_size = spdk_min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_copy(ioat,
					   pdst_page + _2MB_OFFSET(vdst),
					   psrc_page + _2MB_OFFSET(vsrc),
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vsrc += op_size;
		vdst += op_size;
	}
	/* Issue null descriptor for null transfer */
	if (nbytes == 0) {
		last_desc = ioat_prep_null(ioat);
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	ioat_flush(ioat);
	return 0;
}

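/*
 * Break the fill into hardware descriptors. Only the destination needs
 * physical-address translation; each descriptor is limited to the remainder
 * of the current 2 MB page and to max_xfer_size.
 */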
int
spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc = NULL;
	uint64_t	remaining, op_size;
	uint64_t	vdst;
	uint32_t	orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
		SPDK_ERRLOG("Channel does not support memory fill\n");
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	remaining = nbytes;

	while (remaining) {
		op_size = remaining;
		op_size = spdk_min(op_size, (VALUE_2MB - _2MB_OFFSET(vdst)));
		op_size = spdk_min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_fill(ioat,
					   spdk_vtophys((void *)vdst, NULL),
					   fill_pattern,
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vdst += op_size;
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	ioat_flush(ioat);
	return 0;
}

uint32_t
spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
{
	if (!ioat) {
		return 0;
	}
	return ioat->dma_capabilities;
}

int
spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
{
	return ioat_process_channel_events(ioat);
}

SPDK_LOG_REGISTER_COMPONENT("ioat", SPDK_LOG_IOAT)