xref: /spdk/lib/ioat/ioat.c (revision 88e3ffd7b6c5ec1ea1a660354d25f02c766092e1)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "ioat_internal.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"

#include "spdk/log.h"

struct ioat_driver {
	pthread_mutex_t			lock;
	TAILQ_HEAD(, spdk_ioat_chan)	attached_chans;
};

static struct ioat_driver g_ioat_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.attached_chans = TAILQ_HEAD_INITIALIZER(g_ioat_driver.attached_chans),
};

static uint64_t
ioat_get_chansts(struct spdk_ioat_chan *ioat)
{
	return spdk_mmio_read_8(&ioat->regs->chansts);
}

static void
ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chancmp, addr);
}

static void
ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
}

static inline void
ioat_suspend(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}

static inline void
ioat_reset(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}

static inline uint32_t
ioat_reset_pending(struct spdk_ioat_chan *ioat)
{
	uint8_t cmd;

	cmd = ioat->regs->chancmd;
	return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}

static int
ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
{
	int regs_bar, rc;
	void *addr;
	uint64_t phys_addr, size;

	regs_bar = 0;
	rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n",
			    rc);
		return -1;
	}

	ioat->regs = (volatile struct spdk_ioat_registers *)addr;

	return 0;
}

static int
ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
{
	int rc = 0;
	void *addr = (void *)ioat->regs;

	if (addr) {
		rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
	}
	return rc;
}

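/*
 * The descriptor ring is a power-of-two array of 1 << ring_size_order entries.
 * head and tail are free-running counters that are masked down to a ring index
 * when used; one slot is always left unused so that a full ring can be
 * distinguished from an empty one.
 */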
static inline uint32_t
ioat_get_active(struct spdk_ioat_chan *ioat)
{
	return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}

static inline uint32_t
ioat_get_ring_space(struct spdk_ioat_chan *ioat)
{
	return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}

static uint32_t
ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return index & ((1 << ioat->ring_size_order) - 1);
}

static void
ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
		    struct ioat_descriptor **desc,
		    union spdk_ioat_hw_desc **hw_desc)
{
	uint32_t i = ioat_get_ring_index(ioat, index);

	*desc = &ioat->ring[i];
	*hw_desc = &ioat->hw_ring[i];
}

static void
ioat_submit_single(struct spdk_ioat_chan *ioat)
{
	ioat->head++;
}

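/*
 * Notify the hardware of all descriptors prepared so far: request a completion
 * address update on the most recently prepared descriptor and write the new
 * head count to the DMACOUNT register so the channel starts processing it.
 */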
void
spdk_ioat_flush(struct spdk_ioat_chan *ioat)
{
	uint32_t index = ioat_get_ring_index(ioat, ioat->head - 1);
	union spdk_ioat_hw_desc *hw_desc;

	hw_desc = &ioat->hw_ring[index];
	hw_desc->dma.u.control.completion_update = 1;
	ioat->regs->dmacount = (uint16_t)ioat->head;
}

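/*
 * Prepare a null (no-op) descriptor. It is used to prime the channel during
 * startup and to complete zero-length requests, since every submission needs
 * at least one descriptor to hang the completion callback on.
 */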
static struct ioat_descriptor *
ioat_prep_null(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.null = 1;

	hw_desc->dma.size = 8;
	hw_desc->dma.src_addr = 0;
	hw_desc->dma.dest_addr = 0;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t src, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;

	hw_desc->dma.size = len;
	hw_desc->dma.src_addr = src;
	hw_desc->dma.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t fill_pattern, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->fill.u.control_raw = 0;
	hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;

	hw_desc->fill.size = len;
	hw_desc->fill.src_data = fill_pattern;
	hw_desc->fill.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

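/*
 * Bring the channel back to a known state: suspend it if it is active or idle,
 * wait for the suspend to take effect, clear any latched channel errors (both
 * in MMIO space and, for pre-3.3 hardware, in PCI config space), then issue a
 * reset and wait for it to complete.
 */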
static int
ioat_reset_hw(struct spdk_ioat_chan *ioat)
{
	int timeout;
	uint64_t status;
	uint32_t chanerr;
	int rc;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_suspend(ioat);
	}

	timeout = 20; /* in milliseconds */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for suspend\n");
			return -1;
		}
		status = ioat_get_chansts(ioat);
	}

	/*
	 * Clear any outstanding errors.
	 * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
	 */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	if (ioat->regs->cbver < SPDK_IOAT_VER_3_3) {
		rc = spdk_pci_device_cfg_read32(ioat->device, &chanerr,
						SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
		if (rc) {
			SPDK_ERRLOG("failed to read the internal channel error register\n");
			return -1;
		}

		spdk_pci_device_cfg_write32(ioat->device, chanerr,
					    SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
	}

	ioat_reset(ioat);

	timeout = 20;
	while (ioat_reset_pending(ioat)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for reset\n");
			return -1;
		}
	}

	return 0;
}

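/*
 * Poll for completed descriptors. The hardware writes the physical address of
 * the last completed descriptor to the completion update area (*comp_update);
 * walk the ring from tail up to that descriptor, invoking each callback, and
 * remember the address so already-seen completions are not processed twice.
 * Returns the number of completed descriptors, 0 if nothing new has completed,
 * or -1 if the channel is halted.
 */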
static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	uint64_t status, completed_descriptor, hw_desc_phys_addr, events_count = 0;
	uint32_t tail;

	if (ioat->head == ioat->tail) {
		return 0;
	}

	status = *ioat->comp_update;
	completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (is_ioat_halted(status)) {
		SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
		return -1;
	}

	if (completed_descriptor == ioat->last_seen) {
		return 0;
	}

	do {
		tail = ioat_get_ring_index(ioat, ioat->tail);
		desc = &ioat->ring[tail];

		if (desc->callback_fn) {
			desc->callback_fn(desc->callback_arg);
		}

		hw_desc_phys_addr = desc->phys_addr;
		ioat->tail++;
		events_count++;
	} while (hw_desc_phys_addr != completed_descriptor);

	ioat->last_seen = hw_desc_phys_addr;

	return events_count;
}

static void
ioat_channel_destruct(struct spdk_ioat_chan *ioat)
{
	ioat_unmap_pci_bar(ioat);

	if (ioat->ring) {
		free(ioat->ring);
	}

	if (ioat->hw_ring) {
		spdk_free(ioat->hw_ring);
	}

	if (ioat->comp_update) {
		spdk_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
	}
}

uint32_t
spdk_ioat_get_max_descriptors(struct spdk_ioat_chan *ioat)
{
	return 1 << ioat->ring_size_order;
}

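/*
 * Bring a channel online: map the MMIO registers, check the hardware version
 * and capabilities, allocate the completion update area and the descriptor
 * rings, reset the hardware, point it at the ring, and submit a null
 * descriptor to verify that the channel actually reaches the idle state.
 */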
static int
ioat_channel_start(struct spdk_ioat_chan *ioat)
{
	uint8_t xfercap, version;
	uint64_t status;
	int i, num_descriptors;
	uint64_t comp_update_bus_addr = 0;
	uint64_t phys_addr;

	if (ioat_map_pci_bar(ioat) != 0) {
		SPDK_ERRLOG("ioat_map_pci_bar() failed\n");
		return -1;
	}

	version = ioat->regs->cbver;
	if (version < SPDK_IOAT_VER_3_0) {
		SPDK_ERRLOG("unsupported IOAT version %u.%u\n",
			    version >> 4, version & 0xF);
		return -1;
	}

	/* Always support DMA copy */
	ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
	if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
		ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
	}
	xfercap = ioat->regs->xfercap;

	/* Only bits [4:0] are valid. */
	xfercap &= 0x1f;
	if (xfercap == 0) {
		/* 0 means 4 GB max transfer size. */
		ioat->max_xfer_size = 1ULL << 32;
	} else if (xfercap < 12) {
		/* XFERCAP must be at least 12 (4 KB) according to the spec. */
		SPDK_ERRLOG("invalid XFERCAP value %u\n", xfercap);
		return -1;
	} else {
		ioat->max_xfer_size = 1U << xfercap;
	}

	ioat->comp_update = spdk_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
					 NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (ioat->comp_update == NULL) {
		return -1;
	}

	comp_update_bus_addr = spdk_vtophys((void *)ioat->comp_update, NULL);
	if (comp_update_bus_addr == SPDK_VTOPHYS_ERROR) {
		spdk_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
		return -1;
	}

	ioat->ring_size_order = IOAT_DEFAULT_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
	if (!ioat->ring) {
		return -1;
	}

	ioat->hw_ring = spdk_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
				     NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!ioat->hw_ring) {
		return -1;
	}

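	/*
	 * Link the hardware descriptors into a circular chain: each descriptor's
	 * next pointer holds the physical address of the descriptor that follows
	 * it, and indexing the previous entry with (i - 1) masked to the ring
	 * size makes the last descriptor point back at the first.
	 */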
	for (i = 0; i < num_descriptors; i++) {
		phys_addr = spdk_vtophys(&ioat->hw_ring[i], NULL);
		if (phys_addr == SPDK_VTOPHYS_ERROR) {
			SPDK_ERRLOG("Failed to translate descriptor %u to physical address\n", i);
			return -1;
		}

		ioat->ring[i].phys_addr = phys_addr;
		ioat->hw_ring[ioat_get_ring_index(ioat, i - 1)].generic.next = phys_addr;
	}

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	ioat_reset_hw(ioat);

	ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	ioat_write_chancmp(ioat, comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0].phys_addr);

	ioat_prep_null(ioat);
	spdk_ioat_flush(ioat);

	i = 100;
	while (i-- > 0) {
		spdk_delay_us(100);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status)) {
			break;
		}
	}

	if (is_ioat_idle(status)) {
		ioat_process_channel_events(ioat);
	} else {
		SPDK_ERRLOG("could not start channel: status = %p, error = %#x\n",
			    (void *)status, ioat->regs->chanerr);
		return -1;
	}

	return 0;
}

/* Caller must hold g_ioat_driver.lock */
static struct spdk_ioat_chan *
ioat_attach(struct spdk_pci_device *device)
{
	struct spdk_ioat_chan *ioat;
	uint32_t cmd_reg;

	ioat = calloc(1, sizeof(struct spdk_ioat_chan));
	if (ioat == NULL) {
		return NULL;
	}

	/* Enable PCI bus mastering: set bit 2 of the PCI command register (config offset 4). */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	ioat->device = device;

	if (ioat_channel_start(ioat) != 0) {
		ioat_channel_destruct(ioat);
		free(ioat);
		return NULL;
	}

	return ioat;
}

struct ioat_enum_ctx {
	spdk_ioat_probe_cb probe_cb;
	spdk_ioat_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_ioat_driver.lock */
static int
ioat_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct ioat_enum_ctx *enum_ctx = ctx;
	struct spdk_ioat_chan *ioat;

	/* Verify that this device is not already attached */
	TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
		/*
		 * NOTE: This assumes that the PCI abstraction layer will use the same device handle
		 *  across enumerations; we could compare by BDF instead if this is not true.
		 */
		if (pci_dev == ioat->device) {
			return 0;
		}
	}

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		/*
		 * Since I/OAT init is relatively quick, just perform the full init during probing.
		 *  If this turns out to be a bottleneck later, this can be changed to work like
		 *  NVMe with a list of devices to initialize in parallel.
		 */
		ioat = ioat_attach(pci_dev);
		if (ioat == NULL) {
			SPDK_ERRLOG("ioat_attach() failed\n");
			return -1;
		}

		TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);

		enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
	}

	return 0;
}

int
spdk_ioat_probe(void *cb_ctx, spdk_ioat_probe_cb probe_cb, spdk_ioat_attach_cb attach_cb)
{
	int rc;
	struct ioat_enum_ctx enum_ctx;

	pthread_mutex_lock(&g_ioat_driver.lock);

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = spdk_pci_enumerate(spdk_pci_ioat_get_driver(), ioat_enum_cb, &enum_ctx);

	pthread_mutex_unlock(&g_ioat_driver.lock);

	return rc;
}

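/*
 * A minimal usage sketch (illustrative only, not part of this library): grab a
 * channel during probe, submit a copy, then poll until its callback fires and
 * detach. The probe_cb/attach_cb/copy_cb helpers and the copy_done flag are
 * hypothetical application code; dst, src and len are assumed to describe
 * DMA-able memory (e.g. allocated with spdk_dma_zmalloc()), and error checking
 * is omitted for brevity.
 *
 *	static void copy_cb(void *arg) { *(bool *)arg = true; }
 *
 *	static bool probe_cb(void *cb_ctx, struct spdk_pci_device *dev) { return true; }
 *
 *	static void attach_cb(void *cb_ctx, struct spdk_pci_device *dev,
 *			      struct spdk_ioat_chan *chan)
 *	{
 *		*(struct spdk_ioat_chan **)cb_ctx = chan;
 *	}
 *
 *	struct spdk_ioat_chan *chan = NULL;
 *	bool copy_done = false;
 *
 *	spdk_ioat_probe(&chan, probe_cb, attach_cb);
 *	spdk_ioat_submit_copy(chan, &copy_done, copy_cb, dst, src, len);
 *	while (!copy_done) {
 *		spdk_ioat_process_events(chan);
 *	}
 *	spdk_ioat_detach(chan);
 */
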
void
spdk_ioat_detach(struct spdk_ioat_chan *ioat)
{
	struct ioat_driver	*driver = &g_ioat_driver;

	/* The channel must no longer be in use by any thread
	 * when spdk_ioat_detach() is called.
	 */
	pthread_mutex_lock(&driver->lock);
	TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
	pthread_mutex_unlock(&driver->lock);

	ioat_channel_destruct(ioat);
	free(ioat);
}

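/*
 * Build (but do not flush) copy descriptors for an arbitrary virtual buffer.
 * The buffer is translated with spdk_vtophys() and split into one descriptor
 * per physically contiguous chunk, further capped at max_xfer_size; only the
 * final descriptor carries the completion callback. On success the caller is
 * expected to call spdk_ioat_flush() to kick off the transfer.
 */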
int
spdk_ioat_build_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		     void *dst, const void *src, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc = NULL;
	uint64_t	remaining, op_size;
	uint64_t	vdst, vsrc;
	uint64_t	pdst_addr, psrc_addr, dst_len, src_len;
	uint32_t	orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	vsrc = (uint64_t)src;

	remaining = nbytes;
	while (remaining) {
		src_len = dst_len = remaining;

		psrc_addr = spdk_vtophys((void *)vsrc, &src_len);
		if (psrc_addr == SPDK_VTOPHYS_ERROR) {
			return -EINVAL;
		}
		pdst_addr = spdk_vtophys((void *)vdst, &dst_len);
		if (pdst_addr == SPDK_VTOPHYS_ERROR) {
			return -EINVAL;
		}

		op_size = spdk_min(dst_len, src_len);
		op_size = spdk_min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_copy(ioat, pdst_addr, psrc_addr, op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vsrc += op_size;
		vdst += op_size;
	}

	/* Issue a null descriptor for a zero-length transfer */
	if (nbytes == 0) {
		last_desc = ioat_prep_null(ioat);
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	return 0;
}

int
spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, const void *src, uint64_t nbytes)
{
	int rc;

	rc = spdk_ioat_build_copy(ioat, cb_arg, cb_fn, dst, src, nbytes);
	if (rc != 0) {
		return rc;
	}

	spdk_ioat_flush(ioat);
	return 0;
}

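/*
 * Build (but do not flush) fill descriptors. This requires hardware block-fill
 * support (SPDK_IOAT_ENGINE_FILL_SUPPORTED); the destination is split along
 * physically contiguous chunks just like spdk_ioat_build_copy(), and only the
 * final descriptor carries the completion callback.
 */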
int
spdk_ioat_build_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		     void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	struct ioat_descriptor	*last_desc = NULL;
	uint64_t	remaining, op_size;
	uint64_t	vdst;
	uint64_t	pdst_addr, dst_len;
	uint32_t	orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
		SPDK_ERRLOG("Channel does not support memory fill\n");
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	remaining = nbytes;

	while (remaining) {
		dst_len = remaining;
		pdst_addr = spdk_vtophys((void *)vdst, &dst_len);
		if (pdst_addr == SPDK_VTOPHYS_ERROR) {
			return -EINVAL;
		}

		op_size = spdk_min(dst_len, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_fill(ioat, pdst_addr, fill_pattern, op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vdst += op_size;
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	return 0;
}

int
spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	int rc;

	rc = spdk_ioat_build_fill(ioat, cb_arg, cb_fn, dst, fill_pattern, nbytes);
	if (rc != 0) {
		return rc;
	}

	spdk_ioat_flush(ioat);
	return 0;
}

uint32_t
spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
{
	if (!ioat) {
		return 0;
	}
	return ioat->dma_capabilities;
}

int
spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
{
	return ioat_process_channel_events(ioat);
}

SPDK_LOG_REGISTER_COMPONENT(ioat)