/*-
 * BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "ioat_internal.h"

#include "spdk/env.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"
#include "spdk_internal/memory.h"

struct ioat_driver {
	pthread_mutex_t			lock;
	TAILQ_HEAD(, spdk_ioat_chan)	attached_chans;
};

static struct ioat_driver g_ioat_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.attached_chans = TAILQ_HEAD_INITIALIZER(g_ioat_driver.attached_chans),
};

static uint64_t
ioat_get_chansts(struct spdk_ioat_chan *ioat)
{
	return spdk_mmio_read_8(&ioat->regs->chansts);
}

static void
ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chancmp, addr);
}

static void
ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
{
	spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
}

static inline void
ioat_suspend(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}

static inline void
ioat_reset(struct spdk_ioat_chan *ioat)
{
	ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}

static inline uint32_t
ioat_reset_pending(struct spdk_ioat_chan *ioat)
{
	uint8_t cmd;

	cmd = ioat->regs->chancmd;
	return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}

static int
ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
{
	int regs_bar, rc;
	void *addr;
	uint64_t phys_addr, size;

	regs_bar = 0;
	rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("pci_device_map_range failed with error code %d\n",
			    rc);
		return -1;
	}

	ioat->regs = (volatile struct spdk_ioat_registers *)addr;

	return 0;
}

static int
ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
{
	int rc = 0;
	void *addr = (void *)ioat->regs;

	if (addr) {
		rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
	}
	return rc;
}

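/*
 * Ring bookkeeping helpers (descriptive comment, added for clarity).
 *
 * head and tail are free-running 32-bit counters; masking with
 * ((1 << ring_size_order) - 1) converts them into ring slots.  One slot is
 * intentionally left unused (see ioat_get_ring_space) so that a full ring
 * can be distinguished from an empty one.
 */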
static inline uint32_t
ioat_get_active(struct spdk_ioat_chan *ioat)
{
	return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}

static inline uint32_t
ioat_get_ring_space(struct spdk_ioat_chan *ioat)
{
	return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}

static uint32_t
ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
{
	return index & ((1 << ioat->ring_size_order) - 1);
}

static void
ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
		    struct ioat_descriptor **desc,
		    union spdk_ioat_hw_desc **hw_desc)
{
	uint32_t i = ioat_get_ring_index(ioat, index);

	*desc = &ioat->ring[i];
	*hw_desc = &ioat->hw_ring[i];
}

static void
ioat_submit_single(struct spdk_ioat_chan *ioat)
{
	ioat->head++;
}

void
spdk_ioat_flush(struct spdk_ioat_chan *ioat)
{
	uint32_t index = ioat_get_ring_index(ioat, ioat->head - 1);
	union spdk_ioat_hw_desc *hw_desc;

	hw_desc = &ioat->hw_ring[index];
	hw_desc->dma.u.control.completion_update = 1;
	ioat->regs->dmacount = (uint16_t)ioat->head;
}

static struct ioat_descriptor *
ioat_prep_null(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
	hw_desc->dma.u.control.null = 1;

	hw_desc->dma.size = 8;
	hw_desc->dma.src_addr = 0;
	hw_desc->dma.dest_addr = 0;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t src, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->dma.u.control_raw = 0;
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;

	hw_desc->dma.size = len;
	hw_desc->dma.src_addr = src;
	hw_desc->dma.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

static struct ioat_descriptor *
ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
	       uint64_t fill_pattern, uint32_t len)
{
	struct ioat_descriptor *desc;
	union spdk_ioat_hw_desc *hw_desc;

	assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

	hw_desc->fill.u.control_raw = 0;
	hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;

	hw_desc->fill.size = len;
	hw_desc->fill.src_data = fill_pattern;
	hw_desc->fill.dest_addr = dst;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}

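/*
 * Quiesce and reset the channel (descriptive comment, added for clarity):
 * suspend it if it is active or idle, wait for the suspend to take effect,
 * clear CHANERR (write-1-to-clear), clear the internal error register through
 * PCI config space on pre-3.3 hardware, then issue a reset and poll until the
 * RESET bit in CHANCMD self-clears.
 */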
static int
ioat_reset_hw(struct spdk_ioat_chan *ioat)
{
	int timeout;
	uint64_t status;
	uint32_t chanerr;
	int rc;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_suspend(ioat);
	}

	timeout = 20; /* in milliseconds */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for suspend\n");
			return -1;
		}
		status = ioat_get_chansts(ioat);
	}

	/*
	 * Clear any outstanding errors.
	 * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
	 */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	if (ioat->regs->cbver < SPDK_IOAT_VER_3_3) {
		rc = spdk_pci_device_cfg_read32(ioat->device, &chanerr,
						SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
		if (rc) {
			SPDK_ERRLOG("failed to read the internal channel error register\n");
			return -1;
		}

		spdk_pci_device_cfg_write32(ioat->device, chanerr,
					    SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
	}

	ioat_reset(ioat);

	timeout = 20;
	while (ioat_reset_pending(ioat)) {
		spdk_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			SPDK_ERRLOG("timed out waiting for reset\n");
			return -1;
		}
	}

	return 0;
}

static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
	struct ioat_descriptor *desc;
	uint64_t status, completed_descriptor, hw_desc_phys_addr;
	uint32_t tail;

	if (ioat->head == ioat->tail) {
		return 0;
	}

	status = *ioat->comp_update;
	completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (is_ioat_halted(status)) {
		SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
		return -1;
	}

	if (completed_descriptor == ioat->last_seen) {
		return 0;
	}

	do {
		tail = ioat_get_ring_index(ioat, ioat->tail);
		desc = &ioat->ring[tail];

		if (desc->callback_fn) {
			desc->callback_fn(desc->callback_arg);
		}

		hw_desc_phys_addr = desc->phys_addr;
		ioat->tail++;
	} while (hw_desc_phys_addr != completed_descriptor);

	ioat->last_seen = hw_desc_phys_addr;
	return 0;
}

static void
ioat_channel_destruct(struct spdk_ioat_chan *ioat)
{
	ioat_unmap_pci_bar(ioat);

	if (ioat->ring) {
		free(ioat->ring);
	}

	if (ioat->hw_ring) {
		spdk_free(ioat->hw_ring);
	}

	if (ioat->comp_update) {
		spdk_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
	}

	spdk_pci_device_detach(ioat->device);
}

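/*
 * Bring a channel from reset to a usable state (descriptive comment, added for
 * clarity): map the MMIO BAR, verify the hardware is at least version 3.0,
 * record the copy/fill capabilities and maximum transfer size, allocate the
 * completion-update area and the software and hardware descriptor rings, link
 * the hardware descriptors into a circular chain, then reset the channel,
 * program the completion and chain addresses, and submit a null descriptor to
 * confirm the channel reaches the idle state.
 */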
static int
ioat_channel_start(struct spdk_ioat_chan *ioat)
{
	uint8_t xfercap, version;
	uint64_t status;
	int i, num_descriptors;
	uint64_t comp_update_bus_addr = 0;
	uint64_t phys_addr;

	if (ioat_map_pci_bar(ioat) != 0) {
		SPDK_ERRLOG("ioat_map_pci_bar() failed\n");
		return -1;
	}

	version = ioat->regs->cbver;
	if (version < SPDK_IOAT_VER_3_0) {
		SPDK_ERRLOG(" unsupported IOAT version %u.%u\n",
			    version >> 4, version & 0xF);
		return -1;
	}

	/* Always support DMA copy */
	ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
	if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
		ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
	}
	xfercap = ioat->regs->xfercap;

	/* Only bits [4:0] are valid. */
	xfercap &= 0x1f;
	if (xfercap == 0) {
		/* 0 means 4 GB max transfer size. */
		ioat->max_xfer_size = 1ULL << 32;
	} else if (xfercap < 12) {
		/* XFERCAP must be at least 12 (4 KB) according to the spec. */
		SPDK_ERRLOG("invalid XFERCAP value %u\n", xfercap);
		return -1;
	} else {
		ioat->max_xfer_size = 1U << xfercap;
	}

	ioat->comp_update = spdk_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
					 NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (ioat->comp_update == NULL) {
		return -1;
	}

	comp_update_bus_addr = spdk_vtophys((void *)ioat->comp_update, NULL);
	if (comp_update_bus_addr == SPDK_VTOPHYS_ERROR) {
		spdk_free((void *)ioat->comp_update);
		return -1;
	}

	ioat->ring_size_order = IOAT_DEFAULT_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
	if (!ioat->ring) {
		return -1;
	}

	ioat->hw_ring = spdk_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
				     NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!ioat->hw_ring) {
		return -1;
	}

	for (i = 0; i < num_descriptors; i++) {
		phys_addr = spdk_vtophys(&ioat->hw_ring[i], NULL);
		if (phys_addr == SPDK_VTOPHYS_ERROR) {
			SPDK_ERRLOG("Failed to translate descriptor %u to physical address\n", i);
			return -1;
		}

		ioat->ring[i].phys_addr = phys_addr;
		ioat->hw_ring[ioat_get_ring_index(ioat, i - 1)].generic.next = phys_addr;
	}

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	ioat_reset_hw(ioat);

	ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	ioat_write_chancmp(ioat, comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0].phys_addr);

	ioat_prep_null(ioat);
	spdk_ioat_flush(ioat);

	i = 100;
	while (i-- > 0) {
		spdk_delay_us(100);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status)) {
			break;
		}
	}

	if (is_ioat_idle(status)) {
		ioat_process_channel_events(ioat);
	} else {
		SPDK_ERRLOG("could not start channel: status = %p\n error = %#x\n",
			    (void *)status, ioat->regs->chanerr);
		return -1;
	}

	return 0;
}

/* Caller must hold g_ioat_driver.lock */
static struct spdk_ioat_chan *
ioat_attach(struct spdk_pci_device *device)
{
	struct spdk_ioat_chan *ioat;
	uint32_t cmd_reg;

	ioat = calloc(1, sizeof(struct spdk_ioat_chan));
	if (ioat == NULL) {
		return NULL;
	}

	/* Enable PCI busmaster. */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	ioat->device = device;

	if (ioat_channel_start(ioat) != 0) {
		ioat_channel_destruct(ioat);
		free(ioat);
		return NULL;
	}

	return ioat;
}

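/*
 * Context passed through spdk_pci_enumerate() to ioat_enum_cb(); it simply
 * carries the caller's probe/attach callbacks and opaque context pointer
 * (descriptive comment, added for clarity).
 */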
struct ioat_enum_ctx {
	spdk_ioat_probe_cb probe_cb;
	spdk_ioat_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_ioat_driver.lock */
static int
ioat_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct ioat_enum_ctx *enum_ctx = ctx;
	struct spdk_ioat_chan *ioat;

	/* Verify that this device is not already attached */
	TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
		/*
		 * NOTE: This assumes that the PCI abstraction layer will use the same device handle
		 * across enumerations; we could compare by BDF instead if this is not true.
		 */
		if (pci_dev == ioat->device) {
			return 0;
		}
	}

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		/*
		 * Since I/OAT init is relatively quick, just perform the full init during probing.
		 * If this turns out to be a bottleneck later, this can be changed to work like
		 * NVMe with a list of devices to initialize in parallel.
		 */
		ioat = ioat_attach(pci_dev);
		if (ioat == NULL) {
			SPDK_ERRLOG("ioat_attach() failed\n");
			return -1;
		}

		TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);

		enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
	}

	return 0;
}

int
spdk_ioat_probe(void *cb_ctx, spdk_ioat_probe_cb probe_cb, spdk_ioat_attach_cb attach_cb)
{
	int rc;
	struct ioat_enum_ctx enum_ctx;

	pthread_mutex_lock(&g_ioat_driver.lock);

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = spdk_pci_enumerate(spdk_pci_ioat_get_driver(), ioat_enum_cb, &enum_ctx);

	pthread_mutex_unlock(&g_ioat_driver.lock);

	return rc;
}

void
spdk_ioat_detach(struct spdk_ioat_chan *ioat)
{
	struct ioat_driver *driver = &g_ioat_driver;

	/* ioat should be in the free list (not registered to a thread)
	 * when calling ioat_detach().
	 */
	pthread_mutex_lock(&driver->lock);
	TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
	pthread_mutex_unlock(&driver->lock);

	ioat_channel_destruct(ioat);
	free(ioat);
}

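/*
 * Build (but do not flush) copy descriptors for an arbitrary user buffer
 * (descriptive comment, added for clarity).  Buffers are only guaranteed to be
 * physically contiguous within a 2 MB hugepage, so the request is split at
 * every 2 MB boundary of the source and destination as well as at the
 * channel's max_xfer_size, and each piece gets its own hardware descriptor.
 * Only the last descriptor carries the user's completion callback.
 */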
int
spdk_ioat_build_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		     void *dst, const void *src, uint64_t nbytes)
{
	struct ioat_descriptor *last_desc;
	uint64_t remaining, op_size;
	uint64_t vdst, vsrc;
	uint64_t vdst_page, vsrc_page;
	uint64_t pdst_page, psrc_page;
	uint32_t orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	vsrc = (uint64_t)src;
	vdst_page = vsrc_page = 0;
	pdst_page = psrc_page = SPDK_VTOPHYS_ERROR;

	remaining = nbytes;
	while (remaining) {
		if (_2MB_PAGE(vsrc) != vsrc_page) {
			vsrc_page = _2MB_PAGE(vsrc);
			psrc_page = spdk_vtophys((void *)vsrc_page, NULL);
		}

		if (_2MB_PAGE(vdst) != vdst_page) {
			vdst_page = _2MB_PAGE(vdst);
			pdst_page = spdk_vtophys((void *)vdst_page, NULL);
		}
		op_size = remaining;
		op_size = spdk_min(op_size, (VALUE_2MB - _2MB_OFFSET(vsrc)));
		op_size = spdk_min(op_size, (VALUE_2MB - _2MB_OFFSET(vdst)));
		op_size = spdk_min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_copy(ioat,
					   pdst_page + _2MB_OFFSET(vdst),
					   psrc_page + _2MB_OFFSET(vsrc),
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vsrc += op_size;
		vdst += op_size;
	}

	/* Issue null descriptor for null transfer */
	if (nbytes == 0) {
		last_desc = ioat_prep_null(ioat);
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	return 0;
}

int
spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, const void *src, uint64_t nbytes)
{
	int rc;

	rc = spdk_ioat_build_copy(ioat, cb_arg, cb_fn, dst, src, nbytes);
	if (rc != 0) {
		return rc;
	}

	spdk_ioat_flush(ioat);
	return 0;
}

int
spdk_ioat_build_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		     void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	struct ioat_descriptor *last_desc = NULL;
	uint64_t remaining, op_size;
	uint64_t vdst;
	uint32_t orig_head;

	if (!ioat) {
		return -EINVAL;
	}

	if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
		SPDK_ERRLOG("Channel does not support memory fill\n");
		return -1;
	}

	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	remaining = nbytes;

	while (remaining) {
		op_size = remaining;
		op_size = spdk_min(op_size, (VALUE_2MB - _2MB_OFFSET(vdst)));
		op_size = spdk_min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_fill(ioat,
					   spdk_vtophys((void *)vdst, NULL),
					   fill_pattern,
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vdst += op_size;
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -ENOMEM;
	}

	return 0;
}

int
spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
		      void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
	int rc;

	rc = spdk_ioat_build_fill(ioat, cb_arg, cb_fn, dst, fill_pattern, nbytes);
	if (rc != 0) {
		return rc;
	}

	spdk_ioat_flush(ioat);
	return 0;
}

uint32_t
spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
{
	if (!ioat) {
		return 0;
	}
	return ioat->dma_capabilities;
}

int
spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
{
	return ioat_process_channel_events(ioat);
}

SPDK_LOG_REGISTER_COMPONENT("ioat", SPDK_LOG_IOAT)
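
/*
 * Illustrative usage sketch (not compiled here; see include/spdk/ioat.h for
 * the authoritative prototypes).  A typical flow is to claim channels during
 * probe, submit a copy, and poll for completion:
 *
 *	static bool probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
 *	{
 *		return true;	// claim every I/OAT channel found
 *	}
 *
 *	static void attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev,
 *			      struct spdk_ioat_chan *chan)
 *	{
 *		*(struct spdk_ioat_chan **)cb_ctx = chan;	// remember one channel
 *	}
 *
 *	static void copy_done(void *arg)
 *	{
 *		*(bool *)arg = true;
 *	}
 *
 *	// After spdk_env_init(), with dst/src in SPDK DMA-safe memory:
 *	//	struct spdk_ioat_chan *chan = NULL;
 *	//	bool done = false;
 *	//	spdk_ioat_probe(&chan, probe_cb, attach_cb);
 *	//	spdk_ioat_submit_copy(chan, &done, copy_done, dst, src, len);
 *	//	while (!done) { spdk_ioat_process_events(chan); }
 *	//	spdk_ioat_detach(chan);
 */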