Lines Matching defs:ioat
27 ioat_get_chansts(struct spdk_ioat_chan *ioat)
29 return spdk_mmio_read_8(&ioat->regs->chansts);
33 ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
35 spdk_mmio_write_8(&ioat->regs->chancmp, addr);
39 ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
41 spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
45 ioat_suspend(struct spdk_ioat_chan *ioat)
47 ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
51 ioat_reset(struct spdk_ioat_chan *ioat)
53 ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
57 ioat_reset_pending(struct spdk_ioat_chan *ioat)
61 cmd = ioat->regs->chancmd;
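
The accessors listed above (lines 27-61) wrap every read and write of the channel's memory-mapped registers: status reads, the completion-writeback and chain-address setup, and the suspend/reset commands. A minimal sketch of the same pattern, using a hypothetical trimmed-down chan_regs layout and mmio_read_8/mmio_write_8 helpers in place of SPDK's full spdk_ioat_registers struct and spdk_mmio_* functions (field offsets here are illustrative, not the real hardware layout):

    #include <stdint.h>

    /* Hypothetical, trimmed-down register block for illustration only;
     * the real spdk_ioat_registers has many more fields and fixed offsets. */
    struct chan_regs {
        uint8_t  chancmd;      /* channel command (suspend/reset) */
        uint8_t  reserved[7];
        uint64_t chansts;      /* channel status */
        uint64_t chancmp;      /* completion writeback address */
        uint64_t chainaddr;    /* physical address of first descriptor */
    };

    /* 8-byte MMIO read/write: the volatile access keeps the compiler from
     * caching or reordering the register operation. */
    static inline uint64_t mmio_read_8(volatile uint64_t *addr)
    {
        return *addr;
    }

    static inline void mmio_write_8(volatile uint64_t *addr, uint64_t val)
    {
        *addr = val;
    }

    static inline uint64_t chan_get_chansts(volatile struct chan_regs *regs)
    {
        return mmio_read_8(&regs->chansts);
    }

    static inline void chan_write_chancmp(volatile struct chan_regs *regs, uint64_t addr)
    {
        mmio_write_8(&regs->chancmp, addr);
    }
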
66 ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
73 rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
80 ioat->regs = (volatile struct spdk_ioat_registers *)addr;
86 ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
89 void *addr = (void *)ioat->regs;
92 rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
99 ioat_get_active(struct spdk_ioat_chan *ioat)
101 return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
105 ioat_get_ring_space(struct spdk_ioat_chan *ioat)
107 return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
111 ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
113 return index & ((1 << ioat->ring_size_order) - 1);
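
The ring bookkeeping at lines 99-113 relies on the ring size being a power of two (1 << ring_size_order): head and tail are free-running counters, the index wrap is a bitwise AND, and the active count is a plain unsigned subtraction. A standalone sketch of that arithmetic, with nothing assumed from SPDK:

    #include <assert.h>
    #include <stdint.h>

    struct ring {
        uint32_t head;            /* next descriptor to prepare (free-running) */
        uint32_t tail;            /* next descriptor to retire (free-running) */
        uint32_t ring_size_order; /* ring holds 1 << ring_size_order entries */
    };

    static uint32_t ring_active(const struct ring *r)
    {
        /* Correct even after head/tail wrap: unsigned subtraction is modulo
         * 2^32 and the power-of-two ring size divides 2^32. */
        return (r->head - r->tail) & ((1u << r->ring_size_order) - 1);
    }

    static uint32_t ring_space(const struct ring *r)
    {
        /* One slot is kept unused so a full ring is distinguishable from an
         * empty one without extra state. */
        return (1u << r->ring_size_order) - ring_active(r) - 1;
    }

    static uint32_t ring_index(const struct ring *r, uint32_t index)
    {
        return index & ((1u << r->ring_size_order) - 1);
    }

    int main(void)
    {
        struct ring r = { .head = 10, .tail = 7, .ring_size_order = 4 }; /* 16 entries */
        assert(ring_active(&r) == 3);
        assert(ring_space(&r) == 12);
        assert(ring_index(&r, 17) == 1);
        return 0;
    }
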
117 ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
121 uint32_t i = ioat_get_ring_index(ioat, index);
123 *desc = &ioat->ring[i];
124 *hw_desc = &ioat->hw_ring[i];
128 ioat_submit_single(struct spdk_ioat_chan *ioat)
130 ioat->head++;
134 spdk_ioat_flush(struct spdk_ioat_chan *ioat)
136 uint32_t index = ioat_get_ring_index(ioat, ioat->head - 1);
139 hw_desc = &ioat->hw_ring[index];
141 ioat->regs->dmacount = (uint16_t)ioat->head;
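
Note the split between lines 128-130 and 134-141: ioat_submit_single() only advances the software head, and nothing reaches the hardware until spdk_ioat_flush() writes the low 16 bits of head into the dmacount doorbell register (in the full source, the flush also flags the last prepared descriptor, fetched at line 139, so the hardware posts a completion update for it). A hedged sketch of that split, with a plain pointer standing in for the real MMIO doorbell:

    #include <stdint.h>

    /* Illustrative only: 'doorbell' stands in for ioat->regs->dmacount. */
    struct chan_sketch {
        uint32_t head;               /* free-running count of prepared descriptors */
        volatile uint16_t *doorbell; /* hardware only sees writes made here */
    };

    /* Stage one more descriptor: software state only, no MMIO. */
    static void submit_single(struct chan_sketch *chan)
    {
        chan->head++;
    }

    /* Publish everything staged so far with a single doorbell write. The
     * register is 16 bits wide, so only the low bits of head are written;
     * the hardware tracks the count modulo 2^16 just as the driver does. */
    static void flush(struct chan_sketch *chan)
    {
        *chan->doorbell = (uint16_t)chan->head;
    }

Batching several prep calls behind one flush keeps the expensive MMIO write off the per-descriptor path.
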
145 ioat_prep_null(struct spdk_ioat_chan *ioat)
150 if (ioat_get_ring_space(ioat) < 1) {
154 ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);
167 ioat_submit_single(ioat);
173 ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
179 assert(len <= ioat->max_xfer_size);
181 if (ioat_get_ring_space(ioat) < 1) {
185 ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);
197 ioat_submit_single(ioat);
203 ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
209 assert(len <= ioat->max_xfer_size);
211 if (ioat_get_ring_space(ioat) < 1) {
215 ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);
227 ioat_submit_single(ioat);
233 ioat_reset_hw(struct spdk_ioat_chan *ioat)
240 status = ioat_get_chansts(ioat);
242 ioat_suspend(ioat);
253 status = ioat_get_chansts(ioat);
260 chanerr = ioat->regs->chanerr;
261 ioat->regs->chanerr = chanerr;
263 if (ioat->regs->cbver < SPDK_IOAT_VER_3_3) {
264 rc = spdk_pci_device_cfg_read32(ioat->device, &chanerr,
271 spdk_pci_device_cfg_write32(ioat->device, chanerr,
275 ioat_reset(ioat);
278 while (ioat_reset_pending(ioat)) {
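
ioat_reset_hw() (lines 233-278) suspends the channel, waits for it to leave the active state, clears and writes back chanerr (going through PCI config space on pre-3.3 hardware), issues the reset, and then polls ioat_reset_pending() under a timeout. A sketch of that bounded-poll idiom, taking a caller-supplied reset_pending predicate in place of the real chancmd register read:

    #include <stdbool.h>
    #include <stdint.h>
    #include "spdk/env.h" /* spdk_delay_us() */

    /* Poll a "reset still in progress" predicate until it clears or a
     * timeout expires, mirroring the wait loop in ioat_reset_hw().
     * 'reset_pending' is a placeholder for reading the RESET bit back
     * out of the channel command register. */
    static int wait_for_reset(bool (*reset_pending)(void *ctx), void *ctx,
                              uint64_t timeout_ms)
    {
        uint64_t waited_ms = 0;

        while (reset_pending(ctx)) {
            if (waited_ms++ >= timeout_ms) {
                return -1; /* hardware never cleared the reset bit */
            }
            spdk_delay_us(1000); /* busy wait; this only runs at channel init */
        }
        return 0;
    }
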
291 ioat_process_channel_events(struct spdk_ioat_chan *ioat)
297 if (ioat->head == ioat->tail) {
301 status = *ioat->comp_update;
305 SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
309 if (completed_descriptor == ioat->last_seen) {
314 tail = ioat_get_ring_index(ioat, ioat->tail);
315 desc = &ioat->ring[tail];
322 ioat->tail++;
326 ioat->last_seen = hw_desc_phys_addr;
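
Completion handling (lines 291-326) works by comparing the address the hardware writes into *ioat->comp_update against the physical address of each ring descriptor: the driver walks forward from tail, invoking each descriptor's callback, until it passes the descriptor the hardware reported as last completed, and caches that address in last_seen so an idle poll returns immediately. A standalone sketch of that walk, with a hypothetical desc_sketch standing in for SPDK's ioat_descriptor:

    #include <stdint.h>

    /* Hypothetical, simplified descriptor; the real ioat_descriptor carries
     * additional per-request state. */
    struct desc_sketch {
        uint64_t phys_addr;          /* physical address of the HW descriptor */
        void (*callback)(void *arg); /* user completion callback, may be NULL */
        void *callback_arg;
    };

    struct chan_sketch {
        struct desc_sketch *ring;
        uint32_t tail;               /* free-running, same convention as head */
        uint32_t ring_mask;          /* (1 << ring_size_order) - 1 */
        uint64_t last_seen;          /* phys addr of last descriptor retired */
    };

    /* 'completed_addr' is the value read from the completion-update area,
     * i.e. the physical address of the most recently completed hardware
     * descriptor. Returns the number of descriptors retired. */
    static int retire_completions(struct chan_sketch *chan, uint64_t completed_addr)
    {
        int events = 0;

        if (completed_addr == chan->last_seen) {
            return 0; /* nothing new since the previous poll */
        }

        while (1) {
            struct desc_sketch *d = &chan->ring[chan->tail & chan->ring_mask];
            uint64_t phys = d->phys_addr;

            if (d->callback) {
                d->callback(d->callback_arg);
            }
            chan->tail++;
            events++;

            if (phys == completed_addr) {
                chan->last_seen = phys;
                break; /* retired everything the hardware reported done */
            }
        }
        return events;
    }
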
332 ioat_channel_destruct(struct spdk_ioat_chan *ioat)
334 ioat_unmap_pci_bar(ioat);
336 if (ioat->ring) {
337 free(ioat->ring);
340 if (ioat->hw_ring) {
341 spdk_free(ioat->hw_ring);
344 if (ioat->comp_update) {
345 spdk_free((void *)ioat->comp_update);
346 ioat->comp_update = NULL;
351 spdk_ioat_get_max_descriptors(struct spdk_ioat_chan *ioat)
353 return 1 << ioat->ring_size_order;
357 ioat_channel_start(struct spdk_ioat_chan *ioat)
365 if (ioat_map_pci_bar(ioat) != 0) {
370 version = ioat->regs->cbver;
378 ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
379 if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
380 ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
382 xfercap = ioat->regs->xfercap;
388 ioat->max_xfer_size = 1ULL << 32;
394 ioat->max_xfer_size = 1U << xfercap;
397 ioat->comp_update = spdk_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
399 if (ioat->comp_update == NULL) {
403 comp_update_bus_addr = spdk_vtophys((void *)ioat->comp_update, NULL);
408 ioat->ring_size_order = IOAT_DEFAULT_ORDER;
410 num_descriptors = 1 << ioat->ring_size_order;
412 ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
413 if (!ioat->ring) {
417 ioat->hw_ring = spdk_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
419 if (!ioat->hw_ring) {
424 phys_addr = spdk_vtophys(&ioat->hw_ring[i], NULL);
430 ioat->ring[i].phys_addr = phys_addr;
431 ioat->hw_ring[ioat_get_ring_index(ioat, i - 1)].generic.next = phys_addr;
434 ioat->head = 0;
435 ioat->tail = 0;
436 ioat->last_seen = 0;
438 ioat_reset_hw(ioat);
440 ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
441 ioat_write_chancmp(ioat, comp_update_bus_addr);
442 ioat_write_chainaddr(ioat, ioat->ring[0].phys_addr);
444 ioat_prep_null(ioat);
445 spdk_ioat_flush(ioat);
450 status = ioat_get_chansts(ioat);
457 ioat_process_channel_events(ioat);
460 (void *)status, ioat->regs->chanerr);
471 struct spdk_ioat_chan *ioat;
474 ioat = calloc(1, sizeof(struct spdk_ioat_chan));
475 if (ioat == NULL) {
484 ioat->device = device;
486 if (ioat_channel_start(ioat) != 0) {
487 ioat_channel_destruct(ioat);
488 free(ioat);
492 return ioat;
506 struct spdk_ioat_chan *ioat;
509 TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
514 if (pci_dev == ioat->device) {
525 ioat = ioat_attach(pci_dev);
526 if (ioat == NULL) {
531 TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);
533 enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
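
The internal attach path (lines 471-533) is driven from the public spdk_ioat_probe() entry point: the probe callback decides whether to claim a given PCI device, and the attach callback receives the resulting channel. A minimal usage sketch with error handling trimmed; g_chan is a hypothetical application variable, not part of the library:

    #include <stdbool.h>
    #include <stdio.h>
    #include "spdk/ioat.h"

    static struct spdk_ioat_chan *g_chan; /* hypothetical: keep the first channel found */

    static bool probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
    {
        /* Claim a device only while we still need a channel. */
        return g_chan == NULL;
    }

    static void attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev,
                          struct spdk_ioat_chan *chan)
    {
        g_chan = chan;
        printf("attached ioat channel, max descriptors: %u\n",
               spdk_ioat_get_max_descriptors(chan));
    }

    /* Call after spdk_env_init(); pair with spdk_ioat_detach() at shutdown. */
    static int init_ioat(void)
    {
        return spdk_ioat_probe(NULL, probe_cb, attach_cb);
    }
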
559 spdk_ioat_detach(struct spdk_ioat_chan *ioat)
563 /* ioat should be in the free list (not registered to a thread)
567 TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
570 ioat_channel_destruct(ioat);
571 free(ioat);
575 spdk_ioat_build_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
584 if (!ioat) {
588 orig_head = ioat->head;
607 op_size = spdk_min(op_size, ioat->max_xfer_size);
610 last_desc = ioat_prep_copy(ioat, pdst_addr, psrc_addr, op_size);
622 last_desc = ioat_prep_null(ioat);
633 ioat->head = orig_head;
641 spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
646 rc = spdk_ioat_build_copy(ioat, cb_arg, cb_fn, dst, src, nbytes);
651 spdk_ioat_flush(ioat);
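
From the application side, a copy is queued with spdk_ioat_submit_copy() (or batched through spdk_ioat_build_copy() calls followed by one spdk_ioat_flush()), and the completion callback fires from spdk_ioat_process_events() on the polling thread. A usage sketch under the assumption that dst and src are pinned, DMA-able buffers (for example from spdk_dma_zmalloc()); do_copy and copy_done_cb are hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>
    #include "spdk/env.h"
    #include "spdk/ioat.h"

    static bool g_copy_done;

    static void copy_done_cb(void *arg)
    {
        g_copy_done = true;
    }

    /* Queue one DMA copy on 'chan' and poll until the callback runs.
     * The driver splits 'len' into max_xfer_size chunks internally. */
    static int do_copy(struct spdk_ioat_chan *chan, void *dst, void *src, size_t len)
    {
        int rc;

        g_copy_done = false;
        rc = spdk_ioat_submit_copy(chan, NULL, copy_done_cb, dst, src, len);
        if (rc != 0) {
            return rc;
        }
        while (!g_copy_done) {
            spdk_ioat_process_events(chan);
        }
        return 0;
    }
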
656 spdk_ioat_build_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
665 if (!ioat) {
669 if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
674 orig_head = ioat->head;
686 op_size = spdk_min(dst_len, ioat->max_xfer_size);
689 last_desc = ioat_prep_fill(ioat, pdst_addr, fill_pattern, op_size);
706 ioat->head = orig_head;
714 spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
719 rc = spdk_ioat_build_fill(ioat, cb_arg, cb_fn, dst, fill_pattern, nbytes);
724 spdk_ioat_flush(ioat);
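
Block fill is an optional hardware capability, which is why the build path at line 669 checks SPDK_IOAT_ENGINE_FILL_SUPPORTED before queueing anything; callers can make the same check up front through spdk_ioat_get_dma_capabilities(). A short sketch along the same lines as the copy example above; do_fill and fill_done_cb are hypothetical names:

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include "spdk/ioat.h"

    static void fill_done_cb(void *arg)
    {
        /* completion handling would go here */
    }

    /* Fill 'len' bytes of 'dst' with a repeating 8-byte pattern, if the
     * channel's DMA engine supports block fill at all. */
    static int do_fill(struct spdk_ioat_chan *chan, void *dst,
                       uint64_t pattern, size_t len)
    {
        if (!(spdk_ioat_get_dma_capabilities(chan) & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
            return -ENOTSUP; /* fall back to memset() or similar */
        }
        return spdk_ioat_submit_fill(chan, NULL, fill_done_cb, dst, pattern, len);
    }
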
729 spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
731 if (!ioat) {
734 return ioat->dma_capabilities;
738 spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
740 return ioat_process_channel_events(ioat);
743 SPDK_LOG_REGISTER_COMPONENT(ioat)