/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#ifndef FTL_NV_CACHE_IO_H
#define FTL_NV_CACHE_IO_H

#include "spdk/bdev.h"
#include "ftl_core.h"

#ifndef SPDK_FTL_VSS_EMU

/*
 * Non-emulated path: the cache bdev handles per-block metadata itself, so these
 * helpers forward the request (including the metadata buffer) directly to the
 * bdev layer.
 */

static inline int
ftl_nv_cache_bdev_readv_blocks_with_md(struct spdk_ftl_dev *dev,
				       struct spdk_bdev_desc *desc,
				       struct spdk_io_channel *ch,
				       struct iovec *iov, int iovcnt, void *md,
				       uint64_t offset_blocks, uint64_t num_blocks,
				       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md,
					      offset_blocks, num_blocks,
					      cb, cb_arg);
}

static inline int
ftl_nv_cache_bdev_writev_blocks_with_md(struct spdk_ftl_dev *dev,
					struct spdk_bdev_desc *desc,
					struct spdk_io_channel *ch,
					struct iovec *iov, int iovcnt, void *md_buf,
					uint64_t offset_blocks, uint64_t num_blocks,
					spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf,
					       offset_blocks, num_blocks,
					       cb, cb_arg);
}

static inline int
ftl_nv_cache_bdev_read_blocks_with_md(struct spdk_ftl_dev *dev,
				      struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch,
				      void *buf, void *md,
				      uint64_t offset_blocks, uint64_t num_blocks,
				      spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Use the global scratch buffer when the caller does not provide a metadata buffer */
	return spdk_bdev_read_blocks_with_md(desc, ch, buf, md ? : g_ftl_read_buf,
					     offset_blocks, num_blocks,
					     cb, cb_arg);
}

static inline int
ftl_nv_cache_bdev_write_blocks_with_md(struct spdk_ftl_dev *dev,
				       struct spdk_bdev_desc *desc,
				       struct spdk_io_channel *ch,
				       void *buf, void *md,
				       uint64_t offset_blocks, uint64_t num_blocks,
				       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Use the global scratch buffer when the caller does not provide a metadata buffer */
	return spdk_bdev_write_blocks_with_md(desc, ch, buf, md ? : g_ftl_write_buf,
					      offset_blocks, num_blocks,
					      cb, cb_arg);
}

#else

/*
 * Emulated VSS path: per-block metadata is kept in the FTL_LAYOUT_REGION_TYPE_VSS
 * metadata region and copied between that region and the caller's buffer around
 * plain (metadata-less) block I/O.
 */

/* TODO: Maybe we can add a non-power-fail-safe support for VSS in AIO bdev and get rid of this */

/* Copy emulated VSS metadata for blocks [offset_blocks, offset_blocks + num_blocks) into md_buf */
static inline void
ftl_nv_cache_bdev_get_md(struct spdk_ftl_dev *dev,
			 uint64_t offset_blocks, uint64_t num_blocks,
			 void *md_buf)
{
	struct ftl_md *vss = dev->layout.md[FTL_LAYOUT_REGION_TYPE_VSS];
	union ftl_md_vss *src;
	union ftl_md_vss *dst = md_buf;
	union ftl_md_vss *dst_end;

	assert(offset_blocks + num_blocks <= dev->layout.nvc.total_blocks);

	if (!md_buf) {
		return;
	}

	dst_end = dst + num_blocks;
	src = ftl_md_get_buffer(vss);
	src += offset_blocks;
	while (dst < dst_end) {
		*dst = *src;
		dst++;
		src++;
	}
}

static inline int
ftl_nv_cache_bdev_readv_blocks_with_md(struct spdk_ftl_dev *dev,
				       struct spdk_bdev_desc *desc,
				       struct spdk_io_channel *ch,
				       struct iovec *iov, int iovcnt, void *md,
				       uint64_t offset_blocks, uint64_t num_blocks,
				       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	assert(desc == dev->nv_cache.bdev_desc);
	ftl_nv_cache_bdev_get_md(dev, offset_blocks, num_blocks, md);
	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks,
				      num_blocks, cb, cb_arg);
}

/* Store the caller's VSS metadata for blocks [offset_blocks, offset_blocks + num_blocks) in the emulated VSS region */
static inline void
ftl_nv_cache_bdev_set_md(struct spdk_ftl_dev *dev,
			 uint64_t offset_blocks, uint64_t num_blocks,
			 void *md_buf)
{
	struct ftl_md *vss = dev->layout.md[FTL_LAYOUT_REGION_TYPE_VSS];
	union ftl_md_vss *src = md_buf;
	union ftl_md_vss *src_end;
	union ftl_md_vss *dst;

	assert(offset_blocks + num_blocks <= dev->layout.nvc.total_blocks);

	if (!md_buf) {
		return;
	}

	src_end = src + num_blocks;
	dst = ftl_md_get_buffer(vss);
	dst += offset_blocks;
	while (src < src_end) {
		*dst = *src;
		dst++;
		src++;
	}
}

static inline int
ftl_nv_cache_bdev_writev_blocks_with_md(struct spdk_ftl_dev *dev,
					struct spdk_bdev_desc *desc,
					struct spdk_io_channel *ch,
					struct iovec *iov, int iovcnt, void *md_buf,
					uint64_t offset_blocks, uint64_t num_blocks,
					spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	assert(desc == dev->nv_cache.bdev_desc);
	ftl_nv_cache_bdev_set_md(dev, offset_blocks, num_blocks, md_buf);
	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt,
				       offset_blocks, num_blocks,
				       cb, cb_arg);
}

static inline int
ftl_nv_cache_bdev_read_blocks_with_md(struct spdk_ftl_dev *dev,
				      struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch,
				      void *buf, void *md,
				      uint64_t offset_blocks, uint64_t num_blocks,
				      spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	assert(desc == dev->nv_cache.bdev_desc);
	ftl_nv_cache_bdev_get_md(dev, offset_blocks, num_blocks, md);
	return spdk_bdev_read_blocks(desc, ch, buf, offset_blocks,
				     num_blocks, cb, cb_arg);
}

static inline int
ftl_nv_cache_bdev_write_blocks_with_md(struct spdk_ftl_dev *dev,
				       struct spdk_bdev_desc *desc,
				       struct spdk_io_channel *ch,
				       void *buf, void *md,
				       uint64_t offset_blocks, uint64_t num_blocks,
				       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	assert(desc == dev->nv_cache.bdev_desc);
	ftl_nv_cache_bdev_set_md(dev, offset_blocks, num_blocks, md);
	return spdk_bdev_write_blocks(desc, ch, buf,
				      offset_blocks, num_blocks,
				      cb, cb_arg);
}

#endif /* SPDK_FTL_VSS_EMU */
#endif /* FTL_NV_CACHE_IO_H */