/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/ftl.h"
#include "spdk/likely.h"
#include "spdk/util.h"

#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_debug.h"

/* Track the number of requests in flight, both per IO and per device. */
void
ftl_io_inc_req(struct ftl_io *io)
{
	io->dev->num_inflight++;
	io->req_cnt++;
}

void
ftl_io_dec_req(struct ftl_io *io)
{
	assert(io->dev->num_inflight > 0);
	assert(io->req_cnt > 0);

	io->dev->num_inflight--;
	io->req_cnt--;
}

struct iovec *
ftl_io_iovec(struct ftl_io *io)
{
	return &io->iov[0];
}

uint64_t
ftl_io_get_lba(const struct ftl_io *io, size_t offset)
{
	assert(offset < io->num_blocks);
	return io->lba + offset;
}

uint64_t
ftl_io_current_lba(const struct ftl_io *io)
{
	return ftl_io_get_lba(io, io->pos);
}

/* Advance the IO's logical position and its iovec cursor (iov_pos, iov_off)
 * by num_blocks blocks, stepping to the next iovec entry whenever the current
 * one is exhausted.
 */
void
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
	struct iovec *iov = ftl_io_iovec(io);
	size_t iov_blocks, block_left = num_blocks;

	io->pos += num_blocks;

	if (io->iov_cnt == 0) {
		return;
	}

	while (block_left > 0) {
		assert(io->iov_pos < io->iov_cnt);
		iov_blocks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;

		if (io->iov_off + block_left < iov_blocks) {
			io->iov_off += block_left;
			break;
		}

		assert(iov_blocks > io->iov_off);
		block_left -= (iov_blocks - io->iov_off);
		io->iov_off = 0;
		io->iov_pos++;
	}
}
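
/* Worked example of the cursor mechanics above (hypothetical values): for an
 * IO built from two iovec entries of 2 and 3 blocks respectively,
 * ftl_io_advance(io, 3) consumes the first entry and leaves iov_pos == 1,
 * iov_off == 1. After that, ftl_io_iovec_addr() returns the second buffer's
 * base plus one FTL_BLOCK_SIZE, and ftl_io_iovec_len_left() returns 2.
 */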
/* Return the total number of blocks spanned by the iovec, or 0 if any entry
 * is not a multiple of the block size.
 */
size_t
ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt)
{
	size_t num_blocks = 0, i;

	for (i = 0; i < iov_cnt; ++i) {
		if (iov[i].iov_len & (FTL_BLOCK_SIZE - 1)) {
			return 0;
		}

		num_blocks += iov[i].iov_len / FTL_BLOCK_SIZE;
	}

	return num_blocks;
}

void *
ftl_io_iovec_addr(struct ftl_io *io)
{
	assert(io->iov_pos < io->iov_cnt);
	assert(io->iov_off * FTL_BLOCK_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);

	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
	       io->iov_off * FTL_BLOCK_SIZE;
}

/* Number of blocks left in the current iovec entry. */
size_t
ftl_io_iovec_len_left(struct ftl_io *io)
{
	if (io->iov_pos < io->iov_cnt) {
		struct iovec *iov = ftl_io_iovec(io);

		return iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE - io->iov_off;
	} else {
		return 0;
	}
}

/* Internal completion callback. On -EAGAIN the IO is cleared and requeued at
 * the head of the appropriate submission queue; note that ftl_io_clear()
 * resets io->status to 0, which is what the rescheduling check below relies
 * on. Otherwise the map buffer is released and the IO is posted to the
 * channel's completion ring.
 */
static void
ftl_io_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(io->ioch);
	size_t result __attribute__((unused));

	if (spdk_unlikely(status)) {
		io->status = status;

		if (-EAGAIN == status) {
			/* IO has to be rescheduled again */
			switch (io->type) {
			case FTL_IO_READ:
				ftl_io_clear(io);
				TAILQ_INSERT_HEAD(&io->dev->rd_sq, io, queue_entry);
				break;
			case FTL_IO_WRITE:
				ftl_io_clear(io);
				TAILQ_INSERT_HEAD(&io->dev->wr_sq, io, queue_entry);
				break;
			default:
				/* Unknown IO type, complete to the user */
				assert(0);
				break;
			}
		}

		if (!io->status) {
			/* IO rescheduled, return from the function */
			return;
		}
	}

	if (io->map) {
		ftl_mempool_put(ioch->map_pool, io->map);
	}

	result = spdk_ring_enqueue(ioch->cq, (void **)&io, 1, NULL);
	assert(result != 0);
}

int
ftl_io_init(struct spdk_io_channel *_ioch, struct ftl_io *io, uint64_t lba, size_t num_blocks,
	    struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(_ioch);
	struct spdk_ftl_dev *dev = ioch->dev;

	memset(io, 0, sizeof(struct ftl_io));
	io->ioch = _ioch;

	io->flags |= FTL_IO_INITIALIZED;
	io->type = type;
	io->dev = dev;
	io->addr = FTL_ADDR_INVALID;
	io->cb_ctx = cb_ctx;
	io->lba = lba;
	io->user_fn = cb_fn;
	io->iov = iov;
	io->iov_cnt = iov_cnt;
	io->num_blocks = num_blocks;

	return 0;
}

/* For pinned reads, verify that the L2P mapping is still current before
 * completing to the user.
 */
static void
ftl_io_complete_verify(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	uint64_t i;
	uint64_t lba = io->lba;

	assert(io->num_blocks <= dev->xfer_size);

	if (FTL_IO_WRITE == io->type) {
		return;
	}

	if (spdk_unlikely(io->status)) {
		return;
	}

	for (i = 0; i < io->num_blocks; i++, lba++) {
		ftl_addr current_addr = ftl_l2p_get(dev, lba);

		/* If a user read request gets stuck for whatever reason, it's possible the LBA
		 * has been relocated by GC or compaction and it may no longer be safe to return
		 * data from that address */
		if (spdk_unlikely(current_addr != io->map[i])) {
			io->status = -EAGAIN;
			break;
		}
	}
}

void
ftl_io_complete(struct ftl_io *io)
{
	io->flags &= ~FTL_IO_INITIALIZED;
	io->done = true;

	if (io->flags & FTL_IO_PINNED) {
		ftl_io_complete_verify(io);
		ftl_l2p_unpin(io->dev, io->lba, io->num_blocks);
	}

	ftl_io_cb(io, io->cb_ctx, io->status);
}

/* Fail the IO and advance past all remaining blocks so the iovec cursor is
 * left in a fully consumed state.
 */
void
ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	ftl_io_advance(io, io->num_blocks - io->pos);
}

void
ftl_io_clear(struct ftl_io *io)
{
	io->req_cnt = io->pos = io->iov_pos = io->iov_off = 0;
	io->done = false;
	io->status = 0;
	io->flags = 0;
}
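
/* Illustrative call flow (a sketch only; the queueing details live outside
 * this file, and the submission-queue usage shown here is an assumption about
 * the caller): the submitter fills in a struct ftl_io with ftl_io_init() and
 * places it on the device's read or write submission queue, e.g.
 *
 *	ftl_io_init(ioch, io, lba, num_blocks, iov, iov_cnt, cb_fn, cb_ctx,
 *		    FTL_IO_READ);
 *	TAILQ_INSERT_TAIL(&dev->rd_sq, io, queue_entry);
 *
 * The core then drives the transfer and ftl_io_complete() hands the IO back
 * through the channel's completion ring once all chunks have finished.
 */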