/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "thread/thread_internal.h"

/* UT scheduler; provides the _bs_send_msg() used by dev_complete() below. */
#include "bs_scheduler.c"

#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
#define DEV_BUFFER_BLOCKLEN (4096)
#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
/* Flat in-memory backing store for the test block device; allocated by the test code. */
uint8_t *g_dev_buffer;
uint64_t g_dev_write_bytes;
uint64_t g_dev_read_bytes;
bool g_dev_writev_ext_called;
bool g_dev_readv_ext_called;
struct spdk_blob_ext_io_opts g_blob_ext_io_opts;

struct spdk_power_failure_counters {
	uint64_t general_counter;
	uint64_t read_counter;
	uint64_t write_counter;
	uint64_t unmap_counter;
	uint64_t write_zero_counter;
	uint64_t flush_counter;
};

static struct spdk_power_failure_counters g_power_failure_counters = {};

struct spdk_power_failure_thresholds {
	uint64_t general_threshold;
	uint64_t read_threshold;
	uint64_t write_threshold;
	uint64_t unmap_threshold;
	uint64_t write_zero_threshold;
	uint64_t flush_threshold;
};

static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};

static uint64_t g_power_failure_rc;

void dev_reset_power_failure_event(void);
void dev_reset_power_failure_counters(void);
void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);

void
dev_reset_power_failure_event(void)
{
	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
	memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
	g_power_failure_rc = 0;
}

void
dev_reset_power_failure_counters(void)
{
	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
	g_power_failure_rc = 0;
}

/**
 * Set the power failure event. The power failure will occur after a given
 * number of I/O operations: either after a given number of operations of one
 * particular type (read, write, unmap, write zeroes or flush) or after a
 * given number of I/O operations of any type (general_threshold). A value of
 * 0 disables a threshold. Any other value N means that the N-th matching
 * operation, and every matching operation after it, fails with -EIO.
 */
void
dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
{
	g_power_failure_thresholds = thresholds;
}
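/*
 * Example usage (a minimal sketch, not taken from an existing test): arm the
 * device so that the third write, and every write after it, fails with -EIO,
 * then disarm it once the scenario is done.
 *
 *	struct spdk_power_failure_thresholds thresholds = {};
 *
 *	thresholds.write_threshold = 3;
 *	dev_set_power_failure_thresholds(thresholds);
 *	// ... exercise the blobstore ...
 *	dev_reset_power_failure_event();
 */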
/* Define here for UT only. */
struct spdk_io_channel g_io_channel;

static struct spdk_io_channel *
dev_create_channel(struct spdk_bs_dev *dev)
{
	return &g_io_channel;
}

static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}

static void
dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}

/*
 * Completions are delivered through a message rather than invoked inline, so
 * callbacks run asynchronously, as they would for a real device.
 */
static void
dev_complete_cb(void *arg)
{
	struct spdk_bs_dev_cb_args *cb_args = arg;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
}

static void
dev_complete(void *arg)
{
	_bs_send_msg(dev_complete_cb, arg, NULL);
}

static void
dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	 uint64_t lba, uint32_t lba_count,
	 struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.read_threshold != 0) {
		g_power_failure_counters.read_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.read_threshold == 0 ||
	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);

		if (length > 0) {
			memcpy(payload, &g_dev_buffer[offset], length);
			g_dev_read_bytes += length;
		}
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	  uint64_t lba, uint32_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.write_threshold != 0) {
		g_power_failure_counters.write_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_threshold == 0 ||
	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);

		memcpy(&g_dev_buffer[offset], payload, length);
		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}
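/*
 * Worked example of the gating above (illustrative numbers): with
 * general_threshold = 2, the first I/O bumps general_counter to 1; 1 < 2, so
 * it succeeds. The second bumps it to 2; 2 < 2 is false, so it fails with
 * -EIO, as does every subsequent I/O until the counters are reset.
 */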
/* Verify that the iovec entries cover exactly `length` bytes. */
static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		length -= iov[i].iov_len;
	}

	CU_ASSERT(length == 0);
}

static void
dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  struct iovec *iov, int iovcnt,
	  uint64_t lba, uint32_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;
	int i;

	if (g_power_failure_thresholds.read_threshold != 0) {
		g_power_failure_counters.read_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.read_threshold == 0 ||
	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		__check_iov(iov, iovcnt, length);

		for (i = 0; i < iovcnt; i++) {
			memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
			offset += iov[i].iov_len;
		}

		g_dev_read_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	      struct iovec *iov, int iovcnt,
	      uint64_t lba, uint32_t lba_count,
	      struct spdk_bs_dev_cb_args *cb_args,
	      struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_readv_ext_called = true;
	g_blob_ext_io_opts = *io_opts;
	dev_readv(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}

static void
dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	   struct iovec *iov, int iovcnt,
	   uint64_t lba, uint32_t lba_count,
	   struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;
	int i;

	if (g_power_failure_thresholds.write_threshold != 0) {
		g_power_failure_counters.write_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_threshold == 0 ||
	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		__check_iov(iov, iovcnt, length);

		for (i = 0; i < iovcnt; i++) {
			memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
			offset += iov[i].iov_len;
		}

		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	       struct iovec *iov, int iovcnt,
	       uint64_t lba, uint32_t lba_count,
	       struct spdk_bs_dev_cb_args *cb_args,
	       struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_writev_ext_called = true;
	g_blob_ext_io_opts = *io_opts;
	dev_writev(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}
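/*
 * Illustrative call (hypothetical buffers and arguments): reading two blocks
 * into a split payload. __check_iov() verifies that the iovec lengths sum to
 * the transfer size (2 * 4096 bytes here) before the copy loop runs.
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf_a, .iov_len = DEV_BUFFER_BLOCKLEN },
 *		{ .iov_base = buf_b, .iov_len = DEV_BUFFER_BLOCKLEN },
 *	};
 *
 *	dev->readv(dev, channel, iov, 2, 0, 2, cb_args);
 */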
static void
dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	if (g_power_failure_thresholds.flush_threshold != 0) {
		g_power_failure_counters.flush_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.flush_threshold != 0 &&
	     g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold) ||
	    (g_power_failure_thresholds.general_threshold != 0 &&
	     g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  uint64_t lba, uint64_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.unmap_threshold != 0) {
		g_power_failure_counters.unmap_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.unmap_threshold == 0 ||
	     g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		memset(&g_dev_buffer[offset], 0, length);
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
		 uint64_t lba, uint64_t lba_count,
		 struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.write_zero_threshold != 0) {
		g_power_failure_counters.write_zero_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
	     g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		memset(&g_dev_buffer[offset], 0, length);
		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}
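/*
 * Illustrative sequence (hypothetical channel and cb_args): in this stub,
 * unmap and write_zeroes both zero the target range of g_dev_buffer, but
 * only write_zeroes counts toward g_dev_write_bytes; flush does nothing to
 * the always-"persistent" flat buffer beyond failure accounting.
 *
 *	dev->unmap(dev, channel, 0, 2, cb_args);        // zeroes 8192 bytes
 *	dev->write_zeroes(dev, channel, 0, 2, cb_args); // also adds 8192 to g_dev_write_bytes
 */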
static struct spdk_bs_dev *
init_dev(void)
{
	struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));

	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->create_channel = dev_create_channel;
	dev->destroy_channel = dev_destroy_channel;
	dev->destroy = dev_destroy;
	dev->read = dev_read;
	dev->write = dev_write;
	dev->readv = dev_readv;
	dev->writev = dev_writev;
	dev->readv_ext = dev_readv_ext;
	dev->writev_ext = dev_writev_ext;
	dev->flush = dev_flush;
	dev->unmap = dev_unmap;
	dev->write_zeroes = dev_write_zeroes;
	dev->blockcnt = DEV_BUFFER_BLOCKCNT;
	dev->blocklen = DEV_BUFFER_BLOCKLEN;

	return dev;
}
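/*
 * Example test setup (a hedged sketch; bs_op_with_handle_complete stands in
 * for whatever blobstore init callback the surrounding test harness defines):
 *
 *	struct spdk_bs_dev *dev;
 *
 *	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
 *	SPDK_CU_ASSERT_FATAL(g_dev_buffer != NULL);
 *
 *	dev = init_dev();
 *	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
 */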