/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "thread/thread_internal.h"
#include "bs_scheduler.c"


#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
#define DEV_BUFFER_BLOCKLEN (4096)
#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
#define DEV_MAX_PHYS_BLOCKLEN (16384)
#define FIRST_DATA_CLUSTER(bs) \
	((DEV_BUFFER_SIZE / spdk_bs_get_cluster_size(bs)) - spdk_bs_total_data_cluster_count(bs))

uint8_t *g_dev_buffer;
uint64_t g_dev_write_bytes;
uint64_t g_dev_read_bytes;
uint64_t g_dev_copy_bytes;
bool g_dev_writev_ext_called;
bool g_dev_readv_ext_called;
bool g_dev_copy_enabled;
struct spdk_blob_ext_io_opts g_blob_ext_io_opts;
uint32_t g_phys_blocklen;

struct spdk_power_failure_counters {
	uint64_t general_counter;
	uint64_t read_counter;
	uint64_t write_counter;
	uint64_t unmap_counter;
	uint64_t write_zero_counter;
	uint64_t flush_counter;
};

static struct spdk_power_failure_counters g_power_failure_counters = {};

struct spdk_power_failure_thresholds {
	uint64_t general_threshold;
	uint64_t read_threshold;
	uint64_t write_threshold;
	uint64_t unmap_threshold;
	uint64_t write_zero_threshold;
	uint64_t flush_threshold;
};

static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};

static uint64_t g_power_failure_rc;

void dev_reset_power_failure_event(void);
void dev_reset_power_failure_counters(void);
void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);

void
dev_reset_power_failure_event(void)
{
	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
	memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
	g_power_failure_rc = 0;
}

void
dev_reset_power_failure_counters(void)
{
	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
	g_power_failure_rc = 0;
}

/**
 * Set the power failure event. The power failure will occur after a given
 * number of I/O operations. It may occur after a given number of operations
 * of a particular type (read, write, unmap, write zeroes or flush) or after
 * a given number of I/O operations of any type (general_threshold). A value
 * of 0 disables the threshold. Any other value is the operation count at
 * which the power failure event will occur.
 */
void
dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
{
	g_power_failure_thresholds = thresholds;
}
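/*
 * Illustrative usage sketch (not part of the harness itself): a blobstore
 * unit test can arm a simulated power failure and later clear it again. The
 * snippet below is an assumption about how a caller drives this API; only
 * the threshold bookkeeping is shown, and the I/O issued in between is up to
 * the individual test.
 *
 *	struct spdk_power_failure_thresholds thresholds = {};
 *
 *	thresholds.write_threshold = 2;
 *	dev_set_power_failure_thresholds(thresholds);
 *
 *	// Run blobstore I/O: the second dev_write() sets g_power_failure_rc
 *	// to -EIO, and every subsequent completion keeps failing until the
 *	// event is reset.
 *
 *	dev_reset_power_failure_event();
 */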
/* Define here for UT only. */
struct spdk_io_channel g_io_channel;

static struct spdk_io_channel *
dev_create_channel(struct spdk_bs_dev *dev)
{
	return &g_io_channel;
}

static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}

static void
dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}


/*
 * I/O completions are deferred: each back-end callback is sent as a thread
 * message and routed through the UT scheduler (_bs_send_msg) before
 * dev_complete_cb() invokes the blobstore callback with the current
 * g_power_failure_rc.
 */
static void
dev_complete_cb(void *arg)
{
	struct spdk_bs_dev_cb_args *cb_args = arg;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
}

static void
dev_complete(void *arg)
{
	_bs_send_msg(dev_complete_cb, arg, NULL);
}

static void
dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	 uint64_t lba, uint32_t lba_count,
	 struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.read_threshold != 0) {
		g_power_failure_counters.read_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.read_threshold == 0 ||
	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);

		if (length > 0) {
			memcpy(payload, &g_dev_buffer[offset], length);
			g_dev_read_bytes += length;
		}
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	  uint64_t lba, uint32_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.write_threshold != 0) {
		g_power_failure_counters.write_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_threshold == 0 ||
	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);

		memcpy(&g_dev_buffer[offset], payload, length);
		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		length -= iov[i].iov_len;
	}

	CU_ASSERT(length == 0);
}

static void
dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  struct iovec *iov, int iovcnt,
	  uint64_t lba, uint32_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;
	int i;

	if (g_power_failure_thresholds.read_threshold != 0) {
		g_power_failure_counters.read_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.read_threshold == 0 ||
	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		__check_iov(iov, iovcnt, length);

		for (i = 0; i < iovcnt; i++) {
			memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
			offset += iov[i].iov_len;
		}

		g_dev_read_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	      struct iovec *iov, int iovcnt,
	      uint64_t lba, uint32_t lba_count,
	      struct spdk_bs_dev_cb_args *cb_args,
	      struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_readv_ext_called = true;
	g_blob_ext_io_opts = *io_opts;
	dev_readv(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}

static void
dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	   struct iovec *iov, int iovcnt,
	   uint64_t lba, uint32_t lba_count,
	   struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;
	int i;

	if (g_power_failure_thresholds.write_threshold != 0) {
		g_power_failure_counters.write_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_threshold == 0 ||
	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		__check_iov(iov, iovcnt, length);

		for (i = 0; i < iovcnt; i++) {
			memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
			offset += iov[i].iov_len;
		}

		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	       struct iovec *iov, int iovcnt,
	       uint64_t lba, uint32_t lba_count,
	       struct spdk_bs_dev_cb_args *cb_args,
	       struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_writev_ext_called = true;
	g_blob_ext_io_opts = *io_opts;
	dev_writev(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}

static void
dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	if (g_power_failure_thresholds.flush_threshold != 0) {
		g_power_failure_counters.flush_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.flush_threshold != 0 &&
	     g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold) ||
	    (g_power_failure_thresholds.general_threshold != 0 &&
	     g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  uint64_t lba, uint64_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.unmap_threshold != 0) {
		g_power_failure_counters.unmap_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.unmap_threshold == 0 ||
	     g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		memset(&g_dev_buffer[offset], 0, length);
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
		 uint64_t lba, uint64_t lba_count,
		 struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.write_zero_threshold != 0) {
		g_power_failure_counters.write_zero_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
	     g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		memset(&g_dev_buffer[offset], 0, length);
		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static bool
dev_translate_lba(struct spdk_bs_dev *dev, uint64_t lba, uint64_t *base_lba)
{
	*base_lba = lba;
	return true;
}

static void
dev_copy(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, uint64_t dst_lba,
	 uint64_t src_lba, uint64_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
	void *dst = &g_dev_buffer[dst_lba * dev->blocklen];
	const void *src = &g_dev_buffer[src_lba * dev->blocklen];
	uint64_t size = lba_count * dev->blocklen;

	memcpy(dst, src, size);
	g_dev_copy_bytes += size;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static struct spdk_bs_dev *
init_dev(void)
{
	struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));

	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->create_channel = dev_create_channel;
	dev->destroy_channel = dev_destroy_channel;
	dev->destroy = dev_destroy;
	dev->read = dev_read;
	dev->write = dev_write;
	dev->readv = dev_readv;
	dev->writev = dev_writev;
	dev->readv_ext = dev_readv_ext;
	dev->writev_ext = dev_writev_ext;
	dev->flush = dev_flush;
	dev->unmap = dev_unmap;
	dev->write_zeroes = dev_write_zeroes;
	dev->translate_lba = dev_translate_lba;
	dev->copy = g_dev_copy_enabled ? dev_copy : NULL;
	dev->blockcnt = DEV_BUFFER_BLOCKCNT;
	dev->blocklen = DEV_BUFFER_BLOCKLEN;
	dev->phys_blocklen = g_phys_blocklen;

	return dev;
}
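/*
 * Illustrative usage sketch (assumptions: the surrounding tests allocate the
 * flat backing buffer before creating the device, and the completion handler
 * name below is a hypothetical placeholder for the test's own callback):
 *
 *	struct spdk_bs_dev *dev;
 *
 *	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
 *	dev = init_dev();
 *	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
 *
 * The blobstore takes ownership of 'dev' and releases it through
 * dev_destroy() when it is unloaded, which is why init_dev() allocates the
 * structure with calloc() rather than returning a static instance.
 */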