/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/likely.h"

#include "ftl_writer.h"
#include "ftl_band.h"

void
ftl_writer_init(struct spdk_ftl_dev *dev, struct ftl_writer *writer,
		uint64_t limit, enum ftl_band_type type)
{
	memset(writer, 0, sizeof(*writer));
	writer->dev = dev;
	TAILQ_INIT(&writer->rq_queue);
	TAILQ_INIT(&writer->full_bands);
	writer->limit = limit;
	writer->halt = true;
	writer->writer_type = type;
}

/* Writing is allowed only when the writer is not halted and the current band is open */
static bool
can_write(struct ftl_writer *writer)
{
	if (spdk_unlikely(writer->halt)) {
		return false;
	}

	return writer->band->md->state == FTL_BAND_STATE_OPEN;
}

void
ftl_writer_band_state_change(struct ftl_band *band)
{
	struct ftl_writer *writer = band->owner.priv;

	switch (band->md->state) {
	case FTL_BAND_STATE_FULL:
		assert(writer->band == band);
		TAILQ_INSERT_TAIL(&writer->full_bands, band, queue_entry);
		writer->band = NULL;
		break;

	case FTL_BAND_STATE_CLOSED:
		assert(writer->num_bands > 0);
		writer->num_bands--;
		ftl_band_clear_owner(band, ftl_writer_band_state_change, writer);
		writer->last_seq_id = band->md->close_seq_id;
		break;

	default:
		break;
	}
}

/* Close full bands that have no outstanding I/O */
static void
close_full_bands(struct ftl_writer *writer)
{
	struct ftl_band *band, *next;

	TAILQ_FOREACH_SAFE(band, &writer->full_bands, queue_entry, next) {
		if (band->queue_depth) {
			continue;
		}

		TAILQ_REMOVE(&writer->full_bands, band, queue_entry);
		ftl_band_close(band);
	}
}

static bool
is_active(struct ftl_writer *writer)
{
	if (writer->dev->limit < writer->limit) {
		return false;
	}

	return true;
}

static struct ftl_band *
get_band(struct ftl_writer *writer)
{
	if (spdk_unlikely(!writer->band)) {
		if (!is_active(writer)) {
			return NULL;
		}

		/* Use the band prepared in advance, which must already be open */
		if (spdk_unlikely(NULL != writer->next_band)) {
			if (FTL_BAND_STATE_OPEN == writer->next_band->md->state) {
				writer->band = writer->next_band;
				writer->next_band = NULL;

				return writer->band;
			} else {
				assert(FTL_BAND_STATE_OPEN == writer->next_band->md->state);
				ftl_abort();
			}
		}

		if (writer->num_bands >= FTL_LAYOUT_REGION_TYPE_P2L_COUNT / 2) {
			/* The maximum number of open bands has been reached
			 * (this limit is split between the compaction and GC writers)
			 */
			return NULL;
		}

		writer->band = ftl_band_get_next_free(writer->dev);
		if (writer->band) {
			writer->num_bands++;
			ftl_band_set_owner(writer->band,
					   ftl_writer_band_state_change, writer);

			if (ftl_band_write_prep(writer->band)) {
				/*
				 * This error might happen due to an allocation failure.
				 * However, the number of open bands is controlled, so
				 * there should be enough resources to prepare one. It
				 * is better to abort here and recover from shared
				 * memory to bring back a stable state.
				 */
				ftl_abort();
			}
		} else {
			return NULL;
		}
	}

	if (spdk_likely(writer->band->md->state == FTL_BAND_STATE_OPEN)) {
		return writer->band;
	} else {
		if (spdk_unlikely(writer->band->md->state == FTL_BAND_STATE_PREP)) {
			ftl_band_open(writer->band, writer->writer_type);
		}
		return NULL;
	}
}

void
ftl_writer_run(struct ftl_writer *writer)
{
	struct ftl_band *band;
	struct ftl_rq *rq;

	close_full_bands(writer);

	if (!TAILQ_EMPTY(&writer->rq_queue)) {
		band = get_band(writer);
		if (spdk_unlikely(!band)) {
			return;
		}

		if (!can_write(writer)) {
			return;
		}

		/* Finally we can write to the band */
		rq = TAILQ_FIRST(&writer->rq_queue);
		TAILQ_REMOVE(&writer->rq_queue, rq, qentry);
		ftl_band_rq_write(writer->band, rq);
	}
}

static void
ftl_writer_pad_band_cb(struct ftl_rq *rq)
{
	assert(1 == rq->iter.qd);
	rq->iter.qd = 0;
}

static void
ftl_writer_pad_band(struct ftl_writer *writer)
{
	struct spdk_ftl_dev *dev = writer->dev;

	assert(dev->conf.prep_upgrade_on_shutdown);
	assert(writer->band);
	assert(0 == writer->band->queue_depth);

	/* First allocate the padding FTL request */
	if (!writer->pad) {
		writer->pad = ftl_rq_new(dev, dev->md_size);
		if (!writer->pad) {
			FTL_ERRLOG(dev, "Cannot allocate FTL request to pad the band");
			return;
		}
		writer->pad->owner.cb = ftl_writer_pad_band_cb;
	}

	if (writer->pad->iter.qd) {
		/* The band is already handling the pad request */
		return;
	}

	if (writer->band->md->state == FTL_BAND_STATE_OPEN) {
		ftl_band_rq_write(writer->band, writer->pad);
		writer->pad->iter.qd++;
	}
}

bool
ftl_writer_is_halted(struct ftl_writer *writer)
{
	if (spdk_unlikely(!TAILQ_EMPTY(&writer->full_bands))) {
		return false;
	}

	if (writer->band) {
		if (writer->band->md->state != FTL_BAND_STATE_OPEN) {
			return false;
		}

		if (writer->band->queue_depth) {
			return false;
		}
	}

	if (writer->dev->conf.prep_upgrade_on_shutdown) {
		if (writer->band) {
			ftl_writer_pad_band(writer);
		} else if (writer->num_bands) {
			return false;
		} else {
			/* All bands closed, free the padding request */
			ftl_rq_del(writer->pad);
			writer->pad = NULL;
		}
	}

	return writer->halt;
}

uint64_t
ftl_writer_get_free_blocks(struct ftl_writer *writer)
{
	uint64_t free_blocks = 0;

	if (writer->band) {
		free_blocks += ftl_band_user_blocks_left(writer->band,
				writer->band->md->iter.offset);
	}

	if (writer->next_band) {
		free_blocks += ftl_band_user_blocks_left(writer->next_band,
				writer->next_band->md->iter.offset);
	}

	return free_blocks;
}