/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/likely.h"

#include "ftl_writer.h"
#include "ftl_band.h"

void
ftl_writer_init(struct spdk_ftl_dev *dev, struct ftl_writer *writer,
		uint64_t limit, enum ftl_band_type type)
{
	memset(writer, 0, sizeof(*writer));
	writer->dev = dev;
	TAILQ_INIT(&writer->rq_queue);
	TAILQ_INIT(&writer->full_bands);
	writer->limit = limit;
	writer->halt = true;
	writer->writer_type = type;
}
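/*
 * Illustrative sketch (not code from this file): how the two writers are
 * typically set up, one for user-data compaction and one for GC. The
 * SPDK_FTL_LIMIT_* values and the dev->writer_* fields come from the wider
 * FTL code and are assumptions here:
 *
 *	ftl_writer_init(dev, &dev->writer_user, SPDK_FTL_LIMIT_HIGH,
 *			FTL_BAND_TYPE_COMPACTION);
 *	ftl_writer_init(dev, &dev->writer_gc, SPDK_FTL_LIMIT_CRIT,
 *			FTL_BAND_TYPE_GC);
 *
 * The GC writer gets the stricter (lower) limit so it can keep running after
 * the compaction writer has been throttled off.
 */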

static bool
can_write(struct ftl_writer *writer)
{
	if (spdk_unlikely(writer->halt)) {
		return false;
	}

	/* Writes are only allowed while the current band is fully open */
	return writer->band->md->state == FTL_BAND_STATE_OPEN;
}

void
ftl_writer_band_state_change(struct ftl_band *band)
{
	struct ftl_writer *writer = band->owner.priv;

	switch (band->md->state) {
	case FTL_BAND_STATE_FULL:
		/* The current band is full, queue it for closing and detach
		 * it from the writer so a new band can be picked up
		 */
		assert(writer->band == band);
		TAILQ_INSERT_TAIL(&writer->full_bands, band, queue_entry);
		writer->band = NULL;
		break;

	case FTL_BAND_STATE_CLOSED:
		/* The band has been closed, release ownership and stop
		 * counting it against the writer's open band limit
		 */
		assert(writer->num_bands > 0);
		writer->num_bands--;
		ftl_band_clear_owner(band, ftl_writer_band_state_change, writer);
		writer->last_seq_id = band->md->close_seq_id;
		break;

	default:
		break;
	}
}
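/*
 * Rough sketch of the band transitions this writer reacts to (the full state
 * set lives in ftl_band.h; intermediate states are omitted here):
 *
 *	... -> OPEN -> FULL -> (close_full_bands) -> ... -> CLOSED
 *
 * FULL moves the band onto writer->full_bands; CLOSED drops the writer's
 * ownership and decrements its open-band count.
 */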

static void
close_full_bands(struct ftl_writer *writer)
{
	struct ftl_band *band, *next;

	TAILQ_FOREACH_SAFE(band, &writer->full_bands, queue_entry, next) {
		/* Wait until all inflight I/O on the band has completed */
		if (band->queue_depth) {
			continue;
		}

		TAILQ_REMOVE(&writer->full_bands, band, queue_entry);
		ftl_band_close(band);
	}
}

static bool
is_active(struct ftl_writer *writer)
{
	/* The writer stays active only while the device-wide limit level has
	 * not dropped below the writer's own threshold
	 */
	if (writer->dev->limit < writer->limit) {
		return false;
	}

	return true;
}

static struct ftl_band *
get_band(struct ftl_writer *writer)
{
	if (spdk_unlikely(!writer->band)) {
		if (!is_active(writer)) {
			return NULL;
		}

		if (spdk_unlikely(NULL != writer->next_band)) {
			if (FTL_BAND_STATE_OPEN == writer->next_band->md->state) {
				writer->band = writer->next_band;
				writer->next_band = NULL;

				return writer->band;
			} else {
				assert(FTL_BAND_STATE_OPEN == writer->next_band->md->state);
				ftl_abort();
			}
		}

		if (writer->num_bands >= FTL_LAYOUT_REGION_TYPE_P2L_COUNT / 2) {
			/* Maximum number of open bands exceeded (we split this
			 * value between the compaction and GC writers)
			 */
			return NULL;
		}

		writer->band = ftl_band_get_next_free(writer->dev);
		if (writer->band) {
			writer->num_bands++;
			ftl_band_set_owner(writer->band,
					   ftl_writer_band_state_change, writer);

			if (ftl_band_write_prep(writer->band)) {
				/*
				 * This error might happen due to an allocation
				 * failure. However, the number of open bands is
				 * controlled and there should be enough resources
				 * for it. It is better to crash here and recover
				 * from shared memory to bring back a stable state.
				 */
				ftl_abort();
			}
		} else {
			return NULL;
		}
	}

	if (spdk_likely(writer->band->md->state == FTL_BAND_STATE_OPEN)) {
		return writer->band;
	} else {
		if (spdk_unlikely(writer->band->md->state == FTL_BAND_STATE_PREP)) {
			ftl_band_open(writer->band, writer->writer_type);
		}
		return NULL;
	}
}
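/*
 * Note on the asynchronous open above: the first get_band() call that picks
 * a fresh band sees it in FTL_BAND_STATE_PREP, kicks off ftl_band_open() and
 * returns NULL; subsequent calls keep returning NULL until the band reaches
 * FTL_BAND_STATE_OPEN, at which point it is handed out to the writer.
 */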

void
ftl_writer_run(struct ftl_writer *writer)
{
	struct ftl_band *band;
	struct ftl_rq *rq;

	close_full_bands(writer);

	if (!TAILQ_EMPTY(&writer->rq_queue)) {
		band = get_band(writer);
		if (spdk_unlikely(!band)) {
			return;
		}

		if (!can_write(writer)) {
			return;
		}

		/* Finally, we can write to the band */
		rq = TAILQ_FIRST(&writer->rq_queue);
		TAILQ_REMOVE(&writer->rq_queue, rq, qentry);
		ftl_band_rq_write(writer->band, rq);
	}
}
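/*
 * Illustrative sketch (an assumption, not code from this file): producers
 * enqueue a request and the core poller drives the writer. The helper name
 * ftl_writer_queue_rq() and the dev->writer_* fields come from the wider FTL
 * code and are assumed here:
 *
 *	ftl_writer_queue_rq(&dev->writer_user, rq);	// enqueue an ftl_rq
 *	...
 *	ftl_writer_run(&dev->writer_user);		// from the core poller
 *	ftl_writer_run(&dev->writer_gc);
 */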

bool
ftl_writer_is_halted(struct ftl_writer *writer)
{
	/* There are still full bands waiting to be closed */
	if (spdk_unlikely(!TAILQ_EMPTY(&writer->full_bands))) {
		return false;
	}

	if (writer->band) {
		/* The current band is not fully open yet, or still has
		 * inflight I/O
		 */
		if (writer->band->md->state != FTL_BAND_STATE_OPEN) {
			return false;
		}

		if (writer->band->queue_depth) {
			return false;
		}
	}

	return writer->halt;
}
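/*
 * Illustrative halt flow (an assumption based on ftl_writer.h, where
 * ftl_writer_halt() is expected to simply set writer->halt): management
 * requests a halt, then keeps polling until outstanding band work settles:
 *
 *	ftl_writer_halt(writer);
 *	while (!ftl_writer_is_halted(writer)) {
 *		ftl_writer_run(writer);	// let full bands drain and close
 *	}
 */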

uint64_t
ftl_writer_get_free_blocks(struct ftl_writer *writer)
{
	uint64_t free_blocks = 0;

	if (writer->band) {
		free_blocks += ftl_band_user_blocks_left(writer->band,
				writer->band->md->iter.offset);
	}

	if (writer->next_band) {
		free_blocks += ftl_band_user_blocks_left(writer->next_band,
				writer->next_band->md->iter.offset);
	}

	return free_blocks;
}