xref: /spdk/lib/ftl/ftl_writer.c (revision 510f4c134a21b45ff3a5add9ebc6c6cf7e49aeab)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/likely.h"
7 
8 #include "ftl_writer.h"
9 #include "ftl_band.h"
10 
11 void
12 ftl_writer_init(struct spdk_ftl_dev *dev, struct ftl_writer *writer,
13 		uint64_t limit, enum ftl_band_type type)
14 {
15 	memset(writer, 0, sizeof(*writer));
16 	writer->dev = dev;
17 	TAILQ_INIT(&writer->rq_queue);
18 	TAILQ_INIT(&writer->full_bands);
19 	writer->limit = limit;
20 	writer->halt = true;
21 	writer->writer_type = type;
22 }
23 
24 static bool
25 can_write(struct ftl_writer *writer)
26 {
27 	if (spdk_unlikely(writer->halt)) {
28 		return false;
29 	}
30 
31 	return writer->band->md->state == FTL_BAND_STATE_OPEN;
32 }
33 
34 void
35 ftl_writer_band_state_change(struct ftl_band *band)
36 {
37 	struct ftl_writer *writer = band->owner.priv;
38 
39 	switch (band->md->state) {
40 	case FTL_BAND_STATE_FULL:
41 		assert(writer->band == band);
42 		TAILQ_INSERT_TAIL(&writer->full_bands, band, queue_entry);
43 		writer->band = NULL;
44 		break;
45 
46 	case FTL_BAND_STATE_CLOSED:
47 		assert(writer->num_bands > 0);
48 		writer->num_bands--;
49 		ftl_band_clear_owner(band, ftl_writer_band_state_change, writer);
50 		break;
51 
52 	default:
53 		break;
54 	}
55 }
56 
57 static void
58 close_full_bands(struct ftl_writer *writer)
59 {
60 	struct ftl_band *band, *next;
61 
62 	TAILQ_FOREACH_SAFE(band, &writer->full_bands, queue_entry, next) {
63 		if (band->queue_depth) {
64 			continue;
65 		}
66 
67 		TAILQ_REMOVE(&writer->full_bands, band, queue_entry);
68 		ftl_band_close(band);
69 	}
70 }
71 
72 static bool
73 is_active(struct ftl_writer *writer)
74 {
75 	if (writer->dev->limit < writer->limit) {
76 		return false;
77 	}
78 
79 	return true;
80 }
81 
/*
 * Return the band that can currently accept writes, or NULL if none is
 * available yet.
 *
 * If the writer has no current band, one is acquired in this order:
 *   1. bail out if the writer is throttled (is_active() false),
 *   2. promote the pre-opened next_band (must already be OPEN — anything
 *      else is a fatal inconsistency),
 *   3. grab the next free band from the device and prep it for writing.
 * A band that exists but isn't OPEN yet causes a NULL return; if it's in
 * PREP state, opening is kicked off so a later call can succeed.
 */
static struct ftl_band *
get_band(struct ftl_writer *writer)
{
	if (spdk_unlikely(!writer->band)) {
		/* Throttled — don't acquire new bands while over the limit */
		if (!is_active(writer)) {
			return NULL;
		}

		if (spdk_unlikely(NULL != writer->next_band)) {
			if (FTL_BAND_STATE_OPEN == writer->next_band->md->state) {
				/* Promote the pre-opened band to current */
				writer->band = writer->next_band;
				writer->next_band = NULL;

				return writer->band;
			} else {
				/* next_band must be OPEN at this point; anything
				 * else indicates corrupted state */
				assert(FTL_BAND_STATE_OPEN == writer->next_band->md->state);
				ftl_abort();
			}
		}

		writer->band = ftl_band_get_next_free(writer->dev);
		if (writer->band) {
			writer->num_bands++;
			ftl_band_set_owner(writer->band,
					   ftl_writer_band_state_change, writer);

			if (ftl_band_write_prep(writer->band)) {
				/*
				 * This error might happen due to allocation failure. However number
				 * of open bands is controlled and it should have enough resources
				 * to do it. So here is better to perform a crash and recover from
				 * shared memory to bring back stable state.
				 *  */
				ftl_abort();
			}
		} else {
			return NULL;
		}
	}

	if (spdk_likely(writer->band->md->state == FTL_BAND_STATE_OPEN)) {
		return writer->band;
	} else {
		/* Band not writable yet; start opening it if still in PREP */
		if (spdk_unlikely(writer->band->md->state == FTL_BAND_STATE_PREP)) {
			ftl_band_open(writer->band, writer->writer_type);
		}
		return NULL;
	}
}
131 
132 void
133 ftl_writer_run(struct ftl_writer *writer)
134 {
135 	struct ftl_band *band;
136 	struct ftl_rq *rq;
137 
138 	close_full_bands(writer);
139 
140 	if (!TAILQ_EMPTY(&writer->rq_queue)) {
141 		band = get_band(writer);
142 		if (spdk_unlikely(!band)) {
143 			return;
144 		}
145 
146 		if (!can_write(writer)) {
147 			return;
148 		}
149 
150 		/* Finally we can write to band */
151 		rq = TAILQ_FIRST(&writer->rq_queue);
152 		TAILQ_REMOVE(&writer->rq_queue, rq, qentry);
153 		ftl_band_rq_write(writer->band, rq);
154 	}
155 }
156 
157 bool
158 ftl_writer_is_halted(struct ftl_writer *writer)
159 {
160 	if (spdk_unlikely(!TAILQ_EMPTY(&writer->full_bands))) {
161 		return false;
162 	}
163 
164 	if (writer->band) {
165 		if (writer->band->md->state != FTL_BAND_STATE_OPEN) {
166 			return false;
167 		}
168 
169 		if (writer->band->queue_depth) {
170 			return false;
171 		}
172 	}
173 
174 	return writer->halt;
175 }
176 
177 uint64_t
178 ftl_writer_get_free_blocks(struct ftl_writer *writer)
179 {
180 	uint64_t free_blocks = 0;
181 
182 	if (writer->band) {
183 		free_blocks += ftl_band_user_blocks_left(writer->band,
184 				writer->band->md->iter.offset);
185 	}
186 
187 	if (writer->next_band) {
188 		free_blocks += ftl_band_user_blocks_left(writer->next_band,
189 				writer->next_band->md->iter.offset);
190 	}
191 
192 	return free_blocks;
193 }
194