/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/queue.h"
#include "spdk/bdev_module.h"

#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_internal.h"

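/*
 * Completion callback for a band write request: record the I/O status,
 * drop the band's queue depth and hand the request back to its owner.
 */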
static void
write_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq *rq = arg;
	struct ftl_band *band = rq->io.band;

	rq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	rq->owner.cb(rq);
	spdk_bdev_free_io(bdev_io);
}

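/*
 * Submit the request's payload as a vectored write to the base bdev.
 * On -ENOMEM the submission is parked on the bdev's io_wait queue and
 * retried once resources free up; any other error aborts FTL.
 */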
static void
ftl_band_rq_bdev_write(void *_rq)
{
	struct ftl_rq *rq = _rq;
	struct ftl_band *band = rq->io.band;
	struct spdk_ftl_dev *dev = band->dev;
	int rc;

	rc = spdk_bdev_writev_blocks(dev->base_bdev_desc, dev->base_ioch,
				     rq->io_vec, rq->io_vec_size,
				     rq->io.addr, rq->num_blocks,
				     write_rq_end, rq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			rq->io.bdev_io_wait.bdev = bdev;
			rq->io.bdev_io_wait.cb_fn = ftl_band_rq_bdev_write;
			rq->io.bdev_io_wait.cb_arg = rq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &rq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

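/*
 * Write a request at the band's current iterator position, then advance the
 * iterator. Once the band fills up, it transitions to FTL_BAND_STATE_FULL
 * and the owner is notified via state_change_fn.
 */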
void
ftl_band_rq_write(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;

	ftl_band_rq_bdev_write(rq);

	band->queue_depth++;
	dev->io_activity_total += rq->num_blocks;

	ftl_band_iter_advance(band, rq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void ftl_band_rq_bdev_read(void *_entry);

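/*
 * Completion callback for a single read entry. A failed read is resubmitted
 * immediately; the queue depth is only dropped and the owner only notified
 * once the read succeeds.
 */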
static void
read_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq_entry *entry = arg;
	struct ftl_band *band = entry->io.band;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);

	rq->success = success;
	if (spdk_unlikely(!success)) {
		ftl_band_rq_bdev_read(entry);
		spdk_bdev_free_io(bdev_io);
		return;
	}

	assert(band->queue_depth > 0);
	band->queue_depth--;

	rq->owner.cb(rq);
	spdk_bdev_free_io(bdev_io);
}

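/*
 * Submit a read for one request entry to the base bdev, with the same
 * -ENOMEM io_wait handling as the write path.
 */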
static void
ftl_band_rq_bdev_read(void *_entry)
{
	struct ftl_rq_entry *entry = _entry;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);
	struct spdk_ftl_dev *dev = rq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch, entry->io_payload,
				   entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
				   read_rq_end, entry);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			entry->bdev_io.wait_entry.bdev = bdev;
			entry->bdev_io.wait_entry.cb_fn = ftl_band_rq_bdev_read;
			entry->bdev_io.wait_entry.cb_arg = entry;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &entry->bdev_io.wait_entry);
		} else {
			ftl_abort();
		}
	}
}

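/*
 * Read rq->iter.count blocks from the band's current iterator position into
 * the entry at rq->iter.idx.
 */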
void
ftl_band_rq_read(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];

	assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;
	entry->io.band = band;
	entry->bdev_io.offset_blocks = rq->io.addr;
	entry->bdev_io.num_blocks = rq->iter.count;

	ftl_band_rq_bdev_read(entry);

	dev->io_activity_total += rq->num_blocks;
	band->queue_depth++;
}

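/*
 * Completion callback for a basic write request; mirrors write_rq_end for
 * the single-buffer request type.
 */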
static void
write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

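/*
 * Submit the basic request's payload as a single contiguous write to the
 * base bdev, queuing on io_wait on -ENOMEM.
 */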
static void
ftl_band_brq_bdev_write(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
				    brq->io_payload, brq->io.addr,
				    brq->num_blocks, write_brq_end, brq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_write;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

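/*
 * Basic-request counterpart of ftl_band_rq_write: write at the band
 * iterator, advance it and handle the transition to the FULL state.
 */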
void
ftl_band_basic_rq_write(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = band->dev;

	brq->io.addr = band->md->iter.addr;
	brq->io.band = band;
	brq->success = false;

	ftl_band_brq_bdev_write(brq);

	dev->io_activity_total += brq->num_blocks;
	band->queue_depth++;
	ftl_band_iter_advance(band, brq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void
read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_read(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch,
				   brq->io_payload, brq->io.addr,
				   brq->num_blocks, read_brq_end, brq);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_read;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

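/*
 * Read brq->num_blocks blocks starting at brq->io.addr; the caller is
 * expected to have filled in the address and payload beforehand.
 */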
void
ftl_band_basic_rq_read(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = brq->dev;

	brq->io.band = band;

	ftl_band_brq_bdev_read(brq);

	brq->io.band->queue_depth++;
	dev->io_activity_total += brq->num_blocks;
}

static void
band_open_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
	}

	ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
}

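/*
 * Move a band to the OPEN state: stage the band metadata in the DMA buffer
 * with the new state and persist it; band_open_cb finishes the transition
 * (or schedules a retry) once the metadata write completes.
 */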
void
ftl_band_open(struct ftl_band *band, enum ftl_band_type type)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_p2l_map *p2l_map = &band->p2l_map;

	ftl_band_set_type(band, type);
	ftl_band_set_state(band, FTL_BAND_STATE_OPENING);

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_OPEN;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	if (spdk_unlikely(0 != band->p2l_map.num_valid)) {
		/*
		 * Inconsistent state: a band that still holds valid blocks
		 * must never have been moved onto the free list
		 */
		assert(false && 0 == band->p2l_map.num_valid);
		ftl_abort();
	}

	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_open_cb, band, &band->md_persist_entry_ctx);
}

static void
band_close_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
	}

	band->md->p2l_map_checksum = band->p2l_map.band_dma_md->p2l_map_checksum;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

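/*
 * Completion of the P2L map write issued by ftl_band_close. On success,
 * checksum the map and persist the band metadata with the CLOSED state;
 * on failure, resubmit the map write.
 */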
static void
band_map_write_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->io.band;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	uint32_t band_map_crc;

	if (spdk_likely(brq->success)) {

		band_map_crc = spdk_crc32c_update(p2l_map->band_map,
						  ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE, 0);
		memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
		p2l_map->band_dma_md->state = FTL_BAND_STATE_CLOSED;
		p2l_map->band_dma_md->p2l_map_checksum = band_map_crc;

		ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
				     band_close_cb, band, &band->md_persist_entry_ctx);
	} else {
		/* Retry the write in case of failure */
		ftl_band_brq_bdev_write(brq);
		band->queue_depth++;
	}
}

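/*
 * Begin closing a band: enter the CLOSING state and write the P2L map to
 * the band's tail metadata area; band_map_write_cb persists the final
 * CLOSED state once that write completes.
 */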
void
ftl_band_close(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	void *metadata = band->p2l_map.band_map;
	uint64_t num_blocks = ftl_tail_md_num_blocks(dev);

	/* Write the P2L map first; after it completes, set the closed state on the nvcache, then internally */
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
	ftl_basic_rq_init(dev, &band->metadata_rq, metadata, num_blocks);
	ftl_basic_rq_set_owner(&band->metadata_rq, band_map_write_cb, band);

	ftl_band_basic_rq_write(band, &band->metadata_rq);
}

static void
band_free_cb(int status, void *ctx)
{
	struct ftl_band *band = (struct ftl_band *)ctx;

	if (spdk_unlikely(status)) {
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
	}

	ftl_band_release_p2l_map(band);
	FTL_DEBUGLOG(band->dev, "Band is going to free state. Band id: %u\n", band->id);
	ftl_band_set_state(band, FTL_BAND_STATE_FREE);
	assert(0 == band->p2l_map.ref_cnt);
}

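/*
 * Return a band to the free pool by persisting the FREE state in the band
 * metadata; band_free_cb releases the P2L map once the write lands.
 */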
void
ftl_band_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_FREE;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_free_cb, band, &band->md_persist_entry_ctx);

	/* TODO: The whole band erase code should probably be done here instead */
}

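/*
 * Completion of the P2L map read issued for GC. A failed read is
 * resubmitted; on success the map CRC is verified against the stored
 * checksum before the owner's callback fires.
 */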
static void
read_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	struct spdk_ftl_dev *dev = band->dev;
	ftl_band_ops_cb cb;
	uint32_t band_map_crc;
	bool success = true;
	void *priv;

	cb = band->owner.ops_fn;
	priv = band->owner.priv;

	if (!brq->success) {
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	band_map_crc = spdk_crc32c_update(band->p2l_map.band_map,
					  ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE, 0);
	if (band->md->p2l_map_checksum && band->md->p2l_map_checksum != band_map_crc) {
		FTL_ERRLOG(dev, "GC error, inconsistent P2L map CRC\n");
		success = false;
	}
	band->owner.ops_fn = NULL;
	band->owner.priv = NULL;
	cb(band, priv, success);
}

static int
_read_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	if (ftl_band_alloc_p2l_map(band)) {
		return -ENOMEM;
	}

	/* Read P2L map */
	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_p2l_map_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_md_cb, band);

	rq->io.band = band;
	rq->io.addr = ftl_band_p2l_map_addr(band);

	ftl_band_basic_rq_read(band, &band->metadata_rq);

	return 0;
}

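/*
 * Thread-message wrapper around _read_md: if the P2L map cannot be
 * allocated, reschedule on the current thread and try again.
 */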
static void
read_md(void *band)
{
	int rc;

	rc = _read_md(band);
	if (spdk_unlikely(rc)) {
		spdk_thread_send_msg(spdk_get_thread(), read_md, band);
	}
}

static void
read_tail_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	enum ftl_md_status status = FTL_MD_IO_FAILURE;
	ftl_band_md_cb cb;
	void *priv;

	if (spdk_unlikely(!brq->success)) {
		/* Retry the read in case of error */
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	cb = band->owner.md_fn;
	band->owner.md_fn = NULL;

	priv = band->owner.priv;
	band->owner.priv = NULL;

	status = FTL_MD_SUCCESS;

	cb(band, priv, status);
}

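/*
 * Read the band's tail metadata into the P2L map buffer and invoke cb when
 * the read completes; the owner slot must be free when this is called.
 */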
void
ftl_band_read_tail_brq_md(struct ftl_band *band, ftl_band_md_cb cb, void *cntx)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_tail_md_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_tail_md_cb, band);

	assert(!band->owner.md_fn);
	assert(!band->owner.priv);
	band->owner.md_fn = cb;
	band->owner.priv = cntx;

	rq->io.band = band;
	rq->io.addr = band->tail_md_addr;

	ftl_band_basic_rq_read(band, &band->metadata_rq);
}

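/*
 * Pick the next band to relocate for garbage collection and kick off the
 * read of its P2L map; cb fires with success == false when no band is
 * eligible yet.
 */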
void
ftl_band_get_next_gc(struct spdk_ftl_dev *dev, ftl_band_ops_cb cb, void *cntx)
{
	struct ftl_band *band = ftl_band_search_next_to_reloc(dev);

	/* If the disk is very small, GC can start so early that no band is ready for it yet */
	if (spdk_unlikely(!band)) {
		cb(NULL, cntx, false);
		return;
	}

	/* Only one owner is allowed */
	assert(!band->queue_depth);
	assert(!band->owner.ops_fn);
	assert(!band->owner.priv);
	band->owner.ops_fn = cb;
	band->owner.priv = cntx;

	read_md(band);
}