xref: /spdk/lib/ftl/ftl_l2p.c (revision ea8f5b27612fa03698a9ce3ad4bd37765d9cdfa5)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "ftl_l2p.h"
7 #include "ftl_band.h"
8 #include "ftl_nv_cache.h"
9 #include "ftl_l2p_cache.h"
10 #include "ftl_l2p_flat.h"
11 
12 
13 /* TODO: Verify why function pointers had worse performance than compile time constants */
14 #ifdef SPDK_FTL_L2P_FLAT
15 #define FTL_L2P_OP(name)	ftl_l2p_flat_ ## name
16 #else
17 #define FTL_L2P_OP(name)	ftl_l2p_cache_ ## name
18 #endif
19 
20 
21 int
22 ftl_l2p_init(struct spdk_ftl_dev *dev)
23 {
24 	TAILQ_INIT(&dev->l2p_deferred_pins);
25 	return FTL_L2P_OP(init)(dev);
26 }
27 
/* Tear down the L2P layer; forwards directly to the backing implementation. */
void
ftl_l2p_deinit(struct spdk_ftl_dev *dev)
{
	FTL_L2P_OP(deinit)(dev);
}
33 
34 static inline void
35 ftl_l2p_pin_ctx_init(struct ftl_l2p_pin_ctx *pin_ctx, uint64_t lba, uint64_t count,
36 		     ftl_l2p_pin_cb cb, void *cb_ctx)
37 {
38 	pin_ctx->lba = lba;
39 	pin_ctx->count = count;
40 	pin_ctx->cb = cb;
41 	pin_ctx->cb_ctx = cb_ctx;
42 }
43 
/* Pin @count L2P entries starting at @lba, then invoke @cb when the pin
 * completes. @pin_ctx is caller-provided storage recording the request; it
 * must remain valid until the callback fires, since the implementation may
 * defer it onto dev->l2p_deferred_pins (see ftl_l2p_pin_complete()).
 */
void
ftl_l2p_pin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count, ftl_l2p_pin_cb cb, void *cb_ctx,
	    struct ftl_l2p_pin_ctx *pin_ctx)
{
	ftl_l2p_pin_ctx_init(pin_ctx, lba, count, cb, cb_ctx);
	FTL_L2P_OP(pin)(dev, pin_ctx);
}
51 
/* Release a pin previously taken with ftl_l2p_pin() for the same
 * lba/count range.
 */
void
ftl_l2p_unpin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count)
{
	FTL_L2P_OP(unpin)(dev, lba, count);
}
57 
/* Complete a pin request without actually pinning anything: the context is
 * initialized with FTL_LBA_INVALID / count 0 and @cb is invoked inline with
 * status 0 (never deferred).
 */
void
ftl_l2p_pin_skip(struct spdk_ftl_dev *dev, ftl_l2p_pin_cb cb, void *cb_ctx,
		 struct ftl_l2p_pin_ctx *pin_ctx)
{
	ftl_l2p_pin_ctx_init(pin_ctx, FTL_LBA_INVALID, 0, cb, cb_ctx);
	cb(dev, 0, pin_ctx);
}
65 
/* Store @addr as the physical address mapped to @lba in the L2P table. */
void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
{
	FTL_L2P_OP(set)(dev, lba, addr);
}
71 
/* Return the physical address currently mapped to @lba. */
ftl_addr
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	return FTL_L2P_OP(get)(dev, lba);
}
77 
/* Asynchronously reset the L2P table; @cb is invoked with @cb_ctx on
 * completion.
 */
void
ftl_l2p_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(clear)(dev, cb, cb_ctx);
}
83 
/* Asynchronously restore the L2P table from persistent storage; @cb is
 * invoked with @cb_ctx on completion.
 */
void
ftl_l2p_restore(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(restore)(dev, cb, cb_ctx);
}
89 
/* Asynchronously persist the L2P table; @cb is invoked with @cb_ctx on
 * completion.
 */
void
ftl_l2p_persist(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(persist)(dev, cb, cb_ctx);
}
95 
96 void
97 ftl_l2p_process(struct spdk_ftl_dev *dev)
98 {
99 	struct ftl_l2p_pin_ctx *pin_ctx;
100 
101 	pin_ctx = TAILQ_FIRST(&dev->l2p_deferred_pins);
102 	if (pin_ctx) {
103 		TAILQ_REMOVE(&dev->l2p_deferred_pins, pin_ctx, link);
104 		FTL_L2P_OP(pin)(dev, pin_ctx);
105 	}
106 
107 	FTL_L2P_OP(process)(dev);
108 }
109 
110 bool
111 ftl_l2p_is_halted(struct spdk_ftl_dev *dev)
112 {
113 	if (!TAILQ_EMPTY(&dev->l2p_deferred_pins)) {
114 		return false;
115 	}
116 
117 	return FTL_L2P_OP(is_halted)(dev);
118 }
119 
120 void
121 ftl_l2p_halt(struct spdk_ftl_dev *dev)
122 {
123 	return FTL_L2P_OP(halt)(dev);
124 }
125 
/* Update the L2P after a user write landed in the NV cache.
 *
 * @lba:      logical block being remapped
 * @new_addr: cache-device address just written (must be a valid NVC address)
 * @old_addr: address the caller believes @lba mapped to when the write began;
 *            used to detect a concurrent write to the same LBA
 */
void
ftl_l2p_update_cache(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
	struct ftl_nv_cache_chunk *current_chunk, *new_chunk;
	ftl_addr current_addr;
	/* Updating L2P for data in cache device - used by user writes.
	 * Split off from updating L2P in base due to extra edge cases for handling dirty shutdown in the cache case,
	 * namely keeping two simultaneous writes to same LBA consistent before/after shutdown - on base device we
	 * can simply ignore the L2P update, here we need to keep the address with more advanced write pointer
	 */
	assert(ftl_check_core_thread(dev));
	assert(new_addr != FTL_ADDR_INVALID);
	assert(ftl_addr_in_nvc(dev, new_addr));

	current_addr = ftl_l2p_get(dev, lba);

	if (current_addr != FTL_ADDR_INVALID) {

		/* Check if write-after-write happened (two simultaneous user writes to the same LBA) */
		if (spdk_unlikely(current_addr != old_addr
				  && ftl_addr_in_nvc(dev, current_addr))) {

			current_chunk = ftl_nv_cache_get_chunk_from_addr(dev, current_addr);
			new_chunk = ftl_nv_cache_get_chunk_from_addr(dev, new_addr);

			/* To keep data consistency after recovery skip oldest block */
			/* If both user writes are to the same chunk, the highest address should 'win', to keep data after
			 * dirty shutdown recovery consistent. If they're on different chunks, then higher seq_id chunk 'wins' */
			if (current_chunk == new_chunk) {
				if (new_addr < current_addr) {
					/* The competing write is newer within the chunk - drop ours. */
					return;
				}
			} else {
				if (new_chunk->md->seq_id < current_chunk->md->seq_id) {
					/* The competing write's chunk is newer - drop ours. */
					return;
				}
			}
		}

		/* For recovery from SHM case valid maps need to be set before l2p set and
		 * invalidated after it */

		/* DO NOT CHANGE ORDER - START */
		ftl_nv_cache_set_addr(dev, lba, new_addr);
		ftl_l2p_set(dev, lba, new_addr);
		ftl_invalidate_addr(dev, current_addr);
		/* DO NOT CHANGE ORDER - END */
		return;
	}

	/* If current address doesn't have any value (ie. it was never set, or it was trimmed), then we can just set L2P */
	/* DO NOT CHANGE ORDER - START (need to set P2L maps/valid map first) */
	ftl_nv_cache_set_addr(dev, lba, new_addr);
	ftl_l2p_set(dev, lba, new_addr);
	/* DO NOT CHANGE ORDER - END */
}
182 
/* Update the L2P after data was relocated to the base device.
 *
 * @lba:      logical block being remapped
 * @new_addr: base-device address the data was moved to (never NVC, never invalid)
 * @old_addr: source address of the relocation; the update is applied only if
 *            the L2P still points there (i.e. no user write raced ahead)
 */
void
ftl_l2p_update_base(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
	ftl_addr current_addr;

	/* Updating L2P for data in base device - used by compaction and GC, may be invalidated by user write.
	 * Split off from updating L2P in cache due to extra edge cases for handling dirty shutdown in the cache case.
	 * Also some assumptions are not the same (can't assign INVALID address for base device - trim cases are done on cache)
	 */
	assert(ftl_check_core_thread(dev));
	assert(new_addr != FTL_ADDR_INVALID);
	assert(old_addr != FTL_ADDR_INVALID);
	assert(!ftl_addr_in_nvc(dev, new_addr));

	current_addr = ftl_l2p_get(dev, lba);

	if (current_addr == old_addr) {
		/* DO NOT CHANGE ORDER - START (need to set L2P (and valid bits), before invalidating old ones,
		 * due to dirty shutdown from shm recovery - it's ok to have too many bits set, but not ok to
		 * have too many cleared) */
		ftl_band_set_addr(ftl_band_from_addr(dev, new_addr), lba, new_addr);
		ftl_l2p_set(dev, lba, new_addr);
		/* DO NOT CHANGE ORDER - END */
	} else {
		/* new addr could be set by running p2l checkpoint but in the time window between
		 * p2l checkpoint completion and l2p set operation new data could be written on
		 * open chunk so this address need to be invalidated */
		ftl_invalidate_addr(dev, new_addr);
	}

	/* The relocation source is stale in either branch. */
	ftl_invalidate_addr(dev, old_addr);
}
215 
216 void
217 ftl_l2p_pin_complete(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
218 {
219 	if (spdk_unlikely(status == -EAGAIN)) {
220 		TAILQ_INSERT_TAIL(&dev->l2p_deferred_pins, pin_ctx, link);
221 	} else {
222 		pin_ctx->cb(dev, status, pin_ctx);
223 	}
224 }
225