xref: /spdk/lib/blobfs/blobfs.c (revision 12fbe739a31b09aff0d05f354d4f3bbef99afc55)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/blobfs.h"
9 #include "cache_tree.h"
10 
11 #include "spdk/queue.h"
12 #include "spdk/thread.h"
13 #include "spdk/assert.h"
14 #include "spdk/env.h"
15 #include "spdk/util.h"
16 #include "spdk/log.h"
17 #include "spdk/trace.h"
18 
19 #include "spdk_internal/trace_defs.h"
20 
21 #define BLOBFS_TRACE(file, str, args...) \
22 	SPDK_DEBUGLOG(blobfs, "file=%s " str, file->name, ##args)
23 
24 #define BLOBFS_TRACE_RW(file, str, args...) \
25 	SPDK_DEBUGLOG(blobfs_rw, "file=%s " str, file->name, ##args)
26 
27 #define BLOBFS_DEFAULT_CACHE_SIZE (4ULL * 1024 * 1024 * 1024)
28 #define SPDK_BLOBFS_DEFAULT_OPTS_CLUSTER_SZ (1024 * 1024)
29 
30 #define SPDK_BLOBFS_SIGNATURE	"BLOBFS"
31 
32 static uint64_t g_fs_cache_size = BLOBFS_DEFAULT_CACHE_SIZE;
33 static struct spdk_mempool *g_cache_pool;
34 static TAILQ_HEAD(, spdk_file) g_caches = TAILQ_HEAD_INITIALIZER(g_caches);
35 static struct spdk_poller *g_cache_pool_mgmt_poller;
36 static struct spdk_thread *g_cache_pool_thread;
37 #define BLOBFS_CACHE_POOL_POLL_PERIOD_IN_US 1000ULL
38 static int g_fs_count = 0;
39 static pthread_mutex_t g_cache_init_lock = PTHREAD_MUTEX_INITIALIZER;
40 
/* Register blobfs tracepoints with the SPDK trace framework.  Each tracepoint
 * carries the file name (truncated to 40 bytes) as its only argument.
 */
SPDK_TRACE_REGISTER_FN(blobfs_trace, "blobfs", TRACE_GROUP_BLOBFS)
{
	struct spdk_trace_tpoint_opts opts[] = {
		{
			"BLOBFS_XATTR_START", TRACE_BLOBFS_XATTR_START,
			OWNER_NONE, OBJECT_NONE, 0,
			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
		},
		{
			"BLOBFS_XATTR_END", TRACE_BLOBFS_XATTR_END,
			OWNER_NONE, OBJECT_NONE, 0,
			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
		},
		{
			"BLOBFS_OPEN", TRACE_BLOBFS_OPEN,
			OWNER_NONE, OBJECT_NONE, 0,
			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
		},
		{
			"BLOBFS_CLOSE", TRACE_BLOBFS_CLOSE,
			OWNER_NONE, OBJECT_NONE, 0,
			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
		},
		{
			"BLOBFS_DELETE_START", TRACE_BLOBFS_DELETE_START,
			OWNER_NONE, OBJECT_NONE, 0,
			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
		},
		{
			"BLOBFS_DELETE_DONE", TRACE_BLOBFS_DELETE_DONE,
			OWNER_NONE, OBJECT_NONE, 0,
			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
		}
	};

	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
}
78 
79 void
80 cache_buffer_free(struct cache_buffer *cache_buffer)
81 {
82 	spdk_mempool_put(g_cache_pool, cache_buffer->buf);
83 	free(cache_buffer);
84 }
85 
86 #define CACHE_READAHEAD_THRESHOLD	(128 * 1024)
87 
/* In-memory state for one blobfs file.  Each file is backed by a single blob;
 * metadata (name, length) is persisted as blob xattrs.
 */
struct spdk_file {
	struct spdk_filesystem	*fs;		/* owning filesystem */
	struct spdk_blob	*blob;		/* backing blob; NULL until opened */
	char			*name;		/* heap-allocated name (strdup'd) */
	uint64_t		length;		/* current logical file length in bytes */
	bool                    is_deleted;	/* deleted while still referenced (see iter_cb) */
	bool			open_for_writing;
	uint64_t		length_flushed;	/* presumably bytes flushed to the blob — TODO confirm in flush path */
	uint64_t		length_xattr;	/* length value recorded in the "length" xattr */
	uint64_t		append_pos;	/* next append offset; stat reports max(append_pos, length) */
	uint64_t		seq_byte_count;
	uint64_t		next_seq_offset;
	uint32_t		priority;	/* initialized to SPDK_FILE_PRIORITY_LOW in file_alloc() */
	TAILQ_ENTRY(spdk_file)	tailq;		/* entry on fs->files */
	spdk_blob_id		blobid;
	uint32_t		ref_count;	/* number of outstanding opens */
	pthread_spinlock_t	lock;
	struct cache_buffer	*last;
	struct cache_tree	*tree;		/* per-file cache of buffers from g_cache_pool */
	TAILQ_HEAD(open_requests_head, spdk_fs_request) open_requests;	/* opens waiting on blob open */
	TAILQ_HEAD(sync_requests_head, spdk_fs_request) sync_requests;
	TAILQ_ENTRY(spdk_file)	cache_tailq;	/* entry on global g_caches */
};
111 
/* Record of a blob found marked "is_deleted" during load; such blobs are
 * deleted from disk before the fs_load callback completes (see fs_load_done).
 */
struct spdk_deleted_file {
	spdk_blob_id	id;			/* blob to delete */
	TAILQ_ENTRY(spdk_deleted_file)	tailq;	/* entry on op.fs_load.deleted_files */
};
116 
/* A blobfs instance layered on one blobstore.  Three io_devices are
 * registered per filesystem (md, sync, io targets), each providing
 * spdk_fs_channel contexts.
 */
struct spdk_filesystem {
	struct spdk_blob_store	*bs;		/* underlying blobstore */
	TAILQ_HEAD(, spdk_file)	files;		/* all known files */
	struct spdk_bs_opts	bs_opts;	/* cluster_sz cached here by common_fs_bs_init() */
	struct spdk_bs_dev	*bdev;		/* block device backing the blobstore */
	fs_send_request_fn	send_request;	/* app-provided cross-thread dispatch */

	/* Target used for synchronous (blocking) operations. */
	struct {
		uint32_t		max_ops;
		struct spdk_io_channel	*sync_io_channel;
		struct spdk_fs_channel	*sync_fs_channel;
	} sync_target;

	/* Target used for metadata operations (create/open/delete/load). */
	struct {
		uint32_t		max_ops;
		struct spdk_io_channel	*md_io_channel;
		struct spdk_fs_channel	*md_fs_channel;
	} md_target;

	/* Target for general I/O channels obtained by callers. */
	struct {
		uint32_t		max_ops;
	} io_target;
};
140 
/* Per-request callback context.  The `fn`/`arg` pair holds the user's
 * completion callback; `op` is a union of per-operation scratch state keyed
 * by the request type.  `sem`/`rc` are used by the synchronous wrappers to
 * block the caller and carry back the result.
 */
struct spdk_fs_cb_args {
	union {
		spdk_fs_op_with_handle_complete		fs_op_with_handle;
		spdk_fs_op_complete			fs_op;
		spdk_file_op_with_handle_complete	file_op_with_handle;
		spdk_file_op_complete			file_op;
		spdk_file_stat_op_complete		stat_op;
	} fn;
	void *arg;			/* user callback argument */
	sem_t *sem;			/* posted by __wake_caller for sync APIs */
	struct spdk_filesystem *fs;
	struct spdk_file *file;
	int rc;				/* result for the synchronous caller */
	int *rwerrno;			/* optional sticky first-error slot (see __wake_caller) */
	struct iovec *iovs;		/* points at `iov` below unless iovcnt > 1 */
	uint32_t iovcnt;
	struct iovec iov;		/* inline single-element iovec */
	union {
		struct {
			TAILQ_HEAD(, spdk_deleted_file)	deleted_files;
		} fs_load;
		struct {
			uint64_t	length;
		} truncate;
		struct {
			struct spdk_io_channel	*channel;
			void		*pin_buf;
			int		is_read;
			off_t		offset;
			size_t		length;
			uint64_t	start_lba;
			uint64_t	num_lba;
			uint32_t	blocklen;
		} rw;
		struct {
			const char	*old_name;
			const char	*new_name;
		} rename;
		struct {
			struct cache_buffer	*cache_buffer;
			uint64_t		length;
		} flush;
		struct {
			struct cache_buffer	*cache_buffer;
			uint64_t		length;
			uint64_t		offset;
		} readahead;
		struct {
			/* offset of the file when the sync request was made */
			uint64_t			offset;
			TAILQ_ENTRY(spdk_fs_request)	tailq;
			bool				xattr_in_progress;
			/* length written to the xattr for this file - this should
			 * always be the same as the offset if only one thread is
			 * writing to the file, but could differ if multiple threads
			 * are appending
			 */
			uint64_t			length;
		} sync;
		struct {
			uint32_t			num_clusters;
		} resize;
		struct {
			const char	*name;
			uint32_t	flags;
			TAILQ_ENTRY(spdk_fs_request)	tailq;	/* entry on file->open_requests */
		} open;
		struct {
			const char		*name;
			struct spdk_blob	*blob;
		} create;
		struct {
			const char	*name;
		} delete;
		struct {
			const char	*name;
		} stat;
	} op;
};
220 
221 static void file_free(struct spdk_file *file);
222 static void fs_io_device_unregister(struct spdk_filesystem *fs);
223 static void fs_free_io_channels(struct spdk_filesystem *fs);
224 
/* Initialize blobfs options to their defaults (1 MiB cluster size). */
void
spdk_fs_opts_init(struct spdk_blobfs_opts *opts)
{
	opts->cluster_sz = SPDK_BLOBFS_DEFAULT_OPTS_CLUSTER_SZ;
}
230 
231 static int _blobfs_cache_pool_reclaim(void *arg);
232 
233 static bool
234 blobfs_cache_pool_need_reclaim(void)
235 {
236 	size_t count;
237 
238 	count = spdk_mempool_count(g_cache_pool);
239 	/* We define a aggressive policy here as the requirements from db_bench are batched, so start the poller
240 	 *  when the number of available cache buffer is less than 1/5 of total buffers.
241 	 */
242 	if (count > (size_t)g_fs_cache_size / CACHE_BUFFER_SIZE / 5) {
243 		return false;
244 	}
245 
246 	return true;
247 }
248 
/* Runs on g_cache_pool_thread: create the shared cache-buffer mempool and
 * start the periodic reclaim poller.  Called exactly once, when the first
 * filesystem is initialized (see initialize_global_cache()).
 */
static void
__start_cache_pool_mgmt(void *ctx)
{
	assert(g_cache_pool == NULL);

	g_cache_pool = spdk_mempool_create("spdk_fs_cache",
					   g_fs_cache_size / CACHE_BUFFER_SIZE,
					   CACHE_BUFFER_SIZE,
					   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					   SPDK_ENV_SOCKET_ID_ANY);
	if (!g_cache_pool) {
		/* A pre-existing pool with the same name means another process
		 * created it - blobfs does not support multiprocess. */
		if (spdk_mempool_lookup("spdk_fs_cache") != NULL) {
			SPDK_ERRLOG("Unable to allocate mempool: already exists\n");
			SPDK_ERRLOG("Probably running in multiprocess environment, which is "
				    "unsupported by the blobfs library\n");
		} else {
			SPDK_ERRLOG("Create mempool failed, you may "
				    "increase the memory and try again\n");
		}
		assert(false);
	}

	assert(g_cache_pool_mgmt_poller == NULL);
	g_cache_pool_mgmt_poller = SPDK_POLLER_REGISTER(_blobfs_cache_pool_reclaim, NULL,
				   BLOBFS_CACHE_POOL_POLL_PERIOD_IN_US);
}
275 
/* Runs on g_cache_pool_thread: tear down the reclaim poller and mempool,
 * then exit the management thread.  Called when the last filesystem is
 * unloaded (see free_global_cache()).
 */
static void
__stop_cache_pool_mgmt(void *ctx)
{
	spdk_poller_unregister(&g_cache_pool_mgmt_poller);

	assert(g_cache_pool != NULL);
	/* All buffers must have been returned before the pool is destroyed. */
	assert(spdk_mempool_count(g_cache_pool) == g_fs_cache_size / CACHE_BUFFER_SIZE);
	spdk_mempool_free(g_cache_pool);
	g_cache_pool = NULL;

	spdk_thread_exit(g_cache_pool_thread);
}
288 
289 static void
290 initialize_global_cache(void)
291 {
292 	pthread_mutex_lock(&g_cache_init_lock);
293 	if (g_fs_count == 0) {
294 		g_cache_pool_thread = spdk_thread_create("cache_pool_mgmt", NULL);
295 		assert(g_cache_pool_thread != NULL);
296 		spdk_thread_send_msg(g_cache_pool_thread, __start_cache_pool_mgmt, NULL);
297 	}
298 	g_fs_count++;
299 	pthread_mutex_unlock(&g_cache_init_lock);
300 }
301 
302 static void
303 free_global_cache(void)
304 {
305 	pthread_mutex_lock(&g_cache_init_lock);
306 	g_fs_count--;
307 	if (g_fs_count == 0) {
308 		spdk_thread_send_msg(g_cache_pool_thread, __stop_cache_pool_mgmt, NULL);
309 	}
310 	pthread_mutex_unlock(&g_cache_init_lock);
311 }
312 
313 static uint64_t
314 __file_get_blob_size(struct spdk_file *file)
315 {
316 	uint64_t cluster_sz;
317 
318 	cluster_sz = file->fs->bs_opts.cluster_sz;
319 	return cluster_sz * spdk_blob_get_num_clusters(file->blob);
320 }
321 
/* One pooled request, pre-allocated per channel in fs_channel_create(). */
struct spdk_fs_request {
	struct spdk_fs_cb_args		args;	/* MUST stay first: some callbacks cast between the two */
	TAILQ_ENTRY(spdk_fs_request)	link;	/* entry on channel->reqs free list */
	struct spdk_fs_channel		*channel;	/* owning channel, for free_fs_request() */
};
327 
/* Per-thread channel state: a fixed pool of requests plus the blobstore
 * channel used for I/O.  `sync` channels may be shared across threads and
 * therefore guard the request list with `lock`.
 */
struct spdk_fs_channel {
	struct spdk_fs_request		*req_mem;	/* backing array of max_ops requests */
	TAILQ_HEAD(, spdk_fs_request)	reqs;		/* free list */
	sem_t				sem;		/* wakes blocking callers of sync APIs */
	struct spdk_filesystem		*fs;
	struct spdk_io_channel		*bs_channel;	/* blobstore channel, may be NULL */
	fs_send_request_fn		send_request;
	bool				sync;		/* true => reqs protected by lock */
	uint32_t			outstanding_reqs;
	pthread_spinlock_t		lock;
};
339 
/* For now, this is effectively an alias. But eventually we'll shift
 * some data members over. */
struct spdk_fs_thread_ctx {
	struct spdk_fs_channel	ch;	/* embedded channel; sync APIs cast ctx to spdk_fs_channel* */
};
345 
/* Pop a request from the channel's free list and prepare it for an I/O with
 * `iovcnt` elements.  For iovcnt <= 1 the request's embedded iov is used;
 * otherwise a separate iovec array is allocated (freed in free_fs_request()).
 * Returns NULL if the iovec allocation fails or the channel has no free
 * requests.  The spinlock is only taken on sync channels, which may be
 * accessed from multiple threads.
 */
static struct spdk_fs_request *
alloc_fs_request_with_iov(struct spdk_fs_channel *channel, uint32_t iovcnt)
{
	struct spdk_fs_request *req;
	struct iovec *iovs = NULL;

	if (iovcnt > 1) {
		iovs = calloc(iovcnt, sizeof(struct iovec));
		if (!iovs) {
			return NULL;
		}
	}

	if (channel->sync) {
		pthread_spin_lock(&channel->lock);
	}

	req = TAILQ_FIRST(&channel->reqs);
	if (req) {
		channel->outstanding_reqs++;
		TAILQ_REMOVE(&channel->reqs, req, link);
	}

	if (channel->sync) {
		pthread_spin_unlock(&channel->lock);
	}

	if (req == NULL) {
		SPDK_ERRLOG("Cannot allocate req on spdk_fs_channel =%p\n", channel);
		free(iovs);
		return NULL;
	}
	/* Reset all prior state; requests are recycled. */
	memset(req, 0, sizeof(*req));
	req->channel = channel;
	if (iovcnt > 1) {
		req->args.iovs = iovs;
	} else {
		req->args.iovs = &req->args.iov;
	}
	req->args.iovcnt = iovcnt;

	return req;
}
389 
/* Convenience wrapper: allocate a request that carries no iovec payload. */
static struct spdk_fs_request *
alloc_fs_request(struct spdk_fs_channel *channel)
{
	struct spdk_fs_request *req;

	req = alloc_fs_request_with_iov(channel, 0);
	return req;
}
395 
396 static void
397 free_fs_request(struct spdk_fs_request *req)
398 {
399 	struct spdk_fs_channel *channel = req->channel;
400 
401 	if (req->args.iovcnt > 1) {
402 		free(req->args.iovs);
403 	}
404 
405 	if (channel->sync) {
406 		pthread_spin_lock(&channel->lock);
407 	}
408 
409 	TAILQ_INSERT_HEAD(&req->channel->reqs, req, link);
410 	channel->outstanding_reqs--;
411 
412 	if (channel->sync) {
413 		pthread_spin_unlock(&channel->lock);
414 	}
415 }
416 
417 static int
418 fs_channel_create(struct spdk_filesystem *fs, struct spdk_fs_channel *channel,
419 		  uint32_t max_ops)
420 {
421 	uint32_t i;
422 
423 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_fs_request));
424 	if (!channel->req_mem) {
425 		return -1;
426 	}
427 
428 	channel->outstanding_reqs = 0;
429 	TAILQ_INIT(&channel->reqs);
430 	sem_init(&channel->sem, 0, 0);
431 
432 	for (i = 0; i < max_ops; i++) {
433 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
434 	}
435 
436 	channel->fs = fs;
437 
438 	return 0;
439 }
440 
441 static int
442 fs_md_channel_create(void *io_device, void *ctx_buf)
443 {
444 	struct spdk_filesystem		*fs;
445 	struct spdk_fs_channel		*channel = ctx_buf;
446 
447 	fs = SPDK_CONTAINEROF(io_device, struct spdk_filesystem, md_target);
448 
449 	return fs_channel_create(fs, channel, fs->md_target.max_ops);
450 }
451 
452 static int
453 fs_sync_channel_create(void *io_device, void *ctx_buf)
454 {
455 	struct spdk_filesystem		*fs;
456 	struct spdk_fs_channel		*channel = ctx_buf;
457 
458 	fs = SPDK_CONTAINEROF(io_device, struct spdk_filesystem, sync_target);
459 
460 	return fs_channel_create(fs, channel, fs->sync_target.max_ops);
461 }
462 
463 static int
464 fs_io_channel_create(void *io_device, void *ctx_buf)
465 {
466 	struct spdk_filesystem		*fs;
467 	struct spdk_fs_channel		*channel = ctx_buf;
468 
469 	fs = SPDK_CONTAINEROF(io_device, struct spdk_filesystem, io_target);
470 
471 	return fs_channel_create(fs, channel, fs->io_target.max_ops);
472 }
473 
/* io_device destroy callback shared by all three targets: release the
 * request pool and the blobstore channel if one was attached.  Outstanding
 * requests at this point indicate a caller bug, so log loudly.
 */
static void
fs_channel_destroy(void *io_device, void *ctx_buf)
{
	struct spdk_fs_channel *channel = ctx_buf;

	if (channel->outstanding_reqs > 0) {
		SPDK_ERRLOG("channel freed with %" PRIu32 " outstanding requests!\n",
			    channel->outstanding_reqs);
	}

	free(channel->req_mem);
	if (channel->bs_channel != NULL) {
		spdk_bs_free_io_channel(channel->bs_channel);
	}
}
489 
/* Trivial send_request implementation used for the md/sync channels:
 * run the function immediately on the calling thread.
 */
static void
__send_request_direct(fs_request_fn fn, void *arg)
{
	fn(arg);
}
495 
/* Shared post-init/post-load wiring: attach the blobstore to the filesystem,
 * cache its cluster size, open blobstore channels for the md and sync
 * targets, and take a reference on the global buffer cache.
 */
static void
common_fs_bs_init(struct spdk_filesystem *fs, struct spdk_blob_store *bs)
{
	fs->bs = bs;
	fs->bs_opts.cluster_sz = spdk_bs_get_cluster_size(bs);
	fs->md_target.md_fs_channel->bs_channel = spdk_bs_alloc_io_channel(fs->bs);
	fs->md_target.md_fs_channel->send_request = __send_request_direct;
	fs->sync_target.sync_fs_channel->bs_channel = spdk_bs_alloc_io_channel(fs->bs);
	fs->sync_target.sync_fs_channel->send_request = __send_request_direct;

	initialize_global_cache();
}
508 
/* Completion callback for spdk_bs_init(): finish filesystem setup on
 * success, then invoke the user's callback and recycle the request.
 */
static void
init_cb(void *ctx, struct spdk_blob_store *bs, int bserrno)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_filesystem *fs = args->fs;

	if (bserrno == 0) {
		common_fs_bs_init(fs, bs);
	} else {
		/* NOTE(review): on failure only the spdk_filesystem struct is
		 * freed here; the io devices registered in fs_alloc() and their
		 * channels are not torn down (compare the -ENOMEM path in
		 * spdk_fs_init()).  Confirm whether this leak is intentional. */
		free(fs);
		fs = NULL;
	}

	args->fn.fs_op_with_handle(args->arg, fs, bserrno);
	free_fs_request(req);
}
526 
/* Allocate a filesystem object and register its three io_devices (md, sync,
 * io), immediately taking channels on the md and sync targets so metadata
 * and blocking operations can run.  Returns NULL on allocation failure.
 */
static struct spdk_filesystem *
fs_alloc(struct spdk_bs_dev *dev, fs_send_request_fn send_request_fn)
{
	struct spdk_filesystem *fs;

	fs = calloc(1, sizeof(*fs));
	if (fs == NULL) {
		return NULL;
	}

	fs->bdev = dev;
	fs->send_request = send_request_fn;
	TAILQ_INIT(&fs->files);

	fs->md_target.max_ops = 512;
	spdk_io_device_register(&fs->md_target, fs_md_channel_create, fs_channel_destroy,
				sizeof(struct spdk_fs_channel), "blobfs_md");
	fs->md_target.md_io_channel = spdk_get_io_channel(&fs->md_target);
	fs->md_target.md_fs_channel = spdk_io_channel_get_ctx(fs->md_target.md_io_channel);

	fs->sync_target.max_ops = 512;
	spdk_io_device_register(&fs->sync_target, fs_sync_channel_create, fs_channel_destroy,
				sizeof(struct spdk_fs_channel), "blobfs_sync");
	fs->sync_target.sync_io_channel = spdk_get_io_channel(&fs->sync_target);
	fs->sync_target.sync_fs_channel = spdk_io_channel_get_ctx(fs->sync_target.sync_io_channel);

	/* The io target registers channels on demand; none is taken here. */
	fs->io_target.max_ops = 512;
	spdk_io_device_register(&fs->io_target, fs_io_channel_create, fs_channel_destroy,
				sizeof(struct spdk_fs_channel), "blobfs_io");

	return fs;
}
559 
560 static void
561 __wake_caller(void *arg, int fserrno)
562 {
563 	struct spdk_fs_cb_args *args = arg;
564 
565 	if ((args->rwerrno != NULL) && (*(args->rwerrno) == 0) && fserrno) {
566 		*(args->rwerrno) = fserrno;
567 	}
568 	args->rc = fserrno;
569 	sem_post(args->sem);
570 }
571 
/* Create a brand-new blobfs on `dev`.  Allocates the filesystem, stamps the
 * blobstore with the BLOBFS signature (and optional cluster size from `opt`),
 * and initializes the blobstore; `cb_fn` fires from init_cb when done.
 */
void
spdk_fs_init(struct spdk_bs_dev *dev, struct spdk_blobfs_opts *opt,
	     fs_send_request_fn send_request_fn,
	     spdk_fs_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_filesystem *fs;
	struct spdk_fs_request *req;
	struct spdk_fs_cb_args *args;
	struct spdk_bs_opts opts = {};

	fs = fs_alloc(dev, send_request_fn);
	if (fs == NULL) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	req = alloc_fs_request(fs->md_target.md_fs_channel);
	if (req == NULL) {
		/* Undo fs_alloc(): drop channels, then unregister and free. */
		fs_free_io_channels(fs);
		fs_io_device_unregister(fs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	args = &req->args;
	args->fn.fs_op_with_handle = cb_fn;
	args->arg = cb_arg;
	args->fs = fs;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), SPDK_BLOBFS_SIGNATURE);
	if (opt) {
		opts.cluster_sz = opt->cluster_sz;
	}
	spdk_bs_init(dev, &opts, init_cb, req);
}
608 
609 static struct spdk_file *
610 file_alloc(struct spdk_filesystem *fs)
611 {
612 	struct spdk_file *file;
613 
614 	file = calloc(1, sizeof(*file));
615 	if (file == NULL) {
616 		return NULL;
617 	}
618 
619 	file->tree = calloc(1, sizeof(*file->tree));
620 	if (file->tree == NULL) {
621 		free(file);
622 		return NULL;
623 	}
624 
625 	if (pthread_spin_init(&file->lock, 0)) {
626 		free(file->tree);
627 		free(file);
628 		return NULL;
629 	}
630 
631 	file->fs = fs;
632 	TAILQ_INIT(&file->open_requests);
633 	TAILQ_INIT(&file->sync_requests);
634 	TAILQ_INSERT_TAIL(&fs->files, file, tailq);
635 	file->priority = SPDK_FILE_PRIORITY_LOW;
636 	return file;
637 }
638 
639 static void fs_load_done(void *ctx, int bserrno);
640 
641 static int
642 _handle_deleted_files(struct spdk_fs_request *req)
643 {
644 	struct spdk_fs_cb_args *args = &req->args;
645 	struct spdk_filesystem *fs = args->fs;
646 
647 	if (!TAILQ_EMPTY(&args->op.fs_load.deleted_files)) {
648 		struct spdk_deleted_file *deleted_file;
649 
650 		deleted_file = TAILQ_FIRST(&args->op.fs_load.deleted_files);
651 		TAILQ_REMOVE(&args->op.fs_load.deleted_files, deleted_file, tailq);
652 		spdk_bs_delete_blob(fs->bs, deleted_file->id, fs_load_done, req);
653 		free(deleted_file);
654 		return 0;
655 	}
656 
657 	return 1;
658 }
659 
/* Continuation for filesystem load: drains the deleted-files list one blob
 * at a time (re-entered as each delete completes), then fires the user's
 * load callback.
 */
static void
fs_load_done(void *ctx, int bserrno)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_filesystem *fs = args->fs;

	/* The filesystem has been loaded.  Now check if there are any files that
	 *  were marked for deletion before last unload.  Do not complete the
	 *  fs_load callback until all of them have been deleted on disk.
	 */
	if (_handle_deleted_files(req) == 0) {
		/* We found a file that's been marked for deleting but not actually
		 *  deleted yet.  This function will get called again once the delete
		 *  operation is completed.
		 */
		return;
	}

	args->fn.fs_op_with_handle(args->arg, fs, 0);
	free_fs_request(req);

}
683 
/* Blob iterator callback during spdk_bs_load(): reconstructs the in-memory
 * file list from each blob's "name" and "length" xattrs.  Blobs carrying an
 * "is_deleted" xattr were deleted without being closed before a crash; they
 * are queued on op.fs_load.deleted_files for removal (see fs_load_done).
 * Any error completes the load callback immediately with that error.
 */
static void
iter_cb(void *ctx, struct spdk_blob *blob, int rc)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_filesystem *fs = args->fs;
	uint64_t *length;
	const char *name;
	uint32_t *is_deleted;
	size_t value_len;

	if (rc < 0) {
		args->fn.fs_op_with_handle(args->arg, fs, rc);
		free_fs_request(req);
		return;
	}

	rc = spdk_blob_get_xattr_value(blob, "name", (const void **)&name, &value_len);
	if (rc < 0) {
		args->fn.fs_op_with_handle(args->arg, fs, rc);
		free_fs_request(req);
		return;
	}

	rc = spdk_blob_get_xattr_value(blob, "length", (const void **)&length, &value_len);
	if (rc < 0) {
		args->fn.fs_op_with_handle(args->arg, fs, rc);
		free_fs_request(req);
		return;
	}

	/* "length" is persisted as a uint64_t. */
	assert(value_len == 8);

	/* This file could be deleted last time without close it, then app crashed, so we delete it now */
	rc = spdk_blob_get_xattr_value(blob, "is_deleted", (const void **)&is_deleted, &value_len);
	if (rc < 0) {
		/* No "is_deleted" xattr: a live file - rebuild its in-memory state. */
		struct spdk_file *f;

		f = file_alloc(fs);
		if (f == NULL) {
			SPDK_ERRLOG("Cannot allocate file to handle deleted file on disk\n");
			args->fn.fs_op_with_handle(args->arg, fs, -ENOMEM);
			free_fs_request(req);
			return;
		}

		f->name = strdup(name);
		if (!f->name) {
			SPDK_ERRLOG("Cannot allocate memory for file name\n");
			args->fn.fs_op_with_handle(args->arg, fs, -ENOMEM);
			free_fs_request(req);
			file_free(f);
			return;
		}

		f->blobid = spdk_blob_get_id(blob);
		f->length = *length;
		f->length_flushed = *length;
		f->length_xattr = *length;
		f->append_pos = *length;
		SPDK_DEBUGLOG(blobfs, "added file %s length=%ju\n", f->name, f->length);
	} else {
		/* Marked deleted: remember the blob id so load can remove it. */
		struct spdk_deleted_file *deleted_file;

		deleted_file = calloc(1, sizeof(*deleted_file));
		if (deleted_file == NULL) {
			args->fn.fs_op_with_handle(args->arg, fs, -ENOMEM);
			free_fs_request(req);
			return;
		}
		deleted_file->id = spdk_blob_get_id(blob);
		TAILQ_INSERT_TAIL(&args->op.fs_load.deleted_files, deleted_file, tailq);
	}
}
758 
/* Completion callback for spdk_bs_load(): verify (or assign) the BLOBFS
 * bstype signature, wire up the filesystem, and continue via fs_load_done.
 * A blobstore with a non-blobfs signature fails with -EINVAL.
 */
static void
load_cb(void *ctx, struct spdk_blob_store *bs, int bserrno)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_filesystem *fs = args->fs;
	struct spdk_bs_type bstype;
	static const struct spdk_bs_type blobfs_type = {SPDK_BLOBFS_SIGNATURE};
	static const struct spdk_bs_type zeros;

	if (bserrno != 0) {
		args->fn.fs_op_with_handle(args->arg, NULL, bserrno);
		free_fs_request(req);
		fs_free_io_channels(fs);
		fs_io_device_unregister(fs);
		return;
	}

	bstype = spdk_bs_get_bstype(bs);

	if (!memcmp(&bstype, &zeros, sizeof(bstype))) {
		/* Unstamped blobstore: claim it for blobfs. */
		SPDK_DEBUGLOG(blobfs, "assigning bstype\n");
		spdk_bs_set_bstype(bs, blobfs_type);
	} else if (memcmp(&bstype, &blobfs_type, sizeof(bstype))) {
		SPDK_ERRLOG("not blobfs\n");
		SPDK_LOGDUMP(blobfs, "bstype", &bstype, sizeof(bstype));
		args->fn.fs_op_with_handle(args->arg, NULL, -EINVAL);
		free_fs_request(req);
		fs_free_io_channels(fs);
		fs_io_device_unregister(fs);
		return;
	}

	common_fs_bs_init(fs, bs);
	fs_load_done(req, 0);
}
795 
/* Unregister all three io_devices and free the filesystem struct.
 * NOTE(review): spdk_io_device_unregister() completes asynchronously and
 * `fs` (which embeds the io_device structs) is freed immediately here -
 * confirm the framework copies/queues what it needs before returning.
 */
static void
fs_io_device_unregister(struct spdk_filesystem *fs)
{
	assert(fs != NULL);
	spdk_io_device_unregister(&fs->md_target, NULL);
	spdk_io_device_unregister(&fs->sync_target, NULL);
	spdk_io_device_unregister(&fs->io_target, NULL);
	free(fs);
}
805 
806 static void
807 fs_free_io_channels(struct spdk_filesystem *fs)
808 {
809 	assert(fs != NULL);
810 	spdk_fs_free_io_channel(fs->md_target.md_io_channel);
811 	spdk_fs_free_io_channel(fs->sync_target.sync_io_channel);
812 }
813 
/* Load an existing blobfs from `dev`.  Registers iter_cb so every blob is
 * visited during blobstore load to rebuild the file list; load_cb completes
 * signature checks and invokes `cb_fn`.
 */
void
spdk_fs_load(struct spdk_bs_dev *dev, fs_send_request_fn send_request_fn,
	     spdk_fs_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_filesystem *fs;
	struct spdk_fs_cb_args *args;
	struct spdk_fs_request *req;
	struct spdk_bs_opts	bs_opts;

	fs = fs_alloc(dev, send_request_fn);
	if (fs == NULL) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	req = alloc_fs_request(fs->md_target.md_fs_channel);
	if (req == NULL) {
		/* Undo fs_alloc(): drop channels, then unregister and free. */
		fs_free_io_channels(fs);
		fs_io_device_unregister(fs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	args = &req->args;
	args->fn.fs_op_with_handle = cb_fn;
	args->arg = cb_arg;
	args->fs = fs;
	TAILQ_INIT(&args->op.fs_load.deleted_files);
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.iter_cb_fn = iter_cb;
	bs_opts.iter_cb_arg = req;
	spdk_bs_load(dev, &bs_opts, load_cb, req);
}
847 
/* Completion callback for spdk_bs_unload(): free all file objects, drop the
 * global cache reference, notify the caller, then tear down the io devices.
 * The request was heap-allocated in spdk_fs_unload(), so plain free() here.
 */
static void
unload_cb(void *ctx, int bserrno)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_filesystem *fs = args->fs;
	struct spdk_file *file, *tmp;

	TAILQ_FOREACH_SAFE(file, &fs->files, tailq, tmp) {
		TAILQ_REMOVE(&fs->files, file, tailq);
		file_free(file);
	}

	free_global_cache();

	args->fn.fs_op(args->arg, bserrno);
	free(req);

	fs_io_device_unregister(fs);
}
868 
/* Unload the filesystem: release the md/sync channels, then unload the
 * blobstore; unload_cb finishes cleanup and invokes `cb_fn`.
 */
void
spdk_fs_unload(struct spdk_filesystem *fs, spdk_fs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_fs_request *req;
	struct spdk_fs_cb_args *args;

	/*
	 * We must free the md_channel before unloading the blobstore, so just
	 *  allocate this request from the general heap.
	 */
	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	args = &req->args;
	args->fn.fs_op = cb_fn;
	args->arg = cb_arg;
	args->fs = fs;

	fs_free_io_channels(fs);
	spdk_bs_unload(fs->bs, unload_cb, req);
}
893 
894 static struct spdk_file *
895 fs_find_file(struct spdk_filesystem *fs, const char *name)
896 {
897 	struct spdk_file *file;
898 
899 	TAILQ_FOREACH(file, &fs->files, tailq) {
900 		if (!strncmp(name, file->name, SPDK_FILE_NAME_MAX)) {
901 			return file;
902 		}
903 	}
904 
905 	return NULL;
906 }
907 
908 void
909 spdk_fs_file_stat_async(struct spdk_filesystem *fs, const char *name,
910 			spdk_file_stat_op_complete cb_fn, void *cb_arg)
911 {
912 	struct spdk_file_stat stat;
913 	struct spdk_file *f = NULL;
914 
915 	if (strnlen(name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
916 		cb_fn(cb_arg, NULL, -ENAMETOOLONG);
917 		return;
918 	}
919 
920 	f = fs_find_file(fs, name);
921 	if (f != NULL) {
922 		stat.blobid = f->blobid;
923 		stat.size = f->append_pos >= f->length ? f->append_pos : f->length;
924 		cb_fn(cb_arg, &stat, 0);
925 		return;
926 	}
927 
928 	cb_fn(cb_arg, NULL, -ENOENT);
929 }
930 
931 static void
932 __copy_stat(void *arg, struct spdk_file_stat *stat, int fserrno)
933 {
934 	struct spdk_fs_request *req = arg;
935 	struct spdk_fs_cb_args *args = &req->args;
936 
937 	args->rc = fserrno;
938 	if (fserrno == 0) {
939 		memcpy(args->arg, stat, sizeof(*stat));
940 	}
941 	sem_post(args->sem);
942 }
943 
944 static void
945 __file_stat(void *arg)
946 {
947 	struct spdk_fs_request *req = arg;
948 	struct spdk_fs_cb_args *args = &req->args;
949 
950 	spdk_fs_file_stat_async(args->fs, args->op.stat.name,
951 				args->fn.stat_op, req);
952 }
953 
/* Synchronous stat: dispatch __file_stat via the channel's send_request and
 * block on the channel semaphore until __copy_stat posts it.
 * Returns 0 on success or a negative errno.
 */
int
spdk_fs_file_stat(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx,
		  const char *name, struct spdk_file_stat *stat)
{
	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
	struct spdk_fs_request *req;
	int rc;

	req = alloc_fs_request(channel);
	if (req == NULL) {
		SPDK_ERRLOG("Cannot allocate stat req on file=%s\n", name);
		return -ENOMEM;
	}

	req->args.fs = fs;
	req->args.op.stat.name = name;
	req->args.fn.stat_op = __copy_stat;
	req->args.arg = stat;	/* __copy_stat writes the result here */
	req->args.sem = &channel->sem;
	channel->send_request(__file_stat, req);
	sem_wait(&channel->sem);

	rc = req->args.rc;
	free_fs_request(req);

	return rc;
}
981 
982 static void
983 fs_create_blob_close_cb(void *ctx, int bserrno)
984 {
985 	int rc;
986 	struct spdk_fs_request *req = ctx;
987 	struct spdk_fs_cb_args *args = &req->args;
988 
989 	rc = args->rc ? args->rc : bserrno;
990 	args->fn.file_op(args->arg, rc);
991 	free_fs_request(req);
992 }
993 
994 static void
995 fs_create_blob_resize_cb(void *ctx, int bserrno)
996 {
997 	struct spdk_fs_request *req = ctx;
998 	struct spdk_fs_cb_args *args = &req->args;
999 	struct spdk_file *f = args->file;
1000 	struct spdk_blob *blob = args->op.create.blob;
1001 	uint64_t length = 0;
1002 
1003 	args->rc = bserrno;
1004 	if (bserrno) {
1005 		spdk_blob_close(blob, fs_create_blob_close_cb, args);
1006 		return;
1007 	}
1008 
1009 	spdk_blob_set_xattr(blob, "name", f->name, strlen(f->name) + 1);
1010 	spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
1011 
1012 	spdk_blob_close(blob, fs_create_blob_close_cb, args);
1013 }
1014 
1015 static void
1016 fs_create_blob_open_cb(void *ctx, struct spdk_blob *blob, int bserrno)
1017 {
1018 	struct spdk_fs_request *req = ctx;
1019 	struct spdk_fs_cb_args *args = &req->args;
1020 
1021 	if (bserrno) {
1022 		args->fn.file_op(args->arg, bserrno);
1023 		free_fs_request(req);
1024 		return;
1025 	}
1026 
1027 	args->op.create.blob = blob;
1028 	spdk_blob_resize(blob, 1, fs_create_blob_resize_cb, req);
1029 }
1030 
1031 static void
1032 fs_create_blob_create_cb(void *ctx, spdk_blob_id blobid, int bserrno)
1033 {
1034 	struct spdk_fs_request *req = ctx;
1035 	struct spdk_fs_cb_args *args = &req->args;
1036 	struct spdk_file *f = args->file;
1037 
1038 	if (bserrno) {
1039 		args->fn.file_op(args->arg, bserrno);
1040 		free_fs_request(req);
1041 		return;
1042 	}
1043 
1044 	f->blobid = blobid;
1045 	spdk_bs_open_blob(f->fs->bs, blobid, fs_create_blob_open_cb, req);
1046 }
1047 
1048 void
1049 spdk_fs_create_file_async(struct spdk_filesystem *fs, const char *name,
1050 			  spdk_file_op_complete cb_fn, void *cb_arg)
1051 {
1052 	struct spdk_file *file;
1053 	struct spdk_fs_request *req;
1054 	struct spdk_fs_cb_args *args;
1055 
1056 	if (strnlen(name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
1057 		cb_fn(cb_arg, -ENAMETOOLONG);
1058 		return;
1059 	}
1060 
1061 	file = fs_find_file(fs, name);
1062 	if (file != NULL) {
1063 		cb_fn(cb_arg, -EEXIST);
1064 		return;
1065 	}
1066 
1067 	file = file_alloc(fs);
1068 	if (file == NULL) {
1069 		SPDK_ERRLOG("Cannot allocate new file for creation\n");
1070 		cb_fn(cb_arg, -ENOMEM);
1071 		return;
1072 	}
1073 
1074 	req = alloc_fs_request(fs->md_target.md_fs_channel);
1075 	if (req == NULL) {
1076 		SPDK_ERRLOG("Cannot allocate create async req for file=%s\n", name);
1077 		TAILQ_REMOVE(&fs->files, file, tailq);
1078 		file_free(file);
1079 		cb_fn(cb_arg, -ENOMEM);
1080 		return;
1081 	}
1082 
1083 	args = &req->args;
1084 	args->file = file;
1085 	args->fn.file_op = cb_fn;
1086 	args->arg = cb_arg;
1087 
1088 	file->name = strdup(name);
1089 	if (!file->name) {
1090 		SPDK_ERRLOG("Cannot allocate file->name for file=%s\n", name);
1091 		free_fs_request(req);
1092 		TAILQ_REMOVE(&fs->files, file, tailq);
1093 		file_free(file);
1094 		cb_fn(cb_arg, -ENOMEM);
1095 		return;
1096 	}
1097 	spdk_bs_create_blob(fs->bs, fs_create_blob_create_cb, args);
1098 }
1099 
1100 static void
1101 __fs_create_file_done(void *arg, int fserrno)
1102 {
1103 	struct spdk_fs_request *req = arg;
1104 	struct spdk_fs_cb_args *args = &req->args;
1105 
1106 	SPDK_DEBUGLOG(blobfs, "file=%s\n", args->op.create.name);
1107 	__wake_caller(args, fserrno);
1108 }
1109 
1110 static void
1111 __fs_create_file(void *arg)
1112 {
1113 	struct spdk_fs_request *req = arg;
1114 	struct spdk_fs_cb_args *args = &req->args;
1115 
1116 	SPDK_DEBUGLOG(blobfs, "file=%s\n", args->op.create.name);
1117 	spdk_fs_create_file_async(args->fs, args->op.create.name, __fs_create_file_done, req);
1118 }
1119 
/* Synchronous file create: dispatch __fs_create_file via the filesystem's
 * send_request and block on the channel semaphore until __fs_create_file_done
 * wakes us.  Returns 0 on success or a negative errno.
 */
int
spdk_fs_create_file(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx, const char *name)
{
	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
	struct spdk_fs_request *req;
	struct spdk_fs_cb_args *args;
	int rc;

	SPDK_DEBUGLOG(blobfs, "file=%s\n", name);

	req = alloc_fs_request(channel);
	if (req == NULL) {
		SPDK_ERRLOG("Cannot allocate req to create file=%s\n", name);
		return -ENOMEM;
	}

	args = &req->args;
	args->fs = fs;
	args->op.create.name = name;
	args->sem = &channel->sem;
	fs->send_request(__fs_create_file, req);
	sem_wait(&channel->sem);
	rc = args->rc;
	free_fs_request(req);

	return rc;
}
1147 
1148 static void
1149 fs_open_blob_done(void *ctx, struct spdk_blob *blob, int bserrno)
1150 {
1151 	struct spdk_fs_request *req = ctx;
1152 	struct spdk_fs_cb_args *args = &req->args;
1153 	struct spdk_file *f = args->file;
1154 
1155 	f->blob = blob;
1156 	while (!TAILQ_EMPTY(&f->open_requests)) {
1157 		req = TAILQ_FIRST(&f->open_requests);
1158 		args = &req->args;
1159 		TAILQ_REMOVE(&f->open_requests, req, args.op.open.tailq);
1160 		spdk_trace_record(TRACE_BLOBFS_OPEN, 0, 0, 0, f->name);
1161 		args->fn.file_op_with_handle(args->arg, f, bserrno);
1162 		free_fs_request(req);
1163 	}
1164 }
1165 
/*
 * Called once the file object for an open exists (either it was found, or
 * it was just created for an open with SPDK_BLOBFS_OPEN_CREATE; bserrno
 * from the create step is not inspected here).  Queues this open request on
 * the file and, for the first opener, starts the blob open; later openers
 * either complete immediately (blob already open) or wait for the in-flight
 * blob open to finish.
 */
static void
fs_open_blob_create_cb(void *ctx, int bserrno)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_file *file = args->file;
	struct spdk_filesystem *fs = args->fs;

	if (file == NULL) {
		/*
		 * This is from an open with CREATE flag - the file
		 *  is now created so look it up in the file list for this
		 *  filesystem.
		 */
		file = fs_find_file(fs, args->op.open.name);
		assert(file != NULL);
		args->file = file;
	}

	file->ref_count++;
	/* Park this request; fs_open_blob_done() drains open_requests. */
	TAILQ_INSERT_TAIL(&file->open_requests, req, args.op.open.tailq);
	if (file->ref_count == 1) {
		/* First opener - the blob is not open yet, so open it now. */
		assert(file->blob == NULL);
		spdk_bs_open_blob(fs->bs, file->blobid, fs_open_blob_done, req);
	} else if (file->blob != NULL) {
		/* Blob already open - complete this request right away. */
		fs_open_blob_done(req, file->blob, 0);
	} else {
		/*
		 * The blob open for this file is in progress due to a previous
		 *  open request.  When that open completes, it will invoke the
		 *  open callback for this request.
		 */
	}
}
1200 
1201 void
1202 spdk_fs_open_file_async(struct spdk_filesystem *fs, const char *name, uint32_t flags,
1203 			spdk_file_op_with_handle_complete cb_fn, void *cb_arg)
1204 {
1205 	struct spdk_file *f = NULL;
1206 	struct spdk_fs_request *req;
1207 	struct spdk_fs_cb_args *args;
1208 
1209 	if (strnlen(name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
1210 		cb_fn(cb_arg, NULL, -ENAMETOOLONG);
1211 		return;
1212 	}
1213 
1214 	f = fs_find_file(fs, name);
1215 	if (f == NULL && !(flags & SPDK_BLOBFS_OPEN_CREATE)) {
1216 		cb_fn(cb_arg, NULL, -ENOENT);
1217 		return;
1218 	}
1219 
1220 	if (f != NULL && f->is_deleted == true) {
1221 		cb_fn(cb_arg, NULL, -ENOENT);
1222 		return;
1223 	}
1224 
1225 	req = alloc_fs_request(fs->md_target.md_fs_channel);
1226 	if (req == NULL) {
1227 		SPDK_ERRLOG("Cannot allocate async open req for file=%s\n", name);
1228 		cb_fn(cb_arg, NULL, -ENOMEM);
1229 		return;
1230 	}
1231 
1232 	args = &req->args;
1233 	args->fn.file_op_with_handle = cb_fn;
1234 	args->arg = cb_arg;
1235 	args->file = f;
1236 	args->fs = fs;
1237 	args->op.open.name = name;
1238 
1239 	if (f == NULL) {
1240 		spdk_fs_create_file_async(fs, name, fs_open_blob_create_cb, req);
1241 	} else {
1242 		fs_open_blob_create_cb(req, 0);
1243 	}
1244 }
1245 
1246 static void
1247 __fs_open_file_done(void *arg, struct spdk_file *file, int bserrno)
1248 {
1249 	struct spdk_fs_request *req = arg;
1250 	struct spdk_fs_cb_args *args = &req->args;
1251 
1252 	args->file = file;
1253 	SPDK_DEBUGLOG(blobfs, "file=%s\n", args->op.open.name);
1254 	__wake_caller(args, bserrno);
1255 }
1256 
1257 static void
1258 __fs_open_file(void *arg)
1259 {
1260 	struct spdk_fs_request *req = arg;
1261 	struct spdk_fs_cb_args *args = &req->args;
1262 
1263 	SPDK_DEBUGLOG(blobfs, "file=%s\n", args->op.open.name);
1264 	spdk_fs_open_file_async(args->fs, args->op.open.name, args->op.open.flags,
1265 				__fs_open_file_done, req);
1266 }
1267 
1268 int
1269 spdk_fs_open_file(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx,
1270 		  const char *name, uint32_t flags, struct spdk_file **file)
1271 {
1272 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1273 	struct spdk_fs_request *req;
1274 	struct spdk_fs_cb_args *args;
1275 	int rc;
1276 
1277 	SPDK_DEBUGLOG(blobfs, "file=%s\n", name);
1278 
1279 	req = alloc_fs_request(channel);
1280 	if (req == NULL) {
1281 		SPDK_ERRLOG("Cannot allocate req for opening file=%s\n", name);
1282 		return -ENOMEM;
1283 	}
1284 
1285 	args = &req->args;
1286 	args->fs = fs;
1287 	args->op.open.name = name;
1288 	args->op.open.flags = flags;
1289 	args->sem = &channel->sem;
1290 	fs->send_request(__fs_open_file, req);
1291 	sem_wait(&channel->sem);
1292 	rc = args->rc;
1293 	if (rc == 0) {
1294 		*file = args->file;
1295 	} else {
1296 		*file = NULL;
1297 	}
1298 	free_fs_request(req);
1299 
1300 	return rc;
1301 }
1302 
1303 static void
1304 fs_rename_blob_close_cb(void *ctx, int bserrno)
1305 {
1306 	struct spdk_fs_request *req = ctx;
1307 	struct spdk_fs_cb_args *args = &req->args;
1308 
1309 	args->fn.fs_op(args->arg, bserrno);
1310 	free_fs_request(req);
1311 }
1312 
1313 static void
1314 fs_rename_blob_open_cb(void *ctx, struct spdk_blob *blob, int bserrno)
1315 {
1316 	struct spdk_fs_request *req = ctx;
1317 	struct spdk_fs_cb_args *args = &req->args;
1318 	const char *new_name = args->op.rename.new_name;
1319 
1320 	spdk_blob_set_xattr(blob, "name", new_name, strlen(new_name) + 1);
1321 	spdk_blob_close(blob, fs_rename_blob_close_cb, req);
1322 }
1323 
1324 static void
1325 _fs_md_rename_file(struct spdk_fs_request *req)
1326 {
1327 	struct spdk_fs_cb_args *args = &req->args;
1328 	struct spdk_file *f;
1329 
1330 	f = fs_find_file(args->fs, args->op.rename.old_name);
1331 	if (f == NULL) {
1332 		args->fn.fs_op(args->arg, -ENOENT);
1333 		free_fs_request(req);
1334 		return;
1335 	}
1336 
1337 	free(f->name);
1338 	f->name = strdup(args->op.rename.new_name);
1339 	if (!f->name) {
1340 		SPDK_ERRLOG("Cannot allocate memory for file name\n");
1341 		args->fn.fs_op(args->arg, -ENOMEM);
1342 		free_fs_request(req);
1343 		return;
1344 	}
1345 
1346 	args->file = f;
1347 	spdk_bs_open_blob(args->fs->bs, f->blobid, fs_rename_blob_open_cb, req);
1348 }
1349 
/* Completion of the implicit delete performed when a rename overwrites an
 * existing file.  The delete status is intentionally ignored; the rename
 * proceeds regardless. */
static void
fs_rename_delete_done(void *arg, int fserrno)
{
	(void)fserrno;
	_fs_md_rename_file(arg);
}
1355 
1356 void
1357 spdk_fs_rename_file_async(struct spdk_filesystem *fs,
1358 			  const char *old_name, const char *new_name,
1359 			  spdk_file_op_complete cb_fn, void *cb_arg)
1360 {
1361 	struct spdk_file *f;
1362 	struct spdk_fs_request *req;
1363 	struct spdk_fs_cb_args *args;
1364 
1365 	SPDK_DEBUGLOG(blobfs, "old=%s new=%s\n", old_name, new_name);
1366 	if (strnlen(new_name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
1367 		cb_fn(cb_arg, -ENAMETOOLONG);
1368 		return;
1369 	}
1370 
1371 	req = alloc_fs_request(fs->md_target.md_fs_channel);
1372 	if (req == NULL) {
1373 		SPDK_ERRLOG("Cannot allocate rename async req for renaming file from %s to %s\n", old_name,
1374 			    new_name);
1375 		cb_fn(cb_arg, -ENOMEM);
1376 		return;
1377 	}
1378 
1379 	args = &req->args;
1380 	args->fn.fs_op = cb_fn;
1381 	args->fs = fs;
1382 	args->arg = cb_arg;
1383 	args->op.rename.old_name = old_name;
1384 	args->op.rename.new_name = new_name;
1385 
1386 	f = fs_find_file(fs, new_name);
1387 	if (f == NULL) {
1388 		_fs_md_rename_file(req);
1389 		return;
1390 	}
1391 
1392 	/*
1393 	 * The rename overwrites an existing file.  So delete the existing file, then
1394 	 *  do the actual rename.
1395 	 */
1396 	spdk_fs_delete_file_async(fs, new_name, fs_rename_delete_done, req);
1397 }
1398 
1399 static void
1400 __fs_rename_file_done(void *arg, int fserrno)
1401 {
1402 	struct spdk_fs_request *req = arg;
1403 	struct spdk_fs_cb_args *args = &req->args;
1404 
1405 	__wake_caller(args, fserrno);
1406 }
1407 
1408 static void
1409 __fs_rename_file(void *arg)
1410 {
1411 	struct spdk_fs_request *req = arg;
1412 	struct spdk_fs_cb_args *args = &req->args;
1413 
1414 	spdk_fs_rename_file_async(args->fs, args->op.rename.old_name, args->op.rename.new_name,
1415 				  __fs_rename_file_done, req);
1416 }
1417 
1418 int
1419 spdk_fs_rename_file(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx,
1420 		    const char *old_name, const char *new_name)
1421 {
1422 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1423 	struct spdk_fs_request *req;
1424 	struct spdk_fs_cb_args *args;
1425 	int rc;
1426 
1427 	req = alloc_fs_request(channel);
1428 	if (req == NULL) {
1429 		SPDK_ERRLOG("Cannot allocate rename req for file=%s\n", old_name);
1430 		return -ENOMEM;
1431 	}
1432 
1433 	args = &req->args;
1434 
1435 	args->fs = fs;
1436 	args->op.rename.old_name = old_name;
1437 	args->op.rename.new_name = new_name;
1438 	args->sem = &channel->sem;
1439 	fs->send_request(__fs_rename_file, req);
1440 	sem_wait(&channel->sem);
1441 	rc = args->rc;
1442 	free_fs_request(req);
1443 	return rc;
1444 }
1445 
1446 static void
1447 blob_delete_cb(void *ctx, int bserrno)
1448 {
1449 	struct spdk_fs_request *req = ctx;
1450 	struct spdk_fs_cb_args *args = &req->args;
1451 
1452 	args->fn.file_op(args->arg, bserrno);
1453 	free_fs_request(req);
1454 }
1455 
1456 void
1457 spdk_fs_delete_file_async(struct spdk_filesystem *fs, const char *name,
1458 			  spdk_file_op_complete cb_fn, void *cb_arg)
1459 {
1460 	struct spdk_file *f;
1461 	spdk_blob_id blobid;
1462 	struct spdk_fs_request *req;
1463 	struct spdk_fs_cb_args *args;
1464 
1465 	SPDK_DEBUGLOG(blobfs, "file=%s\n", name);
1466 
1467 	if (strnlen(name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
1468 		cb_fn(cb_arg, -ENAMETOOLONG);
1469 		return;
1470 	}
1471 
1472 	f = fs_find_file(fs, name);
1473 	if (f == NULL) {
1474 		SPDK_ERRLOG("Cannot find the file=%s to deleted\n", name);
1475 		cb_fn(cb_arg, -ENOENT);
1476 		return;
1477 	}
1478 
1479 	req = alloc_fs_request(fs->md_target.md_fs_channel);
1480 	if (req == NULL) {
1481 		SPDK_ERRLOG("Cannot allocate the req for the file=%s to deleted\n", name);
1482 		cb_fn(cb_arg, -ENOMEM);
1483 		return;
1484 	}
1485 
1486 	args = &req->args;
1487 	args->fn.file_op = cb_fn;
1488 	args->arg = cb_arg;
1489 
1490 	if (f->ref_count > 0) {
1491 		/* If the ref > 0, we mark the file as deleted and delete it when we close it. */
1492 		f->is_deleted = true;
1493 		spdk_blob_set_xattr(f->blob, "is_deleted", &f->is_deleted, sizeof(bool));
1494 		spdk_blob_sync_md(f->blob, blob_delete_cb, req);
1495 		return;
1496 	}
1497 
1498 	blobid = f->blobid;
1499 	TAILQ_REMOVE(&fs->files, f, tailq);
1500 
1501 	file_free(f);
1502 
1503 	spdk_bs_delete_blob(fs->bs, blobid, blob_delete_cb, req);
1504 }
1505 
1506 static void
1507 __fs_delete_file_done(void *arg, int fserrno)
1508 {
1509 	struct spdk_fs_request *req = arg;
1510 	struct spdk_fs_cb_args *args = &req->args;
1511 
1512 	spdk_trace_record(TRACE_BLOBFS_DELETE_DONE, 0, 0, 0, args->op.delete.name);
1513 	__wake_caller(args, fserrno);
1514 }
1515 
1516 static void
1517 __fs_delete_file(void *arg)
1518 {
1519 	struct spdk_fs_request *req = arg;
1520 	struct spdk_fs_cb_args *args = &req->args;
1521 
1522 	spdk_trace_record(TRACE_BLOBFS_DELETE_START, 0, 0, 0, args->op.delete.name);
1523 	spdk_fs_delete_file_async(args->fs, args->op.delete.name, __fs_delete_file_done, req);
1524 }
1525 
1526 int
1527 spdk_fs_delete_file(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx,
1528 		    const char *name)
1529 {
1530 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1531 	struct spdk_fs_request *req;
1532 	struct spdk_fs_cb_args *args;
1533 	int rc;
1534 
1535 	req = alloc_fs_request(channel);
1536 	if (req == NULL) {
1537 		SPDK_DEBUGLOG(blobfs, "Cannot allocate req to delete file=%s\n", name);
1538 		return -ENOMEM;
1539 	}
1540 
1541 	args = &req->args;
1542 	args->fs = fs;
1543 	args->op.delete.name = name;
1544 	args->sem = &channel->sem;
1545 	fs->send_request(__fs_delete_file, req);
1546 	sem_wait(&channel->sem);
1547 	rc = args->rc;
1548 	free_fs_request(req);
1549 
1550 	return rc;
1551 }
1552 
1553 spdk_fs_iter
1554 spdk_fs_iter_first(struct spdk_filesystem *fs)
1555 {
1556 	struct spdk_file *f;
1557 
1558 	f = TAILQ_FIRST(&fs->files);
1559 	return f;
1560 }
1561 
1562 spdk_fs_iter
1563 spdk_fs_iter_next(spdk_fs_iter iter)
1564 {
1565 	struct spdk_file *f = iter;
1566 
1567 	if (f == NULL) {
1568 		return NULL;
1569 	}
1570 
1571 	f = TAILQ_NEXT(f, tailq);
1572 	return f;
1573 }
1574 
1575 const char *
1576 spdk_file_get_name(struct spdk_file *file)
1577 {
1578 	return file->name;
1579 }
1580 
1581 uint64_t
1582 spdk_file_get_length(struct spdk_file *file)
1583 {
1584 	uint64_t length;
1585 
1586 	assert(file != NULL);
1587 
1588 	length = file->append_pos >= file->length ? file->append_pos : file->length;
1589 	SPDK_DEBUGLOG(blobfs, "file=%s length=0x%jx\n", file->name, length);
1590 	return length;
1591 }
1592 
1593 static void
1594 fs_truncate_complete_cb(void *ctx, int bserrno)
1595 {
1596 	struct spdk_fs_request *req = ctx;
1597 	struct spdk_fs_cb_args *args = &req->args;
1598 
1599 	args->fn.file_op(args->arg, bserrno);
1600 	free_fs_request(req);
1601 }
1602 
1603 static void
1604 fs_truncate_resize_cb(void *ctx, int bserrno)
1605 {
1606 	struct spdk_fs_request *req = ctx;
1607 	struct spdk_fs_cb_args *args = &req->args;
1608 	struct spdk_file *file = args->file;
1609 	uint64_t *length = &args->op.truncate.length;
1610 
1611 	if (bserrno) {
1612 		args->fn.file_op(args->arg, bserrno);
1613 		free_fs_request(req);
1614 		return;
1615 	}
1616 
1617 	spdk_blob_set_xattr(file->blob, "length", length, sizeof(*length));
1618 
1619 	file->length = *length;
1620 	if (file->append_pos > file->length) {
1621 		file->append_pos = file->length;
1622 	}
1623 
1624 	spdk_blob_sync_md(file->blob, fs_truncate_complete_cb, req);
1625 }
1626 
/*
 * Convert a byte count to the number of clusters needed to hold it,
 * rounding up.  Computed as quotient plus remainder-carry rather than
 * (length + cluster_sz - 1) / cluster_sz, which wraps around for byte
 * counts within cluster_sz - 1 of UINT64_MAX.
 */
static uint64_t
__bytes_to_clusters(uint64_t length, uint64_t cluster_sz)
{
	return length / cluster_sz + (length % cluster_sz != 0);
}
1632 
1633 void
1634 spdk_file_truncate_async(struct spdk_file *file, uint64_t length,
1635 			 spdk_file_op_complete cb_fn, void *cb_arg)
1636 {
1637 	struct spdk_filesystem *fs;
1638 	size_t num_clusters;
1639 	struct spdk_fs_request *req;
1640 	struct spdk_fs_cb_args *args;
1641 
1642 	SPDK_DEBUGLOG(blobfs, "file=%s old=0x%jx new=0x%jx\n", file->name, file->length, length);
1643 	if (length == file->length) {
1644 		cb_fn(cb_arg, 0);
1645 		return;
1646 	}
1647 
1648 	req = alloc_fs_request(file->fs->md_target.md_fs_channel);
1649 	if (req == NULL) {
1650 		cb_fn(cb_arg, -ENOMEM);
1651 		return;
1652 	}
1653 
1654 	args = &req->args;
1655 	args->fn.file_op = cb_fn;
1656 	args->arg = cb_arg;
1657 	args->file = file;
1658 	args->op.truncate.length = length;
1659 	fs = file->fs;
1660 
1661 	num_clusters = __bytes_to_clusters(length, fs->bs_opts.cluster_sz);
1662 
1663 	spdk_blob_resize(file->blob, num_clusters, fs_truncate_resize_cb, req);
1664 }
1665 
1666 static void
1667 __truncate(void *arg)
1668 {
1669 	struct spdk_fs_request *req = arg;
1670 	struct spdk_fs_cb_args *args = &req->args;
1671 
1672 	spdk_file_truncate_async(args->file, args->op.truncate.length,
1673 				 args->fn.file_op, args);
1674 }
1675 
1676 int
1677 spdk_file_truncate(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx,
1678 		   uint64_t length)
1679 {
1680 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1681 	struct spdk_fs_request *req;
1682 	struct spdk_fs_cb_args *args;
1683 	int rc;
1684 
1685 	req = alloc_fs_request(channel);
1686 	if (req == NULL) {
1687 		return -ENOMEM;
1688 	}
1689 
1690 	args = &req->args;
1691 
1692 	args->file = file;
1693 	args->op.truncate.length = length;
1694 	args->fn.file_op = __wake_caller;
1695 	args->sem = &channel->sem;
1696 
1697 	channel->send_request(__truncate, req);
1698 	sem_wait(&channel->sem);
1699 	rc = args->rc;
1700 	free_fs_request(req);
1701 
1702 	return rc;
1703 }
1704 
1705 static void
1706 __rw_done(void *ctx, int bserrno)
1707 {
1708 	struct spdk_fs_request *req = ctx;
1709 	struct spdk_fs_cb_args *args = &req->args;
1710 
1711 	spdk_free(args->op.rw.pin_buf);
1712 	args->fn.file_op(args->arg, bserrno);
1713 	free_fs_request(req);
1714 }
1715 
/*
 * Completion for the initial blob read of the unbuffered I/O path.
 *
 * For a read request, copy the requested slice of the pinned bounce buffer
 * out to the caller's iovecs and finish.  For a (read-modify-)write
 * request, merge the caller's data into the bounce buffer and write the
 * LBA-aligned region back to the blob; __rw_done() completes either way.
 */
static void
__read_done(void *ctx, int bserrno)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	void *buf;

	if (bserrno) {
		__rw_done(req, bserrno);
		return;
	}

	assert(req != NULL);
	/* Position of the caller's data within the aligned bounce buffer.
	 * NOTE(review): the mask assumes blocklen is a power of two - confirm
	 * against the io unit sizes spdk_bs_get_io_unit_size() can report. */
	buf = (void *)((uintptr_t)args->op.rw.pin_buf + (args->op.rw.offset & (args->op.rw.blocklen - 1)));
	if (args->op.rw.is_read) {
		spdk_copy_buf_to_iovs(args->iovs, args->iovcnt, buf, args->op.rw.length);
		__rw_done(req, 0);
	} else {
		spdk_copy_iovs_to_buf(buf, args->op.rw.length, args->iovs, args->iovcnt);
		spdk_blob_io_write(args->file->blob, args->op.rw.channel,
				   args->op.rw.pin_buf,
				   args->op.rw.start_lba, args->op.rw.num_lba,
				   __rw_done, req);
	}
}
1741 
1742 static void
1743 __do_blob_read(void *ctx, int fserrno)
1744 {
1745 	struct spdk_fs_request *req = ctx;
1746 	struct spdk_fs_cb_args *args = &req->args;
1747 
1748 	if (fserrno) {
1749 		__rw_done(req, fserrno);
1750 		return;
1751 	}
1752 	spdk_blob_io_read(args->file->blob, args->op.rw.channel,
1753 			  args->op.rw.pin_buf,
1754 			  args->op.rw.start_lba, args->op.rw.num_lba,
1755 			  __read_done, req);
1756 }
1757 
1758 static void
1759 __get_page_parameters(struct spdk_file *file, uint64_t offset, uint64_t length,
1760 		      uint64_t *start_lba, uint32_t *lba_size, uint64_t *num_lba)
1761 {
1762 	uint64_t end_lba;
1763 
1764 	*lba_size = spdk_bs_get_io_unit_size(file->fs->bs);
1765 	*start_lba = offset / *lba_size;
1766 	end_lba = (offset + length - 1) / *lba_size;
1767 	*num_lba = (end_lba - *start_lba + 1);
1768 }
1769 
1770 static bool
1771 __is_lba_aligned(struct spdk_file *file, uint64_t offset, uint64_t length)
1772 {
1773 	uint32_t lba_size = spdk_bs_get_io_unit_size(file->fs->bs);
1774 
1775 	if ((offset % lba_size == 0) && (length % lba_size == 0)) {
1776 		return true;
1777 	}
1778 
1779 	return false;
1780 }
1781 
1782 static void
1783 _fs_request_setup_iovs(struct spdk_fs_request *req, struct iovec *iovs, uint32_t iovcnt)
1784 {
1785 	uint32_t i;
1786 
1787 	for (i = 0; i < iovcnt; i++) {
1788 		req->args.iovs[i].iov_base = iovs[i].iov_base;
1789 		req->args.iovs[i].iov_len = iovs[i].iov_len;
1790 	}
1791 }
1792 
/*
 * Core of the unbuffered read/write path used by the spdk_file_*_async
 * APIs.  Allocates an LBA-aligned pinned bounce buffer covering the
 * requested byte range, then:
 *   - read: reads the aligned region from the blob and copies the requested
 *     slice out to the caller's iovecs (__read_done);
 *   - aligned write: copies the caller's data in and writes directly;
 *   - unaligned write: read-modify-write via __do_blob_read()/__read_done().
 * Writes extending past EOF first grow the file with
 * spdk_file_truncate_async().
 */
static void
__readvwritev(struct spdk_file *file, struct spdk_io_channel *_channel,
	      struct iovec *iovs, uint32_t iovcnt, uint64_t offset, uint64_t length,
	      spdk_file_op_complete cb_fn, void *cb_arg, int is_read)
{
	struct spdk_fs_request *req;
	struct spdk_fs_cb_args *args;
	struct spdk_fs_channel *channel = spdk_io_channel_get_ctx(_channel);
	uint64_t start_lba, num_lba, pin_buf_length;
	uint32_t lba_size;

	/* Reads past EOF are rejected outright. */
	if (is_read && offset + length > file->length) {
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	req = alloc_fs_request_with_iov(channel, iovcnt);
	if (req == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	__get_page_parameters(file, offset, length, &start_lba, &lba_size, &num_lba);

	args = &req->args;
	args->fn.file_op = cb_fn;
	args->arg = cb_arg;
	args->file = file;
	args->op.rw.channel = channel->bs_channel;
	_fs_request_setup_iovs(req, iovs, iovcnt);
	args->op.rw.is_read = is_read;
	args->op.rw.offset = offset;
	args->op.rw.blocklen = lba_size;

	/* The bounce buffer spans the whole LBAs covering the byte range. */
	pin_buf_length = num_lba * lba_size;
	args->op.rw.length = pin_buf_length;
	args->op.rw.pin_buf = spdk_malloc(pin_buf_length, lba_size, NULL,
					  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (args->op.rw.pin_buf == NULL) {
		SPDK_DEBUGLOG(blobfs, "Failed to allocate buf for: file=%s offset=%jx length=%jx\n",
			      file->name, offset, length);
		free_fs_request(req);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	args->op.rw.start_lba = start_lba;
	args->op.rw.num_lba = num_lba;

	if (!is_read && file->length < offset + length) {
		/* Write past EOF: grow the blob first, then do the RMW read. */
		spdk_file_truncate_async(file, offset + length, __do_blob_read, req);
	} else if (!is_read && __is_lba_aligned(file, offset, length)) {
		/* Fully aligned write: no prior read needed. */
		spdk_copy_iovs_to_buf(args->op.rw.pin_buf, args->op.rw.length, args->iovs, args->iovcnt);
		spdk_blob_io_write(args->file->blob, args->op.rw.channel,
				   args->op.rw.pin_buf,
				   args->op.rw.start_lba, args->op.rw.num_lba,
				   __rw_done, req);
	} else {
		/* Unaligned write or any read: fill the bounce buffer first. */
		__do_blob_read(req, 0);
	}
}
1854 
1855 static void
1856 __readwrite(struct spdk_file *file, struct spdk_io_channel *channel,
1857 	    void *payload, uint64_t offset, uint64_t length,
1858 	    spdk_file_op_complete cb_fn, void *cb_arg, int is_read)
1859 {
1860 	struct iovec iov;
1861 
1862 	iov.iov_base = payload;
1863 	iov.iov_len = (size_t)length;
1864 
1865 	__readvwritev(file, channel, &iov, 1, offset, length, cb_fn, cb_arg, is_read);
1866 }
1867 
1868 void
1869 spdk_file_write_async(struct spdk_file *file, struct spdk_io_channel *channel,
1870 		      void *payload, uint64_t offset, uint64_t length,
1871 		      spdk_file_op_complete cb_fn, void *cb_arg)
1872 {
1873 	__readwrite(file, channel, payload, offset, length, cb_fn, cb_arg, 0);
1874 }
1875 
1876 void
1877 spdk_file_writev_async(struct spdk_file *file, struct spdk_io_channel *channel,
1878 		       struct iovec *iovs, uint32_t iovcnt, uint64_t offset, uint64_t length,
1879 		       spdk_file_op_complete cb_fn, void *cb_arg)
1880 {
1881 	SPDK_DEBUGLOG(blobfs, "file=%s offset=%jx length=%jx\n",
1882 		      file->name, offset, length);
1883 
1884 	__readvwritev(file, channel, iovs, iovcnt, offset, length, cb_fn, cb_arg, 0);
1885 }
1886 
1887 void
1888 spdk_file_read_async(struct spdk_file *file, struct spdk_io_channel *channel,
1889 		     void *payload, uint64_t offset, uint64_t length,
1890 		     spdk_file_op_complete cb_fn, void *cb_arg)
1891 {
1892 	SPDK_DEBUGLOG(blobfs, "file=%s offset=%jx length=%jx\n",
1893 		      file->name, offset, length);
1894 
1895 	__readwrite(file, channel, payload, offset, length, cb_fn, cb_arg, 1);
1896 }
1897 
1898 void
1899 spdk_file_readv_async(struct spdk_file *file, struct spdk_io_channel *channel,
1900 		      struct iovec *iovs, uint32_t iovcnt, uint64_t offset, uint64_t length,
1901 		      spdk_file_op_complete cb_fn, void *cb_arg)
1902 {
1903 	SPDK_DEBUGLOG(blobfs, "file=%s offset=%jx length=%jx\n",
1904 		      file->name, offset, length);
1905 
1906 	__readvwritev(file, channel, iovs, iovcnt, offset, length, cb_fn, cb_arg, 1);
1907 }
1908 
1909 struct spdk_io_channel *
1910 spdk_fs_alloc_io_channel(struct spdk_filesystem *fs)
1911 {
1912 	struct spdk_io_channel *io_channel;
1913 	struct spdk_fs_channel *fs_channel;
1914 
1915 	io_channel = spdk_get_io_channel(&fs->io_target);
1916 	fs_channel = spdk_io_channel_get_ctx(io_channel);
1917 	fs_channel->bs_channel = spdk_bs_alloc_io_channel(fs->bs);
1918 	fs_channel->send_request = __send_request_direct;
1919 
1920 	return io_channel;
1921 }
1922 
/* Release an I/O channel obtained from spdk_fs_alloc_io_channel(). */
void
spdk_fs_free_io_channel(struct spdk_io_channel *channel)
{
	spdk_put_io_channel(channel);
}
1928 
/*
 * Allocate a per-thread context for the synchronous (blocking) blobfs API.
 * The embedded channel is created with a depth of 512 requests and marked
 * sync so completions post the channel's semaphore.
 *
 * Returns NULL on allocation or spinlock-init failure.
 */
struct spdk_fs_thread_ctx *
spdk_fs_alloc_thread_ctx(struct spdk_filesystem *fs)
{
	struct spdk_fs_thread_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		return NULL;
	}

	if (pthread_spin_init(&ctx->ch.lock, 0)) {
		free(ctx);
		return NULL;
	}

	/* NOTE(review): the result of fs_channel_create is not checked here;
	 * if it can fail, that failure goes unnoticed - confirm its contract. */
	fs_channel_create(fs, &ctx->ch, 512);

	ctx->ch.send_request = fs->send_request;
	ctx->ch.sync = 1;

	return ctx;
}
1951 
1952 
1953 void
1954 spdk_fs_free_thread_ctx(struct spdk_fs_thread_ctx *ctx)
1955 {
1956 	assert(ctx->ch.sync == 1);
1957 
1958 	while (true) {
1959 		pthread_spin_lock(&ctx->ch.lock);
1960 		if (ctx->ch.outstanding_reqs == 0) {
1961 			pthread_spin_unlock(&ctx->ch.lock);
1962 			break;
1963 		}
1964 		pthread_spin_unlock(&ctx->ch.lock);
1965 		usleep(1000);
1966 	}
1967 
1968 	fs_channel_destroy(NULL, &ctx->ch);
1969 	free(ctx);
1970 }
1971 
1972 int
1973 spdk_fs_set_cache_size(uint64_t size_in_mb)
1974 {
1975 	/* setting g_fs_cache_size is only permitted if cache pool
1976 	 * is already freed or hasn't been initialized
1977 	 */
1978 	if (g_cache_pool != NULL) {
1979 		return -EPERM;
1980 	}
1981 
1982 	g_fs_cache_size = size_in_mb * 1024 * 1024;
1983 
1984 	return 0;
1985 }
1986 
1987 uint64_t
1988 spdk_fs_get_cache_size(void)
1989 {
1990 	return g_fs_cache_size / (1024 * 1024);
1991 }
1992 
1993 static void __file_flush(void *ctx);
1994 
/* Try to free some cache buffers from this file.
 *
 * Runs on the cache-pool thread.  Returns 0 if buffers were freed (the file
 * is re-queued at the tail of g_caches if anything remains cached), or -1
 * if the file lock could not be taken or the file had no cached buffers.
 */
static int
reclaim_cache_buffers(struct spdk_file *file)
{
	int rc;

	BLOBFS_TRACE(file, "free=%s\n", file->name);

	/* The function is safe to be called with any threads, while the file
	 * lock maybe locked by other thread for now, so try to get the file
	 * lock here.
	 */
	rc = pthread_spin_trylock(&file->lock);
	if (rc != 0) {
		return -1;
	}

	/* present_mask == 0 means the file has no cached buffers at all. */
	if (file->tree->present_mask == 0) {
		pthread_spin_unlock(&file->lock);
		return -1;
	}
	tree_free_buffers(file->tree);

	TAILQ_REMOVE(&g_caches, file, cache_tailq);
	/* If not freed, put it in the end of the queue */
	if (file->tree->present_mask != 0) {
		TAILQ_INSERT_TAIL(&g_caches, file, cache_tailq);
	} else {
		file->last = NULL;
	}
	pthread_spin_unlock(&file->lock);

	return 0;
}
2030 
/*
 * Cache-pool poller body: when the global buffer pool is under pressure,
 * walk the cached-file list and free buffers, preferring the cheapest
 * victims first:
 *   1. files not open for writing with SPDK_FILE_PRIORITY_LOW,
 *   2. any file not open for writing,
 *   3. any file whose lock can be taken.
 * Each tier stops as soon as one file's buffers are reclaimed (and returns
 * early if that relieves the pressure).
 */
static int
_blobfs_cache_pool_reclaim(void *arg)
{
	struct spdk_file *file, *tmp;
	int rc;

	if (!blobfs_cache_pool_need_reclaim()) {
		return SPDK_POLLER_IDLE;
	}

	/* Tier 1: low-priority files that are not being written. */
	TAILQ_FOREACH_SAFE(file, &g_caches, cache_tailq, tmp) {
		if (!file->open_for_writing &&
		    file->priority == SPDK_FILE_PRIORITY_LOW) {
			rc = reclaim_cache_buffers(file);
			if (rc < 0) {
				continue;
			}
			if (!blobfs_cache_pool_need_reclaim()) {
				return SPDK_POLLER_BUSY;
			}
			break;
		}
	}

	/* Tier 2: any file that is not being written. */
	TAILQ_FOREACH_SAFE(file, &g_caches, cache_tailq, tmp) {
		if (!file->open_for_writing) {
			rc = reclaim_cache_buffers(file);
			if (rc < 0) {
				continue;
			}
			if (!blobfs_cache_pool_need_reclaim()) {
				return SPDK_POLLER_BUSY;
			}
			break;
		}
	}

	/* Tier 3: reclaim from the first file we manage to lock. */
	TAILQ_FOREACH_SAFE(file, &g_caches, cache_tailq, tmp) {
		rc = reclaim_cache_buffers(file);
		if (rc < 0) {
			continue;
		}
		break;
	}

	return SPDK_POLLER_BUSY;
}
2078 
2079 static void
2080 _add_file_to_cache_pool(void *ctx)
2081 {
2082 	struct spdk_file *file = ctx;
2083 
2084 	TAILQ_INSERT_TAIL(&g_caches, file, cache_tailq);
2085 }
2086 
2087 static void
2088 _remove_file_from_cache_pool(void *ctx)
2089 {
2090 	struct spdk_file *file = ctx;
2091 
2092 	TAILQ_REMOVE(&g_caches, file, cache_tailq);
2093 }
2094 
/*
 * Allocate a cache buffer backed by the global mempool and insert it into
 * the file's buffer tree at the given offset.
 *
 * If the pool is empty, retries for up to ~100 poll periods to give the
 * cache-pool reclaim poller a chance to free buffers.  Returns the new
 * buffer, or NULL on allocation failure.
 */
static struct cache_buffer *
cache_insert_buffer(struct spdk_file *file, uint64_t offset)
{
	struct cache_buffer *buf;
	int count = 0;
	bool need_update = false;

	buf = calloc(1, sizeof(*buf));
	if (buf == NULL) {
		SPDK_DEBUGLOG(blobfs, "calloc failed\n");
		return NULL;
	}

	do {
		buf->buf = spdk_mempool_get(g_cache_pool);
		if (buf->buf) {
			break;
		}
		/* Pool exhausted: wait for the reclaim poller, bounded retries. */
		if (count++ == 100) {
			SPDK_ERRLOG("Could not allocate cache buffer for file=%p on offset=%jx\n",
				    file, offset);
			free(buf);
			return NULL;
		}
		usleep(BLOBFS_CACHE_POOL_POLL_PERIOD_IN_US);
	} while (true);

	buf->buf_size = CACHE_BUFFER_SIZE;
	buf->offset = offset;

	/* First cached buffer for this file: it must join the global reclaim
	 * list, which is only manipulated on the cache-pool thread. */
	if (file->tree->present_mask == 0) {
		need_update = true;
	}
	file->tree = tree_insert_buffer(file->tree, buf);

	if (need_update) {
		spdk_thread_send_msg(g_cache_pool_thread, _add_file_to_cache_pool, file);
	}

	return buf;
}
2136 
2137 static struct cache_buffer *
2138 cache_append_buffer(struct spdk_file *file)
2139 {
2140 	struct cache_buffer *last;
2141 
2142 	assert(file->last == NULL || file->last->bytes_filled == file->last->buf_size);
2143 	assert((file->append_pos % CACHE_BUFFER_SIZE) == 0);
2144 
2145 	last = cache_insert_buffer(file, file->append_pos);
2146 	if (last == NULL) {
2147 		SPDK_DEBUGLOG(blobfs, "cache_insert_buffer failed\n");
2148 		return NULL;
2149 	}
2150 
2151 	file->last = last;
2152 
2153 	return last;
2154 }
2155 
2156 static void __check_sync_reqs(struct spdk_file *file);
2157 
/*
 * Metadata sync for a queued sync request completed: record the persisted
 * length, remove the request from the file's sync queue, notify its
 * submitter, and check whether another queued sync can now be serviced.
 */
static void
__file_cache_finish_sync(void *ctx, int bserrno)
{
	struct spdk_file *file;
	struct spdk_fs_request *sync_req = ctx;
	struct spdk_fs_cb_args *sync_args;

	sync_args = &sync_req->args;
	file = sync_args->file;
	pthread_spin_lock(&file->lock);
	/* The "length" xattr now reflects the length captured at sync time. */
	file->length_xattr = sync_args->op.sync.length;
	assert(sync_args->op.sync.offset <= file->length_flushed);
	spdk_trace_record(TRACE_BLOBFS_XATTR_END, 0, sync_args->op.sync.offset,
			  0, file->name);
	BLOBFS_TRACE(file, "sync done offset=%jx\n", sync_args->op.sync.offset);
	TAILQ_REMOVE(&file->sync_requests, sync_req, args.op.sync.tailq);
	pthread_spin_unlock(&file->lock);

	/* Invoke the submitter's callback outside the file lock. */
	sync_args->fn.file_op(sync_args->arg, bserrno);

	free_fs_request(sync_req);
	__check_sync_reqs(file);
}
2181 
/*
 * Scan the file's pending sync requests and start the metadata update for
 * the first one whose target offset has been fully flushed to the blob.
 * xattr_in_progress guards against re-issuing while a sync_md operation is
 * already in flight for that request.
 */
static void
__check_sync_reqs(struct spdk_file *file)
{
	struct spdk_fs_request *sync_req;

	pthread_spin_lock(&file->lock);

	/* sync_req is NULL after the loop if no request is satisfiable yet. */
	TAILQ_FOREACH(sync_req, &file->sync_requests, args.op.sync.tailq) {
		if (sync_req->args.op.sync.offset <= file->length_flushed) {
			break;
		}
	}

	if (sync_req != NULL && !sync_req->args.op.sync.xattr_in_progress) {
		BLOBFS_TRACE(file, "set xattr length 0x%jx\n", file->length_flushed);
		sync_req->args.op.sync.xattr_in_progress = true;
		sync_req->args.op.sync.length = file->length_flushed;
		spdk_blob_set_xattr(file->blob, "length", &file->length_flushed,
				    sizeof(file->length_flushed));

		/* Drop the lock before issuing the async metadata sync. */
		pthread_spin_unlock(&file->lock);
		spdk_trace_record(TRACE_BLOBFS_XATTR_START, 0, file->length_flushed,
				  0, file->name);
		spdk_blob_sync_md(file->blob, __file_cache_finish_sync, sync_req);
	} else {
		pthread_spin_unlock(&file->lock);
	}
}
2210 
/*
 * A flush write to the blob completed: account the flushed bytes against
 * the cache buffer and the file, try to satisfy pending sync requests, and
 * continue flushing by re-entering __file_flush() with the same request.
 *
 * NOTE(review): bserrno is not inspected, so a failed flush write is
 * accounted as if it succeeded - confirm this is intentional.
 */
static void
__file_flush_done(void *ctx, int bserrno)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_file *file = args->file;
	struct cache_buffer *next = args->op.flush.cache_buffer;

	BLOBFS_TRACE(file, "length=%jx\n", args->op.flush.length);

	pthread_spin_lock(&file->lock);
	next->in_progress = false;
	next->bytes_flushed += args->op.flush.length;
	file->length_flushed += args->op.flush.length;
	if (file->length_flushed > file->length) {
		file->length = file->length_flushed;
	}
	if (next->bytes_flushed == next->buf_size) {
		/* Buffer fully flushed; advance to the one at the new flush point. */
		BLOBFS_TRACE(file, "write buffer fully flushed 0x%jx\n", file->length_flushed);
		next = tree_find_buffer(file->tree, file->length_flushed);
	}

	/*
	 * Assert that there is no cached data that extends past the end of the underlying
	 *  blob.
	 */
	assert(next == NULL || next->offset < __file_get_blob_size(file) ||
	       next->bytes_filled == 0);

	pthread_spin_unlock(&file->lock);

	__check_sync_reqs(file);

	__file_flush(req);
}
2246 
/*
 * Flush state machine: find the cache buffer containing the next
 * unflushed offset and, if it has data worth writing (full, or partially
 * filled with a sync pending), issue an async blobstore write.
 * Completion re-enters via __file_flush_done().  req is freed on every
 * path that does not submit an I/O.
 */
static void
__file_flush(void *ctx)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_file *file = args->file;
	struct cache_buffer *next;
	uint64_t offset, length, start_lba, num_lba;
	uint32_t lba_size;

	pthread_spin_lock(&file->lock);
	next = tree_find_buffer(file->tree, file->length_flushed);
	if (next == NULL || next->in_progress ||
	    ((next->bytes_filled < next->buf_size) && TAILQ_EMPTY(&file->sync_requests))) {
		/*
		 * There is either no data to flush, a flush I/O is already in
		 *  progress, or the next buffer is partially filled but there's no
		 *  outstanding request to sync it.
		 * So return immediately - if a flush I/O is in progress we will flush
		 *  more data after that is completed, or a partial buffer will get flushed
		 *  when it is either filled or the file is synced.
		 */
		free_fs_request(req);
		if (next == NULL) {
			/*
			 * For cases where a file's cache was evicted, and then the
			 *  file was later appended, we will write the data directly
			 *  to disk and bypass cache.  So just update length_flushed
			 *  here to reflect that all data was already written to disk.
			 */
			file->length_flushed = file->append_pos;
		}
		pthread_spin_unlock(&file->lock);
		if (next == NULL) {
			/*
			 * There is no data to flush, but we still need to check for any
			 *  outstanding sync requests to make sure metadata gets updated.
			 */
			__check_sync_reqs(file);
		}
		return;
	}

	offset = next->offset + next->bytes_flushed;
	length = next->bytes_filled - next->bytes_flushed;
	if (length == 0) {
		free_fs_request(req);
		pthread_spin_unlock(&file->lock);
		/*
		 * There is no data to flush, but we still need to check for any
		 *  outstanding sync requests to make sure metadata gets updated.
		 */
		__check_sync_reqs(file);
		return;
	}
	args->op.flush.length = length;
	args->op.flush.cache_buffer = next;

	/* Round the byte range out to whole LBAs for the blobstore write. */
	__get_page_parameters(file, offset, length, &start_lba, &lba_size, &num_lba);

	next->in_progress = true;
	BLOBFS_TRACE(file, "offset=0x%jx length=0x%jx page start=0x%jx num=0x%jx\n",
		     offset, length, start_lba, num_lba);
	pthread_spin_unlock(&file->lock);
	spdk_blob_io_write(file->blob, file->fs->sync_target.sync_fs_channel->bs_channel,
			   next->buf + (start_lba * lba_size) - next->offset,
			   start_lba, num_lba, __file_flush_done, req);
}
2315 
/*
 * Completion of the metadata sync issued by __file_extend_resize_cb().
 * Wakes the caller blocked on the channel semaphore with the final status.
 */
static void
__file_extend_done(void *arg, int bserrno)
{
	struct spdk_fs_cb_args *cb_args = arg;

	__wake_caller(cb_args, bserrno);
}
2323 
2324 static void
2325 __file_extend_resize_cb(void *_args, int bserrno)
2326 {
2327 	struct spdk_fs_cb_args *args = _args;
2328 	struct spdk_file *file = args->file;
2329 
2330 	if (bserrno) {
2331 		__wake_caller(args, bserrno);
2332 		return;
2333 	}
2334 
2335 	spdk_blob_sync_md(file->blob, __file_extend_done, args);
2336 }
2337 
2338 static void
2339 __file_extend_blob(void *_args)
2340 {
2341 	struct spdk_fs_cb_args *args = _args;
2342 	struct spdk_file *file = args->file;
2343 
2344 	spdk_blob_resize(file->blob, args->op.resize.num_clusters, __file_extend_resize_cb, args);
2345 }
2346 
2347 static void
2348 __rw_from_file_done(void *ctx, int bserrno)
2349 {
2350 	struct spdk_fs_request *req = ctx;
2351 
2352 	__wake_caller(&req->args, bserrno);
2353 	free_fs_request(req);
2354 }
2355 
2356 static void
2357 __rw_from_file(void *ctx)
2358 {
2359 	struct spdk_fs_request *req = ctx;
2360 	struct spdk_fs_cb_args *args = &req->args;
2361 	struct spdk_file *file = args->file;
2362 
2363 	if (args->op.rw.is_read) {
2364 		spdk_file_read_async(file, file->fs->sync_target.sync_io_channel, args->iovs[0].iov_base,
2365 				     args->op.rw.offset, (uint64_t)args->iovs[0].iov_len,
2366 				     __rw_from_file_done, req);
2367 	} else {
2368 		spdk_file_write_async(file, file->fs->sync_target.sync_io_channel, args->iovs[0].iov_base,
2369 				      args->op.rw.offset, (uint64_t)args->iovs[0].iov_len,
2370 				      __rw_from_file_done, req);
2371 	}
2372 }
2373 
/* Context shared between a synchronous read/write caller and the
 * sub-requests it submits via __send_rw_from_file(). */
struct rw_from_file_arg {
	struct spdk_fs_channel *channel;	/* channel whose semaphore the caller waits on */
	int rwerrno;				/* status reported by the sub-request(s) */
};
2378 
2379 static int
2380 __send_rw_from_file(struct spdk_file *file, void *payload,
2381 		    uint64_t offset, uint64_t length, bool is_read,
2382 		    struct rw_from_file_arg *arg)
2383 {
2384 	struct spdk_fs_request *req;
2385 	struct spdk_fs_cb_args *args;
2386 
2387 	req = alloc_fs_request_with_iov(arg->channel, 1);
2388 	if (req == NULL) {
2389 		sem_post(&arg->channel->sem);
2390 		return -ENOMEM;
2391 	}
2392 
2393 	args = &req->args;
2394 	args->file = file;
2395 	args->sem = &arg->channel->sem;
2396 	args->iovs[0].iov_base = payload;
2397 	args->iovs[0].iov_len = (size_t)length;
2398 	args->op.rw.offset = offset;
2399 	args->op.rw.is_read = is_read;
2400 	args->rwerrno = &arg->rwerrno;
2401 	file->fs->send_request(__rw_from_file, req);
2402 	return 0;
2403 }
2404 
/*
 * Synchronous, append-only write.  offset must equal the file's current
 * append position (-EINVAL otherwise).  Data is copied into cache
 * buffers; once at least one buffer fills completely, an async flush is
 * kicked off.  If the file has no active cache buffer (e.g. its cache
 * was evicted), the data bypasses cache and is written through the fs
 * thread.  The blob is resized first when the write extends past its
 * current size.
 *
 * Returns 0 on success or a negative errno.
 */
int
spdk_file_write(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx,
		void *payload, uint64_t offset, uint64_t length)
{
	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
	struct spdk_fs_request *flush_req;
	uint64_t rem_length, copy, blob_size, cluster_sz;
	uint32_t cache_buffers_filled = 0;
	uint8_t *cur_payload;
	struct cache_buffer *last;

	BLOBFS_TRACE_RW(file, "offset=%jx length=%jx\n", offset, length);

	if (length == 0) {
		return 0;
	}

	/* Only appends are supported. */
	if (offset != file->append_pos) {
		BLOBFS_TRACE(file, " error offset=%jx append_pos=%jx\n", offset, file->append_pos);
		return -EINVAL;
	}

	pthread_spin_lock(&file->lock);
	file->open_for_writing = true;

	/* Start a fresh cache buffer if we are exactly at a buffer boundary. */
	if ((file->last == NULL) && (file->append_pos % CACHE_BUFFER_SIZE == 0)) {
		cache_append_buffer(file);
	}

	if (file->last == NULL) {
		/* No cache buffer available - write through the fs thread and
		 * wait for completion on the channel semaphore. */
		struct rw_from_file_arg arg = {};
		int rc;

		arg.channel = channel;
		arg.rwerrno = 0;
		file->append_pos += length;
		pthread_spin_unlock(&file->lock);
		rc = __send_rw_from_file(file, payload, offset, length, false, &arg);
		if (rc != 0) {
			return rc;
		}
		sem_wait(&channel->sem);
		return arg.rwerrno;
	}

	blob_size = __file_get_blob_size(file);

	if ((offset + length) > blob_size) {
		/* Grow the blob (and sync its metadata) on the fs thread,
		 * blocking this caller until the resize completes. */
		struct spdk_fs_cb_args extend_args = {};

		cluster_sz = file->fs->bs_opts.cluster_sz;
		extend_args.sem = &channel->sem;
		extend_args.op.resize.num_clusters = __bytes_to_clusters((offset + length), cluster_sz);
		extend_args.file = file;
		BLOBFS_TRACE(file, "start resize to %u clusters\n", extend_args.op.resize.num_clusters);
		pthread_spin_unlock(&file->lock);
		file->fs->send_request(__file_extend_blob, &extend_args);
		sem_wait(&channel->sem);
		if (extend_args.rc) {
			return extend_args.rc;
		}
		pthread_spin_lock(&file->lock);
	}

	/* Allocate the flush request up front so the copy loop cannot fail
	 * after data has been partially buffered for flushing. */
	flush_req = alloc_fs_request(channel);
	if (flush_req == NULL) {
		pthread_spin_unlock(&file->lock);
		return -ENOMEM;
	}

	/* Copy payload into cache buffers, appending new buffers as each
	 * one fills. */
	last = file->last;
	rem_length = length;
	cur_payload = payload;
	while (rem_length > 0) {
		copy = last->buf_size - last->bytes_filled;
		if (copy > rem_length) {
			copy = rem_length;
		}
		BLOBFS_TRACE_RW(file, "  fill offset=%jx length=%jx\n", file->append_pos, copy);
		memcpy(&last->buf[last->bytes_filled], cur_payload, copy);
		file->append_pos += copy;
		if (file->length < file->append_pos) {
			file->length = file->append_pos;
		}
		cur_payload += copy;
		last->bytes_filled += copy;
		rem_length -= copy;
		if (last->bytes_filled == last->buf_size) {
			cache_buffers_filled++;
			last = cache_append_buffer(file);
			if (last == NULL) {
				BLOBFS_TRACE(file, "nomem\n");
				free_fs_request(flush_req);
				pthread_spin_unlock(&file->lock);
				return -ENOMEM;
			}
		}
	}

	pthread_spin_unlock(&file->lock);

	/* Nothing filled completely - data stays cached until a later write
	 * or sync flushes it. */
	if (cache_buffers_filled == 0) {
		free_fs_request(flush_req);
		return 0;
	}

	flush_req->args.file = file;
	file->fs->send_request(__file_flush, flush_req);
	return 0;
}
2515 
2516 static void
2517 __readahead_done(void *ctx, int bserrno)
2518 {
2519 	struct spdk_fs_request *req = ctx;
2520 	struct spdk_fs_cb_args *args = &req->args;
2521 	struct cache_buffer *cache_buffer = args->op.readahead.cache_buffer;
2522 	struct spdk_file *file = args->file;
2523 
2524 	BLOBFS_TRACE(file, "offset=%jx\n", cache_buffer->offset);
2525 
2526 	pthread_spin_lock(&file->lock);
2527 	cache_buffer->bytes_filled = args->op.readahead.length;
2528 	cache_buffer->bytes_flushed = args->op.readahead.length;
2529 	cache_buffer->in_progress = false;
2530 	pthread_spin_unlock(&file->lock);
2531 
2532 	free_fs_request(req);
2533 }
2534 
2535 static void
2536 __readahead(void *ctx)
2537 {
2538 	struct spdk_fs_request *req = ctx;
2539 	struct spdk_fs_cb_args *args = &req->args;
2540 	struct spdk_file *file = args->file;
2541 	uint64_t offset, length, start_lba, num_lba;
2542 	uint32_t lba_size;
2543 
2544 	offset = args->op.readahead.offset;
2545 	length = args->op.readahead.length;
2546 	assert(length > 0);
2547 
2548 	__get_page_parameters(file, offset, length, &start_lba, &lba_size, &num_lba);
2549 
2550 	BLOBFS_TRACE(file, "offset=%jx length=%jx page start=%jx num=%jx\n",
2551 		     offset, length, start_lba, num_lba);
2552 	spdk_blob_io_read(file->blob, file->fs->sync_target.sync_fs_channel->bs_channel,
2553 			  args->op.readahead.cache_buffer->buf,
2554 			  start_lba, num_lba, __readahead_done, req);
2555 }
2556 
2557 static uint64_t
2558 __next_cache_buffer_offset(uint64_t offset)
2559 {
2560 	return (offset + CACHE_BUFFER_SIZE) & ~(CACHE_TREE_LEVEL_MASK(0));
2561 }
2562 
2563 static void
2564 check_readahead(struct spdk_file *file, uint64_t offset,
2565 		struct spdk_fs_channel *channel)
2566 {
2567 	struct spdk_fs_request *req;
2568 	struct spdk_fs_cb_args *args;
2569 
2570 	offset = __next_cache_buffer_offset(offset);
2571 	if (tree_find_buffer(file->tree, offset) != NULL || file->length <= offset) {
2572 		return;
2573 	}
2574 
2575 	req = alloc_fs_request(channel);
2576 	if (req == NULL) {
2577 		return;
2578 	}
2579 	args = &req->args;
2580 
2581 	BLOBFS_TRACE(file, "offset=%jx\n", offset);
2582 
2583 	args->file = file;
2584 	args->op.readahead.offset = offset;
2585 	args->op.readahead.cache_buffer = cache_insert_buffer(file, offset);
2586 	if (!args->op.readahead.cache_buffer) {
2587 		BLOBFS_TRACE(file, "Cannot allocate buf for offset=%jx\n", offset);
2588 		free_fs_request(req);
2589 		return;
2590 	}
2591 
2592 	args->op.readahead.cache_buffer->in_progress = true;
2593 	if (file->length < (offset + CACHE_BUFFER_SIZE)) {
2594 		args->op.readahead.length = file->length & (CACHE_BUFFER_SIZE - 1);
2595 	} else {
2596 		args->op.readahead.length = CACHE_BUFFER_SIZE;
2597 	}
2598 	file->fs->send_request(__readahead, req);
2599 }
2600 
/*
 * Synchronous read.  Data is served from cache buffers when present;
 * cache misses are forwarded to the fs thread via __send_rw_from_file()
 * and awaited on the channel semaphore.  Sequential access also triggers
 * readahead of the next cache buffers.
 *
 * Returns the number of bytes read, or a negative errno.
 */
int64_t
spdk_file_read(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx,
	       void *payload, uint64_t offset, uint64_t length)
{
	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
	uint64_t final_offset, final_length;
	uint32_t sub_reads = 0;
	struct cache_buffer *buf;
	uint64_t read_len;
	struct rw_from_file_arg arg = {};

	pthread_spin_lock(&file->lock);

	BLOBFS_TRACE_RW(file, "offset=%ju length=%ju\n", offset, length);

	file->open_for_writing = false;

	if (length == 0 || offset >= file->append_pos) {
		pthread_spin_unlock(&file->lock);
		return 0;
	}

	/* Clamp the read to the valid (appended) portion of the file. */
	if (offset + length > file->append_pos) {
		length = file->append_pos - offset;
	}

	/* Track sequential access; kick readahead once enough consecutive
	 * bytes have been read. */
	if (offset != file->next_seq_offset) {
		file->seq_byte_count = 0;
	}
	file->seq_byte_count += length;
	file->next_seq_offset = offset + length;
	if (file->seq_byte_count >= CACHE_READAHEAD_THRESHOLD) {
		check_readahead(file, offset, channel);
		check_readahead(file, offset + CACHE_BUFFER_SIZE, channel);
	}

	arg.channel = channel;
	arg.rwerrno = 0;
	final_length = 0;
	final_offset = offset + length;
	/* Walk the range one cache buffer at a time. */
	while (offset < final_offset) {
		int ret = 0;
		length = NEXT_CACHE_BUFFER_OFFSET(offset) - offset;
		if (length > (final_offset - offset)) {
			length = final_offset - offset;
		}

		buf = tree_find_filled_buffer(file->tree, offset);
		if (buf == NULL) {
			/* Cache miss: submit a read through the fs thread.  The
			 * lock is dropped around the submission. */
			pthread_spin_unlock(&file->lock);
			ret = __send_rw_from_file(file, payload, offset, length, true, &arg);
			pthread_spin_lock(&file->lock);
			if (ret == 0) {
				sub_reads++;
			}
		} else {
			read_len = length;
			if ((offset + length) > (buf->offset + buf->bytes_filled)) {
				read_len = buf->offset + buf->bytes_filled - offset;
			}
			BLOBFS_TRACE(file, "read %p offset=%ju length=%ju\n", payload, offset, read_len);
			memcpy(payload, &buf->buf[offset - buf->offset], read_len);
			/* Once a buffer has been consumed up to its boundary, drop
			 * it from the cache; release the file from the cache pool
			 * when its tree becomes empty. */
			if ((offset + read_len) % CACHE_BUFFER_SIZE == 0) {
				tree_remove_buffer(file->tree, buf);
				if (file->tree->present_mask == 0) {
					spdk_thread_send_msg(g_cache_pool_thread, _remove_file_from_cache_pool, file);
				}
			}
		}

		if (ret == 0) {
			final_length += length;
		} else {
			arg.rwerrno = ret;
			break;
		}
		payload += length;
		offset += length;
	}
	pthread_spin_unlock(&file->lock);
	/* Wait for every sub-read that was successfully submitted. */
	while (sub_reads > 0) {
		sem_wait(&channel->sem);
		sub_reads--;
	}
	if (arg.rwerrno == 0) {
		return final_length;
	} else {
		return arg.rwerrno;
	}
}
2691 
/*
 * Common sync path for spdk_file_sync()/spdk_file_sync_async().  Queue a
 * sync request recording the current append position and kick a flush;
 * cb_fn fires once the flushed length has been persisted (see
 * __check_sync_reqs), or immediately if the file is already synced or a
 * request cannot be allocated.
 */
static void
_file_sync(struct spdk_file *file, struct spdk_fs_channel *channel,
	   spdk_file_op_complete cb_fn, void *cb_arg)
{
	struct spdk_fs_request *sync_req;
	struct spdk_fs_request *flush_req;
	struct spdk_fs_cb_args *sync_args;
	struct spdk_fs_cb_args *flush_args;

	BLOBFS_TRACE(file, "offset=%jx\n", file->append_pos);

	pthread_spin_lock(&file->lock);
	/* Everything up to append_pos is already recorded in the xattr. */
	if (file->append_pos <= file->length_xattr) {
		BLOBFS_TRACE(file, "done - file already synced\n");
		pthread_spin_unlock(&file->lock);
		cb_fn(cb_arg, 0);
		return;
	}

	sync_req = alloc_fs_request(channel);
	if (!sync_req) {
		SPDK_ERRLOG("Cannot allocate sync req for file=%s\n", file->name);
		pthread_spin_unlock(&file->lock);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	sync_args = &sync_req->args;

	flush_req = alloc_fs_request(channel);
	if (!flush_req) {
		SPDK_ERRLOG("Cannot allocate flush req for file=%s\n", file->name);
		free_fs_request(sync_req);
		pthread_spin_unlock(&file->lock);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	flush_args = &flush_req->args;

	/* The sync request waits on file->sync_requests until flush progress
	 * reaches its recorded offset. */
	sync_args->file = file;
	sync_args->fn.file_op = cb_fn;
	sync_args->arg = cb_arg;
	sync_args->op.sync.offset = file->append_pos;
	sync_args->op.sync.xattr_in_progress = false;
	TAILQ_INSERT_TAIL(&file->sync_requests, sync_req, args.op.sync.tailq);
	pthread_spin_unlock(&file->lock);

	/* Drive the flush state machine on the fs thread. */
	flush_args->file = file;
	channel->send_request(__file_flush, flush_req);
}
2741 
2742 int
2743 spdk_file_sync(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx)
2744 {
2745 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
2746 	struct spdk_fs_cb_args args = {};
2747 
2748 	args.sem = &channel->sem;
2749 	_file_sync(file, channel, __wake_caller, &args);
2750 	sem_wait(&channel->sem);
2751 
2752 	return args.rc;
2753 }
2754 
2755 void
2756 spdk_file_sync_async(struct spdk_file *file, struct spdk_io_channel *_channel,
2757 		     spdk_file_op_complete cb_fn, void *cb_arg)
2758 {
2759 	struct spdk_fs_channel *channel = spdk_io_channel_get_ctx(_channel);
2760 
2761 	_file_sync(file, channel, cb_fn, cb_arg);
2762 }
2763 
2764 void
2765 spdk_file_set_priority(struct spdk_file *file, uint32_t priority)
2766 {
2767 	BLOBFS_TRACE(file, "priority=%u\n", priority);
2768 	file->priority = priority;
2769 
2770 }
2771 
2772 /*
2773  * Close routines
2774  */
2775 
/*
 * Final step of an async close.  If the file was unlinked while still
 * open, delete the blob now and let blob_delete_cb (defined elsewhere in
 * this file) complete the request; otherwise invoke the user callback.
 */
static void
__file_close_async_done(void *ctx, int bserrno)
{
	struct spdk_fs_request *req = ctx;
	struct spdk_fs_cb_args *args = &req->args;
	struct spdk_file *file = args->file;

	spdk_trace_record(TRACE_BLOBFS_CLOSE, 0, 0, 0, file->name);

	if (file->is_deleted) {
		/* Completion (and freeing req) is deferred to blob_delete_cb. */
		spdk_fs_delete_file_async(file->fs, file->name, blob_delete_cb, ctx);
		return;
	}

	args->fn.file_op(args->arg, bserrno);
	free_fs_request(req);
}
2793 
/*
 * Drop one reference to the file.  Closing an unopened file fails with
 * -EBADF; a non-final reference completes immediately; only the final
 * reference actually closes the blob, completing via
 * __file_close_async_done().
 */
static void
__file_close_async(struct spdk_file *file, struct spdk_fs_request *req)
{
	struct spdk_blob *blob;

	pthread_spin_lock(&file->lock);
	if (file->ref_count == 0) {
		pthread_spin_unlock(&file->lock);
		__file_close_async_done(req, -EBADF);
		return;
	}

	file->ref_count--;
	if (file->ref_count > 0) {
		pthread_spin_unlock(&file->lock);
		req->args.fn.file_op(req->args.arg, 0);
		free_fs_request(req);
		return;
	}

	pthread_spin_unlock(&file->lock);

	/* Last reference: detach the blob from the file before closing it. */
	blob = file->blob;
	file->blob = NULL;
	spdk_blob_close(blob, __file_close_async_done, req);
}
2820 
2821 static void
2822 __file_close_async__sync_done(void *arg, int fserrno)
2823 {
2824 	struct spdk_fs_request *req = arg;
2825 	struct spdk_fs_cb_args *args = &req->args;
2826 
2827 	__file_close_async(args->file, req);
2828 }
2829 
2830 void
2831 spdk_file_close_async(struct spdk_file *file, spdk_file_op_complete cb_fn, void *cb_arg)
2832 {
2833 	struct spdk_fs_request *req;
2834 	struct spdk_fs_cb_args *args;
2835 
2836 	req = alloc_fs_request(file->fs->md_target.md_fs_channel);
2837 	if (req == NULL) {
2838 		SPDK_ERRLOG("Cannot allocate close async req for file=%s\n", file->name);
2839 		cb_fn(cb_arg, -ENOMEM);
2840 		return;
2841 	}
2842 
2843 	args = &req->args;
2844 	args->file = file;
2845 	args->fn.file_op = cb_fn;
2846 	args->arg = cb_arg;
2847 
2848 	spdk_file_sync_async(file, file->fs->md_target.md_io_channel, __file_close_async__sync_done, req);
2849 }
2850 
2851 static void
2852 __file_close(void *arg)
2853 {
2854 	struct spdk_fs_request *req = arg;
2855 	struct spdk_fs_cb_args *args = &req->args;
2856 	struct spdk_file *file = args->file;
2857 
2858 	__file_close_async(file, req);
2859 }
2860 
/*
 * Synchronous close: sync the file, then send the close to the fs thread
 * and block on the channel semaphore until __wake_caller() completes it.
 *
 * NOTE(review): the return value of spdk_file_sync() is discarded, so a
 * failed pre-close sync is not reflected in the value returned here -
 * confirm whether that is intentional.
 *
 * Returns 0 on success or a negative errno.
 */
int
spdk_file_close(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx)
{
	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
	struct spdk_fs_request *req;
	struct spdk_fs_cb_args *args;

	req = alloc_fs_request(channel);
	if (req == NULL) {
		SPDK_ERRLOG("Cannot allocate close req for file=%s\n", file->name);
		return -ENOMEM;
	}

	args = &req->args;

	spdk_file_sync(file, ctx);
	BLOBFS_TRACE(file, "name=%s\n", file->name);
	args->file = file;
	args->sem = &channel->sem;
	/* __wake_caller stores the status in args->rc and posts the sem. */
	args->fn.file_op = __wake_caller;
	args->arg = args;
	channel->send_request(__file_close, req);
	sem_wait(&channel->sem);

	return args->rc;
}
2887 
2888 int
2889 spdk_file_get_id(struct spdk_file *file, void *id, size_t size)
2890 {
2891 	if (size < sizeof(spdk_blob_id)) {
2892 		return -EINVAL;
2893 	}
2894 
2895 	memcpy(id, &file->blobid, sizeof(spdk_blob_id));
2896 
2897 	return sizeof(spdk_blob_id);
2898 }
2899 
/*
 * Final teardown of a file that still had cache state; runs on the cache
 * pool thread (sent from file_free), which owns the g_caches list.
 */
static void
_file_free(void *ctx)
{
	struct spdk_file *file = ctx;

	TAILQ_REMOVE(&g_caches, file, cache_tailq);

	free(file->name);
	free(file->tree);
	free(file);
}
2911 
2912 static void
2913 file_free(struct spdk_file *file)
2914 {
2915 	BLOBFS_TRACE(file, "free=%s\n", file->name);
2916 	pthread_spin_lock(&file->lock);
2917 	if (file->tree->present_mask == 0) {
2918 		pthread_spin_unlock(&file->lock);
2919 		free(file->name);
2920 		free(file->tree);
2921 		free(file);
2922 		return;
2923 	}
2924 
2925 	tree_free_buffers(file->tree);
2926 	assert(file->tree->present_mask == 0);
2927 	spdk_thread_send_msg(g_cache_pool_thread, _file_free, file);
2928 	pthread_spin_unlock(&file->lock);
2929 }
2930 
/* Register the debug-log flags used by BLOBFS_TRACE / BLOBFS_TRACE_RW. */
SPDK_LOG_REGISTER_COMPONENT(blobfs)
SPDK_LOG_REGISTER_COMPONENT(blobfs_rw)
2933