xref: /spdk/lib/blobfs/blobfs.c (revision 619da10386160b74a84b2b653fc6e1d8a0365d94)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/blobfs.h"
9 #include "tree.h"
10 
11 #include "spdk/queue.h"
12 #include "spdk/thread.h"
13 #include "spdk/assert.h"
14 #include "spdk/env.h"
15 #include "spdk/util.h"
16 #include "spdk/log.h"
17 #include "spdk/trace.h"
18 
19 #include "spdk_internal/trace_defs.h"
20 
21 #define BLOBFS_TRACE(file, str, args...) \
22 	SPDK_DEBUGLOG(blobfs, "file=%s " str, file->name, ##args)
23 
24 #define BLOBFS_TRACE_RW(file, str, args...) \
25 	SPDK_DEBUGLOG(blobfs_rw, "file=%s " str, file->name, ##args)
26 
27 #define BLOBFS_DEFAULT_CACHE_SIZE (4ULL * 1024 * 1024 * 1024)
28 #define SPDK_BLOBFS_DEFAULT_OPTS_CLUSTER_SZ (1024 * 1024)
29 
30 #define SPDK_BLOBFS_SIGNATURE	"BLOBFS"
31 
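/*
 * All blobfs instances in a process share a single buffer-cache mempool.  It is created
 * lazily when the first filesystem comes up, managed from a dedicated "cache_pool_mgmt"
 * SPDK thread, and torn down when the last filesystem is unloaded (see
 * initialize_global_cache() and free_global_cache() below).
 */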
32 static uint64_t g_fs_cache_size = BLOBFS_DEFAULT_CACHE_SIZE;
33 static struct spdk_mempool *g_cache_pool;
34 static TAILQ_HEAD(, spdk_file) g_caches = TAILQ_HEAD_INITIALIZER(g_caches);
35 static struct spdk_poller *g_cache_pool_mgmt_poller;
36 static struct spdk_thread *g_cache_pool_thread;
37 #define BLOBFS_CACHE_POOL_POLL_PERIOD_IN_US 1000ULL
38 static int g_fs_count = 0;
39 static pthread_mutex_t g_cache_init_lock = PTHREAD_MUTEX_INITIALIZER;
40 
41 SPDK_TRACE_REGISTER_FN(blobfs_trace, "blobfs", TRACE_GROUP_BLOBFS)
42 {
43 	struct spdk_trace_tpoint_opts opts[] = {
44 		{
45 			"BLOBFS_XATTR_START", TRACE_BLOBFS_XATTR_START,
46 			OWNER_NONE, OBJECT_NONE, 0,
47 			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
48 		},
49 		{
50 			"BLOBFS_XATTR_END", TRACE_BLOBFS_XATTR_END,
51 			OWNER_NONE, OBJECT_NONE, 0,
52 			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
53 		},
54 		{
55 			"BLOBFS_OPEN", TRACE_BLOBFS_OPEN,
56 			OWNER_NONE, OBJECT_NONE, 0,
57 			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
58 		},
59 		{
60 			"BLOBFS_CLOSE", TRACE_BLOBFS_CLOSE,
61 			OWNER_NONE, OBJECT_NONE, 0,
62 			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
63 		},
64 		{
65 			"BLOBFS_DELETE_START", TRACE_BLOBFS_DELETE_START,
66 			OWNER_NONE, OBJECT_NONE, 0,
67 			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
68 		},
69 		{
70 			"BLOBFS_DELETE_DONE", TRACE_BLOBFS_DELETE_DONE,
71 			OWNER_NONE, OBJECT_NONE, 0,
72 			{{ "file", SPDK_TRACE_ARG_TYPE_STR, 40 }},
73 		}
74 	};
75 
76 	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
77 }
78 
79 void
80 cache_buffer_free(struct cache_buffer *cache_buffer)
81 {
82 	spdk_mempool_put(g_cache_pool, cache_buffer->buf);
83 	free(cache_buffer);
84 }
85 
86 #define CACHE_READAHEAD_THRESHOLD	(128 * 1024)
87 
88 struct spdk_file {
89 	struct spdk_filesystem	*fs;
90 	struct spdk_blob	*blob;
91 	char			*name;
92 	uint64_t		length;
93 	bool                    is_deleted;
94 	bool			open_for_writing;
95 	uint64_t		length_flushed;
96 	uint64_t		length_xattr;
97 	uint64_t		append_pos;
98 	uint64_t		seq_byte_count;
99 	uint64_t		next_seq_offset;
100 	uint32_t		priority;
101 	TAILQ_ENTRY(spdk_file)	tailq;
102 	spdk_blob_id		blobid;
103 	uint32_t		ref_count;
104 	pthread_spinlock_t	lock;
105 	struct cache_buffer	*last;
106 	struct cache_tree	*tree;
107 	TAILQ_HEAD(open_requests_head, spdk_fs_request) open_requests;
108 	TAILQ_HEAD(sync_requests_head, spdk_fs_request) sync_requests;
109 	TAILQ_ENTRY(spdk_file)	cache_tailq;
110 };
111 
112 struct spdk_deleted_file {
113 	spdk_blob_id	id;
114 	TAILQ_ENTRY(spdk_deleted_file)	tailq;
115 };
116 
117 struct spdk_filesystem {
118 	struct spdk_blob_store	*bs;
119 	TAILQ_HEAD(, spdk_file)	files;
120 	struct spdk_bs_opts	bs_opts;
121 	struct spdk_bs_dev	*bdev;
122 	fs_send_request_fn	send_request;
123 
124 	struct {
125 		uint32_t		max_ops;
126 		struct spdk_io_channel	*sync_io_channel;
127 		struct spdk_fs_channel	*sync_fs_channel;
128 	} sync_target;
129 
130 	struct {
131 		uint32_t		max_ops;
132 		struct spdk_io_channel	*md_io_channel;
133 		struct spdk_fs_channel	*md_fs_channel;
134 	} md_target;
135 
136 	struct {
137 		uint32_t		max_ops;
138 	} io_target;
139 };
140 
141 struct spdk_fs_cb_args {
142 	union {
143 		spdk_fs_op_with_handle_complete		fs_op_with_handle;
144 		spdk_fs_op_complete			fs_op;
145 		spdk_file_op_with_handle_complete	file_op_with_handle;
146 		spdk_file_op_complete			file_op;
147 		spdk_file_stat_op_complete		stat_op;
148 	} fn;
149 	void *arg;
150 	sem_t *sem;
151 	struct spdk_filesystem *fs;
152 	struct spdk_file *file;
153 	int rc;
154 	int *rwerrno;
155 	struct iovec *iovs;
156 	uint32_t iovcnt;
157 	struct iovec iov;
158 	union {
159 		struct {
160 			TAILQ_HEAD(, spdk_deleted_file)	deleted_files;
161 		} fs_load;
162 		struct {
163 			uint64_t	length;
164 		} truncate;
165 		struct {
166 			struct spdk_io_channel	*channel;
167 			void		*pin_buf;
168 			int		is_read;
169 			off_t		offset;
170 			size_t		length;
171 			uint64_t	start_lba;
172 			uint64_t	num_lba;
173 			uint32_t	blocklen;
174 		} rw;
175 		struct {
176 			const char	*old_name;
177 			const char	*new_name;
178 		} rename;
179 		struct {
180 			struct cache_buffer	*cache_buffer;
181 			uint64_t		length;
182 		} flush;
183 		struct {
184 			struct cache_buffer	*cache_buffer;
185 			uint64_t		length;
186 			uint64_t		offset;
187 		} readahead;
188 		struct {
189 			/* offset of the file when the sync request was made */
190 			uint64_t			offset;
191 			TAILQ_ENTRY(spdk_fs_request)	tailq;
192 			bool				xattr_in_progress;
193 			/* length written to the xattr for this file - this should
194 			 * always be the same as the offset if only one thread is
195 			 * writing to the file, but could differ if multiple threads
196 			 * are appending
197 			 */
198 			uint64_t			length;
199 		} sync;
200 		struct {
201 			uint32_t			num_clusters;
202 		} resize;
203 		struct {
204 			const char	*name;
205 			uint32_t	flags;
206 			TAILQ_ENTRY(spdk_fs_request)	tailq;
207 		} open;
208 		struct {
209 			const char		*name;
210 			struct spdk_blob	*blob;
211 		} create;
212 		struct {
213 			const char	*name;
214 		} delete;
215 		struct {
216 			const char	*name;
217 		} stat;
218 	} op;
219 };
220 
221 static void file_free(struct spdk_file *file);
222 static void fs_io_device_unregister(struct spdk_filesystem *fs);
223 static void fs_free_io_channels(struct spdk_filesystem *fs);
224 
225 void
226 spdk_fs_opts_init(struct spdk_blobfs_opts *opts)
227 {
228 	opts->cluster_sz = SPDK_BLOBFS_DEFAULT_OPTS_CLUSTER_SZ;
229 }
230 
231 static int _blobfs_cache_pool_reclaim(void *arg);
232 
233 static bool
234 blobfs_cache_pool_need_reclaim(void)
235 {
236 	size_t count;
237 
238 	count = spdk_mempool_count(g_cache_pool);
239 	/* We use an aggressive policy here because requests from db_bench arrive in batches: start reclaiming
240 	 *  buffers once the number of available cache buffers falls to 1/5 of the total or below.
241 	 */
242 	if (count > (size_t)g_fs_cache_size / CACHE_BUFFER_SIZE / 5) {
243 		return false;
244 	}
245 
246 	return true;
247 }
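/*
 * Worked example (illustrative only): the pool holds g_fs_cache_size / CACHE_BUFFER_SIZE
 * buffers.  If CACHE_BUFFER_SIZE were 256 KiB, the default 4 GiB cache would hold 16384
 * buffers and reclaim would kick in once no more than 16384 / 5 = 3276 of them remain
 * available in the mempool.
 */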
248 
249 static void
250 __start_cache_pool_mgmt(void *ctx)
251 {
252 	assert(g_cache_pool == NULL);
253 
254 	g_cache_pool = spdk_mempool_create("spdk_fs_cache",
255 					   g_fs_cache_size / CACHE_BUFFER_SIZE,
256 					   CACHE_BUFFER_SIZE,
257 					   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
258 					   SPDK_ENV_SOCKET_ID_ANY);
259 	if (!g_cache_pool) {
260 		SPDK_ERRLOG("Failed to create the cache mempool; you may need to "
261 			    "increase available memory and try again\n");
262 		assert(false);
263 	}
264 
265 	assert(g_cache_pool_mgmt_poller == NULL);
266 	g_cache_pool_mgmt_poller = SPDK_POLLER_REGISTER(_blobfs_cache_pool_reclaim, NULL,
267 				   BLOBFS_CACHE_POOL_POLL_PERIOD_IN_US);
268 }
269 
270 static void
271 __stop_cache_pool_mgmt(void *ctx)
272 {
273 	spdk_poller_unregister(&g_cache_pool_mgmt_poller);
274 
275 	assert(g_cache_pool != NULL);
276 	assert(spdk_mempool_count(g_cache_pool) == g_fs_cache_size / CACHE_BUFFER_SIZE);
277 	spdk_mempool_free(g_cache_pool);
278 	g_cache_pool = NULL;
279 
280 	spdk_thread_exit(g_cache_pool_thread);
281 }
282 
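/*
 * g_fs_count reference-counts the shared cache pool: the first filesystem to come up
 * spawns the management thread and asks it to create the pool, and the last one to go
 * away asks it to free the pool and exit.  g_cache_init_lock serializes the count
 * updates; the pool itself is only created and destroyed on the management thread.
 */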
283 static void
284 initialize_global_cache(void)
285 {
286 	pthread_mutex_lock(&g_cache_init_lock);
287 	if (g_fs_count == 0) {
288 		g_cache_pool_thread = spdk_thread_create("cache_pool_mgmt", NULL);
289 		assert(g_cache_pool_thread != NULL);
290 		spdk_thread_send_msg(g_cache_pool_thread, __start_cache_pool_mgmt, NULL);
291 	}
292 	g_fs_count++;
293 	pthread_mutex_unlock(&g_cache_init_lock);
294 }
295 
296 static void
297 free_global_cache(void)
298 {
299 	pthread_mutex_lock(&g_cache_init_lock);
300 	g_fs_count--;
301 	if (g_fs_count == 0) {
302 		spdk_thread_send_msg(g_cache_pool_thread, __stop_cache_pool_mgmt, NULL);
303 	}
304 	pthread_mutex_unlock(&g_cache_init_lock);
305 }
306 
307 static uint64_t
308 __file_get_blob_size(struct spdk_file *file)
309 {
310 	uint64_t cluster_sz;
311 
312 	cluster_sz = file->fs->bs_opts.cluster_sz;
313 	return cluster_sz * spdk_blob_get_num_clusters(file->blob);
314 }
315 
316 struct spdk_fs_request {
317 	struct spdk_fs_cb_args		args;
318 	TAILQ_ENTRY(spdk_fs_request)	link;
319 	struct spdk_fs_channel		*channel;
320 };
321 
322 struct spdk_fs_channel {
323 	struct spdk_fs_request		*req_mem;
324 	TAILQ_HEAD(, spdk_fs_request)	reqs;
325 	sem_t				sem;
326 	struct spdk_filesystem		*fs;
327 	struct spdk_io_channel		*bs_channel;
328 	fs_send_request_fn		send_request;
329 	bool				sync;
330 	uint32_t			outstanding_reqs;
331 	pthread_spinlock_t		lock;
332 };
333 
334 /* For now, this is effectively an alias. But eventually we'll shift
335  * some data members over. */
336 struct spdk_fs_thread_ctx {
337 	struct spdk_fs_channel	ch;
338 };
339 
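/*
 * Requests are drawn from a fixed, per-channel pool (channel->req_mem) rather than the
 * heap.  Synchronous channels (thread contexts) share their request list between the
 * caller's thread and SPDK threads, so they protect it with channel->lock; asynchronous
 * channels are only touched from their owning thread and skip the lock.
 */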
340 static struct spdk_fs_request *
341 alloc_fs_request_with_iov(struct spdk_fs_channel *channel, uint32_t iovcnt)
342 {
343 	struct spdk_fs_request *req;
344 	struct iovec *iovs = NULL;
345 
346 	if (iovcnt > 1) {
347 		iovs = calloc(iovcnt, sizeof(struct iovec));
348 		if (!iovs) {
349 			return NULL;
350 		}
351 	}
352 
353 	if (channel->sync) {
354 		pthread_spin_lock(&channel->lock);
355 	}
356 
357 	req = TAILQ_FIRST(&channel->reqs);
358 	if (req) {
359 		channel->outstanding_reqs++;
360 		TAILQ_REMOVE(&channel->reqs, req, link);
361 	}
362 
363 	if (channel->sync) {
364 		pthread_spin_unlock(&channel->lock);
365 	}
366 
367 	if (req == NULL) {
368 		SPDK_ERRLOG("Cannot allocate req on spdk_fs_channel=%p\n", channel);
369 		free(iovs);
370 		return NULL;
371 	}
372 	memset(req, 0, sizeof(*req));
373 	req->channel = channel;
374 	if (iovcnt > 1) {
375 		req->args.iovs = iovs;
376 	} else {
377 		req->args.iovs = &req->args.iov;
378 	}
379 	req->args.iovcnt = iovcnt;
380 
381 	return req;
382 }
383 
384 static struct spdk_fs_request *
385 alloc_fs_request(struct spdk_fs_channel *channel)
386 {
387 	return alloc_fs_request_with_iov(channel, 0);
388 }
389 
390 static void
391 free_fs_request(struct spdk_fs_request *req)
392 {
393 	struct spdk_fs_channel *channel = req->channel;
394 
395 	if (req->args.iovcnt > 1) {
396 		free(req->args.iovs);
397 	}
398 
399 	if (channel->sync) {
400 		pthread_spin_lock(&channel->lock);
401 	}
402 
403 	TAILQ_INSERT_HEAD(&req->channel->reqs, req, link);
404 	channel->outstanding_reqs--;
405 
406 	if (channel->sync) {
407 		pthread_spin_unlock(&channel->lock);
408 	}
409 }
410 
411 static int
412 fs_channel_create(struct spdk_filesystem *fs, struct spdk_fs_channel *channel,
413 		  uint32_t max_ops)
414 {
415 	uint32_t i;
416 
417 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_fs_request));
418 	if (!channel->req_mem) {
419 		return -1;
420 	}
421 
422 	channel->outstanding_reqs = 0;
423 	TAILQ_INIT(&channel->reqs);
424 	sem_init(&channel->sem, 0, 0);
425 
426 	for (i = 0; i < max_ops; i++) {
427 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
428 	}
429 
430 	channel->fs = fs;
431 
432 	return 0;
433 }
434 
435 static int
436 fs_md_channel_create(void *io_device, void *ctx_buf)
437 {
438 	struct spdk_filesystem		*fs;
439 	struct spdk_fs_channel		*channel = ctx_buf;
440 
441 	fs = SPDK_CONTAINEROF(io_device, struct spdk_filesystem, md_target);
442 
443 	return fs_channel_create(fs, channel, fs->md_target.max_ops);
444 }
445 
446 static int
447 fs_sync_channel_create(void *io_device, void *ctx_buf)
448 {
449 	struct spdk_filesystem		*fs;
450 	struct spdk_fs_channel		*channel = ctx_buf;
451 
452 	fs = SPDK_CONTAINEROF(io_device, struct spdk_filesystem, sync_target);
453 
454 	return fs_channel_create(fs, channel, fs->sync_target.max_ops);
455 }
456 
457 static int
458 fs_io_channel_create(void *io_device, void *ctx_buf)
459 {
460 	struct spdk_filesystem		*fs;
461 	struct spdk_fs_channel		*channel = ctx_buf;
462 
463 	fs = SPDK_CONTAINEROF(io_device, struct spdk_filesystem, io_target);
464 
465 	return fs_channel_create(fs, channel, fs->io_target.max_ops);
466 }
467 
468 static void
469 fs_channel_destroy(void *io_device, void *ctx_buf)
470 {
471 	struct spdk_fs_channel *channel = ctx_buf;
472 
473 	if (channel->outstanding_reqs > 0) {
474 		SPDK_ERRLOG("channel freed with %" PRIu32 " outstanding requests!\n",
475 			    channel->outstanding_reqs);
476 	}
477 
478 	free(channel->req_mem);
479 	if (channel->bs_channel != NULL) {
480 		spdk_bs_free_io_channel(channel->bs_channel);
481 	}
482 }
483 
484 static void
485 __send_request_direct(fs_request_fn fn, void *arg)
486 {
487 	fn(arg);
488 }
489 
490 static void
491 common_fs_bs_init(struct spdk_filesystem *fs, struct spdk_blob_store *bs)
492 {
493 	fs->bs = bs;
494 	fs->bs_opts.cluster_sz = spdk_bs_get_cluster_size(bs);
495 	fs->md_target.md_fs_channel->bs_channel = spdk_bs_alloc_io_channel(fs->bs);
496 	fs->md_target.md_fs_channel->send_request = __send_request_direct;
497 	fs->sync_target.sync_fs_channel->bs_channel = spdk_bs_alloc_io_channel(fs->bs);
498 	fs->sync_target.sync_fs_channel->send_request = __send_request_direct;
499 
500 	initialize_global_cache();
501 }
502 
503 static void
504 init_cb(void *ctx, struct spdk_blob_store *bs, int bserrno)
505 {
506 	struct spdk_fs_request *req = ctx;
507 	struct spdk_fs_cb_args *args = &req->args;
508 	struct spdk_filesystem *fs = args->fs;
509 
510 	if (bserrno == 0) {
511 		common_fs_bs_init(fs, bs);
512 	} else {
513 		free(fs);
514 		fs = NULL;
515 	}
516 
517 	args->fn.fs_op_with_handle(args->arg, fs, bserrno);
518 	free_fs_request(req);
519 }
520 
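/*
 * Each filesystem registers three io_devices: md_target for metadata operations
 * (create/open/delete/rename and load), sync_target for sync/flush work, and io_target
 * for the per-thread data-path channels handed out by spdk_fs_alloc_io_channel().
 * fs_alloc() sets all three up with a pool of 512 requests per channel.
 */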
521 static struct spdk_filesystem *
522 fs_alloc(struct spdk_bs_dev *dev, fs_send_request_fn send_request_fn)
523 {
524 	struct spdk_filesystem *fs;
525 
526 	fs = calloc(1, sizeof(*fs));
527 	if (fs == NULL) {
528 		return NULL;
529 	}
530 
531 	fs->bdev = dev;
532 	fs->send_request = send_request_fn;
533 	TAILQ_INIT(&fs->files);
534 
535 	fs->md_target.max_ops = 512;
536 	spdk_io_device_register(&fs->md_target, fs_md_channel_create, fs_channel_destroy,
537 				sizeof(struct spdk_fs_channel), "blobfs_md");
538 	fs->md_target.md_io_channel = spdk_get_io_channel(&fs->md_target);
539 	fs->md_target.md_fs_channel = spdk_io_channel_get_ctx(fs->md_target.md_io_channel);
540 
541 	fs->sync_target.max_ops = 512;
542 	spdk_io_device_register(&fs->sync_target, fs_sync_channel_create, fs_channel_destroy,
543 				sizeof(struct spdk_fs_channel), "blobfs_sync");
544 	fs->sync_target.sync_io_channel = spdk_get_io_channel(&fs->sync_target);
545 	fs->sync_target.sync_fs_channel = spdk_io_channel_get_ctx(fs->sync_target.sync_io_channel);
546 
547 	fs->io_target.max_ops = 512;
548 	spdk_io_device_register(&fs->io_target, fs_io_channel_create, fs_channel_destroy,
549 				sizeof(struct spdk_fs_channel), "blobfs_io");
550 
551 	return fs;
552 }
553 
554 static void
555 __wake_caller(void *arg, int fserrno)
556 {
557 	struct spdk_fs_cb_args *args = arg;
558 
559 	if ((args->rwerrno != NULL) && (*(args->rwerrno) == 0) && fserrno) {
560 		*(args->rwerrno) = fserrno;
561 	}
562 	args->rc = fserrno;
563 	sem_post(args->sem);
564 }
565 
566 void
567 spdk_fs_init(struct spdk_bs_dev *dev, struct spdk_blobfs_opts *opt,
568 	     fs_send_request_fn send_request_fn,
569 	     spdk_fs_op_with_handle_complete cb_fn, void *cb_arg)
570 {
571 	struct spdk_filesystem *fs;
572 	struct spdk_fs_request *req;
573 	struct spdk_fs_cb_args *args;
574 	struct spdk_bs_opts opts = {};
575 
576 	fs = fs_alloc(dev, send_request_fn);
577 	if (fs == NULL) {
578 		cb_fn(cb_arg, NULL, -ENOMEM);
579 		return;
580 	}
581 
582 	req = alloc_fs_request(fs->md_target.md_fs_channel);
583 	if (req == NULL) {
584 		fs_free_io_channels(fs);
585 		fs_io_device_unregister(fs);
586 		cb_fn(cb_arg, NULL, -ENOMEM);
587 		return;
588 	}
589 
590 	args = &req->args;
591 	args->fn.fs_op_with_handle = cb_fn;
592 	args->arg = cb_arg;
593 	args->fs = fs;
594 
595 	spdk_bs_opts_init(&opts, sizeof(opts));
596 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), SPDK_BLOBFS_SIGNATURE);
597 	if (opt) {
598 		opts.cluster_sz = opt->cluster_sz;
599 	}
600 	spdk_bs_init(dev, &opts, init_cb, req);
601 }
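/*
 * Illustrative usage sketch (not part of this file): an application typically supplies a
 * send_request callback that funnels blobfs requests onto the SPDK thread owning the
 * filesystem, then calls spdk_fs_init() with a blobstore device.  The names below
 * (my_send_request, g_fs_thread, init_done, bs_dev, ctx) are assumptions about the
 * caller's environment, not SPDK APIs.
 *
 *	static void
 *	my_send_request(fs_request_fn fn, void *arg)
 *	{
 *		spdk_thread_send_msg(g_fs_thread, fn, arg);
 *	}
 *
 *	static void
 *	init_done(void *ctx, struct spdk_filesystem *fs, int fserrno)
 *	{
 *		// stash fs on success, report fserrno otherwise
 *	}
 *
 *	spdk_fs_init(bs_dev, NULL, my_send_request, init_done, ctx);
 */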
602 
603 static struct spdk_file *
604 file_alloc(struct spdk_filesystem *fs)
605 {
606 	struct spdk_file *file;
607 
608 	file = calloc(1, sizeof(*file));
609 	if (file == NULL) {
610 		return NULL;
611 	}
612 
613 	file->tree = calloc(1, sizeof(*file->tree));
614 	if (file->tree == NULL) {
615 		free(file);
616 		return NULL;
617 	}
618 
619 	if (pthread_spin_init(&file->lock, 0)) {
620 		free(file->tree);
621 		free(file);
622 		return NULL;
623 	}
624 
625 	file->fs = fs;
626 	TAILQ_INIT(&file->open_requests);
627 	TAILQ_INIT(&file->sync_requests);
628 	TAILQ_INSERT_TAIL(&fs->files, file, tailq);
629 	file->priority = SPDK_FILE_PRIORITY_LOW;
630 	return file;
631 }
632 
633 static void fs_load_done(void *ctx, int bserrno);
634 
635 static int
636 _handle_deleted_files(struct spdk_fs_request *req)
637 {
638 	struct spdk_fs_cb_args *args = &req->args;
639 	struct spdk_filesystem *fs = args->fs;
640 
641 	if (!TAILQ_EMPTY(&args->op.fs_load.deleted_files)) {
642 		struct spdk_deleted_file *deleted_file;
643 
644 		deleted_file = TAILQ_FIRST(&args->op.fs_load.deleted_files);
645 		TAILQ_REMOVE(&args->op.fs_load.deleted_files, deleted_file, tailq);
646 		spdk_bs_delete_blob(fs->bs, deleted_file->id, fs_load_done, req);
647 		free(deleted_file);
648 		return 0;
649 	}
650 
651 	return 1;
652 }
653 
654 static void
655 fs_load_done(void *ctx, int bserrno)
656 {
657 	struct spdk_fs_request *req = ctx;
658 	struct spdk_fs_cb_args *args = &req->args;
659 	struct spdk_filesystem *fs = args->fs;
660 
661 	/* The filesystem has been loaded.  Now check if there are any files that
662 	 *  were marked for deletion before last unload.  Do not complete the
663 	 *  fs_load callback until all of them have been deleted on disk.
664 	 */
665 	if (_handle_deleted_files(req) == 0) {
666 		/* We found a file that's been marked for deleting but not actually
667 		 *  deleted yet.  This function will get called again once the delete
668 		 *  operation is completed.
669 		 */
670 		return;
671 	}
672 
673 	args->fn.fs_op_with_handle(args->arg, fs, 0);
674 	free_fs_request(req);
675 
676 }
677 
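/*
 * iter_cb() runs once per blob while the blobstore is loaded.  Each blobfs file is
 * reconstructed from xattrs stored on its blob: "name", "length", and (only for files
 * that were pending deletion at the last unload) "is_deleted".
 */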
678 static void
679 iter_cb(void *ctx, struct spdk_blob *blob, int rc)
680 {
681 	struct spdk_fs_request *req = ctx;
682 	struct spdk_fs_cb_args *args = &req->args;
683 	struct spdk_filesystem *fs = args->fs;
684 	uint64_t *length;
685 	const char *name;
686 	uint32_t *is_deleted;
687 	size_t value_len;
688 
689 	if (rc < 0) {
690 		args->fn.fs_op_with_handle(args->arg, fs, rc);
691 		free_fs_request(req);
692 		return;
693 	}
694 
695 	rc = spdk_blob_get_xattr_value(blob, "name", (const void **)&name, &value_len);
696 	if (rc < 0) {
697 		args->fn.fs_op_with_handle(args->arg, fs, rc);
698 		free_fs_request(req);
699 		return;
700 	}
701 
702 	rc = spdk_blob_get_xattr_value(blob, "length", (const void **)&length, &value_len);
703 	if (rc < 0) {
704 		args->fn.fs_op_with_handle(args->arg, fs, rc);
705 		free_fs_request(req);
706 		return;
707 	}
708 
709 	assert(value_len == 8);
710 
711 	/* The file may have been marked deleted but never actually deleted (e.g., the app crashed before closing it), so handle that case now */
712 	rc = spdk_blob_get_xattr_value(blob, "is_deleted", (const void **)&is_deleted, &value_len);
713 	if (rc < 0) {
714 		struct spdk_file *f;
715 
716 		f = file_alloc(fs);
717 		if (f == NULL) {
718 			SPDK_ERRLOG("Cannot allocate file to handle deleted file on disk\n");
719 			args->fn.fs_op_with_handle(args->arg, fs, -ENOMEM);
720 			free_fs_request(req);
721 			return;
722 		}
723 
724 		f->name = strdup(name);
725 		if (!f->name) {
726 			SPDK_ERRLOG("Cannot allocate memory for file name\n");
727 			args->fn.fs_op_with_handle(args->arg, fs, -ENOMEM);
728 			free_fs_request(req);
729 			file_free(f);
730 			return;
731 		}
732 
733 		f->blobid = spdk_blob_get_id(blob);
734 		f->length = *length;
735 		f->length_flushed = *length;
736 		f->length_xattr = *length;
737 		f->append_pos = *length;
738 		SPDK_DEBUGLOG(blobfs, "added file %s length=%ju\n", f->name, f->length);
739 	} else {
740 		struct spdk_deleted_file *deleted_file;
741 
742 		deleted_file = calloc(1, sizeof(*deleted_file));
743 		if (deleted_file == NULL) {
744 			args->fn.fs_op_with_handle(args->arg, fs, -ENOMEM);
745 			free_fs_request(req);
746 			return;
747 		}
748 		deleted_file->id = spdk_blob_get_id(blob);
749 		TAILQ_INSERT_TAIL(&args->op.fs_load.deleted_files, deleted_file, tailq);
750 	}
751 }
752 
753 static void
754 load_cb(void *ctx, struct spdk_blob_store *bs, int bserrno)
755 {
756 	struct spdk_fs_request *req = ctx;
757 	struct spdk_fs_cb_args *args = &req->args;
758 	struct spdk_filesystem *fs = args->fs;
759 	struct spdk_bs_type bstype;
760 	static const struct spdk_bs_type blobfs_type = {SPDK_BLOBFS_SIGNATURE};
761 	static const struct spdk_bs_type zeros;
762 
763 	if (bserrno != 0) {
764 		args->fn.fs_op_with_handle(args->arg, NULL, bserrno);
765 		free_fs_request(req);
766 		fs_free_io_channels(fs);
767 		fs_io_device_unregister(fs);
768 		return;
769 	}
770 
771 	bstype = spdk_bs_get_bstype(bs);
772 
773 	if (!memcmp(&bstype, &zeros, sizeof(bstype))) {
774 		SPDK_DEBUGLOG(blobfs, "assigning bstype\n");
775 		spdk_bs_set_bstype(bs, blobfs_type);
776 	} else if (memcmp(&bstype, &blobfs_type, sizeof(bstype))) {
777 		SPDK_ERRLOG("not blobfs\n");
778 		SPDK_LOGDUMP(blobfs, "bstype", &bstype, sizeof(bstype));
779 		args->fn.fs_op_with_handle(args->arg, NULL, -EINVAL);
780 		free_fs_request(req);
781 		fs_free_io_channels(fs);
782 		fs_io_device_unregister(fs);
783 		return;
784 	}
785 
786 	common_fs_bs_init(fs, bs);
787 	fs_load_done(req, 0);
788 }
789 
790 static void
791 fs_io_device_unregister(struct spdk_filesystem *fs)
792 {
793 	assert(fs != NULL);
794 	spdk_io_device_unregister(&fs->md_target, NULL);
795 	spdk_io_device_unregister(&fs->sync_target, NULL);
796 	spdk_io_device_unregister(&fs->io_target, NULL);
797 	free(fs);
798 }
799 
800 static void
801 fs_free_io_channels(struct spdk_filesystem *fs)
802 {
803 	assert(fs != NULL);
804 	spdk_fs_free_io_channel(fs->md_target.md_io_channel);
805 	spdk_fs_free_io_channel(fs->sync_target.sync_io_channel);
806 }
807 
808 void
809 spdk_fs_load(struct spdk_bs_dev *dev, fs_send_request_fn send_request_fn,
810 	     spdk_fs_op_with_handle_complete cb_fn, void *cb_arg)
811 {
812 	struct spdk_filesystem *fs;
813 	struct spdk_fs_cb_args *args;
814 	struct spdk_fs_request *req;
815 	struct spdk_bs_opts	bs_opts;
816 
817 	fs = fs_alloc(dev, send_request_fn);
818 	if (fs == NULL) {
819 		cb_fn(cb_arg, NULL, -ENOMEM);
820 		return;
821 	}
822 
823 	req = alloc_fs_request(fs->md_target.md_fs_channel);
824 	if (req == NULL) {
825 		fs_free_io_channels(fs);
826 		fs_io_device_unregister(fs);
827 		cb_fn(cb_arg, NULL, -ENOMEM);
828 		return;
829 	}
830 
831 	args = &req->args;
832 	args->fn.fs_op_with_handle = cb_fn;
833 	args->arg = cb_arg;
834 	args->fs = fs;
835 	TAILQ_INIT(&args->op.fs_load.deleted_files);
836 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
837 	bs_opts.iter_cb_fn = iter_cb;
838 	bs_opts.iter_cb_arg = req;
839 	spdk_bs_load(dev, &bs_opts, load_cb, req);
840 }
841 
842 static void
843 unload_cb(void *ctx, int bserrno)
844 {
845 	struct spdk_fs_request *req = ctx;
846 	struct spdk_fs_cb_args *args = &req->args;
847 	struct spdk_filesystem *fs = args->fs;
848 	struct spdk_file *file, *tmp;
849 
850 	TAILQ_FOREACH_SAFE(file, &fs->files, tailq, tmp) {
851 		TAILQ_REMOVE(&fs->files, file, tailq);
852 		file_free(file);
853 	}
854 
855 	free_global_cache();
856 
857 	args->fn.fs_op(args->arg, bserrno);
858 	free(req);
859 
860 	fs_io_device_unregister(fs);
861 }
862 
863 void
864 spdk_fs_unload(struct spdk_filesystem *fs, spdk_fs_op_complete cb_fn, void *cb_arg)
865 {
866 	struct spdk_fs_request *req;
867 	struct spdk_fs_cb_args *args;
868 
869 	/*
870 	 * We must free the md_channel before unloading the blobstore, so just
871 	 *  allocate this request from the general heap.
872 	 */
873 	req = calloc(1, sizeof(*req));
874 	if (req == NULL) {
875 		cb_fn(cb_arg, -ENOMEM);
876 		return;
877 	}
878 
879 	args = &req->args;
880 	args->fn.fs_op = cb_fn;
881 	args->arg = cb_arg;
882 	args->fs = fs;
883 
884 	fs_free_io_channels(fs);
885 	spdk_bs_unload(fs->bs, unload_cb, req);
886 }
887 
888 static struct spdk_file *
889 fs_find_file(struct spdk_filesystem *fs, const char *name)
890 {
891 	struct spdk_file *file;
892 
893 	TAILQ_FOREACH(file, &fs->files, tailq) {
894 		if (!strncmp(name, file->name, SPDK_FILE_NAME_MAX)) {
895 			return file;
896 		}
897 	}
898 
899 	return NULL;
900 }
901 
902 void
903 spdk_fs_file_stat_async(struct spdk_filesystem *fs, const char *name,
904 			spdk_file_stat_op_complete cb_fn, void *cb_arg)
905 {
906 	struct spdk_file_stat stat;
907 	struct spdk_file *f = NULL;
908 
909 	if (strnlen(name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
910 		cb_fn(cb_arg, NULL, -ENAMETOOLONG);
911 		return;
912 	}
913 
914 	f = fs_find_file(fs, name);
915 	if (f != NULL) {
916 		stat.blobid = f->blobid;
917 		stat.size = f->append_pos >= f->length ? f->append_pos : f->length;
918 		cb_fn(cb_arg, &stat, 0);
919 		return;
920 	}
921 
922 	cb_fn(cb_arg, NULL, -ENOENT);
923 }
924 
925 static void
926 __copy_stat(void *arg, struct spdk_file_stat *stat, int fserrno)
927 {
928 	struct spdk_fs_request *req = arg;
929 	struct spdk_fs_cb_args *args = &req->args;
930 
931 	args->rc = fserrno;
932 	if (fserrno == 0) {
933 		memcpy(args->arg, stat, sizeof(*stat));
934 	}
935 	sem_post(args->sem);
936 }
937 
938 static void
939 __file_stat(void *arg)
940 {
941 	struct spdk_fs_request *req = arg;
942 	struct spdk_fs_cb_args *args = &req->args;
943 
944 	spdk_fs_file_stat_async(args->fs, args->op.stat.name,
945 				args->fn.stat_op, req);
946 }
947 
948 int
949 spdk_fs_file_stat(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx,
950 		  const char *name, struct spdk_file_stat *stat)
951 {
952 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
953 	struct spdk_fs_request *req;
954 	int rc;
955 
956 	req = alloc_fs_request(channel);
957 	if (req == NULL) {
958 		SPDK_ERRLOG("Cannot allocate stat req on file=%s\n", name);
959 		return -ENOMEM;
960 	}
961 
962 	req->args.fs = fs;
963 	req->args.op.stat.name = name;
964 	req->args.fn.stat_op = __copy_stat;
965 	req->args.arg = stat;
966 	req->args.sem = &channel->sem;
967 	channel->send_request(__file_stat, req);
968 	sem_wait(&channel->sem);
969 
970 	rc = req->args.rc;
971 	free_fs_request(req);
972 
973 	return rc;
974 }
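/*
 * The blocking spdk_fs_*()/spdk_file_*() entry points that take a struct spdk_fs_thread_ctx
 * all follow the pattern above: allocate a request from the caller's sync channel, hand it
 * to an SPDK thread through a send_request callback, block on the channel semaphore, and
 * return args->rc once the async callback wakes the caller.
 */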
975 
976 static void
977 fs_create_blob_close_cb(void *ctx, int bserrno)
978 {
979 	int rc;
980 	struct spdk_fs_request *req = ctx;
981 	struct spdk_fs_cb_args *args = &req->args;
982 
983 	rc = args->rc ? args->rc : bserrno;
984 	args->fn.file_op(args->arg, rc);
985 	free_fs_request(req);
986 }
987 
988 static void
989 fs_create_blob_resize_cb(void *ctx, int bserrno)
990 {
991 	struct spdk_fs_request *req = ctx;
992 	struct spdk_fs_cb_args *args = &req->args;
993 	struct spdk_file *f = args->file;
994 	struct spdk_blob *blob = args->op.create.blob;
995 	uint64_t length = 0;
996 
997 	args->rc = bserrno;
998 	if (bserrno) {
999 		spdk_blob_close(blob, fs_create_blob_close_cb, args);
1000 		return;
1001 	}
1002 
1003 	spdk_blob_set_xattr(blob, "name", f->name, strlen(f->name) + 1);
1004 	spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
1005 
1006 	spdk_blob_close(blob, fs_create_blob_close_cb, args);
1007 }
1008 
1009 static void
1010 fs_create_blob_open_cb(void *ctx, struct spdk_blob *blob, int bserrno)
1011 {
1012 	struct spdk_fs_request *req = ctx;
1013 	struct spdk_fs_cb_args *args = &req->args;
1014 
1015 	if (bserrno) {
1016 		args->fn.file_op(args->arg, bserrno);
1017 		free_fs_request(req);
1018 		return;
1019 	}
1020 
1021 	args->op.create.blob = blob;
1022 	spdk_blob_resize(blob, 1, fs_create_blob_resize_cb, req);
1023 }
1024 
1025 static void
1026 fs_create_blob_create_cb(void *ctx, spdk_blob_id blobid, int bserrno)
1027 {
1028 	struct spdk_fs_request *req = ctx;
1029 	struct spdk_fs_cb_args *args = &req->args;
1030 	struct spdk_file *f = args->file;
1031 
1032 	if (bserrno) {
1033 		args->fn.file_op(args->arg, bserrno);
1034 		free_fs_request(req);
1035 		return;
1036 	}
1037 
1038 	f->blobid = blobid;
1039 	spdk_bs_open_blob(f->fs->bs, blobid, fs_create_blob_open_cb, req);
1040 }
1041 
1042 void
1043 spdk_fs_create_file_async(struct spdk_filesystem *fs, const char *name,
1044 			  spdk_file_op_complete cb_fn, void *cb_arg)
1045 {
1046 	struct spdk_file *file;
1047 	struct spdk_fs_request *req;
1048 	struct spdk_fs_cb_args *args;
1049 
1050 	if (strnlen(name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
1051 		cb_fn(cb_arg, -ENAMETOOLONG);
1052 		return;
1053 	}
1054 
1055 	file = fs_find_file(fs, name);
1056 	if (file != NULL) {
1057 		cb_fn(cb_arg, -EEXIST);
1058 		return;
1059 	}
1060 
1061 	file = file_alloc(fs);
1062 	if (file == NULL) {
1063 		SPDK_ERRLOG("Cannot allocate new file for creation\n");
1064 		cb_fn(cb_arg, -ENOMEM);
1065 		return;
1066 	}
1067 
1068 	req = alloc_fs_request(fs->md_target.md_fs_channel);
1069 	if (req == NULL) {
1070 		SPDK_ERRLOG("Cannot allocate create async req for file=%s\n", name);
1071 		TAILQ_REMOVE(&fs->files, file, tailq);
1072 		file_free(file);
1073 		cb_fn(cb_arg, -ENOMEM);
1074 		return;
1075 	}
1076 
1077 	args = &req->args;
1078 	args->file = file;
1079 	args->fn.file_op = cb_fn;
1080 	args->arg = cb_arg;
1081 
1082 	file->name = strdup(name);
1083 	if (!file->name) {
1084 		SPDK_ERRLOG("Cannot allocate file->name for file=%s\n", name);
1085 		free_fs_request(req);
1086 		TAILQ_REMOVE(&fs->files, file, tailq);
1087 		file_free(file);
1088 		cb_fn(cb_arg, -ENOMEM);
1089 		return;
1090 	}
1091 	spdk_bs_create_blob(fs->bs, fs_create_blob_create_cb, args);
1092 }
1093 
1094 static void
1095 __fs_create_file_done(void *arg, int fserrno)
1096 {
1097 	struct spdk_fs_request *req = arg;
1098 	struct spdk_fs_cb_args *args = &req->args;
1099 
1100 	__wake_caller(args, fserrno);
1101 	SPDK_DEBUGLOG(blobfs, "file=%s\n", args->op.create.name);
1102 }
1103 
1104 static void
1105 __fs_create_file(void *arg)
1106 {
1107 	struct spdk_fs_request *req = arg;
1108 	struct spdk_fs_cb_args *args = &req->args;
1109 
1110 	SPDK_DEBUGLOG(blobfs, "file=%s\n", args->op.create.name);
1111 	spdk_fs_create_file_async(args->fs, args->op.create.name, __fs_create_file_done, req);
1112 }
1113 
1114 int
1115 spdk_fs_create_file(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx, const char *name)
1116 {
1117 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1118 	struct spdk_fs_request *req;
1119 	struct spdk_fs_cb_args *args;
1120 	int rc;
1121 
1122 	SPDK_DEBUGLOG(blobfs, "file=%s\n", name);
1123 
1124 	req = alloc_fs_request(channel);
1125 	if (req == NULL) {
1126 		SPDK_ERRLOG("Cannot allocate req to create file=%s\n", name);
1127 		return -ENOMEM;
1128 	}
1129 
1130 	args = &req->args;
1131 	args->fs = fs;
1132 	args->op.create.name = name;
1133 	args->sem = &channel->sem;
1134 	fs->send_request(__fs_create_file, req);
1135 	sem_wait(&channel->sem);
1136 	rc = args->rc;
1137 	free_fs_request(req);
1138 
1139 	return rc;
1140 }
1141 
1142 static void
1143 fs_open_blob_done(void *ctx, struct spdk_blob *blob, int bserrno)
1144 {
1145 	struct spdk_fs_request *req = ctx;
1146 	struct spdk_fs_cb_args *args = &req->args;
1147 	struct spdk_file *f = args->file;
1148 
1149 	f->blob = blob;
1150 	while (!TAILQ_EMPTY(&f->open_requests)) {
1151 		req = TAILQ_FIRST(&f->open_requests);
1152 		args = &req->args;
1153 		TAILQ_REMOVE(&f->open_requests, req, args.op.open.tailq);
1154 		spdk_trace_record(TRACE_BLOBFS_OPEN, 0, 0, 0, f->name);
1155 		args->fn.file_op_with_handle(args->arg, f, bserrno);
1156 		free_fs_request(req);
1157 	}
1158 }
1159 
1160 static void
1161 fs_open_blob_create_cb(void *ctx, int bserrno)
1162 {
1163 	struct spdk_fs_request *req = ctx;
1164 	struct spdk_fs_cb_args *args = &req->args;
1165 	struct spdk_file *file = args->file;
1166 	struct spdk_filesystem *fs = args->fs;
1167 
1168 	if (file == NULL) {
1169 		/*
1170 		 * This is from an open with CREATE flag - the file
1171 		 *  is now created so look it up in the file list for this
1172 		 *  filesystem.
1173 		 */
1174 		file = fs_find_file(fs, args->op.open.name);
1175 		assert(file != NULL);
1176 		args->file = file;
1177 	}
1178 
1179 	file->ref_count++;
1180 	TAILQ_INSERT_TAIL(&file->open_requests, req, args.op.open.tailq);
1181 	if (file->ref_count == 1) {
1182 		assert(file->blob == NULL);
1183 		spdk_bs_open_blob(fs->bs, file->blobid, fs_open_blob_done, req);
1184 	} else if (file->blob != NULL) {
1185 		fs_open_blob_done(req, file->blob, 0);
1186 	} else {
1187 		/*
1188 		 * The blob open for this file is in progress due to a previous
1189 		 *  open request.  When that open completes, it will invoke the
1190 		 *  open callback for this request.
1191 		 */
1192 	}
1193 }
1194 
1195 void
1196 spdk_fs_open_file_async(struct spdk_filesystem *fs, const char *name, uint32_t flags,
1197 			spdk_file_op_with_handle_complete cb_fn, void *cb_arg)
1198 {
1199 	struct spdk_file *f = NULL;
1200 	struct spdk_fs_request *req;
1201 	struct spdk_fs_cb_args *args;
1202 
1203 	if (strnlen(name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
1204 		cb_fn(cb_arg, NULL, -ENAMETOOLONG);
1205 		return;
1206 	}
1207 
1208 	f = fs_find_file(fs, name);
1209 	if (f == NULL && !(flags & SPDK_BLOBFS_OPEN_CREATE)) {
1210 		cb_fn(cb_arg, NULL, -ENOENT);
1211 		return;
1212 	}
1213 
1214 	if (f != NULL && f->is_deleted == true) {
1215 		cb_fn(cb_arg, NULL, -ENOENT);
1216 		return;
1217 	}
1218 
1219 	req = alloc_fs_request(fs->md_target.md_fs_channel);
1220 	if (req == NULL) {
1221 		SPDK_ERRLOG("Cannot allocate async open req for file=%s\n", name);
1222 		cb_fn(cb_arg, NULL, -ENOMEM);
1223 		return;
1224 	}
1225 
1226 	args = &req->args;
1227 	args->fn.file_op_with_handle = cb_fn;
1228 	args->arg = cb_arg;
1229 	args->file = f;
1230 	args->fs = fs;
1231 	args->op.open.name = name;
1232 
1233 	if (f == NULL) {
1234 		spdk_fs_create_file_async(fs, name, fs_open_blob_create_cb, req);
1235 	} else {
1236 		fs_open_blob_create_cb(req, 0);
1237 	}
1238 }
1239 
1240 static void
1241 __fs_open_file_done(void *arg, struct spdk_file *file, int bserrno)
1242 {
1243 	struct spdk_fs_request *req = arg;
1244 	struct spdk_fs_cb_args *args = &req->args;
1245 
1246 	args->file = file;
1247 	__wake_caller(args, bserrno);
1248 	SPDK_DEBUGLOG(blobfs, "file=%s\n", args->op.open.name);
1249 }
1250 
1251 static void
1252 __fs_open_file(void *arg)
1253 {
1254 	struct spdk_fs_request *req = arg;
1255 	struct spdk_fs_cb_args *args = &req->args;
1256 
1257 	SPDK_DEBUGLOG(blobfs, "file=%s\n", args->op.open.name);
1258 	spdk_fs_open_file_async(args->fs, args->op.open.name, args->op.open.flags,
1259 				__fs_open_file_done, req);
1260 }
1261 
1262 int
1263 spdk_fs_open_file(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx,
1264 		  const char *name, uint32_t flags, struct spdk_file **file)
1265 {
1266 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1267 	struct spdk_fs_request *req;
1268 	struct spdk_fs_cb_args *args;
1269 	int rc;
1270 
1271 	SPDK_DEBUGLOG(blobfs, "file=%s\n", name);
1272 
1273 	req = alloc_fs_request(channel);
1274 	if (req == NULL) {
1275 		SPDK_ERRLOG("Cannot allocate req for opening file=%s\n", name);
1276 		return -ENOMEM;
1277 	}
1278 
1279 	args = &req->args;
1280 	args->fs = fs;
1281 	args->op.open.name = name;
1282 	args->op.open.flags = flags;
1283 	args->sem = &channel->sem;
1284 	fs->send_request(__fs_open_file, req);
1285 	sem_wait(&channel->sem);
1286 	rc = args->rc;
1287 	if (rc == 0) {
1288 		*file = args->file;
1289 	} else {
1290 		*file = NULL;
1291 	}
1292 	free_fs_request(req);
1293 
1294 	return rc;
1295 }
1296 
1297 static void
1298 fs_rename_blob_close_cb(void *ctx, int bserrno)
1299 {
1300 	struct spdk_fs_request *req = ctx;
1301 	struct spdk_fs_cb_args *args = &req->args;
1302 
1303 	args->fn.fs_op(args->arg, bserrno);
1304 	free_fs_request(req);
1305 }
1306 
1307 static void
1308 fs_rename_blob_open_cb(void *ctx, struct spdk_blob *blob, int bserrno)
1309 {
1310 	struct spdk_fs_request *req = ctx;
1311 	struct spdk_fs_cb_args *args = &req->args;
1312 	const char *new_name = args->op.rename.new_name;
1313 
1314 	spdk_blob_set_xattr(blob, "name", new_name, strlen(new_name) + 1);
1315 	spdk_blob_close(blob, fs_rename_blob_close_cb, req);
1316 }
1317 
1318 static void
1319 _fs_md_rename_file(struct spdk_fs_request *req)
1320 {
1321 	struct spdk_fs_cb_args *args = &req->args;
1322 	struct spdk_file *f;
1323 
1324 	f = fs_find_file(args->fs, args->op.rename.old_name);
1325 	if (f == NULL) {
1326 		args->fn.fs_op(args->arg, -ENOENT);
1327 		free_fs_request(req);
1328 		return;
1329 	}
1330 
1331 	free(f->name);
1332 	f->name = strdup(args->op.rename.new_name);
1333 	if (!f->name) {
1334 		SPDK_ERRLOG("Cannot allocate memory for file name\n");
1335 		args->fn.fs_op(args->arg, -ENOMEM);
1336 		free_fs_request(req);
1337 		return;
1338 	}
1339 
1340 	args->file = f;
1341 	spdk_bs_open_blob(args->fs->bs, f->blobid, fs_rename_blob_open_cb, req);
1342 }
1343 
1344 static void
1345 fs_rename_delete_done(void *arg, int fserrno)
1346 {
1347 	_fs_md_rename_file(arg);
1348 }
1349 
1350 void
1351 spdk_fs_rename_file_async(struct spdk_filesystem *fs,
1352 			  const char *old_name, const char *new_name,
1353 			  spdk_file_op_complete cb_fn, void *cb_arg)
1354 {
1355 	struct spdk_file *f;
1356 	struct spdk_fs_request *req;
1357 	struct spdk_fs_cb_args *args;
1358 
1359 	SPDK_DEBUGLOG(blobfs, "old=%s new=%s\n", old_name, new_name);
1360 	if (strnlen(new_name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
1361 		cb_fn(cb_arg, -ENAMETOOLONG);
1362 		return;
1363 	}
1364 
1365 	req = alloc_fs_request(fs->md_target.md_fs_channel);
1366 	if (req == NULL) {
1367 		SPDK_ERRLOG("Cannot allocate rename async req for renaming file from %s to %s\n", old_name,
1368 			    new_name);
1369 		cb_fn(cb_arg, -ENOMEM);
1370 		return;
1371 	}
1372 
1373 	args = &req->args;
1374 	args->fn.fs_op = cb_fn;
1375 	args->fs = fs;
1376 	args->arg = cb_arg;
1377 	args->op.rename.old_name = old_name;
1378 	args->op.rename.new_name = new_name;
1379 
1380 	f = fs_find_file(fs, new_name);
1381 	if (f == NULL) {
1382 		_fs_md_rename_file(req);
1383 		return;
1384 	}
1385 
1386 	/*
1387 	 * The rename overwrites an existing file.  So delete the existing file, then
1388 	 *  do the actual rename.
1389 	 */
1390 	spdk_fs_delete_file_async(fs, new_name, fs_rename_delete_done, req);
1391 }
1392 
1393 static void
1394 __fs_rename_file_done(void *arg, int fserrno)
1395 {
1396 	struct spdk_fs_request *req = arg;
1397 	struct spdk_fs_cb_args *args = &req->args;
1398 
1399 	__wake_caller(args, fserrno);
1400 }
1401 
1402 static void
1403 __fs_rename_file(void *arg)
1404 {
1405 	struct spdk_fs_request *req = arg;
1406 	struct spdk_fs_cb_args *args = &req->args;
1407 
1408 	spdk_fs_rename_file_async(args->fs, args->op.rename.old_name, args->op.rename.new_name,
1409 				  __fs_rename_file_done, req);
1410 }
1411 
1412 int
1413 spdk_fs_rename_file(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx,
1414 		    const char *old_name, const char *new_name)
1415 {
1416 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1417 	struct spdk_fs_request *req;
1418 	struct spdk_fs_cb_args *args;
1419 	int rc;
1420 
1421 	req = alloc_fs_request(channel);
1422 	if (req == NULL) {
1423 		SPDK_ERRLOG("Cannot allocate rename req for file=%s\n", old_name);
1424 		return -ENOMEM;
1425 	}
1426 
1427 	args = &req->args;
1428 
1429 	args->fs = fs;
1430 	args->op.rename.old_name = old_name;
1431 	args->op.rename.new_name = new_name;
1432 	args->sem = &channel->sem;
1433 	fs->send_request(__fs_rename_file, req);
1434 	sem_wait(&channel->sem);
1435 	rc = args->rc;
1436 	free_fs_request(req);
1437 	return rc;
1438 }
1439 
1440 static void
1441 blob_delete_cb(void *ctx, int bserrno)
1442 {
1443 	struct spdk_fs_request *req = ctx;
1444 	struct spdk_fs_cb_args *args = &req->args;
1445 
1446 	args->fn.file_op(args->arg, bserrno);
1447 	free_fs_request(req);
1448 }
1449 
1450 void
1451 spdk_fs_delete_file_async(struct spdk_filesystem *fs, const char *name,
1452 			  spdk_file_op_complete cb_fn, void *cb_arg)
1453 {
1454 	struct spdk_file *f;
1455 	spdk_blob_id blobid;
1456 	struct spdk_fs_request *req;
1457 	struct spdk_fs_cb_args *args;
1458 
1459 	SPDK_DEBUGLOG(blobfs, "file=%s\n", name);
1460 
1461 	if (strnlen(name, SPDK_FILE_NAME_MAX + 1) == SPDK_FILE_NAME_MAX + 1) {
1462 		cb_fn(cb_arg, -ENAMETOOLONG);
1463 		return;
1464 	}
1465 
1466 	f = fs_find_file(fs, name);
1467 	if (f == NULL) {
1468 		SPDK_ERRLOG("Cannot find file=%s to delete\n", name);
1469 		cb_fn(cb_arg, -ENOENT);
1470 		return;
1471 	}
1472 
1473 	req = alloc_fs_request(fs->md_target.md_fs_channel);
1474 	if (req == NULL) {
1475 		SPDK_ERRLOG("Cannot allocate req to delete file=%s\n", name);
1476 		cb_fn(cb_arg, -ENOMEM);
1477 		return;
1478 	}
1479 
1480 	args = &req->args;
1481 	args->fn.file_op = cb_fn;
1482 	args->arg = cb_arg;
1483 
1484 	if (f->ref_count > 0) {
1485 		/* If the ref count is > 0, mark the file as deleted and delete it when it is closed. */
1486 		f->is_deleted = true;
1487 		spdk_blob_set_xattr(f->blob, "is_deleted", &f->is_deleted, sizeof(bool));
1488 		spdk_blob_sync_md(f->blob, blob_delete_cb, req);
1489 		return;
1490 	}
1491 
1492 	blobid = f->blobid;
1493 	TAILQ_REMOVE(&fs->files, f, tailq);
1494 
1495 	file_free(f);
1496 
1497 	spdk_bs_delete_blob(fs->bs, blobid, blob_delete_cb, req);
1498 }
1499 
1500 static void
1501 __fs_delete_file_done(void *arg, int fserrno)
1502 {
1503 	struct spdk_fs_request *req = arg;
1504 	struct spdk_fs_cb_args *args = &req->args;
1505 
1506 	spdk_trace_record(TRACE_BLOBFS_DELETE_DONE, 0, 0, 0, args->op.delete.name);
1507 	__wake_caller(args, fserrno);
1508 }
1509 
1510 static void
1511 __fs_delete_file(void *arg)
1512 {
1513 	struct spdk_fs_request *req = arg;
1514 	struct spdk_fs_cb_args *args = &req->args;
1515 
1516 	spdk_trace_record(TRACE_BLOBFS_DELETE_START, 0, 0, 0, args->op.delete.name);
1517 	spdk_fs_delete_file_async(args->fs, args->op.delete.name, __fs_delete_file_done, req);
1518 }
1519 
1520 int
1521 spdk_fs_delete_file(struct spdk_filesystem *fs, struct spdk_fs_thread_ctx *ctx,
1522 		    const char *name)
1523 {
1524 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1525 	struct spdk_fs_request *req;
1526 	struct spdk_fs_cb_args *args;
1527 	int rc;
1528 
1529 	req = alloc_fs_request(channel);
1530 	if (req == NULL) {
1531 		SPDK_DEBUGLOG(blobfs, "Cannot allocate req to delete file=%s\n", name);
1532 		return -ENOMEM;
1533 	}
1534 
1535 	args = &req->args;
1536 	args->fs = fs;
1537 	args->op.delete.name = name;
1538 	args->sem = &channel->sem;
1539 	fs->send_request(__fs_delete_file, req);
1540 	sem_wait(&channel->sem);
1541 	rc = args->rc;
1542 	free_fs_request(req);
1543 
1544 	return rc;
1545 }
1546 
1547 spdk_fs_iter
1548 spdk_fs_iter_first(struct spdk_filesystem *fs)
1549 {
1550 	struct spdk_file *f;
1551 
1552 	f = TAILQ_FIRST(&fs->files);
1553 	return f;
1554 }
1555 
1556 spdk_fs_iter
1557 spdk_fs_iter_next(spdk_fs_iter iter)
1558 {
1559 	struct spdk_file *f = iter;
1560 
1561 	if (f == NULL) {
1562 		return NULL;
1563 	}
1564 
1565 	f = TAILQ_NEXT(f, tailq);
1566 	return f;
1567 }
1568 
1569 const char *
1570 spdk_file_get_name(struct spdk_file *file)
1571 {
1572 	return file->name;
1573 }
1574 
1575 uint64_t
1576 spdk_file_get_length(struct spdk_file *file)
1577 {
1578 	uint64_t length;
1579 
1580 	assert(file != NULL);
1581 
1582 	length = file->append_pos >= file->length ? file->append_pos : file->length;
1583 	SPDK_DEBUGLOG(blobfs, "file=%s length=0x%jx\n", file->name, length);
1584 	return length;
1585 }
1586 
1587 static void
1588 fs_truncate_complete_cb(void *ctx, int bserrno)
1589 {
1590 	struct spdk_fs_request *req = ctx;
1591 	struct spdk_fs_cb_args *args = &req->args;
1592 
1593 	args->fn.file_op(args->arg, bserrno);
1594 	free_fs_request(req);
1595 }
1596 
1597 static void
1598 fs_truncate_resize_cb(void *ctx, int bserrno)
1599 {
1600 	struct spdk_fs_request *req = ctx;
1601 	struct spdk_fs_cb_args *args = &req->args;
1602 	struct spdk_file *file = args->file;
1603 	uint64_t *length = &args->op.truncate.length;
1604 
1605 	if (bserrno) {
1606 		args->fn.file_op(args->arg, bserrno);
1607 		free_fs_request(req);
1608 		return;
1609 	}
1610 
1611 	spdk_blob_set_xattr(file->blob, "length", length, sizeof(*length));
1612 
1613 	file->length = *length;
1614 	if (file->append_pos > file->length) {
1615 		file->append_pos = file->length;
1616 	}
1617 
1618 	spdk_blob_sync_md(file->blob, fs_truncate_complete_cb, req);
1619 }
1620 
1621 static uint64_t
1622 __bytes_to_clusters(uint64_t length, uint64_t cluster_sz)
1623 {
1624 	return (length + cluster_sz - 1) / cluster_sz;
1625 }
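/*
 * __bytes_to_clusters() rounds up: with the default 1 MiB cluster size, a 1-byte file
 * needs 1 cluster and a 2.5 MiB file needs 3 clusters, while a length of 0 maps to 0.
 */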
1626 
1627 void
1628 spdk_file_truncate_async(struct spdk_file *file, uint64_t length,
1629 			 spdk_file_op_complete cb_fn, void *cb_arg)
1630 {
1631 	struct spdk_filesystem *fs;
1632 	size_t num_clusters;
1633 	struct spdk_fs_request *req;
1634 	struct spdk_fs_cb_args *args;
1635 
1636 	SPDK_DEBUGLOG(blobfs, "file=%s old=0x%jx new=0x%jx\n", file->name, file->length, length);
1637 	if (length == file->length) {
1638 		cb_fn(cb_arg, 0);
1639 		return;
1640 	}
1641 
1642 	req = alloc_fs_request(file->fs->md_target.md_fs_channel);
1643 	if (req == NULL) {
1644 		cb_fn(cb_arg, -ENOMEM);
1645 		return;
1646 	}
1647 
1648 	args = &req->args;
1649 	args->fn.file_op = cb_fn;
1650 	args->arg = cb_arg;
1651 	args->file = file;
1652 	args->op.truncate.length = length;
1653 	fs = file->fs;
1654 
1655 	num_clusters = __bytes_to_clusters(length, fs->bs_opts.cluster_sz);
1656 
1657 	spdk_blob_resize(file->blob, num_clusters, fs_truncate_resize_cb, req);
1658 }
1659 
1660 static void
1661 __truncate(void *arg)
1662 {
1663 	struct spdk_fs_request *req = arg;
1664 	struct spdk_fs_cb_args *args = &req->args;
1665 
1666 	spdk_file_truncate_async(args->file, args->op.truncate.length,
1667 				 args->fn.file_op, args);
1668 }
1669 
1670 int
1671 spdk_file_truncate(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx,
1672 		   uint64_t length)
1673 {
1674 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
1675 	struct spdk_fs_request *req;
1676 	struct spdk_fs_cb_args *args;
1677 	int rc;
1678 
1679 	req = alloc_fs_request(channel);
1680 	if (req == NULL) {
1681 		return -ENOMEM;
1682 	}
1683 
1684 	args = &req->args;
1685 
1686 	args->file = file;
1687 	args->op.truncate.length = length;
1688 	args->fn.file_op = __wake_caller;
1689 	args->sem = &channel->sem;
1690 
1691 	channel->send_request(__truncate, req);
1692 	sem_wait(&channel->sem);
1693 	rc = args->rc;
1694 	free_fs_request(req);
1695 
1696 	return rc;
1697 }
1698 
1699 static void
1700 __rw_done(void *ctx, int bserrno)
1701 {
1702 	struct spdk_fs_request *req = ctx;
1703 	struct spdk_fs_cb_args *args = &req->args;
1704 
1705 	spdk_free(args->op.rw.pin_buf);
1706 	args->fn.file_op(args->arg, bserrno);
1707 	free_fs_request(req);
1708 }
1709 
1710 static void
1711 __read_done(void *ctx, int bserrno)
1712 {
1713 	struct spdk_fs_request *req = ctx;
1714 	struct spdk_fs_cb_args *args = &req->args;
1715 	void *buf;
1716 
1717 	assert(req != NULL);
1718 	buf = (void *)((uintptr_t)args->op.rw.pin_buf + (args->op.rw.offset & (args->op.rw.blocklen - 1)));
1719 	if (args->op.rw.is_read) {
1720 		spdk_copy_buf_to_iovs(args->iovs, args->iovcnt, buf, args->op.rw.length);
1721 		__rw_done(req, 0);
1722 	} else {
1723 		spdk_copy_iovs_to_buf(buf, args->op.rw.length, args->iovs, args->iovcnt);
1724 		spdk_blob_io_write(args->file->blob, args->op.rw.channel,
1725 				   args->op.rw.pin_buf,
1726 				   args->op.rw.start_lba, args->op.rw.num_lba,
1727 				   __rw_done, req);
1728 	}
1729 }
1730 
1731 static void
1732 __do_blob_read(void *ctx, int fserrno)
1733 {
1734 	struct spdk_fs_request *req = ctx;
1735 	struct spdk_fs_cb_args *args = &req->args;
1736 
1737 	if (fserrno) {
1738 		__rw_done(req, fserrno);
1739 		return;
1740 	}
1741 	spdk_blob_io_read(args->file->blob, args->op.rw.channel,
1742 			  args->op.rw.pin_buf,
1743 			  args->op.rw.start_lba, args->op.rw.num_lba,
1744 			  __read_done, req);
1745 }
1746 
1747 static void
1748 __get_page_parameters(struct spdk_file *file, uint64_t offset, uint64_t length,
1749 		      uint64_t *start_lba, uint32_t *lba_size, uint64_t *num_lba)
1750 {
1751 	uint64_t end_lba;
1752 
1753 	*lba_size = spdk_bs_get_io_unit_size(file->fs->bs);
1754 	*start_lba = offset / *lba_size;
1755 	end_lba = (offset + length - 1) / *lba_size;
1756 	*num_lba = (end_lba - *start_lba + 1);
1757 }
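/*
 * Example (illustrative, assuming a 4 KiB blobstore io unit): offset=6144, length=10000
 * gives start_lba = 6144 / 4096 = 1 and end_lba = (6144 + 10000 - 1) / 4096 = 3, so
 * num_lba = 3 io units must be pinned to cover the request.
 */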
1758 
1759 static bool
1760 __is_lba_aligned(struct spdk_file *file, uint64_t offset, uint64_t length)
1761 {
1762 	uint32_t lba_size = spdk_bs_get_io_unit_size(file->fs->bs);
1763 
1764 	if ((offset % lba_size == 0) && (length % lba_size == 0)) {
1765 		return true;
1766 	}
1767 
1768 	return false;
1769 }
1770 
1771 static void
1772 _fs_request_setup_iovs(struct spdk_fs_request *req, struct iovec *iovs, uint32_t iovcnt)
1773 {
1774 	uint32_t i;
1775 
1776 	for (i = 0; i < iovcnt; i++) {
1777 		req->args.iovs[i].iov_base = iovs[i].iov_base;
1778 		req->args.iovs[i].iov_len = iovs[i].iov_len;
1779 	}
1780 }
1781 
1782 static void
1783 __readvwritev(struct spdk_file *file, struct spdk_io_channel *_channel,
1784 	      struct iovec *iovs, uint32_t iovcnt, uint64_t offset, uint64_t length,
1785 	      spdk_file_op_complete cb_fn, void *cb_arg, int is_read)
1786 {
1787 	struct spdk_fs_request *req;
1788 	struct spdk_fs_cb_args *args;
1789 	struct spdk_fs_channel *channel = spdk_io_channel_get_ctx(_channel);
1790 	uint64_t start_lba, num_lba, pin_buf_length;
1791 	uint32_t lba_size;
1792 
1793 	if (is_read && offset + length > file->length) {
1794 		cb_fn(cb_arg, -EINVAL);
1795 		return;
1796 	}
1797 
1798 	req = alloc_fs_request_with_iov(channel, iovcnt);
1799 	if (req == NULL) {
1800 		cb_fn(cb_arg, -ENOMEM);
1801 		return;
1802 	}
1803 
1804 	__get_page_parameters(file, offset, length, &start_lba, &lba_size, &num_lba);
1805 
1806 	args = &req->args;
1807 	args->fn.file_op = cb_fn;
1808 	args->arg = cb_arg;
1809 	args->file = file;
1810 	args->op.rw.channel = channel->bs_channel;
1811 	_fs_request_setup_iovs(req, iovs, iovcnt);
1812 	args->op.rw.is_read = is_read;
1813 	args->op.rw.offset = offset;
1814 	args->op.rw.blocklen = lba_size;
1815 
1816 	pin_buf_length = num_lba * lba_size;
1817 	args->op.rw.length = pin_buf_length;
1818 	args->op.rw.pin_buf = spdk_malloc(pin_buf_length, lba_size, NULL,
1819 					  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
1820 	if (args->op.rw.pin_buf == NULL) {
1821 		SPDK_DEBUGLOG(blobfs, "Failed to allocate buf for: file=%s offset=%jx length=%jx\n",
1822 			      file->name, offset, length);
1823 		free_fs_request(req);
1824 		cb_fn(cb_arg, -ENOMEM);
1825 		return;
1826 	}
1827 
1828 	args->op.rw.start_lba = start_lba;
1829 	args->op.rw.num_lba = num_lba;
1830 
1831 	if (!is_read && file->length < offset + length) {
1832 		spdk_file_truncate_async(file, offset + length, __do_blob_read, req);
1833 	} else if (!is_read && __is_lba_aligned(file, offset, length)) {
1834 		spdk_copy_iovs_to_buf(args->op.rw.pin_buf, args->op.rw.length, args->iovs, args->iovcnt);
1835 		spdk_blob_io_write(args->file->blob, args->op.rw.channel,
1836 				   args->op.rw.pin_buf,
1837 				   args->op.rw.start_lba, args->op.rw.num_lba,
1838 				   __rw_done, req);
1839 	} else {
1840 		__do_blob_read(req, 0);
1841 	}
1842 }
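/*
 * Unaligned I/O above is handled as a read-modify-write: the covering io units are first
 * read into the pinned bounce buffer (__do_blob_read), the user's iovecs are copied in or
 * out at the proper offset (__read_done), and for writes the whole buffer is then written
 * back.  LBA-aligned writes skip the initial read, and writes past EOF first grow the blob
 * via spdk_file_truncate_async().
 */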
1843 
1844 static void
1845 __readwrite(struct spdk_file *file, struct spdk_io_channel *channel,
1846 	    void *payload, uint64_t offset, uint64_t length,
1847 	    spdk_file_op_complete cb_fn, void *cb_arg, int is_read)
1848 {
1849 	struct iovec iov;
1850 
1851 	iov.iov_base = payload;
1852 	iov.iov_len = (size_t)length;
1853 
1854 	__readvwritev(file, channel, &iov, 1, offset, length, cb_fn, cb_arg, is_read);
1855 }
1856 
1857 void
1858 spdk_file_write_async(struct spdk_file *file, struct spdk_io_channel *channel,
1859 		      void *payload, uint64_t offset, uint64_t length,
1860 		      spdk_file_op_complete cb_fn, void *cb_arg)
1861 {
1862 	__readwrite(file, channel, payload, offset, length, cb_fn, cb_arg, 0);
1863 }
1864 
1865 void
1866 spdk_file_writev_async(struct spdk_file *file, struct spdk_io_channel *channel,
1867 		       struct iovec *iovs, uint32_t iovcnt, uint64_t offset, uint64_t length,
1868 		       spdk_file_op_complete cb_fn, void *cb_arg)
1869 {
1870 	SPDK_DEBUGLOG(blobfs, "file=%s offset=%jx length=%jx\n",
1871 		      file->name, offset, length);
1872 
1873 	__readvwritev(file, channel, iovs, iovcnt, offset, length, cb_fn, cb_arg, 0);
1874 }
1875 
1876 void
1877 spdk_file_read_async(struct spdk_file *file, struct spdk_io_channel *channel,
1878 		     void *payload, uint64_t offset, uint64_t length,
1879 		     spdk_file_op_complete cb_fn, void *cb_arg)
1880 {
1881 	SPDK_DEBUGLOG(blobfs, "file=%s offset=%jx length=%jx\n",
1882 		      file->name, offset, length);
1883 	__readwrite(file, channel, payload, offset, length, cb_fn, cb_arg, 1);
1884 }
1885 
1886 void
1887 spdk_file_readv_async(struct spdk_file *file, struct spdk_io_channel *channel,
1888 		      struct iovec *iovs, uint32_t iovcnt, uint64_t offset, uint64_t length,
1889 		      spdk_file_op_complete cb_fn, void *cb_arg)
1890 {
1891 	SPDK_DEBUGLOG(blobfs, "file=%s offset=%jx length=%jx\n",
1892 		      file->name, offset, length);
1893 
1894 	__readvwritev(file, channel, iovs, iovcnt, offset, length, cb_fn, cb_arg, 1);
1895 }
1896 
1897 struct spdk_io_channel *
1898 spdk_fs_alloc_io_channel(struct spdk_filesystem *fs)
1899 {
1900 	struct spdk_io_channel *io_channel;
1901 	struct spdk_fs_channel *fs_channel;
1902 
1903 	io_channel = spdk_get_io_channel(&fs->io_target);
1904 	fs_channel = spdk_io_channel_get_ctx(io_channel);
1905 	fs_channel->bs_channel = spdk_bs_alloc_io_channel(fs->bs);
1906 	fs_channel->send_request = __send_request_direct;
1907 
1908 	return io_channel;
1909 }
1910 
1911 void
1912 spdk_fs_free_io_channel(struct spdk_io_channel *channel)
1913 {
1914 	spdk_put_io_channel(channel);
1915 }
1916 
1917 struct spdk_fs_thread_ctx *
1918 spdk_fs_alloc_thread_ctx(struct spdk_filesystem *fs)
1919 {
1920 	struct spdk_fs_thread_ctx *ctx;
1921 
1922 	ctx = calloc(1, sizeof(*ctx));
1923 	if (!ctx) {
1924 		return NULL;
1925 	}
1926 
1927 	if (pthread_spin_init(&ctx->ch.lock, 0)) {
1928 		free(ctx);
1929 		return NULL;
1930 	}
1931 
1932 	fs_channel_create(fs, &ctx->ch, 512);
1933 
1934 	ctx->ch.send_request = fs->send_request;
1935 	ctx->ch.sync = 1;
1936 
1937 	return ctx;
1938 }
1939 
1940 
1941 void
1942 spdk_fs_free_thread_ctx(struct spdk_fs_thread_ctx *ctx)
1943 {
1944 	assert(ctx->ch.sync == 1);
1945 
1946 	while (true) {
1947 		pthread_spin_lock(&ctx->ch.lock);
1948 		if (ctx->ch.outstanding_reqs == 0) {
1949 			pthread_spin_unlock(&ctx->ch.lock);
1950 			break;
1951 		}
1952 		pthread_spin_unlock(&ctx->ch.lock);
1953 		usleep(1000);
1954 	}
1955 
1956 	fs_channel_destroy(NULL, &ctx->ch);
1957 	free(ctx);
1958 }
1959 
1960 int
1961 spdk_fs_set_cache_size(uint64_t size_in_mb)
1962 {
1963 	/* setting g_fs_cache_size is only permitted if cache pool
1964 	 * is already freed or hasn't been initialized
1965 	 */
1966 	if (g_cache_pool != NULL) {
1967 		return -EPERM;
1968 	}
1969 
1970 	g_fs_cache_size = size_in_mb * 1024 * 1024;
1971 
1972 	return 0;
1973 }
1974 
1975 uint64_t
1976 spdk_fs_get_cache_size(void)
1977 {
1978 	return g_fs_cache_size / (1024 * 1024);
1979 }
1980 
1981 static void __file_flush(void *ctx);
1982 
1983 /* Try to free some cache buffers from this file.
1984  */
1985 static int
1986 reclaim_cache_buffers(struct spdk_file *file)
1987 {
1988 	int rc;
1989 
1990 	BLOBFS_TRACE(file, "free=%s\n", file->name);
1991 
1992 	/* This function may be called from any thread, and the file lock may
1993 	 * currently be held by another thread, so use trylock here instead of
1994 	 * blocking on the lock.
1995 	 */
1996 	rc = pthread_spin_trylock(&file->lock);
1997 	if (rc != 0) {
1998 		return -1;
1999 	}
2000 
2001 	if (file->tree->present_mask == 0) {
2002 		pthread_spin_unlock(&file->lock);
2003 		return -1;
2004 	}
2005 	tree_free_buffers(file->tree);
2006 
2007 	TAILQ_REMOVE(&g_caches, file, cache_tailq);
2008 	/* If not all buffers were freed, put the file back at the end of the queue */
2009 	if (file->tree->present_mask != 0) {
2010 		TAILQ_INSERT_TAIL(&g_caches, file, cache_tailq);
2011 	} else {
2012 		file->last = NULL;
2013 	}
2014 	pthread_spin_unlock(&file->lock);
2015 
2016 	return 0;
2017 }
2018 
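/*
 * Cache pool management poller.  When the pool runs low, buffers are
 * reclaimed in three passes of increasing aggressiveness: first from
 * low-priority files that are not open for writing, then from any file not
 * open for writing, and finally from any file at all.  Each pass stops as
 * soon as enough buffers have been freed.
 */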
2019 static int
2020 _blobfs_cache_pool_reclaim(void *arg)
2021 {
2022 	struct spdk_file *file, *tmp;
2023 	int rc;
2024 
2025 	if (!blobfs_cache_pool_need_reclaim()) {
2026 		return SPDK_POLLER_IDLE;
2027 	}
2028 
2029 	TAILQ_FOREACH_SAFE(file, &g_caches, cache_tailq, tmp) {
2030 		if (!file->open_for_writing &&
2031 		    file->priority == SPDK_FILE_PRIORITY_LOW) {
2032 			rc = reclaim_cache_buffers(file);
2033 			if (rc < 0) {
2034 				continue;
2035 			}
2036 			if (!blobfs_cache_pool_need_reclaim()) {
2037 				return SPDK_POLLER_BUSY;
2038 			}
2039 			break;
2040 		}
2041 	}
2042 
2043 	TAILQ_FOREACH_SAFE(file, &g_caches, cache_tailq, tmp) {
2044 		if (!file->open_for_writing) {
2045 			rc = reclaim_cache_buffers(file);
2046 			if (rc < 0) {
2047 				continue;
2048 			}
2049 			if (!blobfs_cache_pool_need_reclaim()) {
2050 				return SPDK_POLLER_BUSY;
2051 			}
2052 			break;
2053 		}
2054 	}
2055 
2056 	TAILQ_FOREACH_SAFE(file, &g_caches, cache_tailq, tmp) {
2057 		rc = reclaim_cache_buffers(file);
2058 		if (rc < 0) {
2059 			continue;
2060 		}
2061 		break;
2062 	}
2063 
2064 	return SPDK_POLLER_BUSY;
2065 }
2066 
2067 static void
2068 _add_file_to_cache_pool(void *ctx)
2069 {
2070 	struct spdk_file *file = ctx;
2071 
2072 	TAILQ_INSERT_TAIL(&g_caches, file, cache_tailq);
2073 }
2074 
2075 static void
2076 _remove_file_from_cache_pool(void *ctx)
2077 {
2078 	struct spdk_file *file = ctx;
2079 
2080 	TAILQ_REMOVE(&g_caches, file, cache_tailq);
2081 }
2082 
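/*
 * Allocate a cache buffer for the given file offset and insert it into the
 * file's buffer tree.  Buffer memory comes from the global mempool; if the
 * pool is empty the allocation is retried roughly every millisecond, for up
 * to about 100 attempts, before giving up.  When a file gains its first
 * buffer it is registered with the cache management thread so that it
 * becomes eligible for reclaim.
 */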
2083 static struct cache_buffer *
2084 cache_insert_buffer(struct spdk_file *file, uint64_t offset)
2085 {
2086 	struct cache_buffer *buf;
2087 	int count = 0;
2088 	bool need_update = false;
2089 
2090 	buf = calloc(1, sizeof(*buf));
2091 	if (buf == NULL) {
2092 		SPDK_DEBUGLOG(blobfs, "calloc failed\n");
2093 		return NULL;
2094 	}
2095 
2096 	do {
2097 		buf->buf = spdk_mempool_get(g_cache_pool);
2098 		if (buf->buf) {
2099 			break;
2100 		}
2101 		if (count++ == 100) {
2102 			SPDK_ERRLOG("Could not allocate cache buffer for file=%p at offset=%jx\n",
2103 				    file, offset);
2104 			free(buf);
2105 			return NULL;
2106 		}
2107 		usleep(BLOBFS_CACHE_POOL_POLL_PERIOD_IN_US);
2108 	} while (true);
2109 
2110 	buf->buf_size = CACHE_BUFFER_SIZE;
2111 	buf->offset = offset;
2112 
2113 	if (file->tree->present_mask == 0) {
2114 		need_update = true;
2115 	}
2116 	file->tree = tree_insert_buffer(file->tree, buf);
2117 
2118 	if (need_update) {
2119 		spdk_thread_send_msg(g_cache_pool_thread, _add_file_to_cache_pool, file);
2120 	}
2121 
2122 	return buf;
2123 }
2124 
2125 static struct cache_buffer *
2126 cache_append_buffer(struct spdk_file *file)
2127 {
2128 	struct cache_buffer *last;
2129 
2130 	assert(file->last == NULL || file->last->bytes_filled == file->last->buf_size);
2131 	assert((file->append_pos % CACHE_BUFFER_SIZE) == 0);
2132 
2133 	last = cache_insert_buffer(file, file->append_pos);
2134 	if (last == NULL) {
2135 		SPDK_DEBUGLOG(blobfs, "cache_insert_buffer failed\n");
2136 		return NULL;
2137 	}
2138 
2139 	file->last = last;
2140 
2141 	return last;
2142 }
2143 
2144 static void __check_sync_reqs(struct spdk_file *file);
2145 
2146 static void
2147 __file_cache_finish_sync(void *ctx, int bserrno)
2148 {
2149 	struct spdk_file *file;
2150 	struct spdk_fs_request *sync_req = ctx;
2151 	struct spdk_fs_cb_args *sync_args;
2152 
2153 	sync_args = &sync_req->args;
2154 	file = sync_args->file;
2155 	pthread_spin_lock(&file->lock);
2156 	file->length_xattr = sync_args->op.sync.length;
2157 	assert(sync_args->op.sync.offset <= file->length_flushed);
2158 	spdk_trace_record(TRACE_BLOBFS_XATTR_END, 0, sync_args->op.sync.offset,
2159 			  0, file->name);
2160 	BLOBFS_TRACE(file, "sync done offset=%jx\n", sync_args->op.sync.offset);
2161 	TAILQ_REMOVE(&file->sync_requests, sync_req, args.op.sync.tailq);
2162 	pthread_spin_unlock(&file->lock);
2163 
2164 	sync_args->fn.file_op(sync_args->arg, bserrno);
2165 
2166 	free_fs_request(sync_req);
2167 	__check_sync_reqs(file);
2168 }
2169 
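/*
 * Walk the file's pending sync requests and, once flushing has caught up with
 * a request's offset, persist the flushed length in the blob's "length" xattr
 * and sync the blob metadata.  Completion is handled by
 * __file_cache_finish_sync(), which calls back into this function to service
 * any remaining sync requests.
 */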
2170 static void
2171 __check_sync_reqs(struct spdk_file *file)
2172 {
2173 	struct spdk_fs_request *sync_req;
2174 
2175 	pthread_spin_lock(&file->lock);
2176 
2177 	TAILQ_FOREACH(sync_req, &file->sync_requests, args.op.sync.tailq) {
2178 		if (sync_req->args.op.sync.offset <= file->length_flushed) {
2179 			break;
2180 		}
2181 	}
2182 
2183 	if (sync_req != NULL && !sync_req->args.op.sync.xattr_in_progress) {
2184 		BLOBFS_TRACE(file, "set xattr length 0x%jx\n", file->length_flushed);
2185 		sync_req->args.op.sync.xattr_in_progress = true;
2186 		sync_req->args.op.sync.length = file->length_flushed;
2187 		spdk_blob_set_xattr(file->blob, "length", &file->length_flushed,
2188 				    sizeof(file->length_flushed));
2189 
2190 		pthread_spin_unlock(&file->lock);
2191 		spdk_trace_record(TRACE_BLOBFS_XATTR_START, 0, file->length_flushed,
2192 				  0, file->name);
2193 		spdk_blob_sync_md(file->blob, __file_cache_finish_sync, sync_req);
2194 	} else {
2195 		pthread_spin_unlock(&file->lock);
2196 	}
2197 }
2198 
2199 static void
2200 __file_flush_done(void *ctx, int bserrno)
2201 {
2202 	struct spdk_fs_request *req = ctx;
2203 	struct spdk_fs_cb_args *args = &req->args;
2204 	struct spdk_file *file = args->file;
2205 	struct cache_buffer *next = args->op.flush.cache_buffer;
2206 
2207 	BLOBFS_TRACE(file, "length=%jx\n", args->op.flush.length);
2208 
2209 	pthread_spin_lock(&file->lock);
2210 	next->in_progress = false;
2211 	next->bytes_flushed += args->op.flush.length;
2212 	file->length_flushed += args->op.flush.length;
2213 	if (file->length_flushed > file->length) {
2214 		file->length = file->length_flushed;
2215 	}
2216 	if (next->bytes_flushed == next->buf_size) {
2217 		BLOBFS_TRACE(file, "write buffer fully flushed 0x%jx\n", file->length_flushed);
2218 		next = tree_find_buffer(file->tree, file->length_flushed);
2219 	}
2220 
2221 	/*
2222 	 * Assert that there is no cached data that extends past the end of the underlying
2223 	 *  blob.
2224 	 */
2225 	assert(next == NULL || next->offset < __file_get_blob_size(file) ||
2226 	       next->bytes_filled == 0);
2227 
2228 	pthread_spin_unlock(&file->lock);
2229 
2230 	__check_sync_reqs(file);
2231 
2232 	__file_flush(req);
2233 }
2234 
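/*
 * Flush the next cache buffer at length_flushed to the blob.  Partially
 * filled buffers are only written out when a sync request is pending;
 * otherwise the flush is deferred until the buffer fills up.
 * __file_flush_done() re-invokes this function, so flushing continues until
 * the cache is drained.
 */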
2235 static void
2236 __file_flush(void *ctx)
2237 {
2238 	struct spdk_fs_request *req = ctx;
2239 	struct spdk_fs_cb_args *args = &req->args;
2240 	struct spdk_file *file = args->file;
2241 	struct cache_buffer *next;
2242 	uint64_t offset, length, start_lba, num_lba;
2243 	uint32_t lba_size;
2244 
2245 	pthread_spin_lock(&file->lock);
2246 	next = tree_find_buffer(file->tree, file->length_flushed);
2247 	if (next == NULL || next->in_progress ||
2248 	    ((next->bytes_filled < next->buf_size) && TAILQ_EMPTY(&file->sync_requests))) {
2249 		/*
2250 		 * There is either no data to flush, a flush I/O is already in
2251 		 *  progress, or the next buffer is partially filled but there's no
2252 		 *  outstanding request to sync it.
2253 		 * So return immediately - if a flush I/O is in progress we will flush
2254 		 *  more data after that is completed, or a partial buffer will get flushed
2255 		 *  when it is either filled or the file is synced.
2256 		 */
2257 		free_fs_request(req);
2258 		if (next == NULL) {
2259 			/*
2260 			 * For cases where a file's cache was evicted, and then the
2261 			 *  file was later appended, we will write the data directly
2262 			 *  to disk and bypass cache.  So just update length_flushed
2263 			 *  here to reflect that all data was already written to disk.
2264 			 */
2265 			file->length_flushed = file->append_pos;
2266 		}
2267 		pthread_spin_unlock(&file->lock);
2268 		if (next == NULL) {
2269 			/*
2270 			 * There is no data to flush, but we still need to check for any
2271 			 *  outstanding sync requests to make sure metadata gets updated.
2272 			 */
2273 			__check_sync_reqs(file);
2274 		}
2275 		return;
2276 	}
2277 
2278 	offset = next->offset + next->bytes_flushed;
2279 	length = next->bytes_filled - next->bytes_flushed;
2280 	if (length == 0) {
2281 		free_fs_request(req);
2282 		pthread_spin_unlock(&file->lock);
2283 		/*
2284 		 * There is no data to flush, but we still need to check for any
2285 		 *  outstanding sync requests to make sure metadata gets updated.
2286 		 */
2287 		__check_sync_reqs(file);
2288 		return;
2289 	}
2290 	args->op.flush.length = length;
2291 	args->op.flush.cache_buffer = next;
2292 
2293 	__get_page_parameters(file, offset, length, &start_lba, &lba_size, &num_lba);
2294 
2295 	next->in_progress = true;
2296 	BLOBFS_TRACE(file, "offset=0x%jx length=0x%jx page start=0x%jx num=0x%jx\n",
2297 		     offset, length, start_lba, num_lba);
2298 	pthread_spin_unlock(&file->lock);
2299 	spdk_blob_io_write(file->blob, file->fs->sync_target.sync_fs_channel->bs_channel,
2300 			   next->buf + (start_lba * lba_size) - next->offset,
2301 			   start_lba, num_lba, __file_flush_done, req);
2302 }
2303 
2304 static void
2305 __file_extend_done(void *arg, int bserrno)
2306 {
2307 	struct spdk_fs_cb_args *args = arg;
2308 
2309 	__wake_caller(args, bserrno);
2310 }
2311 
2312 static void
2313 __file_extend_resize_cb(void *_args, int bserrno)
2314 {
2315 	struct spdk_fs_cb_args *args = _args;
2316 	struct spdk_file *file = args->file;
2317 
2318 	if (bserrno) {
2319 		__wake_caller(args, bserrno);
2320 		return;
2321 	}
2322 
2323 	spdk_blob_sync_md(file->blob, __file_extend_done, args);
2324 }
2325 
2326 static void
2327 __file_extend_blob(void *_args)
2328 {
2329 	struct spdk_fs_cb_args *args = _args;
2330 	struct spdk_file *file = args->file;
2331 
2332 	spdk_blob_resize(file->blob, args->op.resize.num_clusters, __file_extend_resize_cb, args);
2333 }
2334 
2335 static void
2336 __rw_from_file_done(void *ctx, int bserrno)
2337 {
2338 	struct spdk_fs_request *req = ctx;
2339 
2340 	__wake_caller(&req->args, bserrno);
2341 	free_fs_request(req);
2342 }
2343 
2344 static void
2345 __rw_from_file(void *ctx)
2346 {
2347 	struct spdk_fs_request *req = ctx;
2348 	struct spdk_fs_cb_args *args = &req->args;
2349 	struct spdk_file *file = args->file;
2350 
2351 	if (args->op.rw.is_read) {
2352 		spdk_file_read_async(file, file->fs->sync_target.sync_io_channel, args->iovs[0].iov_base,
2353 				     args->op.rw.offset, (uint64_t)args->iovs[0].iov_len,
2354 				     __rw_from_file_done, req);
2355 	} else {
2356 		spdk_file_write_async(file, file->fs->sync_target.sync_io_channel, args->iovs[0].iov_base,
2357 				      args->op.rw.offset, (uint64_t)args->iovs[0].iov_len,
2358 				      __rw_from_file_done, req);
2359 	}
2360 }
2361 
2362 struct rw_from_file_arg {
2363 	struct spdk_fs_channel *channel;
2364 	int rwerrno;
2365 };
2366 
2367 static int
2368 __send_rw_from_file(struct spdk_file *file, void *payload,
2369 		    uint64_t offset, uint64_t length, bool is_read,
2370 		    struct rw_from_file_arg *arg)
2371 {
2372 	struct spdk_fs_request *req;
2373 	struct spdk_fs_cb_args *args;
2374 
2375 	req = alloc_fs_request_with_iov(arg->channel, 1);
2376 	if (req == NULL) {
2377 		sem_post(&arg->channel->sem);
2378 		return -ENOMEM;
2379 	}
2380 
2381 	args = &req->args;
2382 	args->file = file;
2383 	args->sem = &arg->channel->sem;
2384 	args->iovs[0].iov_base = payload;
2385 	args->iovs[0].iov_len = (size_t)length;
2386 	args->op.rw.offset = offset;
2387 	args->op.rw.is_read = is_read;
2388 	args->rwerrno = &arg->rwerrno;
2389 	file->fs->send_request(__rw_from_file, req);
2390 	return 0;
2391 }
2392 
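/*
 * Synchronous, append-only write: offset must equal the file's current append
 * position.  Data is normally staged in cache buffers and flushed in the
 * background as buffers fill; if no cache buffer is available the data is
 * written directly to the blob, and the blob is resized first whenever the
 * write extends past its current size.
 */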
2393 int
2394 spdk_file_write(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx,
2395 		void *payload, uint64_t offset, uint64_t length)
2396 {
2397 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
2398 	struct spdk_fs_request *flush_req;
2399 	uint64_t rem_length, copy, blob_size, cluster_sz;
2400 	uint32_t cache_buffers_filled = 0;
2401 	uint8_t *cur_payload;
2402 	struct cache_buffer *last;
2403 
2404 	BLOBFS_TRACE_RW(file, "offset=%jx length=%jx\n", offset, length);
2405 
2406 	if (length == 0) {
2407 		return 0;
2408 	}
2409 
2410 	if (offset != file->append_pos) {
2411 		BLOBFS_TRACE(file, " error offset=%jx append_pos=%jx\n", offset, file->append_pos);
2412 		return -EINVAL;
2413 	}
2414 
2415 	pthread_spin_lock(&file->lock);
2416 	file->open_for_writing = true;
2417 
2418 	if ((file->last == NULL) && (file->append_pos % CACHE_BUFFER_SIZE == 0)) {
2419 		cache_append_buffer(file);
2420 	}
2421 
2422 	if (file->last == NULL) {
2423 		struct rw_from_file_arg arg = {};
2424 		int rc;
2425 
2426 		arg.channel = channel;
2427 		arg.rwerrno = 0;
2428 		file->append_pos += length;
2429 		pthread_spin_unlock(&file->lock);
2430 		rc = __send_rw_from_file(file, payload, offset, length, false, &arg);
2431 		if (rc != 0) {
2432 			return rc;
2433 		}
2434 		sem_wait(&channel->sem);
2435 		return arg.rwerrno;
2436 	}
2437 
2438 	blob_size = __file_get_blob_size(file);
2439 
2440 	if ((offset + length) > blob_size) {
2441 		struct spdk_fs_cb_args extend_args = {};
2442 
2443 		cluster_sz = file->fs->bs_opts.cluster_sz;
2444 		extend_args.sem = &channel->sem;
2445 		extend_args.op.resize.num_clusters = __bytes_to_clusters((offset + length), cluster_sz);
2446 		extend_args.file = file;
2447 		BLOBFS_TRACE(file, "start resize to %u clusters\n", extend_args.op.resize.num_clusters);
2448 		pthread_spin_unlock(&file->lock);
2449 		file->fs->send_request(__file_extend_blob, &extend_args);
2450 		sem_wait(&channel->sem);
2451 		if (extend_args.rc) {
2452 			return extend_args.rc;
2453 		}
2454 	}
2455 
2456 	flush_req = alloc_fs_request(channel);
2457 	if (flush_req == NULL) {
2458 		pthread_spin_unlock(&file->lock);
2459 		return -ENOMEM;
2460 	}
2461 
2462 	last = file->last;
2463 	rem_length = length;
2464 	cur_payload = payload;
2465 	while (rem_length > 0) {
2466 		copy = last->buf_size - last->bytes_filled;
2467 		if (copy > rem_length) {
2468 			copy = rem_length;
2469 		}
2470 		BLOBFS_TRACE_RW(file, "  fill offset=%jx length=%jx\n", file->append_pos, copy);
2471 		memcpy(&last->buf[last->bytes_filled], cur_payload, copy);
2472 		file->append_pos += copy;
2473 		if (file->length < file->append_pos) {
2474 			file->length = file->append_pos;
2475 		}
2476 		cur_payload += copy;
2477 		last->bytes_filled += copy;
2478 		rem_length -= copy;
2479 		if (last->bytes_filled == last->buf_size) {
2480 			cache_buffers_filled++;
2481 			last = cache_append_buffer(file);
2482 			if (last == NULL) {
2483 				BLOBFS_TRACE(file, "nomem\n");
2484 				free_fs_request(flush_req);
2485 				pthread_spin_unlock(&file->lock);
2486 				return -ENOMEM;
2487 			}
2488 		}
2489 	}
2490 
2491 	pthread_spin_unlock(&file->lock);
2492 
2493 	if (cache_buffers_filled == 0) {
2494 		free_fs_request(flush_req);
2495 		return 0;
2496 	}
2497 
2498 	flush_req->args.file = file;
2499 	file->fs->send_request(__file_flush, flush_req);
2500 	return 0;
2501 }
2502 
2503 static void
2504 __readahead_done(void *ctx, int bserrno)
2505 {
2506 	struct spdk_fs_request *req = ctx;
2507 	struct spdk_fs_cb_args *args = &req->args;
2508 	struct cache_buffer *cache_buffer = args->op.readahead.cache_buffer;
2509 	struct spdk_file *file = args->file;
2510 
2511 	BLOBFS_TRACE(file, "offset=%jx\n", cache_buffer->offset);
2512 
2513 	pthread_spin_lock(&file->lock);
2514 	cache_buffer->bytes_filled = args->op.readahead.length;
2515 	cache_buffer->bytes_flushed = args->op.readahead.length;
2516 	cache_buffer->in_progress = false;
2517 	pthread_spin_unlock(&file->lock);
2518 
2519 	free_fs_request(req);
2520 }
2521 
2522 static void
2523 __readahead(void *ctx)
2524 {
2525 	struct spdk_fs_request *req = ctx;
2526 	struct spdk_fs_cb_args *args = &req->args;
2527 	struct spdk_file *file = args->file;
2528 	uint64_t offset, length, start_lba, num_lba;
2529 	uint32_t lba_size;
2530 
2531 	offset = args->op.readahead.offset;
2532 	length = args->op.readahead.length;
2533 	assert(length > 0);
2534 
2535 	__get_page_parameters(file, offset, length, &start_lba, &lba_size, &num_lba);
2536 
2537 	BLOBFS_TRACE(file, "offset=%jx length=%jx page start=%jx num=%jx\n",
2538 		     offset, length, start_lba, num_lba);
2539 	spdk_blob_io_read(file->blob, file->fs->sync_target.sync_fs_channel->bs_channel,
2540 			  args->op.readahead.cache_buffer->buf,
2541 			  start_lba, num_lba, __readahead_done, req);
2542 }
2543 
2544 static uint64_t
2545 __next_cache_buffer_offset(uint64_t offset)
2546 {
2547 	return (offset + CACHE_BUFFER_SIZE) & ~(CACHE_TREE_LEVEL_MASK(0));
2548 }
2549 
2550 static void
2551 check_readahead(struct spdk_file *file, uint64_t offset,
2552 		struct spdk_fs_channel *channel)
2553 {
2554 	struct spdk_fs_request *req;
2555 	struct spdk_fs_cb_args *args;
2556 
2557 	offset = __next_cache_buffer_offset(offset);
2558 	if (tree_find_buffer(file->tree, offset) != NULL || file->length <= offset) {
2559 		return;
2560 	}
2561 
2562 	req = alloc_fs_request(channel);
2563 	if (req == NULL) {
2564 		return;
2565 	}
2566 	args = &req->args;
2567 
2568 	BLOBFS_TRACE(file, "offset=%jx\n", offset);
2569 
2570 	args->file = file;
2571 	args->op.readahead.offset = offset;
2572 	args->op.readahead.cache_buffer = cache_insert_buffer(file, offset);
2573 	if (!args->op.readahead.cache_buffer) {
2574 		BLOBFS_TRACE(file, "Cannot allocate buf for offset=%jx\n", offset);
2575 		free_fs_request(req);
2576 		return;
2577 	}
2578 
2579 	args->op.readahead.cache_buffer->in_progress = true;
2580 	if (file->length < (offset + CACHE_BUFFER_SIZE)) {
2581 		args->op.readahead.length = file->length & (CACHE_BUFFER_SIZE - 1);
2582 	} else {
2583 		args->op.readahead.length = CACHE_BUFFER_SIZE;
2584 	}
2585 	file->fs->send_request(__readahead, req);
2586 }
2587 
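/*
 * Synchronous read.  Data is copied out of cache buffers when possible and
 * fetched with blocking reads otherwise; once the caller has read
 * CACHE_READAHEAD_THRESHOLD bytes or more sequentially, the upcoming cache
 * buffers are prefetched in the background.  Returns the number of bytes
 * read, or a negative errno on failure.
 */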
2588 int64_t
2589 spdk_file_read(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx,
2590 	       void *payload, uint64_t offset, uint64_t length)
2591 {
2592 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
2593 	uint64_t final_offset, final_length;
2594 	uint32_t sub_reads = 0;
2595 	struct cache_buffer *buf;
2596 	uint64_t read_len;
2597 	struct rw_from_file_arg arg = {};
2598 
2599 	pthread_spin_lock(&file->lock);
2600 
2601 	BLOBFS_TRACE_RW(file, "offset=%ju length=%ju\n", offset, length);
2602 
2603 	file->open_for_writing = false;
2604 
2605 	if (length == 0 || offset >= file->append_pos) {
2606 		pthread_spin_unlock(&file->lock);
2607 		return 0;
2608 	}
2609 
2610 	if (offset + length > file->append_pos) {
2611 		length = file->append_pos - offset;
2612 	}
2613 
2614 	if (offset != file->next_seq_offset) {
2615 		file->seq_byte_count = 0;
2616 	}
2617 	file->seq_byte_count += length;
2618 	file->next_seq_offset = offset + length;
2619 	if (file->seq_byte_count >= CACHE_READAHEAD_THRESHOLD) {
2620 		check_readahead(file, offset, channel);
2621 		check_readahead(file, offset + CACHE_BUFFER_SIZE, channel);
2622 	}
2623 
2624 	arg.channel = channel;
2625 	arg.rwerrno = 0;
2626 	final_length = 0;
2627 	final_offset = offset + length;
2628 	while (offset < final_offset) {
2629 		int ret = 0;
2630 		length = NEXT_CACHE_BUFFER_OFFSET(offset) - offset;
2631 		if (length > (final_offset - offset)) {
2632 			length = final_offset - offset;
2633 		}
2634 
2635 		buf = tree_find_filled_buffer(file->tree, offset);
2636 		if (buf == NULL) {
2637 			pthread_spin_unlock(&file->lock);
2638 			ret = __send_rw_from_file(file, payload, offset, length, true, &arg);
2639 			pthread_spin_lock(&file->lock);
2640 			if (ret == 0) {
2641 				sub_reads++;
2642 			}
2643 		} else {
2644 			read_len = length;
2645 			if ((offset + length) > (buf->offset + buf->bytes_filled)) {
2646 				read_len = buf->offset + buf->bytes_filled - offset;
2647 			}
2648 			BLOBFS_TRACE(file, "read %p offset=%ju length=%ju\n", payload, offset, read_len);
2649 			memcpy(payload, &buf->buf[offset - buf->offset], read_len);
2650 			if ((offset + read_len) % CACHE_BUFFER_SIZE == 0) {
2651 				tree_remove_buffer(file->tree, buf);
2652 				if (file->tree->present_mask == 0) {
2653 					spdk_thread_send_msg(g_cache_pool_thread, _remove_file_from_cache_pool, file);
2654 				}
2655 			}
2656 		}
2657 
2658 		if (ret == 0) {
2659 			final_length += length;
2660 		} else {
2661 			arg.rwerrno = ret;
2662 			break;
2663 		}
2664 		payload += length;
2665 		offset += length;
2666 	}
2667 	pthread_spin_unlock(&file->lock);
2668 	while (sub_reads > 0) {
2669 		sem_wait(&channel->sem);
2670 		sub_reads--;
2671 	}
2672 	if (arg.rwerrno == 0) {
2673 		return final_length;
2674 	} else {
2675 		return arg.rwerrno;
2676 	}
2677 }
2678 
2679 static void
2680 _file_sync(struct spdk_file *file, struct spdk_fs_channel *channel,
2681 	   spdk_file_op_complete cb_fn, void *cb_arg)
2682 {
2683 	struct spdk_fs_request *sync_req;
2684 	struct spdk_fs_request *flush_req;
2685 	struct spdk_fs_cb_args *sync_args;
2686 	struct spdk_fs_cb_args *flush_args;
2687 
2688 	BLOBFS_TRACE(file, "offset=%jx\n", file->append_pos);
2689 
2690 	pthread_spin_lock(&file->lock);
2691 	if (file->append_pos <= file->length_xattr) {
2692 		BLOBFS_TRACE(file, "done - file already synced\n");
2693 		pthread_spin_unlock(&file->lock);
2694 		cb_fn(cb_arg, 0);
2695 		return;
2696 	}
2697 
2698 	sync_req = alloc_fs_request(channel);
2699 	if (!sync_req) {
2700 		SPDK_ERRLOG("Cannot allocate sync req for file=%s\n", file->name);
2701 		pthread_spin_unlock(&file->lock);
2702 		cb_fn(cb_arg, -ENOMEM);
2703 		return;
2704 	}
2705 	sync_args = &sync_req->args;
2706 
2707 	flush_req = alloc_fs_request(channel);
2708 	if (!flush_req) {
2709 		SPDK_ERRLOG("Cannot allocate flush req for file=%s\n", file->name);
2710 		free_fs_request(sync_req);
2711 		pthread_spin_unlock(&file->lock);
2712 		cb_fn(cb_arg, -ENOMEM);
2713 		return;
2714 	}
2715 	flush_args = &flush_req->args;
2716 
2717 	sync_args->file = file;
2718 	sync_args->fn.file_op = cb_fn;
2719 	sync_args->arg = cb_arg;
2720 	sync_args->op.sync.offset = file->append_pos;
2721 	sync_args->op.sync.xattr_in_progress = false;
2722 	TAILQ_INSERT_TAIL(&file->sync_requests, sync_req, args.op.sync.tailq);
2723 	pthread_spin_unlock(&file->lock);
2724 
2725 	flush_args->file = file;
2726 	channel->send_request(__file_flush, flush_req);
2727 }
2728 
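/*
 * Synchronously flush the file's cached data and persist its length in the
 * blob metadata.  This is a no-op if everything up to the current append
 * position has already been synced.
 */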
2729 int
2730 spdk_file_sync(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx)
2731 {
2732 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
2733 	struct spdk_fs_cb_args args = {};
2734 
2735 	args.sem = &channel->sem;
2736 	_file_sync(file, channel, __wake_caller, &args);
2737 	sem_wait(&channel->sem);
2738 
2739 	return args.rc;
2740 }
2741 
2742 void
2743 spdk_file_sync_async(struct spdk_file *file, struct spdk_io_channel *_channel,
2744 		     spdk_file_op_complete cb_fn, void *cb_arg)
2745 {
2746 	struct spdk_fs_channel *channel = spdk_io_channel_get_ctx(_channel);
2747 
2748 	_file_sync(file, channel, cb_fn, cb_arg);
2749 }
2750 
2751 void
2752 spdk_file_set_priority(struct spdk_file *file, uint32_t priority)
2753 {
2754 	BLOBFS_TRACE(file, "priority=%u\n", priority);
2755 	file->priority = priority;
2756 
2757 }
2758 
2759 /*
2760  * Close routines
2761  */
2762 
2763 static void
2764 __file_close_async_done(void *ctx, int bserrno)
2765 {
2766 	struct spdk_fs_request *req = ctx;
2767 	struct spdk_fs_cb_args *args = &req->args;
2768 	struct spdk_file *file = args->file;
2769 
2770 	spdk_trace_record(TRACE_BLOBFS_CLOSE, 0, 0, 0, file->name);
2771 
2772 	if (file->is_deleted) {
2773 		spdk_fs_delete_file_async(file->fs, file->name, blob_delete_cb, ctx);
2774 		return;
2775 	}
2776 
2777 	args->fn.file_op(args->arg, bserrno);
2778 	free_fs_request(req);
2779 }
2780 
2781 static void
2782 __file_close_async(struct spdk_file *file, struct spdk_fs_request *req)
2783 {
2784 	struct spdk_blob *blob;
2785 
2786 	pthread_spin_lock(&file->lock);
2787 	if (file->ref_count == 0) {
2788 		pthread_spin_unlock(&file->lock);
2789 		__file_close_async_done(req, -EBADF);
2790 		return;
2791 	}
2792 
2793 	file->ref_count--;
2794 	if (file->ref_count > 0) {
2795 		pthread_spin_unlock(&file->lock);
2796 		req->args.fn.file_op(req->args.arg, 0);
2797 		free_fs_request(req);
2798 		return;
2799 	}
2800 
2801 	pthread_spin_unlock(&file->lock);
2802 
2803 	blob = file->blob;
2804 	file->blob = NULL;
2805 	spdk_blob_close(blob, __file_close_async_done, req);
2806 }
2807 
2808 static void
2809 __file_close_async__sync_done(void *arg, int fserrno)
2810 {
2811 	struct spdk_fs_request *req = arg;
2812 	struct spdk_fs_cb_args *args = &req->args;
2813 
2814 	__file_close_async(args->file, req);
2815 }
2816 
2817 void
2818 spdk_file_close_async(struct spdk_file *file, spdk_file_op_complete cb_fn, void *cb_arg)
2819 {
2820 	struct spdk_fs_request *req;
2821 	struct spdk_fs_cb_args *args;
2822 
2823 	req = alloc_fs_request(file->fs->md_target.md_fs_channel);
2824 	if (req == NULL) {
2825 		SPDK_ERRLOG("Cannot allocate close async req for file=%s\n", file->name);
2826 		cb_fn(cb_arg, -ENOMEM);
2827 		return;
2828 	}
2829 
2830 	args = &req->args;
2831 	args->file = file;
2832 	args->fn.file_op = cb_fn;
2833 	args->arg = cb_arg;
2834 
2835 	spdk_file_sync_async(file, file->fs->md_target.md_io_channel, __file_close_async__sync_done, req);
2836 }
2837 
2838 static void
2839 __file_close(void *arg)
2840 {
2841 	struct spdk_fs_request *req = arg;
2842 	struct spdk_fs_cb_args *args = &req->args;
2843 	struct spdk_file *file = args->file;
2844 
2845 	__file_close_async(file, req);
2846 }
2847 
2848 int
2849 spdk_file_close(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx)
2850 {
2851 	struct spdk_fs_channel *channel = (struct spdk_fs_channel *)ctx;
2852 	struct spdk_fs_request *req;
2853 	struct spdk_fs_cb_args *args;
2854 
2855 	req = alloc_fs_request(channel);
2856 	if (req == NULL) {
2857 		SPDK_ERRLOG("Cannot allocate close req for file=%s\n", file->name);
2858 		return -ENOMEM;
2859 	}
2860 
2861 	args = &req->args;
2862 
2863 	spdk_file_sync(file, ctx);
2864 	BLOBFS_TRACE(file, "name=%s\n", file->name);
2865 	args->file = file;
2866 	args->sem = &channel->sem;
2867 	args->fn.file_op = __wake_caller;
2868 	args->arg = args;
2869 	channel->send_request(__file_close, req);
2870 	sem_wait(&channel->sem);
2871 
2872 	return args->rc;
2873 }
2874 
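/*
 * Copy the file's unique identifier (its blob ID) into the caller-provided
 * buffer.  Returns the number of bytes copied, or -EINVAL if the buffer is
 * smaller than sizeof(spdk_blob_id).
 */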
2875 int
2876 spdk_file_get_id(struct spdk_file *file, void *id, size_t size)
2877 {
2878 	if (size < sizeof(spdk_blob_id)) {
2879 		return -EINVAL;
2880 	}
2881 
2882 	memcpy(id, &file->blobid, sizeof(spdk_blob_id));
2883 
2884 	return sizeof(spdk_blob_id);
2885 }
2886 
2887 static void
2888 _file_free(void *ctx)
2889 {
2890 	struct spdk_file *file = ctx;
2891 
2892 	TAILQ_REMOVE(&g_caches, file, cache_tailq);
2893 
2894 	free(file->name);
2895 	free(file->tree);
2896 	free(file);
2897 }
2898 
2899 static void
2900 file_free(struct spdk_file *file)
2901 {
2902 	BLOBFS_TRACE(file, "free=%s\n", file->name);
2903 	pthread_spin_lock(&file->lock);
2904 	if (file->tree->present_mask == 0) {
2905 		pthread_spin_unlock(&file->lock);
2906 		free(file->name);
2907 		free(file->tree);
2908 		free(file);
2909 		return;
2910 	}
2911 
2912 	tree_free_buffers(file->tree);
2913 	assert(file->tree->present_mask == 0);
2914 	spdk_thread_send_msg(g_cache_pool_thread, _file_free, file);
2915 	pthread_spin_unlock(&file->lock);
2916 }
2917 
2918 SPDK_LOG_REGISTER_COMPONENT(blobfs)
2919 SPDK_LOG_REGISTER_COMPONENT(blobfs_rw)
2920