xref: /spdk/module/bdev/nvme/bdev_nvme.h (revision 92d1e663afe5048334744edf8d98e5b9a54a794a)
1488570ebSJim Harris /*   SPDX-License-Identifier: BSD-3-Clause
2a6dbe372Spaul luse  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
338520df4SEvgeniy Kochetov  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
40568555aSEvgeniy Kochetov  *   Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
52796687dSParameswaran Krishnamurthy  *   Copyright (c) 2022 Dell Inc, or its subsidiaries. All rights reserved.
607fe6a43SSeth Howell  */
707fe6a43SSeth Howell 
807fe6a43SSeth Howell #ifndef SPDK_BDEV_NVME_H
907fe6a43SSeth Howell #define SPDK_BDEV_NVME_H
1007fe6a43SSeth Howell 
1107fe6a43SSeth Howell #include "spdk/stdinc.h"
1207fe6a43SSeth Howell 
1307fe6a43SSeth Howell #include "spdk/queue.h"
1407fe6a43SSeth Howell #include "spdk/nvme.h"
1507fe6a43SSeth Howell #include "spdk/bdev_module.h"
16dd98a248SMehakjot Singh Sidhu #include "spdk/module/bdev/nvme.h"
172796687dSParameswaran Krishnamurthy #include "spdk/jsonrpc.h"
1807fe6a43SSeth Howell 
/* Global list of all NVMe bdev controllers created by this module. */
TAILQ_HEAD(nvme_bdev_ctrlrs, nvme_bdev_ctrlr);
extern struct nvme_bdev_ctrlrs g_nvme_bdev_ctrlrs;
/* Protects module-global state such as the controller list
 * (NOTE(review): exact coverage is defined in bdev_nvme.c — confirm there). */
extern pthread_mutex_t g_bdev_nvme_mutex;
/* Set when module finish has started. */
extern bool g_bdev_nvme_module_finish;
/* Thread on which the module was initialized. */
extern struct spdk_thread *g_bdev_nvme_init_thread;

#define NVME_MAX_CONTROLLERS 1024

/* Completion callbacks for starting and stopping discovery services. */
typedef void (*spdk_bdev_nvme_start_discovery_fn)(void *ctx, int status);
typedef void (*spdk_bdev_nvme_stop_discovery_fn)(void *ctx);
29d409971bSBen Walker 
/*
 * Context for an asynchronous controller attach. Tracks the probe, the
 * bdev names created for each namespace, and the user completion callback.
 */
struct nvme_async_probe_ctx {
	struct spdk_nvme_probe_ctx *probe_ctx;
	/* Base name used for the controller and its namespace bdevs. */
	char *base_name;
	/* Output array of created bdev names; capacity is max_bdevs,
	 * reported_bdevs entries are filled in. */
	const char **names;
	uint32_t max_bdevs;
	uint32_t reported_bdevs;
	/* Poller that drives probe_ctx until the attach completes. */
	struct spdk_poller *poller;
	struct spdk_nvme_transport_id trid;
	/* Module-level controller options. */
	struct spdk_bdev_nvme_ctrlr_opts bdev_opts;
	/* NVMe driver controller options. */
	struct spdk_nvme_ctrlr_opts drv_opts;
	/* User callback invoked when the attach finishes. */
	spdk_bdev_nvme_create_cb cb_fn;
	void *cb_ctx;
	/* Count of namespace populations still in flight. */
	uint32_t populates_in_progress;
	bool ctrlr_attached;
	bool probe_done;
	bool namespaces_populated;
};
47d409971bSBen Walker 
/*
 * Per-namespace state for one active namespace of an nvme_ctrlr.
 * Linked both into the owning bdev's namespace list (tailq) and into the
 * controller's namespace tree (node).
 */
struct nvme_ns {
	/* Namespace ID (NSID). */
	uint32_t			id;
	/* Underlying NVMe driver namespace handle. */
	struct spdk_nvme_ns		*ns;
	/* Owning controller. */
	struct nvme_ctrlr		*ctrlr;
	/* bdev exposed on top of this namespace. */
	struct nvme_bdev		*bdev;
	/* ANA (Asymmetric Namespace Access) group and reported state. */
	uint32_t			ana_group_id;
	enum spdk_nvme_ana_state	ana_state;
	bool				ana_state_updating;
	bool				ana_transition_timedout;
	/* Set while this namespace is being depopulated. */
	bool				depopulating;
	/* Timer bounding an ANA state transition (ANATT). */
	struct spdk_poller		*anatt_timer;
	/* Attach context if this namespace is being populated as part of
	 * an asynchronous controller create. */
	struct nvme_async_probe_ctx	*probe_ctx;
	TAILQ_ENTRY(nvme_ns)		tailq;
	RB_ENTRY(nvme_ns)		node;

	/**
	 * record io path stat before destroyed. Allocation of stat is
	 * decided by option io_path_stat of RPC
	 * bdev_nvme_set_options
	 */
	struct spdk_bdev_io_stat	*stat;
};
70d409971bSBen Walker 
71d409971bSBen Walker struct nvme_bdev_io;
7275f1d648SShuhei Matsumoto struct nvme_bdev_ctrlr;
73c19ec843SShuhei Matsumoto struct nvme_bdev;
7432697257SShuhei Matsumoto struct nvme_io_path;
75adace9fbSShuhei Matsumoto struct nvme_ctrlr_channel_iter;
76adace9fbSShuhei Matsumoto struct nvme_bdev_channel_iter;
77d409971bSBen Walker 
/*
 * One path (transport ID + host ID) to a controller. A controller keeps a
 * list of these for failover; last_failed_tsc records when the path last
 * failed, for failover ordering decisions.
 */
struct nvme_path_id {
	struct spdk_nvme_transport_id		trid;
	struct spdk_nvme_host_id		hostid;
	TAILQ_ENTRY(nvme_path_id)		link;
	uint64_t				last_failed_tsc;
};
84d409971bSBen Walker 
/* Completion callback for a controller operation (see enum nvme_ctrlr_op). */
typedef void (*bdev_nvme_ctrlr_op_cb)(void *cb_arg, int rc);
/* Invoked when a controller finishes disconnecting (e.g. during reset). */
typedef void (*nvme_ctrlr_disconnected_cb)(struct nvme_ctrlr *nvme_ctrlr);
87d409971bSBen Walker 
/*
 * Module-level state for one attached NVMe controller (one path).
 * Multiple nvme_ctrlr instances belonging to the same subsystem are grouped
 * under a single nvme_bdev_ctrlr for multipath.
 */
struct nvme_ctrlr {
	/* Underlying NVMe driver controller handle. */
	struct spdk_nvme_ctrlr			*ctrlr;
	/* Path currently in use; all known paths are kept in trids. */
	struct nvme_path_id			*active_path_id;
	/* Reference count. */
	int					ref;

	/* Controller state flags. */
	uint32_t				resetting : 1;
	uint32_t				reconnect_is_delayed : 1;
	uint32_t				in_failover : 1;
	uint32_t				pending_failover : 1;
	uint32_t				fast_io_fail_timedout : 1;
	uint32_t				destruct : 1;
	uint32_t				ana_log_page_updating : 1;
	uint32_t				io_path_cache_clearing : 1;
	uint32_t				dont_retry : 1;
	uint32_t				disabled : 1;

	struct spdk_bdev_nvme_ctrlr_opts	opts;

	/* Tree of active namespaces (see nvme_ns.node; presumably keyed
	 * by NSID — confirm against bdev_nvme.c). */
	RB_HEAD(nvme_ns_tree, nvme_ns)		namespaces;

	/* Opal (self-encrypting drive) handle, if supported. */
	struct spdk_opal_dev			*opal_dev;

	struct spdk_poller			*adminq_timer_poller;
	/* Thread this controller is managed from. */
	struct spdk_thread			*thread;
	struct spdk_interrupt			*intr;

	/* Callback for the in-flight controller operation (reset/enable/
	 * disable), if any. */
	bdev_nvme_ctrlr_op_cb			ctrlr_op_cb_fn;
	void					*ctrlr_op_cb_arg;
	/* Poller used to check for reset/detach completion */
	struct spdk_poller			*reset_detach_poller;
	struct spdk_nvme_detach_ctx		*detach_ctx;

	/* Reset bookkeeping: start timestamp and delayed-reconnect timer. */
	uint64_t				reset_start_tsc;
	struct spdk_poller			*reconnect_delay_timer;

	/* Invoked once the controller is disconnected during reset. */
	nvme_ctrlr_disconnected_cb		disconnected_cb;

	/* I/Os queued while a reset is in progress. */
	TAILQ_HEAD(, nvme_bdev_io)		pending_resets;

	/** linked list pointer for device list */
	TAILQ_ENTRY(nvme_ctrlr)			tailq;
	/* Grouping controller for multipath; NULL semantics defined in
	 * bdev_nvme.c. */
	struct nvme_bdev_ctrlr			*nbdev_ctrlr;

	/* All known paths (transport IDs) to this controller. */
	TAILQ_HEAD(nvme_paths, nvme_path_id)	trids;

	/* ANA log page buffer and a scratch copy of one group descriptor. */
	uint32_t				max_ana_log_page_size;
	struct spdk_nvme_ana_page		*ana_log_page;
	struct spdk_nvme_ana_group_descriptor	*copied_ana_desc;

	struct nvme_async_probe_ctx		*probe_ctx;
	/* TLS pre-shared key and DH-HMAC-CHAP host/controller keys. */
	struct spdk_key				*psk;
	struct spdk_key				*dhchap_key;
	struct spdk_key				*dhchap_ctrlr_key;

	pthread_mutex_t				mutex;
};
149d409971bSBen Walker 
/*
 * Named grouping of one or more nvme_ctrlr paths to the same subsystem
 * (multipath), together with the bdevs exposed on top of them.
 */
struct nvme_bdev_ctrlr {
	char				*name;
	TAILQ_HEAD(, nvme_ctrlr)	ctrlrs;
	TAILQ_HEAD(, nvme_bdev)		bdevs;
	TAILQ_ENTRY(nvme_bdev_ctrlr)	tailq;
};
15675f1d648SShuhei Matsumoto 
/*
 * Per-bdev NVMe completion error counters.
 * status_type is indexed by the 3-bit Status Code Type (8 values);
 * status is indexed by [status code type][8-bit status code]
 * (NOTE(review): only the first 4 SCTs are tracked here — confirm why
 * in bdev_nvme.c).
 */
struct nvme_error_stat {
	uint32_t status_type[8];
	uint32_t status[4][256];
};
161e33ae4a6SShuhei Matsumoto 
/*
 * A bdev exposed for one namespace (NSID) of an nvme_bdev_ctrlr. With
 * multipath, nvme_ns_list holds one nvme_ns per underlying controller path.
 */
struct nvme_bdev {
	struct spdk_bdev			disk;
	/* Namespace ID this bdev represents. */
	uint32_t				nsid;
	struct nvme_bdev_ctrlr			*nbdev_ctrlr;
	pthread_mutex_t				mutex;
	int					ref;
	/* Multipath policy/selector and round-robin batch size. */
	enum spdk_bdev_nvme_multipath_policy	mp_policy;
	enum spdk_bdev_nvme_multipath_selector	mp_selector;
	uint32_t				rr_min_io;
	TAILQ_HEAD(, nvme_ns)			nvme_ns_list;
	/* True if the backing drive supports Opal. */
	bool					opal;
	TAILQ_ENTRY(nvme_bdev)			tailq;
	/* Error counters; allocation controlled by bdev_nvme_set_options. */
	struct nvme_error_stat			*err_stat;
};
176d409971bSBen Walker 
/*
 * Wrapper around one I/O queue pair of a controller, tied to the poll
 * group that polls it and the controller channel that owns it.
 */
struct nvme_qpair {
	struct nvme_ctrlr		*ctrlr;
	struct spdk_nvme_qpair		*qpair;
	struct nvme_poll_group		*group;
	struct nvme_ctrlr_channel	*ctrlr_ch;

	/* The following is used to update io_path cache of nvme_bdev_channels. */
	TAILQ_HEAD(, nvme_io_path)	io_path_list;

	/* Link in nvme_poll_group.qpair_list. */
	TAILQ_ENTRY(nvme_qpair)		tailq;
};
188d409971bSBen Walker 
/*
 * Per-thread I/O channel state for an nvme_ctrlr.
 */
struct nvme_ctrlr_channel {
	struct nvme_qpair		*qpair;

	/* Iterator parked here while a reset walks the channels. */
	struct nvme_ctrlr_channel_iter	*reset_iter;
	/* Poller driving qpair (re)connection. */
	struct spdk_poller		*connect_poller;
};
19517d28590SShuhei Matsumoto 
/*
 * One selectable I/O path for a bdev channel: a (namespace, qpair) pair.
 * Linked both from the bdev channel (stailq) and from the qpair (tailq).
 */
struct nvme_io_path {
	struct nvme_ns			*nvme_ns;
	struct nvme_qpair		*qpair;
	STAILQ_ENTRY(nvme_io_path)	stailq;

	/* The following are used to update io_path cache of the nvme_bdev_channel. */
	struct nvme_bdev_channel	*nbdev_ch;
	TAILQ_ENTRY(nvme_io_path)	tailq;

	/* allocation of stat is decided by option io_path_stat of RPC bdev_nvme_set_options */
	struct spdk_bdev_io_stat	*stat;
};
208c19ec843SShuhei Matsumoto 
/*
 * Per-thread I/O channel state for an nvme_bdev: the set of usable I/O
 * paths, the multipath selection policy, and the retry queue.
 */
struct nvme_bdev_channel {
	/* Cached preferred path; cleared when path state changes. */
	struct nvme_io_path			*current_io_path;
	enum spdk_bdev_nvme_multipath_policy	mp_policy;
	enum spdk_bdev_nvme_multipath_selector	mp_selector;
	/* Round-robin: switch paths after rr_min_io I/Os (rr_counter). */
	uint32_t				rr_min_io;
	uint32_t				rr_counter;
	STAILQ_HEAD(, nvme_io_path)		io_path_list;
	/* I/Os waiting to be retried, drained by retry_io_poller. */
	TAILQ_HEAD(retry_io_head, nvme_bdev_io)	retry_io_list;
	struct spdk_poller			*retry_io_poller;
	bool					resetting;
};
220d409971bSBen Walker 
/*
 * Per-thread poll group that polls all nvme_qpairs on that thread.
 * The spin-stat fields measure time spent polling when
 * collect_spin_stat is enabled.
 */
struct nvme_poll_group {
	struct spdk_nvme_poll_group		*group;
	/* Channel to the accel framework (e.g. for data digest offload —
	 * NOTE(review): confirm use in bdev_nvme.c). */
	struct spdk_io_channel			*accel_channel;
	struct spdk_poller			*poller;
	bool					collect_spin_stat;
	uint64_t				spin_ticks;
	uint64_t				start_ticks;
	uint64_t				end_ticks;
	TAILQ_HEAD(, nvme_qpair)		qpair_list;
	struct spdk_interrupt			*intr;
};
23287e9ae40SShuhei Matsumoto 
/* Write a JSON description of the given I/O path. */
void nvme_io_path_info_json(struct spdk_json_write_ctx *w, struct nvme_io_path *io_path);

/* Look up an attached nvme_ctrlr by name. */
struct nvme_ctrlr *nvme_ctrlr_get_by_name(const char *name);
236d409971bSBen Walker 
/* Per-channel callback for nvme_ctrlr_for_each_channel(). Each invocation
 * is expected to call nvme_ctrlr_for_each_channel_continue() to advance
 * the iteration (spdk for_each_channel pattern). */
typedef void (*nvme_ctrlr_for_each_channel_msg)(struct nvme_ctrlr_channel_iter *iter,
		struct nvme_ctrlr *nvme_ctrlr,
		struct nvme_ctrlr_channel *ctrlr_ch,
		void *ctx);

/* Completion callback invoked after all channels have been visited. */
typedef void (*nvme_ctrlr_for_each_channel_done)(struct nvme_ctrlr *nvme_ctrlr,
		void *ctx, int status);

/* Asynchronously invoke fn on every channel of nvme_ctrlr, then cpl. */
void nvme_ctrlr_for_each_channel(struct nvme_ctrlr *nvme_ctrlr,
				 nvme_ctrlr_for_each_channel_msg fn, void *ctx,
				 nvme_ctrlr_for_each_channel_done cpl);

/* Advance a channel iteration started by nvme_ctrlr_for_each_channel(). */
void nvme_ctrlr_for_each_channel_continue(struct nvme_ctrlr_channel_iter *iter,
		int status);
251adace9fbSShuhei Matsumoto 

/* Per-channel callback for nvme_bdev_for_each_channel(). Each invocation
 * is expected to call nvme_bdev_for_each_channel_continue() to advance
 * the iteration (spdk for_each_channel pattern). */
typedef void (*nvme_bdev_for_each_channel_msg)(struct nvme_bdev_channel_iter *iter,
		struct nvme_bdev *nbdev,
		struct nvme_bdev_channel *nbdev_ch,
		void *ctx);

/* Completion callback invoked after all channels have been visited. */
typedef void (*nvme_bdev_for_each_channel_done)(struct nvme_bdev *nbdev,
		void *ctx, int status);

/* Asynchronously invoke fn on every channel of nbdev, then cpl. */
void nvme_bdev_for_each_channel(struct nvme_bdev *nbdev,
				nvme_bdev_for_each_channel_msg fn, void *ctx,
				nvme_bdev_for_each_channel_done cpl);

/* Advance a channel iteration started by nvme_bdev_for_each_channel(). */
void nvme_bdev_for_each_channel_continue(struct nvme_bdev_channel_iter *iter,
		int status);
2674e7627a3SShuhei Matsumoto 
/* Find the controller with the given NVMe controller ID (CNTLID) within
 * an NVMe bdev controller group. */
struct nvme_ctrlr *nvme_bdev_ctrlr_get_ctrlr_by_id(struct nvme_bdev_ctrlr *nbdev_ctrlr,
		uint16_t cntlid);

/* Look up an NVMe bdev controller group by name. */
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr_get_by_name(const char *name);

/* Invoke fn(nbdev_ctrlr, ctx) for every NVMe bdev controller. */
typedef void (*nvme_bdev_ctrlr_for_each_fn)(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx);

void nvme_bdev_ctrlr_for_each(nvme_bdev_ctrlr_for_each_fn fn, void *ctx);

/* Write a JSON description of a transport ID. */
void nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid,
			      struct spdk_json_write_ctx *w);

/* Write a JSON description of a controller. */
void nvme_ctrlr_info_json(struct spdk_json_write_ctx *w, struct nvme_ctrlr *nvme_ctrlr);

/* Namespace lookup and active-namespace iteration for a controller. */
struct nvme_ns *nvme_ctrlr_get_ns(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid);
struct nvme_ns *nvme_ctrlr_get_first_active_ns(struct nvme_ctrlr *nvme_ctrlr);
struct nvme_ns *nvme_ctrlr_get_next_active_ns(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *ns);

/* Return the driver qpair backing a controller I/O channel. */
struct spdk_nvme_qpair *bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch);
/* Enable/disable PCIe hotplug monitoring with the given poll period. */
int bdev_nvme_set_hotplug(bool enabled, uint64_t period_us, spdk_msg_fn cb, void *cb_ctx);
28807fe6a43SSeth Howell 
/* Start a discovery service that attaches controllers reported by the
 * discovery controller at trid; cb_fn is invoked with the start status. */
int bdev_nvme_start_discovery(struct spdk_nvme_transport_id *trid, const char *base_name,
			      struct spdk_nvme_ctrlr_opts *drv_opts, struct spdk_bdev_nvme_ctrlr_opts *bdev_opts,
			      uint64_t timeout, bool from_mdns,
			      spdk_bdev_nvme_start_discovery_fn cb_fn, void *cb_ctx);
/* Stop the named discovery service; cb_fn is invoked on completion. */
int bdev_nvme_stop_discovery(const char *name, spdk_bdev_nvme_stop_discovery_fn cb_fn,
			     void *cb_ctx);
/* Write JSON describing active discovery services. */
void bdev_nvme_get_discovery_info(struct spdk_json_write_ctx *w);

/* mDNS-based discovery: start/stop a service and report/persist its state. */
int bdev_nvme_start_mdns_discovery(const char *base_name,
				   const char *svcname,
				   struct spdk_nvme_ctrlr_opts *drv_opts,
				   struct spdk_bdev_nvme_ctrlr_opts *bdev_opts);
int bdev_nvme_stop_mdns_discovery(const char *name);
void bdev_nvme_get_mdns_discovery_info(struct spdk_jsonrpc_request *request);
void bdev_nvme_mdns_discovery_config_json(struct spdk_json_write_ctx *w);
3042796687dSParameswaran Krishnamurthy 
/* Completion callback for bdev_nvme_set_keys(). */
typedef void (*bdev_nvme_set_keys_cb)(void *ctx, int status);

/* Set (or clear, when NULL) the DH-HMAC-CHAP host/controller key names for
 * the named controller; cb_fn is invoked when the update completes. */
int bdev_nvme_set_keys(const char *name, const char *dhchap_key, const char *dhchap_ctrlr_key,
		       bdev_nvme_set_keys_cb cb_fn, void *cb_ctx);

/* Return the driver controller handle backing the given bdev. */
struct spdk_nvme_ctrlr *bdev_nvme_get_ctrlr(struct spdk_bdev *bdev);
31107fe6a43SSeth Howell 
/* Completion callback for bdev_nvme_delete(). */
typedef void (*bdev_nvme_delete_done_fn)(void *ctx, int rc);

/**
 * Delete NVMe controller with all bdevs on top of it, or delete the specified path
 * if there is any alternative path. Requires to pass name of NVMe controller.
 *
 * \param name NVMe controller name
 * \param path_id The specified path to remove (optional)
 * \param delete_done Callback function on delete complete (optional)
 * \param delete_done_ctx Context passed to callback (optional)
 * \return zero on success,
 *		-EINVAL on wrong parameters or
 *		-ENODEV if controller is not found or
 *		-ENOMEM on no memory
 */
int bdev_nvme_delete(const char *name, const struct nvme_path_id *path_id,
		     bdev_nvme_delete_done_fn delete_done, void *delete_done_ctx);
32907fe6a43SSeth Howell 
/* Operations accepted by nvme_ctrlr_op_rpc() and nvme_bdev_ctrlr_op_rpc(). */
enum nvme_ctrlr_op {
	NVME_CTRLR_OP_RESET = 1,
	NVME_CTRLR_OP_ENABLE,
	NVME_CTRLR_OP_DISABLE,
};
33535774c71SShuhei Matsumoto 
336153f03cfSJonathan Teh /**
3376f2e8fa5SShuhei Matsumoto  * Perform specified operation on an NVMe controller.
338153f03cfSJonathan Teh  *
339ba5ae93dSShuhei Matsumoto  * NOTE: The callback function is always called after this function returns except for
340ba5ae93dSShuhei Matsumoto  * out of memory cases.
341ba5ae93dSShuhei Matsumoto  *
342aefc9cc4SShuhei Matsumoto  * \param nvme_ctrlr The specified NVMe controller to operate
34335774c71SShuhei Matsumoto  * \param op Operation code
344aefc9cc4SShuhei Matsumoto  * \param cb_fn Function to be called back after operation completes
345153f03cfSJonathan Teh  * \param cb_arg Argument for callback function
346153f03cfSJonathan Teh  */
347ba5ae93dSShuhei Matsumoto void nvme_ctrlr_op_rpc(struct nvme_ctrlr *nvme_ctrlr, enum nvme_ctrlr_op op,
34835774c71SShuhei Matsumoto 		       bdev_nvme_ctrlr_op_cb cb_fn, void *cb_arg);
349153f03cfSJonathan Teh 
3506f2e8fa5SShuhei Matsumoto /**
3516f2e8fa5SShuhei Matsumoto  * Perform specified operation on all NVMe controllers in an NVMe bdev controller.
3526f2e8fa5SShuhei Matsumoto  *
3536f2e8fa5SShuhei Matsumoto  * NOTE: The callback function is always called after this function returns except for
3546f2e8fa5SShuhei Matsumoto  * out of memory cases.
3556f2e8fa5SShuhei Matsumoto  *
3566f2e8fa5SShuhei Matsumoto  * \param nbdev_ctrlr The specified NVMe bdev controller to operate
3576f2e8fa5SShuhei Matsumoto  * \param op Operation code
3586f2e8fa5SShuhei Matsumoto  * \param cb_fn Function to be called back after operation completes
3596f2e8fa5SShuhei Matsumoto  * \param cb_arg Argument for callback function
3606f2e8fa5SShuhei Matsumoto  */
3616f2e8fa5SShuhei Matsumoto void nvme_bdev_ctrlr_op_rpc(struct nvme_bdev_ctrlr *nbdev_ctrlr, enum nvme_ctrlr_op op,
3626f2e8fa5SShuhei Matsumoto 			    bdev_nvme_ctrlr_op_cb cb_fn, void *cb_arg);
3636f2e8fa5SShuhei Matsumoto 
36422b77a3cSShuhei Matsumoto typedef void (*bdev_nvme_set_preferred_path_cb)(void *cb_arg, int rc);
36522b77a3cSShuhei Matsumoto 
36622b77a3cSShuhei Matsumoto /**
36722b77a3cSShuhei Matsumoto  * Set the preferred I/O path for an NVMe bdev in multipath mode.
36822b77a3cSShuhei Matsumoto  *
36922b77a3cSShuhei Matsumoto  * NOTE: This function does not support NVMe bdevs in failover mode.
37022b77a3cSShuhei Matsumoto  *
37122b77a3cSShuhei Matsumoto  * \param name NVMe bdev name
37222b77a3cSShuhei Matsumoto  * \param cntlid NVMe-oF controller ID
37322b77a3cSShuhei Matsumoto  * \param cb_fn Function to be called back after completion.
37422b77a3cSShuhei Matsumoto  * \param cb_arg Argument for callback function.
37522b77a3cSShuhei Matsumoto  */
37622b77a3cSShuhei Matsumoto void bdev_nvme_set_preferred_path(const char *name, uint16_t cntlid,
37722b77a3cSShuhei Matsumoto 				  bdev_nvme_set_preferred_path_cb cb_fn, void *cb_arg);
37822b77a3cSShuhei Matsumoto 
37907fe6a43SSeth Howell #endif /* SPDK_BDEV_NVME_H */
380