/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   All rights reserved.
 */

#include "nvme_internal.h"
#include "nvme_io_msg.h"

#define SPDK_NVME_MSG_IO_PROCESS_SIZE 8

/**
 * Send a message to the controller's I/O message queue.
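 *
 * The message is placed on the controller's external_io_msgs ring and is
 * executed later, when nvme_io_msg_process() is called on the controller.
 *
 * \param ctrlr NVMe controller to send the message to.
 * \param nsid Namespace ID the message applies to.
 * \param fn Function to execute when the message is processed.
 * \param arg Argument passed to fn.
 *
 * \return 0 on success, negated errno on failure.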
 */
int
nvme_io_msg_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_io_msg_fn fn,
		 void *arg)
{
	int rc;
	struct spdk_nvme_io_msg *io;

	/* Protect the requests ring against concurrent producers */
	pthread_mutex_lock(&ctrlr->external_io_msgs_lock);

	io = (struct spdk_nvme_io_msg *)calloc(1, sizeof(struct spdk_nvme_io_msg));
	if (!io) {
		SPDK_ERRLOG("IO msg allocation failed.\n");
		pthread_mutex_unlock(&ctrlr->external_io_msgs_lock);
		return -ENOMEM;
	}

	io->ctrlr = ctrlr;
	io->nsid = nsid;
	io->fn = fn;
	io->arg = arg;

	rc = spdk_ring_enqueue(ctrlr->external_io_msgs, (void **)&io, 1, NULL);
	if (rc != 1) {
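		/* The ring is created with 65536 entries (see nvme_io_msg_ctrlr_register()),
		 * so enqueue is only expected to fail if the ring is completely full. */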
		assert(false);
		free(io);
		pthread_mutex_unlock(&ctrlr->external_io_msgs_lock);
		return -ENOMEM;
	}

	pthread_mutex_unlock(&ctrlr->external_io_msgs_lock);

	return 0;
}
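
/*
 * Usage sketch (illustrative only, not part of this file; the callback name
 * and body below are hypothetical): any thread may defer work to the thread
 * that polls the controller, e.g.:
 *
 *	static void
 *	example_io_msg_fn(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
 *	{
 *		... runs on the thread that calls nvme_io_msg_process() ...
 *	}
 *
 *	nvme_io_msg_send(ctrlr, nsid, example_io_msg_fn, NULL);
 */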
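
/**
 * Execute any pending I/O messages for the controller and poll its I/O
 * message qpair for completions. Only does work in the primary process.
 *
 * \return the number of messages processed, or 0 if there was nothing to do.
 */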
int
nvme_io_msg_process(struct spdk_nvme_ctrlr *ctrlr)
{
	int i;
	int count;
	struct spdk_nvme_io_msg *io;
	void *requests[SPDK_NVME_MSG_IO_PROCESS_SIZE];

	if (!spdk_process_is_primary()) {
		return 0;
	}

	if (!ctrlr->external_io_msgs || !ctrlr->external_io_msgs_qpair || ctrlr->prepare_for_reset) {
		/* Not ready or pending reset */
		return 0;
	}

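	/* A secondary process requested a producer update via
	 * nvme_io_msg_ctrlr_update(); perform it now on the primary. */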
	if (ctrlr->needs_io_msg_update) {
		ctrlr->needs_io_msg_update = false;
		nvme_io_msg_ctrlr_update(ctrlr);
	}

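	/* Reap completions for I/O previously submitted on the message qpair. */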
	spdk_nvme_qpair_process_completions(ctrlr->external_io_msgs_qpair, 0);

	count = spdk_ring_dequeue(ctrlr->external_io_msgs, requests,
				  SPDK_NVME_MSG_IO_PROCESS_SIZE);
	if (count == 0) {
		return 0;
	}

	for (i = 0; i < count; i++) {
		io = requests[i];

		assert(io != NULL);

		io->fn(io->ctrlr, io->nsid, io->arg);
		free(io);
	}

	return count;
}
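
/*
 * Usage sketch (illustrative only, not part of this file): nvme_io_msg_process()
 * is intended to be polled periodically by the thread that owns the controller,
 * e.g. from a hypothetical poller such as:
 *
 *	static int
 *	example_io_msg_poll(void *arg)
 *	{
 *		struct spdk_nvme_ctrlr *ctrlr = arg;
 *
 *		return nvme_io_msg_process(ctrlr);
 *	}
 */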

static bool
nvme_io_msg_is_producer_registered(struct spdk_nvme_ctrlr *ctrlr,
				   struct nvme_io_msg_producer *io_msg_producer)
{
	struct nvme_io_msg_producer *tmp;

	STAILQ_FOREACH(tmp, &ctrlr->io_producers, link) {
		if (tmp == io_msg_producer) {
			return true;
		}
	}
	return false;
}
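
/**
 * Register an I/O message producer on the controller. The first registration
 * allocates the message ring and a dedicated I/O qpair; later registrations
 * only append the producer to the list.
 *
 * \return 0 on success, -EINVAL if io_msg_producer is NULL, -EEXIST if it is
 * already registered, -ENOMEM on allocation failure.
 */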
int
nvme_io_msg_ctrlr_register(struct spdk_nvme_ctrlr *ctrlr,
			   struct nvme_io_msg_producer *io_msg_producer)
{
	if (io_msg_producer == NULL) {
		SPDK_ERRLOG("io_msg_producer cannot be NULL\n");
		return -EINVAL;
	}

	nvme_ctrlr_lock(ctrlr);
	if (nvme_io_msg_is_producer_registered(ctrlr, io_msg_producer)) {
		nvme_ctrlr_unlock(ctrlr);
		return -EEXIST;
	}

	if (!STAILQ_EMPTY(&ctrlr->io_producers) || ctrlr->is_resetting) {
		/* IO messaging has already started (there are registered producers),
		 * or the controller is resetting - just add the producer to the list. */
		STAILQ_INSERT_TAIL(&ctrlr->io_producers, io_msg_producer, link);
		nvme_ctrlr_unlock(ctrlr);
		return 0;
	}

	pthread_mutex_init(&ctrlr->external_io_msgs_lock, NULL);

	/* Initialize the message ring and qpair for the controller */
	ctrlr->external_io_msgs = spdk_ring_create(SPDK_RING_TYPE_MP_SC, 65536, SPDK_ENV_NUMA_ID_ANY);
	if (!ctrlr->external_io_msgs) {
		SPDK_ERRLOG("Unable to allocate memory for message ring\n");
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	ctrlr->external_io_msgs_qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
	if (ctrlr->external_io_msgs_qpair == NULL) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
		spdk_ring_free(ctrlr->external_io_msgs);
		ctrlr->external_io_msgs = NULL;
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	STAILQ_INSERT_TAIL(&ctrlr->io_producers, io_msg_producer, link);
	nvme_ctrlr_unlock(ctrlr);

	return 0;
}
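
/*
 * Usage sketch (illustrative only, not part of this file; the producer and
 * callback names are hypothetical): a producer supplies update() and stop()
 * callbacks, invoked by nvme_io_msg_ctrlr_update() and
 * nvme_io_msg_ctrlr_detach() respectively:
 *
 *	static struct nvme_io_msg_producer example_producer = {
 *		.name = "example",
 *		.update = example_update,
 *		.stop = example_stop,
 *	};
 *
 *	if (nvme_io_msg_ctrlr_register(ctrlr, &example_producer) != 0) {
 *		... handle error ...
 *	}
 */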
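
/**
 * Call the update() callback of every registered producer. When called from a
 * secondary process, the update is deferred: needs_io_msg_update is set and
 * the primary process performs the update on its next nvme_io_msg_process()
 * call.
 */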
void
nvme_io_msg_ctrlr_update(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_io_msg_producer *io_msg_producer;

	if (!spdk_process_is_primary()) {
		ctrlr->needs_io_msg_update = true;
		return;
	}

	/* Update all producers */
	nvme_ctrlr_lock(ctrlr);
	STAILQ_FOREACH(io_msg_producer, &ctrlr->io_producers, link) {
		io_msg_producer->update(ctrlr);
	}
	nvme_ctrlr_unlock(ctrlr);
}
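
/**
 * Stop and remove all registered producers, then release the message ring,
 * the I/O message qpair, and the message lock. No-op in secondary processes.
 */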
void
nvme_io_msg_ctrlr_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_io_msg_producer *io_msg_producer, *tmp;

	if (!spdk_process_is_primary()) {
		return;
	}

	/* Stop all producers */
	STAILQ_FOREACH_SAFE(io_msg_producer, &ctrlr->io_producers, link, tmp) {
		io_msg_producer->stop(ctrlr);
		STAILQ_REMOVE(&ctrlr->io_producers, io_msg_producer, nvme_io_msg_producer, link);
	}

	if (ctrlr->external_io_msgs) {
		spdk_ring_free(ctrlr->external_io_msgs);
		ctrlr->external_io_msgs = NULL;
	}

	if (ctrlr->external_io_msgs_qpair) {
		spdk_nvme_ctrlr_free_io_qpair(ctrlr->external_io_msgs_qpair);
		ctrlr->external_io_msgs_qpair = NULL;
	}

	pthread_mutex_destroy(&ctrlr->external_io_msgs_lock);
}
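
/**
 * Unregister a producer from the controller. When the last producer is
 * removed, all I/O messaging resources are released via
 * nvme_io_msg_ctrlr_detach().
 */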
void
nvme_io_msg_ctrlr_unregister(struct spdk_nvme_ctrlr *ctrlr,
			     struct nvme_io_msg_producer *io_msg_producer)
{
	assert(io_msg_producer != NULL);

	nvme_ctrlr_lock(ctrlr);
	if (!nvme_io_msg_is_producer_registered(ctrlr, io_msg_producer)) {
		nvme_ctrlr_unlock(ctrlr);
		return;
	}

	STAILQ_REMOVE(&ctrlr->io_producers, io_msg_producer, nvme_io_msg_producer, link);
	if (STAILQ_EMPTY(&ctrlr->io_producers)) {
		nvme_io_msg_ctrlr_detach(ctrlr);
	}
	nvme_ctrlr_unlock(ctrlr);
}