xref: /spdk/lib/nvme/nvme_poll_group.c (revision 32999ab917f67af61872f868585fd3d78ad6fb8a)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 
36 #include "nvme_internal.h"
37 
/*
 * Allocate a new poll group and capture the caller-provided accel function
 * table, copying only the fields the caller's (possibly older/smaller)
 * table actually contains.  'ctx' is an opaque user pointer returned later
 * by spdk_nvme_poll_group_get_ctx().  Returns NULL on allocation failure.
 */
struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	/* Default to the full current table size; overwritten below if the
	 * caller supplied its own (version-indicating) size. */
	group->accel_fn_table.table_size = sizeof(struct spdk_nvme_accel_fn_table);
	if (table && table->table_size != 0) {
		group->accel_fn_table.table_size = table->table_size;
/* Copy 'field' only if it lies entirely within the caller's table, i.e.
 * the caller was compiled against a library version that has it. */
#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_accel_fn_table, field) + sizeof(table->field) <= table->table_size) { \
		group->accel_fn_table.field = table->field; \
	} \

		SET_FIELD(submit_accel_crc32c);
		/* Do not remove this statement, you should always update this statement when you adding a new field,
		 * and do not forget to add the SET_FIELD statement for your added field. */
		SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_accel_fn_table) == 16, "Incorrect size");

#undef SET_FIELD
	}

	group->ctx = ctx;
	STAILQ_INIT(&group->tgroups);

	return group;
}
69 
70 struct spdk_nvme_poll_group *
71 spdk_nvme_qpair_get_optimal_poll_group(struct spdk_nvme_qpair *qpair)
72 {
73 	struct spdk_nvme_transport_poll_group *tgroup;
74 
75 	tgroup = nvme_transport_qpair_get_optimal_poll_group(qpair->transport, qpair);
76 
77 	if (tgroup == NULL) {
78 		return NULL;
79 	}
80 
81 	return tgroup->group;
82 }
83 
84 int
85 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
86 {
87 	struct spdk_nvme_transport_poll_group *tgroup;
88 	const struct spdk_nvme_transport *transport;
89 
90 	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
91 		return -EINVAL;
92 	}
93 
94 	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
95 		if (tgroup->transport == qpair->transport) {
96 			break;
97 		}
98 	}
99 
100 	/* See if a new transport has been added (dlopen style) and we need to update the poll group */
101 	if (!tgroup) {
102 		transport = nvme_get_first_transport();
103 		while (transport != NULL) {
104 			if (transport == qpair->transport) {
105 				tgroup = nvme_transport_poll_group_create(transport);
106 				if (tgroup == NULL) {
107 					return -ENOMEM;
108 				}
109 				tgroup->group = group;
110 				STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
111 				break;
112 			}
113 			transport = nvme_get_next_transport(transport);
114 		}
115 	}
116 
117 	return tgroup ? nvme_transport_poll_group_add(tgroup, qpair) : -ENODEV;
118 }
119 
120 int
121 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
122 {
123 	struct spdk_nvme_transport_poll_group *tgroup;
124 
125 	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
126 		if (tgroup->transport == qpair->transport) {
127 			return nvme_transport_poll_group_remove(tgroup, qpair);
128 		}
129 	}
130 
131 	return -ENODEV;
132 }
133 
/* Internal helper: connect a qpair within its poll group.  Thin
 * pass-through to the transport layer. */
int
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = nvme_transport_poll_group_connect_qpair(qpair);

	return rc;
}
139 
/* Internal helper: disconnect a qpair within its poll group.  Thin
 * pass-through to the transport layer. */
int
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = nvme_transport_poll_group_disconnect_qpair(qpair);

	return rc;
}
145 
146 int64_t
147 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
148 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
149 {
150 	struct spdk_nvme_transport_poll_group *tgroup;
151 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
152 
153 	if (disconnected_qpair_cb == NULL) {
154 		return -EINVAL;
155 	}
156 
157 	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
158 		local_completions = nvme_transport_poll_group_process_completions(tgroup, completions_per_qpair,
159 				    disconnected_qpair_cb);
160 		if (local_completions < 0 && error_reason == 0) {
161 			error_reason = local_completions;
162 		} else {
163 			num_completions += local_completions;
164 			/* Just to be safe */
165 			assert(num_completions >= 0);
166 		}
167 	}
168 
169 	return error_reason ? error_reason : num_completions;
170 }
171 
172 void *
173 spdk_nvme_poll_group_get_ctx(struct spdk_nvme_poll_group *group)
174 {
175 	return group->ctx;
176 }
177 
178 int
179 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
180 {
181 	struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;
182 
183 	STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
184 		STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
185 		if (nvme_transport_poll_group_destroy(tgroup) != 0) {
186 			STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
187 			return -EBUSY;
188 		}
189 
190 	}
191 
192 	free(group);
193 
194 	return 0;
195 }
196 
197 int
198 spdk_nvme_poll_group_get_stats(struct spdk_nvme_poll_group *group,
199 			       struct spdk_nvme_poll_group_stat **stats)
200 {
201 	struct spdk_nvme_transport_poll_group *tgroup;
202 	struct spdk_nvme_poll_group_stat *result;
203 	uint32_t transports_count = 0;
204 	/* Not all transports used by this poll group may support statistics reporting */
205 	uint32_t reported_stats_count = 0;
206 	int rc;
207 
208 	assert(group);
209 	assert(stats);
210 
211 	result = calloc(1, sizeof(*result));
212 	if (!result) {
213 		SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
214 		return -ENOMEM;
215 	}
216 
217 	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
218 		transports_count++;
219 	}
220 
221 	result->transport_stat = calloc(transports_count, sizeof(*result->transport_stat));
222 	if (!result->transport_stat) {
223 		SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
224 		free(result);
225 		return -ENOMEM;
226 	}
227 
228 	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
229 		rc = nvme_transport_poll_group_get_stats(tgroup, &result->transport_stat[reported_stats_count]);
230 		if (rc == 0) {
231 			reported_stats_count++;
232 		}
233 	}
234 
235 	if (reported_stats_count == 0) {
236 		free(result->transport_stat);
237 		free(result);
238 		SPDK_DEBUGLOG(nvme, "No transport statistics available\n");
239 		return -ENOTSUP;
240 	}
241 
242 	result->num_transports = reported_stats_count;
243 	*stats = result;
244 
245 	return 0;
246 }
247 
248 void
249 spdk_nvme_poll_group_free_stats(struct spdk_nvme_poll_group *group,
250 				struct spdk_nvme_poll_group_stat *stat)
251 {
252 	struct spdk_nvme_transport_poll_group *tgroup;
253 	uint32_t i;
254 	uint32_t freed_stats __attribute__((unused)) = 0;
255 
256 	assert(group);
257 	assert(stat);
258 
259 	for (i = 0; i < stat->num_transports; i++) {
260 		STAILQ_FOREACH(tgroup, &group->tgroups, link) {
261 			if (nvme_transport_get_trtype(tgroup->transport) == stat->transport_stat[i]->trtype) {
262 				nvme_transport_poll_group_free_stats(tgroup, stat->transport_stat[i]);
263 				freed_stats++;
264 				break;
265 			}
266 		}
267 	}
268 
269 	assert(freed_stats == stat->num_transports);
270 
271 	free(stat->transport_stat);
272 	free(stat);
273 }
274