xref: /spdk/lib/nvme/nvme_transport.c (revision 7192849ed24874f3e9cc31e8a33a9b32c49b9506)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops	ops;
	TAILQ_ENTRY(spdk_nvme_transport)	link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

struct spdk_nvme_transport g_spdk_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
int g_current_transport_index = 0;

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This means
 * that a lot of admin-related transport calls have to call nvme_get_transport
 * to know which functions to call.
 * In the I/O path, we have the ability to store the transport struct in the I/O
 * qpairs to avoid taking a performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}

void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
		/* Return so that release builds (with assert compiled out) do not
		 * register a duplicate entry. */
		return;
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register new NVMe transport.\n");
		assert(false);
		return;
	}

	new_transport = &g_spdk_transports[g_current_transport_index++];
	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}

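/*
 * Illustrative sketch (not part of this file): a transport module normally
 * registers itself at load time via the SPDK_NVME_TRANSPORT_REGISTER()
 * constructor macro rather than calling spdk_nvme_transport_register()
 * directly. The names and ops values below are hypothetical.
 *
 *	static const struct spdk_nvme_transport_ops my_ops = {
 *		.name = "MYTRANSPORT",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = my_ctrlr_construct,
 *		...remaining callbacks...
 *	};
 *
 *	SPDK_NVME_TRANSPORT_REGISTER(my, &my_ops);
 */
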
struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

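/*
 * Controller-scoped wrappers. As explained in the comment above
 * nvme_get_transport(), the transport pointer cannot be cached on the
 * controller itself, so each of these calls re-resolves the transport by
 * name from the controller's transport ID string.
 */
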
int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

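/*
 * The CMB callbacks are optional: a controller memory buffer is a PCIe
 * feature, so transports that do not expose one simply leave these ops
 * NULL, and the wrappers below return -ENOTSUP, NULL, or 0 accordingly.
 */
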
int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}

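/*
 * I/O qpairs cache the transport pointer at creation time so that the
 * per-I/O dispatch functions further below can skip the name lookup on the
 * hot path.
 */
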
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return qpair->transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint8_t transport_failure_reason;
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	return rc;

err:
	/* If the qpair was unable to reconnect, restore the original failure reason. */
	qpair->transport_failure_reason = transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
	return rc;
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);
	if (qpair->poll_group) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);

	nvme_qpair_abort_reqs(qpair, 0);
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}

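/*
 * Per-qpair dispatch: I/O qpairs take the fast path through the cached
 * qpair->transport pointer; admin qpairs (shared across processes for PCIe
 * multiprocess support) must resolve the transport by name on every call.
 */
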
void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	const struct spdk_nvme_transport *transport;

	assert(dnr <= 1);
	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

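/*
 * Poll group plumbing. A qpair added to a transport poll group starts on the
 * group's disconnected list and is moved to the connected list once its
 * transport-level connect completes. A caller inside the library would do
 * roughly the following (illustrative sketch, hypothetical names):
 *
 *	group = nvme_transport_poll_group_create(transport);
 *	nvme_transport_poll_group_add(group, qpair);
 *	...connect the qpair, then periodically:...
 *	nvme_transport_poll_group_process_completions(group, 0, disconnected_cb);
 */
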
struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
	}

	return group;
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	if (rc == 0) {
		if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
			STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		} else if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		} else {
			return -ENOENT;
		}

		qpair->poll_group = NULL;
		qpair->poll_group_tailq_head = NULL;
	}

	return rc;
}

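/*
 * Deferred deletion: if a qpair is freed while the group is inside the
 * transport's completion processing (in_completion_context), it is only
 * flagged (delete_after_completion_context) and counted; the actual
 * spdk_nvme_ctrlr_free_io_qpair() happens below, after the transport
 * callback returns, so the qpair lists are not torn apart mid-callback.
 */
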
int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t rc;

	tgroup->in_completion_context = true;
	rc = tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
	tgroup->in_completion_context = false;

	if (spdk_unlikely(tgroup->num_qpairs_to_delete > 0)) {
		/* Deleted qpairs are more likely to be in the disconnected qpairs list.
		 * Use the safe iterator, since freeing a qpair pulls it out of the
		 * list we are walking.
		 */
		STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
			if (spdk_unlikely(qpair->delete_after_completion_context)) {
				spdk_nvme_ctrlr_free_io_qpair(qpair);
				if (--tgroup->num_qpairs_to_delete == 0) {
					return rc;
				}
			}
		}

		STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
			if (spdk_unlikely(qpair->delete_after_completion_context)) {
				spdk_nvme_ctrlr_free_io_qpair(qpair);
				if (--tgroup->num_qpairs_to_delete == 0) {
					return rc;
				}
			}
		}

		/* Just in case. */
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Mismatch between the number of qpairs flagged for deletion and the qpairs in the poll group.\n");
		tgroup->num_qpairs_to_delete = 0;
	}

	return rc;
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
			STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
			/* -EINPROGRESS indicates that a call has already been made to this function.
			 * It just keeps us from segfaulting on a double removal/insert.
			 */
		} else if (rc == -EINPROGRESS) {
			rc = 0;
		}

		return rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}