/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/** \file
 * Thread
 */

#ifndef SPDK_THREAD_H_
#define SPDK_THREAD_H_

#ifdef __linux__
#include <sys/epoll.h>
#endif

#include "spdk/fd_group.h"
#include "spdk/stdinc.h"
#include "spdk/assert.h"
#include "spdk/cpuset.h"
#include "spdk/env.h"
#include "spdk/util.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Pollers should always return a value of this type
 * indicating whether they did real work or not.
 */
enum spdk_thread_poller_rc {
	SPDK_POLLER_IDLE,
	SPDK_POLLER_BUSY,
};

/**
 * A stackless, lightweight thread.
 */
struct spdk_thread;

/**
 * A function repeatedly called on the same spdk_thread.
 */
struct spdk_poller;

struct spdk_io_channel_iter;

/**
 * A function that is called each time a new thread is created.
 * The implementor of this function should frequently call
 * spdk_thread_poll() on the thread provided.
 *
 * \param thread The new spdk_thread.
 */
typedef int (*spdk_new_thread_fn)(struct spdk_thread *thread);

/**
 * SPDK thread operation type.
 */
enum spdk_thread_op {
	/* Called each time a new thread is created. The implementor of this operation
	 * should frequently call spdk_thread_poll() on the thread provided.
	 */
	SPDK_THREAD_OP_NEW,

	/* Called when an SPDK thread needs to be rescheduled (e.g., when the cpumask of
	 * the SPDK thread is updated).
	 */
	SPDK_THREAD_OP_RESCHED,
};

/**
 * Function to be called for an SPDK thread operation.
 */
typedef int (*spdk_thread_op_fn)(struct spdk_thread *thread, enum spdk_thread_op op);

/**
 * Function to check whether the SPDK thread operation is supported.
 */
typedef bool (*spdk_thread_op_supported_fn)(enum spdk_thread_op op);

/**
 * A function that will be called on the target thread.
 *
 * \param ctx Context passed as arg to spdk_thread_pass_msg().
 */
typedef void (*spdk_msg_fn)(void *ctx);

/**
 * Function to be called to pass a message to a thread.
 *
 * \param fn Callback function for a thread.
 * \param ctx Context passed to fn.
 * \param thread_ctx Context for the thread.
 */
typedef void (*spdk_thread_pass_msg)(spdk_msg_fn fn, void *ctx,
				     void *thread_ctx);

/**
 * Callback function for a poller.
 *
 * \param ctx Context passed as arg to spdk_poller_register().
 * \return a value of type `enum spdk_thread_poller_rc` (e.g. SPDK_POLLER_IDLE
 * if no work was done or SPDK_POLLER_BUSY if work was done).
 */
typedef int (*spdk_poller_fn)(void *ctx);
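/*
 * Illustrative sketch (not part of the API): a poller callback reports whether it
 * found any work to do by returning one of the `enum spdk_thread_poller_rc` values.
 * The function and structure names below are hypothetical.
 *
 *	static int
 *	my_queue_poll(void *ctx)
 *	{
 *		struct my_queue *q = ctx;
 *
 *		if (my_queue_process_completions(q) == 0) {
 *			return SPDK_POLLER_IDLE;
 *		}
 *		return SPDK_POLLER_BUSY;
 *	}
 */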
/**
 * Callback function to set a poller into interrupt mode or back to poll mode.
 *
 * \param poller Poller to set into interrupt or poll mode.
 * \param cb_arg Argument passed to the callback function.
 * \param interrupt_mode true to set interrupt mode, false to set poll mode.
 */
typedef void (*spdk_poller_set_interrupt_mode_cb)(struct spdk_poller *poller, void *cb_arg,
		bool interrupt_mode);

/**
 * Mark that the poller is capable of entering interrupt mode.
 *
 * When the poller's set-interrupt-mode callback is registered, the callback is
 * executed immediately if the poller's spdk_thread is already in interrupt mode.
 *
 * \param poller The poller to register the callback with.
 * \param cb_fn Callback function called when the poller must transition into or
 * out of interrupt mode.
 * \param cb_arg Argument passed to the callback function.
 */
void spdk_poller_register_interrupt(struct spdk_poller *poller,
				    spdk_poller_set_interrupt_mode_cb cb_fn,
				    void *cb_arg);

/**
 * I/O channel creation callback.
 *
 * \param io_device I/O device associated with this channel.
 * \param ctx_buf Context for the I/O device.
 */
typedef int (*spdk_io_channel_create_cb)(void *io_device, void *ctx_buf);

/**
 * I/O channel destruction callback.
 *
 * \param io_device I/O device associated with this channel.
 * \param ctx_buf Context for the I/O device.
 */
typedef void (*spdk_io_channel_destroy_cb)(void *io_device, void *ctx_buf);

/**
 * I/O device unregister callback.
 *
 * \param io_device Unregistered I/O device.
 */
typedef void (*spdk_io_device_unregister_cb)(void *io_device);

/**
 * Called on the appropriate thread for each channel associated with io_device.
 *
 * \param i I/O channel iterator.
 */
typedef void (*spdk_channel_msg)(struct spdk_io_channel_iter *i);

/**
 * spdk_for_each_channel() callback.
 *
 * \param i I/O channel iterator.
 * \param status 0 if it completed successfully, or negative errno if it failed.
 */
typedef void (*spdk_channel_for_each_cpl)(struct spdk_io_channel_iter *i, int status);

#define SPDK_IO_CHANNEL_STRUCT_SIZE 96

/**
 * Message memory pool size definitions
 */
#define SPDK_MSG_MEMPOOL_CACHE_SIZE 1024
/* Power of 2 minus 1 is optimal for memory consumption */
#define SPDK_DEFAULT_MSG_MEMPOOL_SIZE (262144 - 1)

/**
 * Initialize the threading library. Must be called once prior to allocating any threads.
 *
 * \param new_thread_fn Called each time a new SPDK thread is created. The implementor
 * is expected to frequently call spdk_thread_poll() on the provided thread.
 * \param ctx_sz For each thread allocated, an additional region of memory of
 * size ctx_sz will also be allocated, for use by the thread scheduler. A pointer
 * to this region may be obtained by calling spdk_thread_get_ctx().
 *
 * \return 0 on success. Negated errno on failure.
 */
int spdk_thread_lib_init(spdk_new_thread_fn new_thread_fn, size_t ctx_sz);

/**
 * Initialize the threading library. Must be called once prior to allocating any threads.
 *
 * Either both thread_op_fn and thread_op_supported_fn must be specified, or
 * neither of them.
 *
 * \param thread_op_fn Called for SPDK thread operation.
 * \param thread_op_supported_fn Called to check whether the SPDK thread operation is supported.
 * \param ctx_sz For each thread allocated, an additional region of memory of
 * size ctx_sz will also be allocated, for use by the thread scheduler. A pointer
 * to this region may be obtained by calling spdk_thread_get_ctx().
 * \param msg_mempool_size Size of the allocated spdk_msg_mempool.
 *
 * \return 0 on success. Negated errno on failure.
 */
int spdk_thread_lib_init_ext(spdk_thread_op_fn thread_op_fn,
			     spdk_thread_op_supported_fn thread_op_supported_fn,
			     size_t ctx_sz, size_t msg_mempool_size);

/**
 * Release all resources associated with this library.
 */
void spdk_thread_lib_fini(void);
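/*
 * Illustrative sketch (not part of the API): a framework initializes the library once
 * before creating any threads and tears it down after all threads have been destroyed.
 * The callback name below is hypothetical.
 *
 *	static int
 *	framework_new_thread(struct spdk_thread *thread)
 *	{
 *		// Hand the thread to a framework-specific scheduler that will
 *		// call spdk_thread_poll() on it frequently.
 *		return 0;
 *	}
 *
 *	rc = spdk_thread_lib_init(framework_new_thread, 0);
 *	...
 *	spdk_thread_lib_fini();
 */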
/**
 * Creates a new SPDK thread object.
 *
 * Note that the first thread created via spdk_thread_create() will be designated as
 * the app thread. Other SPDK libraries may place restrictions on certain APIs to
 * only be called in the context of this app thread.
 *
 * \param name Human-readable name for the thread; can be retrieved with spdk_thread_get_name().
 * The string is copied, so the pointed-to data only needs to be valid during the
 * spdk_thread_create() call. May be NULL to specify no name.
 * \param cpumask Optional mask of CPU cores on which to schedule this thread. This is only
 * a suggestion to the scheduler. The value is copied, so cpumask may be released when
 * this function returns. May be NULL if no mask is required.
 *
 * \return a pointer to the allocated thread on success or NULL on failure.
 */
struct spdk_thread *spdk_thread_create(const char *name, const struct spdk_cpuset *cpumask);

/**
 * Return the app thread.
 *
 * The app thread is the first thread created using spdk_thread_create().
 *
 * \return a pointer to the app thread, or NULL if no thread has been created yet.
 */
struct spdk_thread *spdk_thread_get_app_thread(void);

/**
 * Check if the specified spdk_thread is the app thread.
 *
 * \param thread The thread to check. If NULL, check the current spdk_thread.
 * \return true if the specified spdk_thread is the app thread, false otherwise.
 */
bool spdk_thread_is_app_thread(struct spdk_thread *thread);

/**
 * Force the current system thread to act as if executing the given SPDK thread.
 *
 * \param thread The thread to set.
 */
void spdk_set_thread(struct spdk_thread *thread);

/**
 * Bind or unbind an spdk_thread to its current CPU core.
 *
 * If the spdk_thread is bound, it cannot be rescheduled to other CPU cores until it is unbound.
 *
 * \param thread The thread to bind or unbind.
 * \param bind true to bind, false to unbind.
 */
void spdk_thread_bind(struct spdk_thread *thread, bool bind);

/**
 * Returns whether the thread is bound to its current CPU core.
 *
 * \param thread The thread to query.
 *
 * \return true if bound, false otherwise.
 */
bool spdk_thread_is_bound(struct spdk_thread *thread);

/**
 * Mark the thread as exited, failing all future spdk_thread_send_msg(),
 * spdk_poller_register(), and spdk_get_io_channel() calls. May only be called
 * within an spdk poller or message.
 *
 * All I/O channel references associated with the thread must be released
 * using spdk_put_io_channel(), and all active pollers associated with the thread
 * should be unregistered using spdk_poller_unregister(), prior to calling
 * this function. This function completes that processing. The completion can
 * be queried by spdk_thread_is_exited().
 *
 * Note that this function must not be called on the app thread until after it
 * has been called for all other threads.
 *
 * \param thread The thread to exit.
 *
 * \return always 0. (The return value is deprecated but kept for ABI compatibility.)
 */
int spdk_thread_exit(struct spdk_thread *thread);

/**
 * Returns whether the thread is marked as exited.
 *
 * A thread is exited only after it has spdk_thread_exit() called on it, and
 * it has been polled until any outstanding operations targeting this
 * thread have completed. This may include poller unregistrations, io channel
 * unregistrations, or outstanding spdk_thread_send_msg calls.
 *
 * \param thread The thread to query.
 *
 * \return true if marked as exited, false otherwise.
 */
bool spdk_thread_is_exited(struct spdk_thread *thread);

/**
 * Returns whether the thread is still running.
 *
 * A thread is considered running until spdk_thread_exit() has been called on it.
 *
 * \param thread The thread to query.
 *
 * \return true if still running, false otherwise.
 */
bool spdk_thread_is_running(struct spdk_thread *thread);

/**
 * Destroy a thread, releasing all of its resources. May only be called
 * on a thread previously marked as exited.
 *
 * \param thread The thread to destroy.
 */
void spdk_thread_destroy(struct spdk_thread *thread);

/**
 * Return a pointer to this thread's context.
 *
 * \param thread The thread on which to get the context.
 *
 * \return a pointer to the per-thread context, or NULL if there is
 * no per-thread context.
 */
void *spdk_thread_get_ctx(struct spdk_thread *thread);

/**
 * Get the thread's cpumask.
 *
 * \param thread The thread to get the cpumask for.
 *
 * \return cpuset pointer
 */
struct spdk_cpuset *spdk_thread_get_cpumask(struct spdk_thread *thread);

/**
 * Set the current thread's cpumask to the specified value. The thread may be
 * rescheduled to one of the CPUs specified in the cpumask.
 *
 * This API requires that the SPDK thread operation SPDK_THREAD_OP_RESCHED is supported.
 *
 * \param cpumask The new cpumask for the thread.
 *
 * \return 0 on success, negated errno otherwise.
 */
int spdk_thread_set_cpumask(struct spdk_cpuset *cpumask);

/**
 * Return the thread object associated with the context handle previously
 * obtained by calling spdk_thread_get_ctx().
 *
 * \param ctx A context previously obtained by calling spdk_thread_get_ctx().
 *
 * \return The associated thread.
 */
struct spdk_thread *spdk_thread_get_from_ctx(void *ctx);

/**
 * Perform one iteration worth of processing on the thread. This includes
 * both expired and continuous pollers as well as messages. If the thread
 * has exited, return immediately.
 *
 * \param thread The thread to process.
 * \param max_msgs The maximum number of messages that will be processed.
 * Use 0 to process the default number of messages (8).
 * \param now The current time, in ticks. Optional. If 0 is passed, this
 * function will call spdk_get_ticks() to get the current time.
 * The current time is used as the start time; the function calls
 * spdk_get_ticks() again when it finishes in order to measure its
 * own run time.
 *
 * \return 1 if work was done. 0 if no work was done.
 */
int spdk_thread_poll(struct spdk_thread *thread, uint32_t max_msgs, uint64_t now);
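/*
 * Illustrative sketch (not part of the API): a framework typically drives each thread
 * from a loop like the one below, and destroys the thread only after it has been marked
 * exited. Variable names are hypothetical.
 *
 *	while (spdk_thread_is_running(thread)) {
 *		spdk_thread_poll(thread, 0, 0);
 *	}
 *
 *	// spdk_thread_exit(thread) has been called from a message or poller on the thread;
 *	// keep polling until all outstanding operations have completed.
 *	while (!spdk_thread_is_exited(thread)) {
 *		spdk_thread_poll(thread, 0, 0);
 *	}
 *	spdk_thread_destroy(thread);
 */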
/**
 * Return the number of ticks until the next timed poller
 * would expire. Timed pollers are pollers for which
 * period_microseconds is greater than 0.
 *
 * \param thread The thread to check poller expiration times on.
 *
 * \return Number of ticks, or 0 if there are no timed pollers.
 */
uint64_t spdk_thread_next_poller_expiration(struct spdk_thread *thread);

/**
 * Returns whether there are any active pollers (pollers for which
 * period_microseconds equals 0) registered to be run on the thread.
 *
 * \param thread The thread to check.
 *
 * \return 1 if there is at least one active poller, 0 otherwise.
 */
int spdk_thread_has_active_pollers(struct spdk_thread *thread);

/**
 * Returns whether there are any pollers registered to be run
 * on the thread.
 *
 * \param thread The thread to check.
 *
 * \return true if there is any registered poller, false otherwise.
 */
bool spdk_thread_has_pollers(struct spdk_thread *thread);

/**
 * Returns whether there are scheduled operations to be run on the thread.
 *
 * \param thread The thread to check.
 *
 * \return true if there are no scheduled operations, false otherwise.
 */
bool spdk_thread_is_idle(struct spdk_thread *thread);

/**
 * Get count of allocated threads.
 */
uint32_t spdk_thread_get_count(void);

/**
 * Get a handle to the current thread.
 *
 * This handle may be passed to other threads and used as the target of
 * spdk_thread_send_msg().
 *
 * \sa spdk_io_channel_get_thread()
 *
 * \return a pointer to the current thread on success or NULL on failure.
 */
struct spdk_thread *spdk_get_thread(void);

/**
 * Get a thread's name.
 *
 * \param thread Thread to query.
 *
 * \return the name of the thread.
 */
const char *spdk_thread_get_name(const struct spdk_thread *thread);

/**
 * Get a thread's ID.
 *
 * \param thread Thread to query.
 *
 * \return the ID of the thread.
 */
uint64_t spdk_thread_get_id(const struct spdk_thread *thread);

/**
 * Get the thread by the ID.
 *
 * \param id ID of the thread.
 * \return Thread whose ID matches, or NULL otherwise.
 */
struct spdk_thread *spdk_thread_get_by_id(uint64_t id);

struct spdk_thread_stats {
	uint64_t busy_tsc;
	uint64_t idle_tsc;
};

/**
 * Get statistics about the current thread.
 *
 * Copy cumulative thread stats values to the provided thread stats structure.
 *
 * \param stats User's thread_stats structure.
 */
int spdk_thread_get_stats(struct spdk_thread_stats *stats);

/**
 * Return the TSC value from the end of the last time this thread was polled.
 *
 * \param thread Thread to query. If NULL, use current thread.
 *
 * \return TSC value from the end of the last time this thread was polled.
 */
uint64_t spdk_thread_get_last_tsc(struct spdk_thread *thread);

/**
 * Send a message to the given thread.
 *
 * The message will be sent asynchronously - i.e. spdk_thread_send_msg will always return
 * prior to `fn` being called.
 *
 * \param thread The target thread.
 * \param fn This function will be called on the given thread.
 * \param ctx This context will be passed to fn when called.
 *
 * \return 0 on success
 * \return -ENOMEM if the message could not be allocated
 * \return -EIO if the message could not be sent to the destination thread
 */
int spdk_thread_send_msg(const struct spdk_thread *thread, spdk_msg_fn fn, void *ctx);
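/*
 * Illustrative sketch (not part of the API): messages are the usual way to run code on
 * another SPDK thread without locking. The function and variable names are hypothetical.
 *
 *	static void
 *	update_config_on_thread(void *ctx)
 *	{
 *		struct my_config *cfg = ctx;
 *
 *		// Runs later, on the target thread.
 *	}
 *
 *	rc = spdk_thread_send_msg(target_thread, update_config_on_thread, cfg);
 *	// rc == 0 means the message was queued; the callback has not run yet.
 */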
/**
 * Send a message to the given thread. Only one critical message can be outstanding at the same
 * time. This function is intended for use in contexts that may interrupt the normal execution of
 * the application, such as signal handlers.
 *
 * The message will be sent asynchronously - i.e. spdk_thread_send_critical_msg will always return
 * prior to `fn` being called.
 *
 * \param thread The target thread.
 * \param fn This function will be called on the given thread.
 *
 * \return 0 on success
 * \return -EIO if the message could not be sent to the destination thread, due to an already
 * outstanding critical message
 */
int spdk_thread_send_critical_msg(struct spdk_thread *thread, spdk_msg_fn fn);

/**
 * Run the msg callback on the given thread. If this happens to be the current
 * thread, the callback is executed immediately; otherwise a message is sent to
 * the thread, and it's run asynchronously.
 *
 * \param thread The target thread.
 * \param fn This function will be called on the given thread.
 * \param ctx This context will be passed to fn when called.
 *
 * \return 0 on success
 * \return -ENOMEM if the message could not be allocated
 * \return -EIO if the message could not be sent to the destination thread
 */
static inline int
spdk_thread_exec_msg(const struct spdk_thread *thread, spdk_msg_fn fn, void *ctx)
{
	assert(thread != NULL);

	if (spdk_get_thread() == thread) {
		fn(ctx);
		return 0;
	}

	return spdk_thread_send_msg(thread, fn, ctx);
}

/**
 * Send a message to each thread, serially.
 *
 * The message is sent asynchronously - i.e. spdk_for_each_thread will return
 * prior to `fn` being called on each thread.
 *
 * \param fn This is the function that will be called on each thread.
 * \param ctx This context will be passed to fn when called.
 * \param cpl This will be called on the originating thread after `fn` has been
 * called on each thread.
 */
void spdk_for_each_thread(spdk_msg_fn fn, void *ctx, spdk_msg_fn cpl);

/**
 * Set the current spdk_thread into interrupt mode or back to poll mode.
 *
 * Only valid when the thread interrupt facility is enabled by
 * spdk_interrupt_mode_enable().
 *
 * \param enable_interrupt true to set interrupt mode, false to set poll mode.
 */
void spdk_thread_set_interrupt_mode(bool enable_interrupt);

/**
 * Register a poller on the current thread.
 *
 * The poller can be unregistered by calling spdk_poller_unregister().
 *
 * \param fn This function will be called every `period_microseconds`.
 * \param arg Argument passed to fn.
 * \param period_microseconds How often to call `fn`. If 0, call `fn` as often
 * as possible.
 *
 * \return a pointer to the poller registered on the current thread on success
 * or NULL on failure.
 */
struct spdk_poller *spdk_poller_register(spdk_poller_fn fn,
					 void *arg,
					 uint64_t period_microseconds);
/**
 * Register a poller on the current thread with an arbitrary name.
 *
 * The poller can be unregistered by calling spdk_poller_unregister().
 *
 * \param fn This function will be called every `period_microseconds`.
 * \param arg Argument passed to fn.
 * \param period_microseconds How often to call `fn`. If 0, call `fn` as often
 * as possible.
 * \param name Human readable name for the poller. If NULL, the name is set to
 * the poller function's name.
 *
 * \return a pointer to the poller registered on the current thread on success
 * or NULL on failure.
 */
struct spdk_poller *spdk_poller_register_named(spdk_poller_fn fn,
		void *arg,
		uint64_t period_microseconds,
		const char *name);

/*
 * \brief Register a poller on the current thread, setting its name
 * to the string of the poller function name. The poller being registered
 * should return a value of type `enum spdk_thread_poller_rc`. See
 * \ref spdk_poller_fn for more information.
 */
#define SPDK_POLLER_REGISTER(fn, arg, period_microseconds)	\
	spdk_poller_register_named(fn, arg, period_microseconds, #fn)

/**
 * Unregister a poller on the current thread.
 *
 * \param ppoller The poller to unregister.
 */
void spdk_poller_unregister(struct spdk_poller **ppoller);

/**
 * Pause a poller on the current thread.
 *
 * The poller is not run until it is resumed with spdk_poller_resume(). It is
 * perfectly fine to pause an already paused poller.
 *
 * \param poller The poller to pause.
 */
void spdk_poller_pause(struct spdk_poller *poller);

/**
 * Resume a poller on the current thread.
 *
 * Resumes a poller paused with spdk_poller_pause(). It is perfectly fine to
 * resume an unpaused poller.
 *
 * \param poller The poller to resume.
 */
void spdk_poller_resume(struct spdk_poller *poller);
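/*
 * Illustrative sketch (not part of the API): registering a timed poller with
 * SPDK_POLLER_REGISTER() and unregistering it later. Names are hypothetical.
 *
 *	static int
 *	collect_stats(void *ctx)
 *	{
 *		struct my_dev *dev = ctx;
 *
 *		// ... gather statistics ...
 *		return SPDK_POLLER_BUSY;
 *	}
 *
 *	// Run collect_stats() on the current thread every 100 ms (100,000 us).
 *	dev->stats_poller = SPDK_POLLER_REGISTER(collect_stats, dev, 100 * 1000);
 *	...
 *	spdk_poller_unregister(&dev->stats_poller);
 */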
/**
 * Register the opaque io_device context as an I/O device.
 *
 * After an I/O device is registered, it can return I/O channels using the
 * spdk_get_io_channel() function.
 *
 * \param io_device The pointer to io_device context.
 * \param create_cb Callback function invoked to allocate any resources required
 * for a new I/O channel.
 * \param destroy_cb Callback function invoked to release the resources for an
 * I/O channel.
 * \param ctx_size The size of the context buffer allocated to store references
 * to allocated I/O channel resources.
 * \param name A string name for the device used only for debugging. Optional -
 * may be NULL.
 */
void spdk_io_device_register(void *io_device, spdk_io_channel_create_cb create_cb,
			     spdk_io_channel_destroy_cb destroy_cb, uint32_t ctx_size,
			     const char *name);

/**
 * Unregister the opaque io_device context as an I/O device.
 *
 * The actual unregistration might be deferred until all active I/O channels are
 * destroyed.
 *
 * \param io_device The pointer to io_device context.
 * \param unregister_cb An optional callback function invoked to release any
 * references to this I/O device.
 */
void spdk_io_device_unregister(void *io_device, spdk_io_device_unregister_cb unregister_cb);

/**
 * Get an I/O channel for the specified io_device to be used by the calling thread.
 *
 * The io_device context pointer specified must have previously been registered
 * using spdk_io_device_register(). If an I/O channel does not yet exist for
 * the given io_device on the calling thread, it will allocate an I/O channel
 * and invoke the create_cb function pointer specified in spdk_io_device_register().
 * If an I/O channel already exists for the given io_device on the calling thread,
 * its reference is returned rather than creating a new I/O channel.
 *
 * \param io_device The pointer to io_device context.
 *
 * \return a pointer to the I/O channel for this device on success or NULL on failure.
 */
struct spdk_io_channel *spdk_get_io_channel(void *io_device);

/**
 * Release a reference to an I/O channel. This happens asynchronously.
 *
 * This must be called on the same thread that called spdk_get_io_channel()
 * for the specified I/O channel. If this releases the last reference to the
 * I/O channel, the destroy_cb function specified in spdk_io_device_register()
 * will be invoked to release any associated resources.
 *
 * \param ch I/O channel to release a reference.
 */
void spdk_put_io_channel(struct spdk_io_channel *ch);

/**
 * Get the context buffer associated with an I/O channel.
 *
 * \param ch I/O channel.
 *
 * \return a pointer to the context buffer.
 */
static inline void *
spdk_io_channel_get_ctx(struct spdk_io_channel *ch)
{
	return (uint8_t *)ch + SPDK_IO_CHANNEL_STRUCT_SIZE;
}
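/*
 * Illustrative sketch (not part of the API): an io_device is registered once, and each
 * thread performing I/O obtains its own channel whose context buffer holds per-thread
 * resources. Structure and function names are hypothetical.
 *
 *	struct my_dev_channel {
 *		// per-thread resources (queue pair, buffers, ...)
 *	};
 *
 *	static int
 *	my_dev_create_cb(void *io_device, void *ctx_buf)
 *	{
 *		struct my_dev_channel *dev_ch = ctx_buf;
 *
 *		// allocate per-thread resources
 *		return 0;
 *	}
 *
 *	static void
 *	my_dev_destroy_cb(void *io_device, void *ctx_buf)
 *	{
 *		// release per-thread resources
 *	}
 *
 *	spdk_io_device_register(&g_my_dev, my_dev_create_cb, my_dev_destroy_cb,
 *				sizeof(struct my_dev_channel), "my_dev");
 *
 *	// On each thread that performs I/O:
 *	struct spdk_io_channel *ch = spdk_get_io_channel(&g_my_dev);
 *	struct my_dev_channel *dev_ch = spdk_io_channel_get_ctx(ch);
 *	...
 *	spdk_put_io_channel(ch);
 *
 *	// Once every channel has been released:
 *	spdk_io_device_unregister(&g_my_dev, NULL);
 */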
/**
 * Get I/O channel from the context buffer. This is the inverse of
 * spdk_io_channel_get_ctx().
 *
 * \param ctx The pointer to the context buffer.
 *
 * \return a pointer to the I/O channel associated with the context buffer.
 */
struct spdk_io_channel *spdk_io_channel_from_ctx(void *ctx);

/**
 * Get the thread associated with an I/O channel.
 *
 * \param ch I/O channel.
 *
 * \return a pointer to the thread associated with the I/O channel.
 */
struct spdk_thread *spdk_io_channel_get_thread(struct spdk_io_channel *ch);

/**
 * Call 'fn' on each channel associated with io_device.
 *
 * This happens asynchronously, so fn may be called after spdk_for_each_channel
 * returns. 'fn' will be called for each channel serially, such that two calls
 * to 'fn' will not overlap in time. After 'fn' has been called, call
 * spdk_for_each_channel_continue() to continue iterating.
 *
 * \param io_device 'fn' will be called on each channel associated with this io_device.
 * \param fn Called on the appropriate thread for each channel associated with io_device.
 * \param ctx Context buffer registered to spdk_io_channel_iter that can be obtained
 * from the function spdk_io_channel_iter_get_ctx().
 * \param cpl Called on the thread that spdk_for_each_channel was initially called
 * from when 'fn' has been called on each channel.
 */
void spdk_for_each_channel(void *io_device, spdk_channel_msg fn, void *ctx,
			   spdk_channel_for_each_cpl cpl);

/**
 * Get io_device from the I/O channel iterator.
 *
 * \param i I/O channel iterator.
 *
 * \return a pointer to the io_device.
 */
void *spdk_io_channel_iter_get_io_device(struct spdk_io_channel_iter *i);

/**
 * Get I/O channel from the I/O channel iterator.
 *
 * \param i I/O channel iterator.
 *
 * \return a pointer to the I/O channel.
 */
struct spdk_io_channel *spdk_io_channel_iter_get_channel(struct spdk_io_channel_iter *i);

/**
 * Get context buffer from the I/O channel iterator.
 *
 * \param i I/O channel iterator.
 *
 * \return a pointer to the context buffer.
 */
void *spdk_io_channel_iter_get_ctx(struct spdk_io_channel_iter *i);

/**
 * Get the io_device for the specified I/O channel.
 *
 * \param ch I/O channel.
 *
 * \return a pointer to the io_device for the I/O channel.
 */
void *spdk_io_channel_get_io_device(struct spdk_io_channel *ch);

/**
 * Helper function to iterate all channels for spdk_for_each_channel().
 *
 * \param i I/O channel iterator.
 * \param status Status for the I/O channel iterator;
 * for a non-zero status the remaining iterations are terminated.
 */
void spdk_for_each_channel_continue(struct spdk_io_channel_iter *i, int status);

/**
 * A handle representing a registered interrupt file descriptor.
 */
struct spdk_interrupt;

/**
 * Callback function registered for an interrupt file descriptor.
 *
 * \param ctx Context passed as arg to spdk_interrupt_register().
 *
 * \return 0 to indicate that the interrupt took place but no events were found;
 * positive to indicate that the interrupt took place and some events were processed;
 * negative if no event information is provided.
 */
typedef int (*spdk_interrupt_fn)(void *ctx);

/**
 * Register an spdk_interrupt on the current thread. The provided function
 * will be called any time the associated file descriptor is written to.
 *
 * \param efd File descriptor of the spdk_interrupt.
 * \param fn Called each time there are events in spdk_interrupt.
 * \param arg Function argument for fn.
 * \param name Human readable name for the spdk_interrupt. If NULL, the name is
 * set to the callback function's name.
 *
 * \return a pointer to the spdk_interrupt registered on the current thread on success
 * or NULL on failure.
 */
struct spdk_interrupt *spdk_interrupt_register(int efd, spdk_interrupt_fn fn,
		void *arg, const char *name);

/*
 * \brief Register an spdk_interrupt on the current thread, setting its name
 * to the string of the spdk_interrupt function name.
 */
#define SPDK_INTERRUPT_REGISTER(efd, fn, arg)	\
	spdk_interrupt_register(efd, fn, arg, #fn)

/**
 * Unregister an spdk_interrupt on the current thread.
 *
 * \param pintr The spdk_interrupt to unregister.
 */
void spdk_interrupt_unregister(struct spdk_interrupt **pintr);
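/*
 * Illustrative sketch (not part of the API): wiring an eventfd into the current thread's
 * interrupt handling. Names are hypothetical; eventfd() is assumed to be available (Linux).
 *
 *	static int
 *	handle_event(void *ctx)
 *	{
 *		// read the eventfd and process the pending work
 *		return 0;
 *	}
 *
 *	int efd = eventfd(0, EFD_NONBLOCK);
 *	struct spdk_interrupt *intr = SPDK_INTERRUPT_REGISTER(efd, handle_event, my_ctx);
 *	...
 *	spdk_interrupt_unregister(&intr);
 */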
enum spdk_interrupt_event_types {
#ifdef __linux__
	SPDK_INTERRUPT_EVENT_IN = EPOLLIN,
	SPDK_INTERRUPT_EVENT_OUT = EPOLLOUT,
	SPDK_INTERRUPT_EVENT_ET = EPOLLET
#else
	SPDK_INTERRUPT_EVENT_IN = 0x001,
	SPDK_INTERRUPT_EVENT_OUT = 0x004,
	SPDK_INTERRUPT_EVENT_ET = 1u << 31
#endif
};

/**
 * Change the event_types associated with the spdk_interrupt on the current thread.
 *
 * \param intr The pointer to the spdk_interrupt registered on the current thread.
 * \param event_types New event_types for the spdk_interrupt.
 *
 * \return 0 if success or -errno if failed.
 */
int spdk_interrupt_set_event_types(struct spdk_interrupt *intr,
				   enum spdk_interrupt_event_types event_types);

/**
 * Return a file descriptor that becomes ready whenever any of the registered
 * interrupt file descriptors are ready.
 *
 * \param thread The thread to query.
 *
 * \return The spdk_interrupt fd of the thread itself.
 */
int spdk_thread_get_interrupt_fd(struct spdk_thread *thread);

/**
 * Return an fd_group that becomes ready whenever any of the registered
 * interrupt file descriptors are ready.
 *
 * \param thread The thread to query.
 *
 * \return The spdk_fd_group of the thread itself.
 */
struct spdk_fd_group *spdk_thread_get_interrupt_fd_group(struct spdk_thread *thread);

/**
 * Set SPDK to run in event-driven mode.
 *
 * \return 0 on success or -errno on failure.
 */
int spdk_interrupt_mode_enable(void);

/**
 * Reports whether interrupt mode is set.
 *
 * \return True if interrupt mode is set, false otherwise.
 */
bool spdk_interrupt_mode_is_enabled(void);

/**
 * A spinlock augmented with safety checks for use with SPDK.
 *
 * SPDK code that uses spdk_spinlock runs from an SPDK thread, which itself is associated with a
 * pthread. There are typically many SPDK threads associated with each pthread. The SPDK application
 * may migrate SPDK threads between pthreads from time to time to balance the load on those threads.
 * Migration of SPDK threads only happens when the thread is off CPU, and as such it is only safe to
 * hold a lock so long as an SPDK thread stays on CPU.
 *
 * It is not safe to lock a spinlock, return from the event or poller, then unlock it at some later
 * time because:
 *
 * - Even though the SPDK thread may be the same, the SPDK thread may be running on different
 *   pthreads during lock and unlock. A pthread spinlock may consider this to be an unlock by a
 *   non-owner, which results in undefined behavior.
 * - A lock that is acquired by a poller or event may be needed by another poller or event that
 *   runs on the same pthread. This can lead to deadlock or detection of deadlock.
 * - A lock that is acquired by a poller or event that is needed by another poller or event that
 *   runs on a second pthread will block the second pthread from doing any useful work until the
 *   lock is released. Because the lock holder and the lock acquirer are on the same pthread, this
 *   would lead to deadlock.
 *
 * If an SPDK spinlock is used erroneously, the program will abort.
 */
struct spdk_spinlock {
	pthread_spinlock_t spinlock;
	struct spdk_thread *thread;
	struct spdk_spinlock_internal *internal;
	bool initialized;
	bool destroyed;
};

/**
 * Initialize an spdk_spinlock.
 *
 * \param sspin The SPDK spinlock to initialize.
 */
void spdk_spin_init(struct spdk_spinlock *sspin);

/**
 * Destroy an spdk_spinlock.
 *
 * \param sspin The SPDK spinlock to destroy.
 */
void spdk_spin_destroy(struct spdk_spinlock *sspin);

/**
 * Lock an SPDK spin lock.
 *
 * \param sspin An SPDK spinlock.
 */
void spdk_spin_lock(struct spdk_spinlock *sspin);

/**
 * Unlock an SPDK spinlock.
 *
 * \param sspin An SPDK spinlock.
 */
void spdk_spin_unlock(struct spdk_spinlock *sspin);

/**
 * Determine if the caller holds this SPDK spinlock.
 *
 * \param sspin An SPDK spinlock.
 *
 * \return true if the spinlock is held by this thread, else false.
 */
bool spdk_spin_held(struct spdk_spinlock *sspin);
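/*
 * Illustrative sketch (not part of the API): an SPDK spinlock must be acquired and
 * released within a single poller or message callback, never held across them.
 * Names are hypothetical.
 *
 *	struct my_obj {
 *		struct spdk_spinlock lock;
 *		uint64_t counter;
 *	};
 *
 *	spdk_spin_init(&obj->lock);
 *
 *	// Inside a poller or message callback:
 *	spdk_spin_lock(&obj->lock);
 *	obj->counter++;
 *	spdk_spin_unlock(&obj->lock);
 *
 *	spdk_spin_destroy(&obj->lock);
 */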
struct spdk_iobuf_opts {
	/** Maximum number of small buffers */
	uint64_t small_pool_count;
	/** Maximum number of large buffers */
	uint64_t large_pool_count;
	/** Size of a single small buffer */
	uint32_t small_bufsize;
	/** Size of a single large buffer */
	uint32_t large_bufsize;
};

struct spdk_iobuf_entry;

typedef void (*spdk_iobuf_get_cb)(struct spdk_iobuf_entry *entry, void *buf);

/** iobuf queue entry */
struct spdk_iobuf_entry {
	spdk_iobuf_get_cb cb_fn;
	const void *module;
	STAILQ_ENTRY(spdk_iobuf_entry) stailq;
};

struct spdk_iobuf_buffer {
	STAILQ_ENTRY(spdk_iobuf_buffer) stailq;
};

typedef STAILQ_HEAD(, spdk_iobuf_entry) spdk_iobuf_entry_stailq_t;
typedef STAILQ_HEAD(, spdk_iobuf_buffer) spdk_iobuf_buffer_stailq_t;

struct spdk_iobuf_pool {
	/** Buffer pool */
	struct spdk_ring *pool;
	/** Buffer cache */
	spdk_iobuf_buffer_stailq_t cache;
	/** Number of elements in the cache */
	uint32_t cache_count;
	/** Size of the cache */
	uint32_t cache_size;
	/** Buffer wait queue */
	spdk_iobuf_entry_stailq_t *queue;
	/** Buffer size */
	uint32_t bufsize;
};

/** iobuf channel */
struct spdk_iobuf_channel {
	/** Small buffer memory pool */
	struct spdk_iobuf_pool small;
	/** Large buffer memory pool */
	struct spdk_iobuf_pool large;
	/** Module pointer */
	const void *module;
	/** Parent IO channel */
	struct spdk_io_channel *parent;
};

/**
 * Initialize and allocate iobuf pools.
 *
 * \return 0 on success, negative errno otherwise.
 */
int spdk_iobuf_initialize(void);

typedef void (*spdk_iobuf_finish_cb)(void *cb_arg);

/**
 * Clean up and free iobuf pools.
 *
 * \param cb_fn Callback to be executed once the clean up is completed.
 * \param cb_arg Callback argument.
 */
void spdk_iobuf_finish(spdk_iobuf_finish_cb cb_fn, void *cb_arg);

/**
 * Set iobuf options. These options will be used during `spdk_iobuf_initialize()`.
 *
 * \param opts Options describing the size of the pools to reserve.
 *
 * \return 0 on success, negative errno otherwise.
 */
int spdk_iobuf_set_opts(const struct spdk_iobuf_opts *opts);

/**
 * Get iobuf options.
 *
 * \param opts Options to fill in.
 */
void spdk_iobuf_get_opts(struct spdk_iobuf_opts *opts);

/**
 * Register a module as an iobuf pool user. Only registered users can request buffers from the
 * iobuf pool.
 *
 * \param name Name of the module.
 *
 * \return 0 on success, negative errno otherwise.
 */
int spdk_iobuf_register_module(const char *name);

/**
 * Unregister a module as an iobuf pool user.
 *
 * \param name Name of the module.
 *
 * \return 0 on success, negative errno otherwise.
 */
int spdk_iobuf_unregister_module(const char *name);

/**
 * Initialize an iobuf channel.
 *
 * \param ch iobuf channel to initialize.
 * \param name Name of the module registered via `spdk_iobuf_register_module()`.
 * \param small_cache_size Number of small buffers to be cached by this channel.
 * \param large_cache_size Number of large buffers to be cached by this channel.
 *
 * \return 0 on success, negative errno otherwise.
 */
int spdk_iobuf_channel_init(struct spdk_iobuf_channel *ch, const char *name,
			    uint32_t small_cache_size, uint32_t large_cache_size);

/**
 * Release resources tied to an iobuf channel.
 *
 * \param ch iobuf channel.
 */
void spdk_iobuf_channel_fini(struct spdk_iobuf_channel *ch);

typedef int (*spdk_iobuf_for_each_entry_fn)(struct spdk_iobuf_channel *ch,
		struct spdk_iobuf_entry *entry, void *ctx);

/**
 * Iterate over all entries on a given queue and execute a callback on those that were requested
 * using `ch`. The iteration is stopped if the callback returns non-zero status.
 *
 * \param ch iobuf channel to iterate over.
 * \param pool Pool to iterate over (`small` or `large`).
 * \param cb_fn Callback to execute on each entry on the queue that was requested using `ch`.
 * \param cb_ctx Argument passed to `cb_fn`.
 *
 * \return status of the last callback.
 */
int spdk_iobuf_for_each_entry(struct spdk_iobuf_channel *ch, struct spdk_iobuf_pool *pool,
			      spdk_iobuf_for_each_entry_fn cb_fn, void *cb_ctx);

/**
 * Abort an outstanding request waiting for a buffer.
 *
 * \param ch iobuf channel on which the entry is waiting.
 * \param entry Entry to remove from the wait queue.
 * \param len Length of the requested buffer (must be the exact same value as specified in
 * `spdk_iobuf_get()`).
 */
void spdk_iobuf_entry_abort(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry,
			    uint64_t len);

/**
 * Get a buffer from the iobuf pool. If no buffers are available, the request is queued until a
 * buffer is released.
 *
 * \param ch iobuf channel.
 * \param len Length of the buffer to retrieve. The user is responsible for making sure the length
 * doesn't exceed large_bufsize.
 * \param entry Wait queue entry.
 * \param cb_fn Callback to be executed once a buffer becomes available. If a buffer is available
 * immediately, it will NOT be executed.
 *
 * \return pointer to a buffer or NULL if no buffers are currently available.
 */
void *spdk_iobuf_get(struct spdk_iobuf_channel *ch, uint64_t len, struct spdk_iobuf_entry *entry,
		     spdk_iobuf_get_cb cb_fn);

/**
 * Release a buffer back to the iobuf pool. If there are outstanding requests waiting for a buffer,
 * this buffer will be passed to one of them.
 *
 * \param ch iobuf channel.
 * \param buf Buffer to release.
 * \param len Length of the buffer (must be the exact same value as specified in `spdk_iobuf_get()`).
 */
void spdk_iobuf_put(struct spdk_iobuf_channel *ch, void *buf, uint64_t len);
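/*
 * Illustrative sketch (not part of the API): a module registers with the iobuf subsystem,
 * opens a per-thread channel, and gets/puts buffers through it. Names and sizes are
 * hypothetical.
 *
 *	static void
 *	buf_available(struct spdk_iobuf_entry *entry, void *buf)
 *	{
 *		// called later if no buffer was immediately available
 *	}
 *
 *	spdk_iobuf_register_module("my_module");
 *
 *	struct spdk_iobuf_channel ch;
 *	spdk_iobuf_channel_init(&ch, "my_module", 32, 32);
 *
 *	struct spdk_iobuf_entry entry;
 *	void *buf = spdk_iobuf_get(&ch, 4096, &entry, buf_available);
 *	if (buf != NULL) {
 *		// use the buffer, then return it with the same length
 *		spdk_iobuf_put(&ch, buf, 4096);
 *	}
 *
 *	spdk_iobuf_channel_fini(&ch);
 */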
#ifdef __cplusplus
}
#endif

#endif /* SPDK_THREAD_H_ */