1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <ocf/ocf.h>
#include <ocf/ocf_types.h>
#include <ocf/ocf_mngt.h>

#include "ctx.h"
#include "data.h"
#include "volume.h"
#include "utils.h"
#include "vbdev_ocf.h"

#include "spdk/bdev_module.h"
#include "spdk/conf.h"
#include "spdk/io_channel.h"
#include "spdk/string.h"
#include "spdk_internal/log.h"
#include "spdk/cpuset.h"

static struct spdk_bdev_module ocf_if;

/* Global list of all OCF vbdevs (online or still waiting for base devices) */
static TAILQ_HEAD(, vbdev_ocf) g_ocf_vbdev_head
	= TAILQ_HEAD_INITIALIZER(g_ocf_vbdev_head);

/* Bdevs claimed during examine but not yet attached to a vbdev */
static TAILQ_HEAD(, examining_bdev) g_ocf_examining_bdevs_head
	= TAILQ_HEAD_INITIALIZER(g_ocf_examining_bdevs_head);

/* Set once module shutdown begins; allows stopping shared cache instances */
bool g_fini_started = false;

/* Structure for keeping list of bdevs that are claimed but not used yet */
struct examining_bdev {
	struct spdk_bdev *bdev;
	TAILQ_ENTRY(examining_bdev) tailq;
};

/* Add bdev to list of claimed
 * NOTE(review): allocation failure is only caught by assert(), which is
 * compiled out with NDEBUG — a failed malloc would then be dereferenced */
static void
examine_start(struct spdk_bdev *bdev)
{
	struct examining_bdev *entry = malloc(sizeof(*entry));

	assert(entry);
	entry->bdev = bdev;
	TAILQ_INSERT_TAIL(&g_ocf_examining_bdevs_head, entry, tailq);
}

/* Find bdev on list of claimed bdevs, then remove it,
 * if it was the last one on list then report examine done
 * (a second matching entry means other vbdevs still examine this bdev,
 * so only the first match is removed without reporting) */
static void
examine_done(int status, struct vbdev_ocf *vbdev, void *cb_arg)
{
	struct spdk_bdev *bdev = cb_arg;
	struct examining_bdev *entry, *safe, *found = NULL;

	TAILQ_FOREACH_SAFE(entry, &g_ocf_examining_bdevs_head, tailq, safe) {
		if (entry->bdev == bdev) {
			if (found) {
				goto remove;
			} else {
				found = entry;
			}
		}
	}

	assert(found);
	spdk_bdev_module_examine_done(&ocf_if);

remove:
	TAILQ_REMOVE(&g_ocf_examining_bdevs_head, found, tailq);
	free(found);
}

/* Free allocated strings and structure itself
 * Used at shutdown only */
static void
free_vbdev(struct vbdev_ocf *vbdev)
{
	if (!vbdev) {
		return;
	}

	free(vbdev->name);
	free(vbdev->cache.name);
	free(vbdev->core.name);
	free(vbdev);
}

/* Get existing cache base
 * that is attached to other vbdev */
static struct vbdev_ocf_base *
get_other_cache_base(struct vbdev_ocf_base *base)
{
	struct vbdev_ocf *vbdev;

	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
		if (&vbdev->cache == base || !vbdev->cache.attached) {
			continue;
		}
		if (!strcmp(vbdev->cache.name, base->name)) {
			return &vbdev->cache;
		}
	}

	return NULL;
}

/* Get existing OCF cache instance
 * that is started by other vbdev */
static ocf_cache_t
get_other_cache_instance(struct vbdev_ocf *vbdev)
{
	struct vbdev_ocf *cmp;

	TAILQ_FOREACH(cmp, &g_ocf_vbdev_head, tailq) {
		if (cmp->state.doing_finish || cmp == vbdev) {
			continue;
		}
		if (strcmp(cmp->cache.name, vbdev->cache.name)) {
			continue;
		}
		if (cmp->ocf_cache) {
			return cmp->ocf_cache;
		}
	}

	return NULL;
}

/* Close and unclaim base bdev
 * Core management channel is released here; the cache management channel
 * is put in stop_vbdev_cmpl() instead (it is shared via cache_ctx) */
static void
remove_base_bdev(struct vbdev_ocf_base *base)
{
	if (base->attached) {
		spdk_bdev_module_release_bdev(base->bdev);
		spdk_bdev_close(base->desc);
		base->attached = false;

		if (base->management_channel && !base->is_cache) {
			spdk_put_io_channel(base->management_channel);
		}
	}
}

/* Finish unregister operation: report destruct result to the bdev layer
 * and drop this vbdev's reference on the shared cache context */
static void
unregister_finish(struct vbdev_ocf *vbdev)
{
	spdk_bdev_destruct_done(&vbdev->exp_bdev, vbdev->state.stop_status);
	vbdev_ocf_cache_ctx_put(vbdev->cache_ctx);
	vbdev_ocf_mngt_continue(vbdev, 0);
}

/* Unregister-path step: close and unclaim the core base bdev */
static void
close_core_bdev(struct vbdev_ocf *vbdev)
{
	remove_base_bdev(&vbdev->core);
	vbdev_ocf_mngt_continue(vbdev, 0);
}

/* Completion of ocf_mngt_cache_remove_core(); releases the cache lock
 * taken in remove_core_poll() */
static void
remove_core_cmpl(void *priv, int error)
{
	struct vbdev_ocf *vbdev = priv;

	ocf_mngt_cache_unlock(vbdev->ocf_cache);
	vbdev_ocf_mngt_continue(vbdev, error);
}

/* Try to lock cache, then remove core
 * Polled repeatedly until trylock succeeds; polling is disabled
 * (NULL) before issuing the async remove */
static void
remove_core_poll(struct vbdev_ocf *vbdev)
{
	int rc;

	rc = ocf_mngt_cache_trylock(vbdev->ocf_cache);
	if (rc) {
		return;
	}

	vbdev_ocf_mngt_poll(vbdev, NULL);
	ocf_mngt_cache_remove_core(vbdev->ocf_core, remove_core_cmpl, vbdev);
}

/* Detach core base */
static void
detach_core(struct vbdev_ocf *vbdev)
{
	if (vbdev->ocf_cache && ocf_cache_is_running(vbdev->ocf_cache)) {
		vbdev_ocf_mngt_poll(vbdev, remove_core_poll);
	} else {
		vbdev_ocf_mngt_continue(vbdev, 0);
	}
}

/* Unregister-path step: close and unclaim the cache base bdev */
static void
close_cache_bdev(struct vbdev_ocf *vbdev)
{
	remove_base_bdev(&vbdev->cache);
	vbdev_ocf_mngt_continue(vbdev, 0);
}

/* Detach cache base */
static void
detach_cache(struct vbdev_ocf *vbdev)
{
	vbdev->state.stop_status = vbdev->mngt_ctx.status;

	/* If some other vbdev references this cache bdev,
	 * we detach this only by changing the flag, without actual close */
	if (get_other_cache_base(&vbdev->cache)) {
		vbdev->cache.attached = false;
	}

	vbdev_ocf_mngt_continue(vbdev, 0);
}

/* Completion of ocf_mngt_cache_stop(); drops the mngt queue reference,
 * releases the cache lock and the cache management channel */
static void
stop_vbdev_cmpl(ocf_cache_t cache, void *priv, int error)
{
	struct vbdev_ocf *vbdev = priv;

	vbdev_ocf_queue_put(vbdev->cache_ctx->mngt_queue);
	ocf_mngt_cache_unlock(cache);
	spdk_put_io_channel(vbdev->cache.management_channel);

	vbdev_ocf_mngt_continue(vbdev, error);
}

/* Try to lock cache, then stop it
 * A shared cache instance is only stopped during module fini */
static void
stop_vbdev_poll(struct vbdev_ocf *vbdev)
{
	if (!ocf_cache_is_running(vbdev->ocf_cache)) {
		vbdev_ocf_mngt_continue(vbdev, 0);
		return;
	}

	if (!g_fini_started && get_other_cache_instance(vbdev)) {
		SPDK_NOTICELOG("Not stopping cache instance '%s'"
			       " because it is referenced by other OCF bdev\n",
			       vbdev->cache.name);
		vbdev_ocf_mngt_continue(vbdev, 0);
		return;
	}

	if (ocf_mngt_cache_trylock(vbdev->ocf_cache)) {
		return;
	}

	vbdev_ocf_mngt_poll(vbdev, NULL);
	ocf_mngt_cache_stop(vbdev->ocf_cache, stop_vbdev_cmpl, vbdev);
}

/* Stop OCF cache object
 * vbdev_ocf is not operational after this */
static void
stop_vbdev(struct vbdev_ocf *vbdev)
{
	vbdev_ocf_mngt_poll(vbdev, stop_vbdev_poll);
}

/* Wait for all OCF requests to finish */
static void
wait_for_requests_poll(struct vbdev_ocf *vbdev)
{
	if (ocf_cache_has_pending_requests(vbdev->ocf_cache)) {
		return;
	}

	vbdev_ocf_mngt_continue(vbdev, 0);
}

/* Start waiting for OCF requests to finish */
static void
wait_for_requests(struct vbdev_ocf *vbdev)
{
	vbdev_ocf_mngt_poll(vbdev, wait_for_requests_poll);
}

/* Completion of ocf_mngt_cache_flush(); releases the cache lock
 * taken in flush_vbdev_poll() */
static void
flush_vbdev_cmpl(ocf_cache_t cache, void *priv, int error)
{
	struct vbdev_ocf *vbdev = priv;

	ocf_mngt_cache_unlock(cache);
	vbdev_ocf_mngt_continue(vbdev, error);
}

/* Try to lock cache, then flush dirty data to the core device */
static void
flush_vbdev_poll(struct vbdev_ocf *vbdev)
{
	if (!ocf_cache_is_running(vbdev->ocf_cache)) {
		vbdev_ocf_mngt_continue(vbdev, -EINVAL);
		return;
	}

	if (ocf_mngt_cache_trylock(vbdev->ocf_cache)) {
		return;
	}

	vbdev_ocf_mngt_poll(vbdev, NULL);
	ocf_mngt_cache_flush(vbdev->ocf_cache, false, flush_vbdev_cmpl, vbdev);
}

/* Start asynchronous cache flush (first unregister-path step) */
static void
flush_vbdev(struct vbdev_ocf *vbdev)
{
	vbdev_ocf_mngt_poll(vbdev, flush_vbdev_poll);
}

/* Procedures called during unregister */
vbdev_ocf_mngt_fn unregister_path[] = {
	flush_vbdev,
	wait_for_requests,
	stop_vbdev,
	detach_cache,
	close_cache_bdev,
	detach_core,
	close_core_bdev,
	unregister_finish,
	NULL
};

/* Start asynchronous management operation using unregister_path */
static void
unregister_cb(void *opaque)
{
	struct vbdev_ocf *vbdev = opaque;
	int rc;

	rc = vbdev_ocf_mngt_start(vbdev, unregister_path, NULL, NULL);
	if (rc) {
		SPDK_ERRLOG("Unable to unregister OCF bdev: %d\n", rc);
		spdk_bdev_destruct_done(&vbdev->exp_bdev, rc);
	}
}

/* Unregister io device with callback to unregister_cb
 * This function is called during spdk_bdev_unregister */
static int
vbdev_ocf_destruct(void *opaque)
{
	struct vbdev_ocf *vbdev = opaque;

	if (vbdev->state.doing_finish) {
		return -EALREADY;
	}
	vbdev->state.doing_finish = true;

	if (vbdev->state.started) {
		spdk_io_device_unregister(vbdev, unregister_cb);
		/* Return 1 because unregister is delayed */
		return 1;
	}

	/* Never started — just detach/close whatever bases were attached */
	if (vbdev->cache.attached) {
		detach_cache(vbdev);
		close_cache_bdev(vbdev);
	}
	if (vbdev->core.attached) {
		detach_core(vbdev);
		close_core_bdev(vbdev);
	}

	return 0;
}

/* Stop OCF cache and unregister SPDK bdev */
int
vbdev_ocf_delete(struct vbdev_ocf *vbdev, void (*cb)(void *, int), void *cb_arg)
{
	int rc = 0;

	if (vbdev->state.started) {
		spdk_bdev_unregister(&vbdev->exp_bdev, cb, cb_arg);
	} else {
		rc = vbdev_ocf_destruct(vbdev);
		if (rc == 0 && cb) {
			cb(cb_arg, 0);
		}
	}

	return rc;
}

/* If vbdev is online, return its object */
struct vbdev_ocf *
vbdev_ocf_get_by_name(const char *name)
{
	struct vbdev_ocf *vbdev;

	if (name == NULL) {
		assert(false);
		return NULL;
	}

	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
		if (vbdev->name == NULL || vbdev->state.doing_finish) {
			continue;
		}
		if (strcmp(vbdev->name, name) == 0) {
			return vbdev;
		}
	}
	return NULL;
}

/* Return matching base if parent vbdev is online */
struct vbdev_ocf_base *
vbdev_ocf_get_base_by_name(const char *name)
{
	struct vbdev_ocf *vbdev;

	if (name == NULL) {
		assert(false);
		return NULL;
	}

	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
		if (vbdev->state.doing_finish) {
			continue;
		}

		if (vbdev->cache.name && strcmp(vbdev->cache.name, name) == 0) {
			return &vbdev->cache;
		}
		if (vbdev->core.name && strcmp(vbdev->core.name, name) == 0) {
			return &vbdev->core;
		}
	}
	return NULL;
}

/* Execute fn for each OCF device that is online or waits for base devices */
void
vbdev_ocf_foreach(vbdev_ocf_foreach_fn fn, void *ctx)
{
	struct vbdev_ocf *vbdev;

	assert(fn != NULL);

	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
		if (!vbdev->state.doing_finish) {
			fn(vbdev, ctx);
		}
	}
}

/* Called from OCF when SPDK_IO is completed */
static void
vbdev_ocf_io_submit_cb(struct ocf_io *io, int error)
{
	struct spdk_bdev_io *bdev_io = io->priv1;

	if (error == 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else if (error == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}

	ocf_io_put(io);
}

/* Configure io parameters and send it to OCF */
static int
io_submit_to_ocf(struct spdk_bdev_io *bdev_io, struct ocf_io *io)
{
	int dir;
	uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
	uint64_t offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_READ:
		dir = OCF_READ;
		if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
			dir = OCF_WRITE;
		}
		ocf_io_configure(io, offset, len, dir, 0, 0);
		ocf_core_submit_io(io);
		return 0;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		ocf_io_configure(io, offset, len, OCF_WRITE, 0, OCF_WRITE_FLUSH);
		ocf_core_submit_flush(io);
		return 0;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		ocf_io_configure(io, offset, len, 0, 0, 0);
		ocf_core_submit_discard(io);
		return 0;
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	default:
		SPDK_ERRLOG("Unsupported IO type: %d\n", bdev_io->type);
		return -EINVAL;
	}
}

/* Submit SPDK-IO to OCF
 * On failure the io reference is dropped and the bdev_io completed
 * with NOMEM (retryable) or FAILED */
static void
io_handle(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_ocf *vbdev = bdev_io->bdev->ctxt;
	struct ocf_io *io = NULL;
	struct bdev_ocf_data *data = NULL;
	struct vbdev_ocf_qcxt *qctx = spdk_io_channel_get_ctx(ch);
	int err;

	io = ocf_core_new_io(vbdev->ocf_core);
	if (!io) {
		err = -ENOMEM;
		goto fail;
	}

	ocf_io_set_queue(io, qctx->queue);

	data = vbdev_ocf_data_from_spdk_io(bdev_io);
	if (!data) {
		err = -ENOMEM;
		goto fail;
	}

	err = ocf_io_set_data(io, data, 0);
	if (err) {
		goto fail;
	}

	ocf_io_set_cmpl(io, bdev_io, NULL, vbdev_ocf_io_submit_cb);

	err = io_submit_to_ocf(bdev_io, io);
	if (err) {
		goto fail;
	}

	return;

fail:
	if (io) {
		ocf_io_put(io);
	}

	if (err == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* Buffer-allocation callback for reads; forwards to io_handle() */
static void
vbdev_ocf_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		     bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	io_handle(ch, bdev_io);
}

/* Called from bdev layer when an io to Cache vbdev is submitted */
static void
vbdev_ocf_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		/* User does not have to allocate io vectors for the request,
		 * so in case they are not allocated, we allocate them here */
		spdk_bdev_io_get_buf(bdev_io, vbdev_ocf_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_UNMAP:
		io_handle(ch, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	default:
		SPDK_ERRLOG("Unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		break;
	}
}

/* Called from bdev layer
 * Supported types are delegated to the core bdev's capabilities */
static bool
vbdev_ocf_io_type_supported(void *opaque, enum spdk_bdev_io_type io_type)
{
	struct vbdev_ocf *vbdev = opaque;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_UNMAP:
		return spdk_bdev_io_type_supported(vbdev->core.bdev, io_type);
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	default:
		return false;
	}
}

/* Called from bdev layer */
static struct spdk_io_channel *
vbdev_ocf_get_io_channel(void *opaque)
{
	struct vbdev_ocf *bdev = opaque;

	return spdk_get_io_channel(bdev);
}

/* Dump runtime info about this vbdev (JSON-RPC get_bdevs path) */
static int
vbdev_ocf_dump_info_json(void *opaque, struct spdk_json_write_ctx *w)
{
	struct vbdev_ocf *vbdev = opaque;

	spdk_json_write_named_string(w, "cache_device", vbdev->cache.name);
	spdk_json_write_named_string(w, "core_device", vbdev->core.name);

	spdk_json_write_named_string(w, "mode",
				     ocf_get_cache_modename(ocf_cache_get_mode(vbdev->ocf_cache)));
	spdk_json_write_named_uint32(w, "cache_line_size",
				     ocf_cache_get_line_size(vbdev->ocf_cache));
	spdk_json_write_named_bool(w, "metadata_volatile",
				   vbdev->cfg.cache.metadata_volatile);

	return 0;
}

/* Write the RPC call needed to recreate this vbdev on config save */
static void
vbdev_ocf_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	struct vbdev_ocf *vbdev = bdev->ctxt;

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_ocf_create");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", vbdev->name);
	spdk_json_write_named_string(w, "mode",
				     ocf_get_cache_modename(ocf_cache_get_mode(vbdev->ocf_cache)));
	spdk_json_write_named_string(w, "cache_bdev_name", vbdev->cache.name);
	spdk_json_write_named_string(w, "core_bdev_name", vbdev->core.name);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}

/* Cache vbdev function table
 * Used by bdev layer */
static struct spdk_bdev_fn_table cache_dev_fn_table = {
	.destruct = vbdev_ocf_destruct,
	.io_type_supported = vbdev_ocf_io_type_supported,
	.submit_request = vbdev_ocf_submit_request,
	.get_io_channel = vbdev_ocf_get_io_channel,
	.write_config_json = vbdev_ocf_write_json_config,
	.dump_info_json = vbdev_ocf_dump_info_json,
};

/* Poller function for the OCF queue
 * We execute OCF requests here synchronously
 * Runs at most 32 requests per poll; returns busy (1) if any were pending */
static int
queue_poll(void *opaque)
{
	struct vbdev_ocf_qcxt *qctx = opaque;
	uint32_t iono = ocf_queue_pending_io(qctx->queue);
	int i, max = spdk_min(32, iono);

	for (i = 0; i < max; i++) {
		ocf_queue_run_single(qctx->queue);
	}

	if (iono > 0) {
		return 1;
	} else {
		return 0;
	}
}

/* Called during ocf_submit_io, ocf_purge*
 * and any other requests that need to submit io
 * No-op here: the poller drains the queue instead */
static void
vbdev_ocf_ctx_queue_kick(ocf_queue_t q)
{
}

/* OCF queue deinitialization
 * Called at ocf_cache_stop
 * qctx was heap-allocated (allocated flag) only when it outlived
 * its io channel — see io_device_destroy_cb() */
static void
vbdev_ocf_ctx_queue_stop(ocf_queue_t q)
{
	struct vbdev_ocf_qcxt *qctx = ocf_queue_get_priv(q);

	if (qctx) {
		spdk_put_io_channel(qctx->cache_ch);
		spdk_put_io_channel(qctx->core_ch);
		spdk_poller_unregister(&qctx->poller);
		if (qctx->allocated) {
			free(qctx);
		}
	}
}

/* Queue ops is an interface for running queue thread
 * stop() operation in called just before queue gets destroyed */
const struct ocf_queue_ops queue_ops = {
	.kick_sync = vbdev_ocf_ctx_queue_kick,
	.kick = vbdev_ocf_ctx_queue_kick,
	.stop = vbdev_ocf_ctx_queue_stop,
};

/* Called on cache vbdev creation at every thread
 * We allocate OCF queues here and SPDK poller for it
 * NOTE(review): spdk_poller_register() and the two channel gets are
 * not checked for failure here — verify against callers */
static int
io_device_create_cb(void *io_device, void *ctx_buf)
{
	struct vbdev_ocf *vbdev = io_device;
	struct vbdev_ocf_qcxt *qctx = ctx_buf;
	int rc;

	rc = vbdev_ocf_queue_create(vbdev->ocf_cache, &qctx->queue, &queue_ops);
	if (rc) {
		return rc;
	}

	ocf_queue_set_priv(qctx->queue, qctx);

	qctx->vbdev = vbdev;
	qctx->cache_ch = spdk_bdev_get_io_channel(vbdev->cache.desc);
	qctx->core_ch = spdk_bdev_get_io_channel(vbdev->core.desc);
	qctx->poller = spdk_poller_register(queue_poll, qctx, 0);

	return rc;
}

/* Called per thread
 * Put OCF queue and relaunch poller with new context to finish pending requests */
static void
io_device_destroy_cb(void *io_device, void *ctx_buf)
{
	/* Making a copy of context to use it after io channel will be destroyed */
	struct vbdev_ocf_qcxt *copy = malloc(sizeof(*copy));
	struct vbdev_ocf_qcxt *qctx = ctx_buf;

	if (copy) {
		ocf_queue_set_priv(qctx->queue, copy);
		memcpy(copy, qctx, sizeof(*copy));
		spdk_poller_unregister(&qctx->poller);
		copy->poller = spdk_poller_register(queue_poll, copy, 0);
		/* Marks the copy for free() in vbdev_ocf_ctx_queue_stop() */
		copy->allocated = true;
	} else {
		SPDK_ERRLOG("Unable to stop OCF queue properly: %s\n",
			    spdk_strerror(ENOMEM));
	}

	vbdev_ocf_queue_put(qctx->queue);
}

/* OCF management queue deinitialization */
static void
vbdev_ocf_ctx_mngt_queue_stop(ocf_queue_t q)
{
	struct spdk_poller *poller = ocf_queue_get_priv(q);

	if (poller) {
		spdk_poller_unregister(&poller);
	}
}

/* Poller for the management queue; same drain pattern as queue_poll() */
static int
mngt_queue_poll(void *opaque)
{
	ocf_queue_t q = opaque;
	uint32_t iono = ocf_queue_pending_io(q);
	int i, max = spdk_min(32, iono);

	for (i = 0; i < max; i++) {
		ocf_queue_run_single(q);
	}

	if (iono > 0) {
		return 1;
	} else {
		return 0;
	}
}

/* No-op kick; the management poller drains the queue */
static void
vbdev_ocf_ctx_mngt_queue_kick(ocf_queue_t q)
{
}

/* Queue ops is an interface for running queue thread
 * stop() operation in called just before queue gets destroyed */
const struct ocf_queue_ops mngt_queue_ops = {
	.kick_sync = NULL,
	.kick = vbdev_ocf_ctx_mngt_queue_kick,
	.stop = vbdev_ocf_ctx_mngt_queue_stop,
};

/* Create exported spdk object */
static void
finish_register(struct vbdev_ocf *vbdev)
{
	int result;

	vbdev->cache.management_channel = vbdev->cache_ctx->management_channel;

	/* Copy properties of the base bdev */
	vbdev->exp_bdev.blocklen = vbdev->core.bdev->blocklen;
	vbdev->exp_bdev.write_cache = vbdev->core.bdev->write_cache;
	vbdev->exp_bdev.required_alignment = vbdev->core.bdev->required_alignment;

	vbdev->exp_bdev.name = vbdev->name;
	vbdev->exp_bdev.product_name = "SPDK OCF";

	vbdev->exp_bdev.blockcnt = vbdev->core.bdev->blockcnt;
	vbdev->exp_bdev.ctxt = vbdev;
	vbdev->exp_bdev.fn_table = &cache_dev_fn_table;
	vbdev->exp_bdev.module = &ocf_if;

	/* Finally register vbdev in SPDK */
	spdk_io_device_register(vbdev, io_device_create_cb, io_device_destroy_cb,
				sizeof(struct vbdev_ocf_qcxt), vbdev->name);
	result = spdk_bdev_register(&vbdev->exp_bdev);
	if (result) {
		SPDK_ERRLOG("Could not register exposed bdev\n");
	} else {
		vbdev->state.started = true;
	}

	vbdev_ocf_mngt_continue(vbdev, result);
}

/* Completion of ocf_mngt_cache_add_core(); releases the cache lock
 * taken in add_core_poll() and records the new core handle */
static void
add_core_cmpl(ocf_cache_t cache, ocf_core_t core, void *priv, int error)
{
	struct vbdev_ocf *vbdev = priv;

	ocf_mngt_cache_unlock(cache);

	if (error) {
		SPDK_ERRLOG("Failed to add core device to cache instance\n");
	} else {
		vbdev->ocf_core = core;
		vbdev->core.id = ocf_core_get_id(core);
	}

	vbdev->core.management_channel = spdk_bdev_get_io_channel(vbdev->core.desc);
	vbdev_ocf_mngt_continue(vbdev, error);
}

/* Try to lock cache, then add core */
static void
add_core_poll(struct vbdev_ocf *vbdev)
{
	if (ocf_mngt_cache_trylock(vbdev->ocf_cache)) {
		return;
	}

	vbdev_ocf_mngt_poll(vbdev, NULL);
	ocf_mngt_cache_add_core(vbdev->ocf_cache, &vbdev->cfg.core, add_core_cmpl, vbdev);
}

/* Add core for existing OCF cache instance */
static void
add_core(struct vbdev_ocf *vbdev)
{
	vbdev_ocf_mngt_poll(vbdev, add_core_poll);
}

/* Completion of ocf_mngt_cache_load()/attach() issued by start_cache() */
static void
start_cache_cmpl(ocf_cache_t cache, void *priv, int error)
{
	struct vbdev_ocf *vbdev = priv;

	ocf_mngt_cache_unlock(cache);

	vbdev_ocf_mngt_continue(vbdev, error);
}

/* Create the OCF management queue and its SPDK poller (100us period) */
static int
create_management_queue(struct vbdev_ocf *vbdev)
{
	struct spdk_poller *mngt_poller;
	int rc;

	rc = vbdev_ocf_queue_create(vbdev->ocf_cache, &vbdev->cache_ctx->mngt_queue, &mngt_queue_ops);
	if (rc) {
		SPDK_ERRLOG("Unable to create mngt_queue: %d\n", rc);
		return rc;
	}

	mngt_poller = spdk_poller_register(mngt_queue_poll, vbdev->cache_ctx->mngt_queue, 100);
	if (mngt_poller == NULL) {
		SPDK_ERRLOG("Unable to initiate mngt request: %s", spdk_strerror(ENOMEM));
		return -ENOMEM;
	}

	ocf_queue_set_priv(vbdev->cache_ctx->mngt_queue, mngt_poller);
	ocf_mngt_cache_set_mngt_queue(vbdev->ocf_cache, vbdev->cache_ctx->mngt_queue);

	return 0;
}

/* Start OCF cache, attach caching device
 * Reuses an already-started cache instance when another vbdev
 * shares the same cache bdev */
static void
start_cache(struct vbdev_ocf *vbdev)
{
	ocf_cache_t existing;
	int rc;

	if (vbdev->ocf_cache) {
		vbdev->mngt_ctx.status = -EALREADY;
		vbdev_ocf_mngt_stop(vbdev);
		return;
	}

	existing = get_other_cache_instance(vbdev);
	if (existing) {
		SPDK_NOTICELOG("OCF bdev %s connects to existing cache device %s\n",
			       vbdev->name, vbdev->cache.name);
		vbdev->ocf_cache = existing;
		vbdev->cache.id = ocf_cache_get_id(existing);
		vbdev->cache_ctx = ocf_cache_get_priv(existing);
		vbdev_ocf_cache_ctx_get(vbdev->cache_ctx);
		vbdev_ocf_mngt_continue(vbdev, 0);
		return;
	}

	vbdev->cache_ctx = calloc(1, sizeof(struct vbdev_ocf_cache_ctx));
	if (vbdev->cache_ctx == NULL) {
		vbdev->mngt_ctx.status = -ENOMEM;
		vbdev_ocf_mngt_stop(vbdev);
		return;
	}

	vbdev_ocf_cache_ctx_get(vbdev->cache_ctx);
	pthread_mutex_init(&vbdev->cache_ctx->lock, NULL);

	rc = ocf_mngt_cache_start(vbdev_ocf_ctx, &vbdev->ocf_cache, &vbdev->cfg.cache);
	if (rc) {
		vbdev_ocf_cache_ctx_put(vbdev->cache_ctx);
		vbdev->mngt_ctx.status = rc;
		vbdev_ocf_mngt_stop(vbdev);
		return;
	}

	vbdev->cache.id = ocf_cache_get_id(vbdev->ocf_cache);
	ocf_cache_set_priv(vbdev->ocf_cache, vbdev->cache_ctx);

	rc = create_management_queue(vbdev);
	if (rc) {
		SPDK_ERRLOG("Unable to create mngt_queue: %d\n", rc);
		vbdev_ocf_cache_ctx_put(vbdev->cache_ctx);
		vbdev->mngt_ctx.status = rc;
		vbdev_ocf_mngt_stop(vbdev);
		return;
	}

	vbdev->cache_ctx->management_channel = spdk_bdev_get_io_channel(vbdev->cache.desc);
	vbdev->cache.management_channel = vbdev->cache_ctx->management_channel;

	if (vbdev->cfg.loadq) {
		ocf_mngt_cache_load(vbdev->ocf_cache, &vbdev->cfg.device, start_cache_cmpl, vbdev);
	} else {
		ocf_mngt_cache_attach(vbdev->ocf_cache, &vbdev->cfg.device, start_cache_cmpl, vbdev);
	}
}

/* Procedures called during register operation */
vbdev_ocf_mngt_fn register_path[] = {
	start_cache,
	add_core,
	finish_register,
	NULL
};
/* Start cache instance and register OCF bdev
 * Both base bdevs must be attached and the vbdev must not be started yet */
static void
register_vbdev(struct vbdev_ocf *vbdev, vbdev_ocf_mngt_callback cb, void *cb_arg)
{
	int rc;

	if (!(vbdev->core.attached && vbdev->cache.attached) || vbdev->state.started) {
		cb(-EPERM, vbdev, cb_arg);
		return;
	}

	rc = vbdev_ocf_mngt_start(vbdev, register_path, cb, cb_arg);
	if (rc) {
		cb(rc, vbdev, cb_arg);
	}
}

/* Init OCF configuration options
 * for core and cache devices */
static void
init_vbdev_config(struct vbdev_ocf *vbdev)
{
	struct vbdev_ocf_config *cfg = &vbdev->cfg;

	/* Id 0 means OCF decides the id */
	cfg->cache.id = 0;
	cfg->cache.name = vbdev->name;

	/* TODO [metadata]: make configurable with persistent
	 * metadata support */
	cfg->cache.metadata_volatile = false;

	/* TODO [cache line size]: make cache line size configurable
	 * Using standard 4KiB for now */
	cfg->cache.cache_line_size = ocf_cache_line_size_4;

	/* This are suggested values that
	 * should be sufficient for most use cases */
	cfg->cache.backfill.max_queue_size = 65536;
	cfg->cache.backfill.queue_unblock_size = 60000;

	/* TODO [cache line size] */
	cfg->device.cache_line_size = ocf_cache_line_size_4;
	cfg->device.force = true;
	cfg->device.min_free_ram = 0;
	cfg->device.perform_test = false;
	cfg->device.discard_on_start = false;

	vbdev->cfg.cache.locked = true;

	cfg->core.volume_type = SPDK_OBJECT;
	cfg->device.volume_type = SPDK_OBJECT;
	cfg->core.core_id = OCF_CORE_MAX;

	if (vbdev->cfg.loadq) {
		/* When doing cache_load(), we need to set try_add to true,
		 * otherwise OCF will interpret this core as new
		 * instead of the inactive one */
		vbdev->cfg.core.try_add = true;
	}

	/* Serialize bdev names in OCF UUID to interpret on future loads
	 * Core UUID is pair of (core bdev name, cache bdev name)
	 * Cache UUID is cache bdev name */
	cfg->device.uuid.size = strlen(vbdev->cache.name) + 1;
	cfg->device.uuid.data = vbdev->cache.name;

	snprintf(vbdev->uuid, VBDEV_OCF_MD_MAX_LEN, "%s %s",
		 vbdev->core.name, vbdev->name);
	cfg->core.uuid.size = strlen(vbdev->uuid) + 1;
	cfg->core.uuid.data = vbdev->uuid;
	/* Replace the separating space with NUL so the buffer reads as
	 * the core name alone, while the vbdev name still follows it —
	 * uuid.size covers both strings */
	vbdev->uuid[strlen(vbdev->core.name)] = 0;
}

/* Allocate vbdev structure object and add it to the global list
 * Returns 0 on success, -EPERM on name collision, -EINVAL on bad
 * cache mode, -ENOMEM on allocation failure */
static int
init_vbdev(const char *vbdev_name,
	   const char *cache_mode_name,
	   const char *cache_name,
	   const char *core_name,
	   bool loadq)
{
	struct vbdev_ocf *vbdev;
	int rc = 0;

	if (spdk_bdev_get_by_name(vbdev_name) || vbdev_ocf_get_by_name(vbdev_name)) {
		SPDK_ERRLOG("Device with name '%s' already exists\n", vbdev_name);
		return -EPERM;
	}

	vbdev = calloc(1, sizeof(*vbdev));
	if (!vbdev) {
		goto error_mem;
	}

	vbdev->cache.parent = vbdev;
	vbdev->core.parent = vbdev;
	vbdev->cache.is_cache = true;
	vbdev->core.is_cache = false;

	if (cache_mode_name) {
		vbdev->cfg.cache.cache_mode
			= ocf_get_cache_mode(cache_mode_name);
	} else if (!loadq) { /* In load path it is OK to pass NULL as cache mode */
		SPDK_ERRLOG("No cache mode specified\n");
		rc = -EINVAL;
		goto error_free;
	}
	if (vbdev->cfg.cache.cache_mode < 0) {
		SPDK_ERRLOG("Incorrect cache mode '%s'\n", cache_mode_name);
		rc = -EINVAL;
		goto error_free;
	}

	vbdev->name = strdup(vbdev_name);
	if (!vbdev->name) {
		goto error_mem;
	}

	vbdev->cache.name = strdup(cache_name);
	if (!vbdev->cache.name) {
		goto error_mem;
	}

	vbdev->core.name = strdup(core_name);
	if (!vbdev->core.name) {
		goto error_mem;
	}

	vbdev->cfg.loadq = loadq;
	init_vbdev_config(vbdev);
	TAILQ_INSERT_TAIL(&g_ocf_vbdev_head, vbdev, tailq);
	return rc;

error_mem:
	rc = -ENOMEM;
error_free:
	free_vbdev(vbdev);
	return rc;
}

/* Read configuration file at the start of SPDK application
 * This adds vbdevs to global list if some mentioned in config
 * Config line format: <vbdev name> <mode> <cache bdev> <core bdev> */
static int
vbdev_ocf_init(void)
{
	const char *vbdev_name, *modename, *cache_name, *core_name;
	struct spdk_conf_section *sp;
	int status;

	status = vbdev_ocf_ctx_init();
	if (status) {
		SPDK_ERRLOG("OCF ctx initialization failed with=%d\n", status);
		return status;
	}

	status = vbdev_ocf_volume_init();
	if (status) {
		vbdev_ocf_ctx_cleanup();
		SPDK_ERRLOG("OCF volume initialization failed with=%d\n", status);
		return status;
	}

	sp = spdk_conf_find_section(NULL, "OCF");
	if (sp == NULL) {
		return 0;
	}

	for (int i = 0; ; i++) {
		if (!spdk_conf_section_get_nval(sp, "OCF", i)) {
			break;
		}

		vbdev_name = spdk_conf_section_get_nmval(sp, "OCF", i, 0);
		if (!vbdev_name) {
			SPDK_ERRLOG("No vbdev name specified\n");
			continue;
		}

		modename = spdk_conf_section_get_nmval(sp, "OCF", i, 1);
		if (!modename) {
			SPDK_ERRLOG("No modename specified for OCF vbdev '%s'\n", vbdev_name);
			continue;
		}

		cache_name = spdk_conf_section_get_nmval(sp, "OCF", i, 2);
		if (!cache_name) {
			SPDK_ERRLOG("No cache device specified for OCF vbdev '%s'\n", vbdev_name);
			continue;
		}

		core_name = spdk_conf_section_get_nmval(sp, "OCF", i, 3);
		if (!core_name) {
			SPDK_ERRLOG("No core devices specified for OCF vbdev '%s'\n", vbdev_name);
			continue;
		}

		/* NOTE(review): status from the last iteration is what gets
		 * returned — earlier per-line failures are only logged */
		status = init_vbdev(vbdev_name, modename, cache_name, core_name, false);
		if (status) {
			SPDK_ERRLOG("Config initialization failed with code: %d\n", status);
		}
	}

	return status;
}
/* Called after application shutdown started
 * Release memory of allocated structures here */
static void
vbdev_ocf_module_fini(void)
{
	struct vbdev_ocf *vbdev;

	/* Drain the global vbdev list, freeing each entry */
	while ((vbdev = TAILQ_FIRST(&g_ocf_vbdev_head))) {
		TAILQ_REMOVE(&g_ocf_vbdev_head, vbdev, tailq);
		free_vbdev(vbdev);
	}

	/* Tear down in reverse order of vbdev_ocf_init() */
	vbdev_ocf_volume_cleanup();
	vbdev_ocf_ctx_cleanup();
}

/* When base device gets unplugged this is called
 * We will unregister cache vbdev here
 * When cache device is removed, we delete every OCF bdev that used it */
static void
hotremove_cb(void *ctx)
{
	struct vbdev_ocf_base *base = ctx;
	struct vbdev_ocf *vbdev;

	if (!base->is_cache) {
		/* Core device: tear down only the single parent vbdev,
		 * unless that vbdev is already shutting down */
		if (base->parent->state.doing_finish) {
			return;
		}

		SPDK_NOTICELOG("Deinitializing '%s' because its core device '%s' was removed\n",
			       base->parent->name, base->name);
		vbdev_ocf_delete(base->parent, NULL, NULL);
		return;
	}

	/* Cache device: may be shared, so delete every vbdev built on it */
	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
		if (vbdev->state.doing_finish) {
			continue;
		}
		if (strcmp(base->name, vbdev->cache.name) == 0) {
			SPDK_NOTICELOG("Deinitializing '%s' because"
				       " its cache device '%s' was removed\n",
				       vbdev->name, base->name);
			vbdev_ocf_delete(vbdev, NULL, NULL);
		}
	}
}

/* Open base SPDK bdev and claim it */
static int
attach_base(struct vbdev_ocf_base *base)
{
	int status;

	if (base->attached) {
		return -EALREADY;
	}

	/* If base cache bdev was already opened by other vbdev,
	 * we just copy its descriptor here */
	if (base->is_cache) {
		struct vbdev_ocf_base *existing = get_other_cache_base(base);
		if (existing) {
			base->desc = existing->desc;
			base->attached = true;
			return 0;
		}
	}

	/* Open read-write with hot-remove notification routed to hotremove_cb() */
	status = spdk_bdev_open(base->bdev, true, hotremove_cb, base, &base->desc);
	if (status) {
SPDK_ERRLOG("Unable to open device '%s' for writing\n", base->name); 1293 return status; 1294 } 1295 1296 status = spdk_bdev_module_claim_bdev(base->bdev, base->desc, 1297 &ocf_if); 1298 if (status) { 1299 SPDK_ERRLOG("Unable to claim device '%s'\n", base->name); 1300 spdk_bdev_close(base->desc); 1301 return status; 1302 } 1303 1304 base->attached = true; 1305 return status; 1306 } 1307 1308 /* Attach base bdevs */ 1309 static int 1310 attach_base_bdevs(struct vbdev_ocf *vbdev, 1311 struct spdk_bdev *cache_bdev, 1312 struct spdk_bdev *core_bdev) 1313 { 1314 int rc = 0; 1315 1316 if (cache_bdev) { 1317 vbdev->cache.bdev = cache_bdev; 1318 rc |= attach_base(&vbdev->cache); 1319 } 1320 1321 if (core_bdev) { 1322 vbdev->core.bdev = core_bdev; 1323 rc |= attach_base(&vbdev->core); 1324 } 1325 1326 return rc; 1327 } 1328 1329 /* Init and then start vbdev if all base devices are present */ 1330 void 1331 vbdev_ocf_construct(const char *vbdev_name, 1332 const char *cache_mode_name, 1333 const char *cache_name, 1334 const char *core_name, 1335 bool loadq, 1336 void (*cb)(int, struct vbdev_ocf *, void *), 1337 void *cb_arg) 1338 { 1339 int rc; 1340 struct spdk_bdev *cache_bdev = spdk_bdev_get_by_name(cache_name); 1341 struct spdk_bdev *core_bdev = spdk_bdev_get_by_name(core_name); 1342 struct vbdev_ocf *vbdev; 1343 1344 rc = init_vbdev(vbdev_name, cache_mode_name, cache_name, core_name, loadq); 1345 if (rc) { 1346 cb(rc, NULL, cb_arg); 1347 return; 1348 } 1349 1350 vbdev = vbdev_ocf_get_by_name(vbdev_name); 1351 if (vbdev == NULL) { 1352 cb(-ENODEV, NULL, cb_arg); 1353 return; 1354 } 1355 1356 if (cache_bdev == NULL) { 1357 SPDK_NOTICELOG("OCF bdev '%s' is waiting for cache device '%s' to connect\n", 1358 vbdev->name, cache_name); 1359 } 1360 if (core_bdev == NULL) { 1361 SPDK_NOTICELOG("OCF bdev '%s' is waiting for core device '%s' to connect\n", 1362 vbdev->name, core_name); 1363 } 1364 1365 rc = attach_base_bdevs(vbdev, cache_bdev, core_bdev); 1366 if (rc) { 1367 cb(rc, 
		   vbdev, cb_arg);
		return;
	}

	/* Start only when both bases are present; otherwise report success now
	 * and wait for the missing device to appear via examine */
	if (core_bdev && cache_bdev) {
		register_vbdev(vbdev, cb, cb_arg);
	} else {
		cb(0, vbdev, cb_arg);
	}
}

/* This called if new device is created in SPDK application
 * If that device named as one of base bdevs of OCF vbdev,
 * claim and open them */
static void
vbdev_ocf_examine(struct spdk_bdev *bdev)
{
	const char *bdev_name = spdk_bdev_get_name(bdev);
	struct vbdev_ocf *vbdev;

	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
		if (vbdev->state.doing_finish) {
			continue;
		}

		/* A cache bdev may back several vbdevs, so keep scanning
		 * (continue); a core bdev belongs to one vbdev, so stop (break) */
		if (!strcmp(bdev_name, vbdev->cache.name)) {
			attach_base_bdevs(vbdev, bdev, NULL);
			continue;
		}
		if (!strcmp(bdev_name, vbdev->core.name)) {
			attach_base_bdevs(vbdev, NULL, bdev);
			break;
		}
	}
	spdk_bdev_module_examine_done(&ocf_if);
}

/* Context of an asynchronous metadata probe on a single bdev */
struct metadata_probe_ctx {
	struct vbdev_ocf_base base;	/* temporary base used for bottom-adapter IO */
	ocf_volume_t volume;		/* temporary OCF volume wrapping the bdev */

	struct ocf_volume_uuid *core_uuids;	/* core UUIDs read from cache metadata */
	unsigned int uuid_count;

	int result;	/* first error encountered, reported at teardown */
	int refcnt;	/* outstanding async operations; ctx freed at zero */
};

/* Drop one reference; on the last one log any stored error, release every
 * resource that was actually acquired and signal examine completion */
static void
examine_ctx_put(struct metadata_probe_ctx *ctx)
{
	unsigned int i;

	ctx->refcnt--;
	if (ctx->refcnt > 0) {
		return;
	}

	if (ctx->result) {
		SPDK_ERRLOG("OCF metadata probe for bdev '%s' failed with %d\n",
			    spdk_bdev_get_name(ctx->base.bdev), ctx->result);
	}

	if (ctx->base.desc) {
		spdk_bdev_close(ctx->base.desc);
	}

	if (ctx->volume) {
		ocf_volume_destroy(ctx->volume);
	}

	if (ctx->core_uuids) {
		for (i = 0; i < ctx->uuid_count; i++) {
			free(ctx->core_uuids[i].data);
		}
	}
	/* free(NULL) is a no-op, so this is safe when allocation never happened */
	free(ctx->core_uuids);

	examine_done(ctx->result, NULL, ctx->base.bdev);
	free(ctx);
}

/* Callback of vbdev_ocf_construct() for vbdevs created from probed metadata */
static void
metadata_probe_construct_cb(int rc, struct vbdev_ocf *vbdev, void *vctx)
{
	struct metadata_probe_ctx *ctx = vctx;

	/* Release the reference taken for this construct operation */
	examine_ctx_put(ctx);
}

/* This is second callback for ocf_metadata_probe_cores()
 * Here we create vbdev configurations based on UUIDs */
static void
metadata_probe_cores_construct(void *priv, int error, unsigned int num_cores)
{
	struct metadata_probe_ctx *ctx = priv;
	const char *vbdev_name;
	const char *core_name;
	unsigned int i;

	if (error) {
		ctx->result = error;
		examine_ctx_put(ctx);
		return;
	}

	for (i = 0; i < num_cores; i++) {
		core_name = ocf_uuid_to_str(&ctx->core_uuids[i]);
		/* NOTE(review): the vbdev name appears to be stored right after the
		 * NUL-terminated core name inside the UUID data - confirm against
		 * the OCF on-disk metadata layout */
		vbdev_name = core_name + strlen(core_name) + 1;
		/* One reference per in-flight construct, dropped in its callback */
		ctx->refcnt++;
		vbdev_ocf_construct(vbdev_name, NULL, ctx->base.bdev->name, core_name, true,
				    metadata_probe_construct_cb, ctx);
	}

	/* Drop the probe's own reference */
	examine_ctx_put(ctx);
}

/* This callback is called after OCF reads cores UUIDs from cache metadata
 * Here we allocate memory for those UUIDs and call ocf_metadata_probe_cores() again */
static void
metadata_probe_cores_get_num(void *priv, int error, unsigned int num_cores)
{
	struct metadata_probe_ctx *ctx = priv;
	unsigned int i;

	if (error) {
		ctx->result = error;
		examine_ctx_put(ctx);
		return;
	}

	ctx->uuid_count = num_cores;
	ctx->core_uuids = calloc(num_cores, sizeof(struct ocf_volume_uuid));
	if (!ctx->core_uuids) {
		ctx->result = -ENOMEM;
		examine_ctx_put(ctx);
		return;
	}

	for (i = 0; i < ctx->uuid_count; i++) {
		ctx->core_uuids[i].size = OCF_VOLUME_UUID_MAX_SIZE;
		ctx->core_uuids[i].data = malloc(OCF_VOLUME_UUID_MAX_SIZE);
		if (!ctx->core_uuids[i].data) {
			/* examine_ctx_put() frees the UUID buffers allocated so far */
			ctx->result = -ENOMEM;
			examine_ctx_put(ctx);
			return;
		}
	}

	/* Second probe pass: this time with buffers to receive the UUIDs */
	ocf_metadata_probe_cores(vbdev_ocf_ctx, ctx->volume, ctx->core_uuids, ctx->uuid_count,
				 metadata_probe_cores_construct, ctx);
}

static void
metadata_probe_cb(void *priv, int rc,
		  struct ocf_metadata_probe_status *status)
{
	struct metadata_probe_ctx *ctx = priv;

	if (rc) {
		/* -ENODATA means device does not have cache metadata on it;
		 * that is a normal outcome, not an error to report */
		if (rc != -ENODATA) {
			ctx->result = rc;
		}
		examine_ctx_put(ctx);
		return;
	}

	/* First pass with a NULL buffer only counts the cores; the callback
	 * allocates UUID storage and probes again */
	ocf_metadata_probe_cores(vbdev_ocf_ctx, ctx->volume, NULL, 0,
				 metadata_probe_cores_get_num, ctx);
}

/* This is called after vbdev_ocf_examine
 * It allows to delay application initialization
 * until all OCF bdevs get registered
 * If vbdev has all of its base devices it starts asynchronously here
 * We first check if bdev appears in configuration,
 * if not we do metadata_probe() to create its configuration from bdev metadata */
static void
vbdev_ocf_examine_disk(struct spdk_bdev *bdev)
{
	const char *bdev_name = spdk_bdev_get_name(bdev);
	struct vbdev_ocf *vbdev;
	struct metadata_probe_ctx *ctx;
	bool created_from_config = false;
	int rc;

	/* This examine_start() is paired with an examine_done() on every
	 * exit path below (directly or via examine_ctx_put()) */
	examine_start(bdev);

	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
		if (vbdev->state.doing_finish || vbdev->state.started) {
			continue;
		}

		if (!strcmp(bdev_name, vbdev->cache.name)) {
			/* Extra reference held until register_vbdev() completes */
			examine_start(bdev);
			register_vbdev(vbdev, examine_done, bdev);
			created_from_config = true;
			continue;
		}
		if (!strcmp(bdev_name, vbdev->core.name)) {
			examine_start(bdev);
			register_vbdev(vbdev, examine_done, bdev);
			examine_done(0, NULL, bdev);
			return;
		}
	}

	/* If device was discovered during config we do not check for metadata */
	if (created_from_config) {
		examine_done(0, NULL, bdev);
		return;
	}

	/* Metadata probe path
	 * We create temporary OCF volume and a temporary base structure
	 * to use them for ocf_metadata_probe() and for bottom adapter IOs
	 * Then we get UUIDs of core devices and create configurations based on them */
	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		examine_done(-ENOMEM, NULL, bdev);
		return;
	}

	ctx->base.bdev = bdev;
	/* Initial reference, dropped when the probe chain finishes */
	ctx->refcnt = 1;

	/* No hot-remove callback here - this descriptor is short-lived */
	rc = spdk_bdev_open(ctx->base.bdev, true, NULL, NULL, &ctx->base.desc);
	if (rc) {
		ctx->result = rc;
		examine_ctx_put(ctx);
		return;
	}

	rc = ocf_ctx_volume_create(vbdev_ocf_ctx, &ctx->volume, NULL, SPDK_OBJECT);
	if (rc) {
		ctx->result = rc;
		examine_ctx_put(ctx);
		return;
	}

	rc = ocf_volume_open(ctx->volume, &ctx->base);
	if (rc) {
		ctx->result = rc;
		examine_ctx_put(ctx);
		return;
	}

	ocf_metadata_probe(vbdev_ocf_ctx, ctx->volume, metadata_probe_cb, ctx);
}

/* Per-IO context size requested from the bdev layer */
static int
vbdev_ocf_get_ctx_size(void)
{
	return sizeof(struct bdev_ocf_data);
}

/* Remember that shutdown has begun so other paths can bail out early */
static void
fini_start(void)
{
	g_fini_started = true;
}

/* Module-global function table
 * Does not relate to vbdev instances */
static struct spdk_bdev_module ocf_if = {
	.name = "ocf",
	.module_init = vbdev_ocf_init,
	.fini_start = fini_start,
	.module_fini = vbdev_ocf_module_fini,
	.config_text = NULL,
	.get_ctx_size = vbdev_ocf_get_ctx_size,
	.examine_config = vbdev_ocf_examine,
	.examine_disk = vbdev_ocf_examine_disk,
};
SPDK_BDEV_MODULE_REGISTER(ocf, &ocf_if);

SPDK_LOG_REGISTER_COMPONENT("vbdev_ocf", SPDK_TRACE_VBDEV_OCF)