/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "blobstore.h"
#include "request.h"

#include "spdk/thread.h"
#include "spdk/queue.h"

#include "spdk/log.h"

/* Dispatch the user completion callback that matches the descriptor's type,
 * passing a NULL handle (or SPDK_BLOBID_INVALID) when bserrno is non-zero. */
void
bs_call_cpl(struct spdk_bs_cpl *cpl, int bserrno)
{
	switch (cpl->type) {
	case SPDK_BS_CPL_TYPE_BS_BASIC:
		cpl->u.bs_basic.cb_fn(cpl->u.bs_basic.cb_arg,
			bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BS_HANDLE:
		cpl->u.bs_handle.cb_fn(cpl->u.bs_handle.cb_arg,
			bserrno == 0 ? cpl->u.bs_handle.bs : NULL,
			bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg,
			bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOBID:
		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg,
			bserrno == 0 ? cpl->u.blobid.blobid : SPDK_BLOBID_INVALID,
			bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOB_HANDLE:
		cpl->u.blob_handle.cb_fn(cpl->u.blob_handle.cb_arg,
			bserrno == 0 ? cpl->u.blob_handle.blob : NULL,
			bserrno);
		break;
	case SPDK_BS_CPL_TYPE_NESTED_SEQUENCE:
		cpl->u.nested_seq.cb_fn(cpl->u.nested_seq.cb_arg,
			cpl->u.nested_seq.parent,
			bserrno);
		break;
	case SPDK_BS_CPL_TYPE_NONE:
		/* this completion's callback is handled elsewhere */
		break;
	}
}

/* Return the request set to the channel's free list before invoking the stored
 * completion, so the callback is free to reuse it immediately. */
static void
bs_request_set_complete(struct spdk_bs_request_set *set)
{
	struct spdk_bs_cpl cpl = set->cpl;
	int bserrno = set->bserrno;

	TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);

	bs_call_cpl(&cpl, bserrno);
}

static void
bs_sequence_completion(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
	struct spdk_bs_request_set *set = cb_arg;

	set->bserrno = bserrno;
	set->u.sequence.cb_fn((spdk_bs_sequence_t *)set, set->u.sequence.cb_arg, bserrno);
}

/* Take a request set from the channel's free list and initialize it as a
 * sequence. Returns NULL if the per-channel pool is exhausted. */
spdk_bs_sequence_t *
bs_sequence_start(struct spdk_io_channel *_channel,
		struct spdk_bs_cpl *cpl)
{
	struct spdk_bs_channel *channel;
	struct spdk_bs_request_set *set;

	channel = spdk_io_channel_get_ctx(_channel);
	assert(channel != NULL);
	set = TAILQ_FIRST(&channel->reqs);
	if (!set) {
		return NULL;
	}
	TAILQ_REMOVE(&channel->reqs, set, link);

	set->cpl = *cpl;
	set->bserrno = 0;
	set->channel = channel;

	set->cb_args.cb_fn = bs_sequence_completion;
	set->cb_args.cb_arg = set;
	set->cb_args.channel = channel->dev_channel;
	set->ext_io_opts = NULL;

	return (spdk_bs_sequence_t *)set;
}

void
bs_sequence_read_bs_dev(spdk_bs_sequence_t *seq, struct spdk_bs_dev *bs_dev,
		void *payload, uint64_t lba, uint32_t lba_count,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	bs_dev->read(bs_dev, spdk_io_channel_from_ctx(channel), payload, lba, lba_count, &set->cb_args);
}

void
bs_sequence_read_dev(spdk_bs_sequence_t *seq, void *payload,
		uint64_t lba, uint32_t lba_count,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	channel->dev->read(channel->dev, channel->dev_channel, payload, lba, lba_count, &set->cb_args);
}

void
bs_sequence_write_dev(spdk_bs_sequence_t *seq, void *payload,
		uint64_t lba, uint32_t lba_count,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks to LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	channel->dev->write(channel->dev, channel->dev_channel, payload, lba, lba_count,
		&set->cb_args);
}

void
bs_sequence_readv_bs_dev(spdk_bs_sequence_t *seq, struct spdk_bs_dev *bs_dev,
		struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	if (set->ext_io_opts) {
		assert(bs_dev->readv_ext);
		bs_dev->readv_ext(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count,
			&set->cb_args, set->ext_io_opts);
	} else {
		bs_dev->readv(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count,
			&set->cb_args);
	}
}

void
bs_sequence_readv_dev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
		uint64_t lba, uint32_t lba_count, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;
	if (set->ext_io_opts) {
		assert(channel->dev->readv_ext);
		channel->dev->readv_ext(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
			&set->cb_args, set->ext_io_opts);
	} else {
		channel->dev->readv(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count, &set->cb_args);
	}
}

void
bs_sequence_writev_dev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
		uint64_t lba, uint32_t lba_count,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks to LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	if (set->ext_io_opts) {
		assert(channel->dev->writev_ext);
		channel->dev->writev_ext(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
			&set->cb_args, set->ext_io_opts);
	} else {
		channel->dev->writev(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
			&set->cb_args);
	}
}

void
bs_sequence_write_zeroes_dev(spdk_bs_sequence_t *seq,
		uint64_t lba, uint64_t lba_count,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "writing zeroes to %" PRIu64 " blocks at LBA %" PRIu64 "\n",
		lba_count, lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	channel->dev->write_zeroes(channel->dev, channel->dev_channel, lba, lba_count,
		&set->cb_args);
}

void
bs_sequence_finish(spdk_bs_sequence_t *seq, int bserrno)
{
	if (bserrno != 0) {
		seq->bserrno = bserrno;
	}
	bs_request_set_complete((struct spdk_bs_request_set *)seq);
}

void
bs_user_op_sequence_finish(void *cb_arg, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	bs_sequence_finish(seq, bserrno);
}

/* Per-I/O completion for a batch: record any error and complete the batch once
 * it has been closed and no operations remain outstanding. */
static void
bs_batch_completion(struct spdk_io_channel *_channel,
		void *cb_arg, int bserrno)
{
	struct spdk_bs_request_set *set = cb_arg;

	set->u.batch.outstanding_ops--;
	if (bserrno != 0) {
		set->bserrno = bserrno;
	}

	if (set->u.batch.outstanding_ops == 0 && set->u.batch.batch_closed) {
		if (set->u.batch.cb_fn) {
			set->cb_args.cb_fn = bs_sequence_completion;
			set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, bserrno);
		} else {
			bs_request_set_complete(set);
		}
	}
}

/* Take a request set from the channel's free list and initialize it as an
 * empty, open batch. Returns NULL if the per-channel pool is exhausted. */
spdk_bs_batch_t *
bs_batch_open(struct spdk_io_channel *_channel,
		struct spdk_bs_cpl *cpl)
{
	struct spdk_bs_channel *channel;
	struct spdk_bs_request_set *set;

	channel = spdk_io_channel_get_ctx(_channel);
	assert(channel != NULL);
	set = TAILQ_FIRST(&channel->reqs);
	if (!set) {
		return NULL;
	}
	TAILQ_REMOVE(&channel->reqs, set, link);

	set->cpl = *cpl;
	set->bserrno = 0;
	set->channel = channel;

	set->u.batch.cb_fn = NULL;
	set->u.batch.cb_arg = NULL;
	set->u.batch.outstanding_ops = 0;
	set->u.batch.batch_closed = 0;

	set->cb_args.cb_fn = bs_batch_completion;
	set->cb_args.cb_arg = set;
	set->cb_args.channel = channel->dev_channel;

	return (spdk_bs_batch_t *)set;
}

void
bs_batch_read_bs_dev(spdk_bs_batch_t *batch, struct spdk_bs_dev *bs_dev,
		void *payload, uint64_t lba, uint32_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.batch.outstanding_ops++;
	bs_dev->read(bs_dev, spdk_io_channel_from_ctx(channel), payload, lba, lba_count, &set->cb_args);
}

void
bs_batch_read_dev(spdk_bs_batch_t *batch, void *payload,
		uint64_t lba, uint32_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.batch.outstanding_ops++;
	channel->dev->read(channel->dev, channel->dev_channel, payload, lba, lba_count, &set->cb_args);
}

void
bs_batch_write_dev(spdk_bs_batch_t *batch, void *payload,
		uint64_t lba, uint32_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks to LBA %" PRIu64 "\n", lba_count, lba);

	set->u.batch.outstanding_ops++;
	channel->dev->write(channel->dev, channel->dev_channel, payload, lba, lba_count,
		&set->cb_args);
}

void
bs_batch_unmap_dev(spdk_bs_batch_t *batch,
		uint64_t lba, uint64_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Unmapping %" PRIu64 " blocks at LBA %" PRIu64 "\n", lba_count,
		lba);

	set->u.batch.outstanding_ops++;
	channel->dev->unmap(channel->dev, channel->dev_channel, lba, lba_count,
		&set->cb_args);
}

void
bs_batch_write_zeroes_dev(spdk_bs_batch_t *batch,
		uint64_t lba, uint64_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Zeroing %" PRIu64 " blocks at LBA %" PRIu64 "\n", lba_count, lba);

	set->u.batch.outstanding_ops++;
	channel->dev->write_zeroes(channel->dev, channel->dev_channel, lba, lba_count,
		&set->cb_args);
}

/* Mark the batch closed; if no operations are still outstanding it completes
 * immediately, otherwise bs_batch_completion() finishes it later. */
void
bs_batch_close(spdk_bs_batch_t *batch)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;

	set->u.batch.batch_closed = 1;

	if (set->u.batch.outstanding_ops == 0) {
		if (set->u.batch.cb_fn) {
			set->cb_args.cb_fn = bs_sequence_completion;
			set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, set->bserrno);
		} else {
			bs_request_set_complete(set);
		}
	}
}

/* Reuse a sequence's request set as a batch; when the batch completes, cb_fn is
 * invoked as a sequence step instead of calling the user completion directly. */
spdk_bs_batch_t *
bs_sequence_to_batch(spdk_bs_sequence_t *seq, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;

	set->u.batch.cb_fn = cb_fn;
	set->u.batch.cb_arg = cb_arg;
	set->u.batch.outstanding_ops = 0;
	set->u.batch.batch_closed = 0;

	set->cb_args.cb_fn = bs_batch_completion;

	return set;
}

spdk_bs_user_op_t *
bs_user_op_alloc(struct spdk_io_channel *_channel, struct spdk_bs_cpl *cpl,
		enum spdk_blob_op_type op_type, struct spdk_blob *blob,
		void *payload, int iovcnt, uint64_t offset, uint64_t length)
{
	struct spdk_bs_channel *channel;
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;

	channel = spdk_io_channel_get_ctx(_channel);
	assert(channel != NULL);
	set = TAILQ_FIRST(&channel->reqs);
	if (!set) {
		return NULL;
	}
	TAILQ_REMOVE(&channel->reqs, set, link);

	set->cpl = *cpl;
	set->channel = channel;
	set->ext_io_opts = NULL;

	args = &set->u.user_op;

	args->type = op_type;
	args->iovcnt = iovcnt;
	args->blob = blob;
	args->offset = offset;
	args->length = length;
	args->payload = payload;

	return (spdk_bs_user_op_t *)set;
}

/* Replay a queued user operation through the public blob I/O API, then return
 * the request set to the channel's free list. */
void
bs_user_op_execute(spdk_bs_user_op_t *op)
{
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	struct spdk_io_channel *ch;

	set = (struct spdk_bs_request_set *)op;
	args = &set->u.user_op;
	ch = spdk_io_channel_from_ctx(set->channel);

	switch (args->type) {
	case SPDK_BLOB_READ:
		spdk_blob_io_read(args->blob, ch, args->payload, args->offset, args->length,
			set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
		break;
	case SPDK_BLOB_WRITE:
		spdk_blob_io_write(args->blob, ch, args->payload, args->offset, args->length,
			set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
		break;
	case SPDK_BLOB_UNMAP:
		spdk_blob_io_unmap(args->blob, ch, args->offset, args->length,
			set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
		break;
	case SPDK_BLOB_WRITE_ZEROES:
		spdk_blob_io_write_zeroes(args->blob, ch, args->offset, args->length,
			set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
		break;
	case SPDK_BLOB_READV:
		spdk_blob_io_readv_ext(args->blob, ch, args->payload, args->iovcnt,
			args->offset, args->length,
			set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg,
			set->ext_io_opts);
		break;
	case SPDK_BLOB_WRITEV:
		spdk_blob_io_writev_ext(args->blob, ch, args->payload, args->iovcnt,
			args->offset, args->length,
			set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg,
			set->ext_io_opts);
		break;
	}
	TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);
}

/* Fail a queued user operation with bserrno and return the request set to the
 * channel's free list. */
void
bs_user_op_abort(spdk_bs_user_op_t *op, int bserrno)
{
	struct spdk_bs_request_set *set;

	set = (struct spdk_bs_request_set *)op;

	set->cpl.u.blob_basic.cb_fn(set->cpl.u.blob_basic.cb_arg, bserrno);
	TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);
}

SPDK_LOG_REGISTER_COMPONENT(blob_rw)
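/*
 * Typical caller flow, as a sketch only: the names below (user_done_cb,
 * user_ctx, write_done, io_channel, payload) are illustrative placeholders,
 * not part of the blobstore code. A caller fills in a completion descriptor,
 * starts a sequence on a blobstore I/O channel, issues device I/O through it,
 * and finishes the sequence from the I/O callback, which fires the user
 * completion via bs_call_cpl():
 *
 *	static void
 *	write_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 *	{
 *		bs_sequence_finish(seq, bserrno);
 *	}
 *
 *	struct spdk_bs_cpl cpl = {
 *		.type = SPDK_BS_CPL_TYPE_BLOB_BASIC,
 *		.u.blob_basic.cb_fn = user_done_cb,
 *		.u.blob_basic.cb_arg = user_ctx,
 *	};
 *	spdk_bs_sequence_t *seq = bs_sequence_start(io_channel, &cpl);
 *
 *	if (seq == NULL) {
 *		user_done_cb(user_ctx, -ENOMEM);
 *	} else {
 *		bs_sequence_write_dev(seq, payload, lba, lba_count, write_done, NULL);
 *	}
 */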