1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "blobstore.h" 10 #include "request.h" 11 12 #include "spdk/thread.h" 13 #include "spdk/queue.h" 14 15 #include "spdk/log.h" 16 17 void 18 bs_call_cpl(struct spdk_bs_cpl *cpl, int bserrno) 19 { 20 switch (cpl->type) { 21 case SPDK_BS_CPL_TYPE_BS_BASIC: 22 cpl->u.bs_basic.cb_fn(cpl->u.bs_basic.cb_arg, 23 bserrno); 24 break; 25 case SPDK_BS_CPL_TYPE_BS_HANDLE: 26 cpl->u.bs_handle.cb_fn(cpl->u.bs_handle.cb_arg, 27 bserrno == 0 ? cpl->u.bs_handle.bs : NULL, 28 bserrno); 29 break; 30 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 31 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, 32 bserrno); 33 break; 34 case SPDK_BS_CPL_TYPE_BLOBID: 35 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, 36 bserrno == 0 ? cpl->u.blobid.blobid : SPDK_BLOBID_INVALID, 37 bserrno); 38 break; 39 case SPDK_BS_CPL_TYPE_BLOB_HANDLE: 40 cpl->u.blob_handle.cb_fn(cpl->u.blob_handle.cb_arg, 41 bserrno == 0 ? cpl->u.blob_handle.blob : NULL, 42 bserrno); 43 break; 44 case SPDK_BS_CPL_TYPE_NESTED_SEQUENCE: 45 cpl->u.nested_seq.cb_fn(cpl->u.nested_seq.cb_arg, 46 cpl->u.nested_seq.parent, 47 bserrno); 48 break; 49 case SPDK_BS_CPL_TYPE_NONE: 50 /* this completion's callback is handled elsewhere */ 51 break; 52 } 53 } 54 55 static void 56 bs_request_set_complete(struct spdk_bs_request_set *set) 57 { 58 struct spdk_bs_cpl cpl = set->cpl; 59 int bserrno = set->bserrno; 60 61 TAILQ_INSERT_TAIL(&set->channel->reqs, set, link); 62 63 bs_call_cpl(&cpl, bserrno); 64 } 65 66 static void 67 bs_sequence_completion(struct spdk_io_channel *channel, void *cb_arg, int bserrno) 68 { 69 struct spdk_bs_request_set *set = cb_arg; 70 71 set->bserrno = bserrno; 72 set->u.sequence.cb_fn((spdk_bs_sequence_t *)set, set->u.sequence.cb_arg, bserrno); 73 } 74 75 spdk_bs_sequence_t * 76 bs_sequence_start(struct spdk_io_channel *_channel, 77 struct spdk_bs_cpl *cpl) 78 { 79 struct spdk_bs_channel *channel; 80 struct spdk_bs_request_set *set; 81 82 channel = spdk_io_channel_get_ctx(_channel); 83 assert(channel != NULL); 84 set = TAILQ_FIRST(&channel->reqs); 85 if (!set) { 86 return NULL; 87 } 88 TAILQ_REMOVE(&channel->reqs, set, link); 89 90 set->cpl = *cpl; 91 set->bserrno = 0; 92 set->channel = channel; 93 94 set->cb_args.cb_fn = bs_sequence_completion; 95 set->cb_args.cb_arg = set; 96 set->cb_args.channel = channel->dev_channel; 97 set->ext_io_opts = NULL; 98 99 return (spdk_bs_sequence_t *)set; 100 } 101 102 void 103 bs_sequence_read_bs_dev(spdk_bs_sequence_t *seq, struct spdk_bs_dev *bs_dev, 104 void *payload, uint64_t lba, uint32_t lba_count, 105 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 106 { 107 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 108 struct spdk_bs_channel *channel = set->channel; 109 110 SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count, 111 lba); 112 113 set->u.sequence.cb_fn = cb_fn; 114 set->u.sequence.cb_arg = cb_arg; 115 116 bs_dev->read(bs_dev, spdk_io_channel_from_ctx(channel), payload, lba, lba_count, &set->cb_args); 117 } 118 119 void 120 bs_sequence_read_dev(spdk_bs_sequence_t *seq, void *payload, 121 uint64_t lba, uint32_t lba_count, 122 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 123 { 124 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 125 struct spdk_bs_channel *channel = set->channel; 126 127 SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count, 128 lba); 129 130 set->u.sequence.cb_fn = cb_fn; 131 set->u.sequence.cb_arg = cb_arg; 132 133 channel->dev->read(channel->dev, channel->dev_channel, payload, lba, lba_count, &set->cb_args); 134 } 135 136 void 137 bs_sequence_write_dev(spdk_bs_sequence_t *seq, void *payload, 138 uint64_t lba, uint32_t lba_count, 139 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 140 { 141 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 142 struct spdk_bs_channel *channel = set->channel; 143 144 SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count, 145 lba); 146 147 set->u.sequence.cb_fn = cb_fn; 148 set->u.sequence.cb_arg = cb_arg; 149 150 channel->dev->write(channel->dev, channel->dev_channel, payload, lba, lba_count, 151 &set->cb_args); 152 } 153 154 void 155 bs_sequence_readv_bs_dev(spdk_bs_sequence_t *seq, struct spdk_bs_dev *bs_dev, 156 struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count, 157 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 158 { 159 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 160 struct spdk_bs_channel *channel = set->channel; 161 162 SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count, 163 lba); 164 165 set->u.sequence.cb_fn = cb_fn; 166 set->u.sequence.cb_arg = cb_arg; 167 168 if (set->ext_io_opts) { 169 assert(bs_dev->readv_ext); 170 bs_dev->readv_ext(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count, 171 &set->cb_args, set->ext_io_opts); 172 } else { 173 bs_dev->readv(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count, 174 &set->cb_args); 175 } 176 } 177 178 void 179 bs_sequence_readv_dev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt, 180 uint64_t lba, uint32_t lba_count, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 181 { 182 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 183 struct spdk_bs_channel *channel = set->channel; 184 185 SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count, 186 lba); 187 188 set->u.sequence.cb_fn = cb_fn; 189 set->u.sequence.cb_arg = cb_arg; 190 if (set->ext_io_opts) { 191 assert(channel->dev->readv_ext); 192 channel->dev->readv_ext(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count, 193 &set->cb_args, set->ext_io_opts); 194 } else { 195 channel->dev->readv(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count, &set->cb_args); 196 } 197 } 198 199 void 200 bs_sequence_writev_dev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt, 201 uint64_t lba, uint32_t lba_count, 202 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 203 { 204 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 205 struct spdk_bs_channel *channel = set->channel; 206 207 SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count, 208 lba); 209 210 set->u.sequence.cb_fn = cb_fn; 211 set->u.sequence.cb_arg = cb_arg; 212 213 if (set->ext_io_opts) { 214 assert(channel->dev->writev_ext); 215 channel->dev->writev_ext(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count, 216 &set->cb_args, set->ext_io_opts); 217 } else { 218 channel->dev->writev(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count, 219 &set->cb_args); 220 } 221 } 222 223 void 224 bs_sequence_write_zeroes_dev(spdk_bs_sequence_t *seq, 225 uint64_t lba, uint64_t lba_count, 226 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 227 { 228 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 229 struct spdk_bs_channel *channel = set->channel; 230 231 SPDK_DEBUGLOG(blob_rw, "writing zeroes to %" PRIu64 " blocks at LBA %" PRIu64 "\n", 232 lba_count, lba); 233 234 set->u.sequence.cb_fn = cb_fn; 235 set->u.sequence.cb_arg = cb_arg; 236 237 channel->dev->write_zeroes(channel->dev, channel->dev_channel, lba, lba_count, 238 &set->cb_args); 239 } 240 241 void 242 bs_sequence_copy_dev(spdk_bs_sequence_t *seq, uint64_t dst_lba, uint64_t src_lba, 243 uint64_t lba_count, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 244 { 245 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 246 struct spdk_bs_channel *channel = set->channel; 247 248 SPDK_DEBUGLOG(blob_rw, "Copying %" PRIu64 " blocks from LBA %" PRIu64 " to LBA %" PRIu64 "\n", 249 lba_count, src_lba, dst_lba); 250 251 set->u.sequence.cb_fn = cb_fn; 252 set->u.sequence.cb_arg = cb_arg; 253 254 channel->dev->copy(channel->dev, channel->dev_channel, dst_lba, src_lba, lba_count, &set->cb_args); 255 } 256 257 void 258 bs_sequence_finish(spdk_bs_sequence_t *seq, int bserrno) 259 { 260 if (bserrno != 0) { 261 seq->bserrno = bserrno; 262 } 263 bs_request_set_complete((struct spdk_bs_request_set *)seq); 264 } 265 266 void 267 bs_user_op_sequence_finish(void *cb_arg, int bserrno) 268 { 269 spdk_bs_sequence_t *seq = cb_arg; 270 271 bs_sequence_finish(seq, bserrno); 272 } 273 274 static void 275 bs_batch_completion(struct spdk_io_channel *_channel, 276 void *cb_arg, int bserrno) 277 { 278 struct spdk_bs_request_set *set = cb_arg; 279 280 set->u.batch.outstanding_ops--; 281 if (bserrno != 0) { 282 set->bserrno = bserrno; 283 } 284 285 if (set->u.batch.outstanding_ops == 0 && set->u.batch.batch_closed) { 286 if (set->u.batch.cb_fn) { 287 set->cb_args.cb_fn = bs_sequence_completion; 288 set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, bserrno); 289 } else { 290 bs_request_set_complete(set); 291 } 292 } 293 } 294 295 spdk_bs_batch_t * 296 bs_batch_open(struct spdk_io_channel *_channel, 297 struct spdk_bs_cpl *cpl) 298 { 299 struct spdk_bs_channel *channel; 300 struct spdk_bs_request_set *set; 301 302 channel = spdk_io_channel_get_ctx(_channel); 303 assert(channel != NULL); 304 set = TAILQ_FIRST(&channel->reqs); 305 if (!set) { 306 return NULL; 307 } 308 TAILQ_REMOVE(&channel->reqs, set, link); 309 310 set->cpl = *cpl; 311 set->bserrno = 0; 312 set->channel = channel; 313 314 set->u.batch.cb_fn = NULL; 315 set->u.batch.cb_arg = NULL; 316 set->u.batch.outstanding_ops = 0; 317 set->u.batch.batch_closed = 0; 318 319 set->cb_args.cb_fn = bs_batch_completion; 320 set->cb_args.cb_arg = set; 321 set->cb_args.channel = channel->dev_channel; 322 323 return (spdk_bs_batch_t *)set; 324 } 325 326 void 327 bs_batch_read_bs_dev(spdk_bs_batch_t *batch, struct spdk_bs_dev *bs_dev, 328 void *payload, uint64_t lba, uint32_t lba_count) 329 { 330 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch; 331 struct spdk_bs_channel *channel = set->channel; 332 333 SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count, 334 lba); 335 336 set->u.batch.outstanding_ops++; 337 bs_dev->read(bs_dev, spdk_io_channel_from_ctx(channel), payload, lba, lba_count, &set->cb_args); 338 } 339 340 void 341 bs_batch_read_dev(spdk_bs_batch_t *batch, void *payload, 342 uint64_t lba, uint32_t lba_count) 343 { 344 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch; 345 struct spdk_bs_channel *channel = set->channel; 346 347 SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count, 348 lba); 349 350 set->u.batch.outstanding_ops++; 351 channel->dev->read(channel->dev, channel->dev_channel, payload, lba, lba_count, &set->cb_args); 352 } 353 354 void 355 bs_batch_write_dev(spdk_bs_batch_t *batch, void *payload, 356 uint64_t lba, uint32_t lba_count) 357 { 358 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch; 359 struct spdk_bs_channel *channel = set->channel; 360 361 SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks to LBA %" PRIu64 "\n", lba_count, lba); 362 363 set->u.batch.outstanding_ops++; 364 channel->dev->write(channel->dev, channel->dev_channel, payload, lba, lba_count, 365 &set->cb_args); 366 } 367 368 void 369 bs_batch_unmap_dev(spdk_bs_batch_t *batch, 370 uint64_t lba, uint64_t lba_count) 371 { 372 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch; 373 struct spdk_bs_channel *channel = set->channel; 374 375 SPDK_DEBUGLOG(blob_rw, "Unmapping %" PRIu64 " blocks at LBA %" PRIu64 "\n", lba_count, 376 lba); 377 378 set->u.batch.outstanding_ops++; 379 channel->dev->unmap(channel->dev, channel->dev_channel, lba, lba_count, 380 &set->cb_args); 381 } 382 383 void 384 bs_batch_write_zeroes_dev(spdk_bs_batch_t *batch, 385 uint64_t lba, uint64_t lba_count) 386 { 387 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch; 388 struct spdk_bs_channel *channel = set->channel; 389 390 SPDK_DEBUGLOG(blob_rw, "Zeroing %" PRIu64 " blocks at LBA %" PRIu64 "\n", lba_count, lba); 391 392 set->u.batch.outstanding_ops++; 393 channel->dev->write_zeroes(channel->dev, channel->dev_channel, lba, lba_count, 394 &set->cb_args); 395 } 396 397 void 398 bs_batch_close(spdk_bs_batch_t *batch) 399 { 400 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch; 401 402 set->u.batch.batch_closed = 1; 403 404 if (set->u.batch.outstanding_ops == 0) { 405 if (set->u.batch.cb_fn) { 406 set->cb_args.cb_fn = bs_sequence_completion; 407 set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, set->bserrno); 408 } else { 409 bs_request_set_complete(set); 410 } 411 } 412 } 413 414 spdk_bs_batch_t * 415 bs_sequence_to_batch(spdk_bs_sequence_t *seq, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 416 { 417 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq; 418 419 set->u.batch.cb_fn = cb_fn; 420 set->u.batch.cb_arg = cb_arg; 421 set->u.batch.outstanding_ops = 0; 422 set->u.batch.batch_closed = 0; 423 424 set->cb_args.cb_fn = bs_batch_completion; 425 426 return set; 427 } 428 429 spdk_bs_user_op_t * 430 bs_user_op_alloc(struct spdk_io_channel *_channel, struct spdk_bs_cpl *cpl, 431 enum spdk_blob_op_type op_type, struct spdk_blob *blob, 432 void *payload, int iovcnt, uint64_t offset, uint64_t length) 433 { 434 struct spdk_bs_channel *channel; 435 struct spdk_bs_request_set *set; 436 struct spdk_bs_user_op_args *args; 437 438 channel = spdk_io_channel_get_ctx(_channel); 439 assert(channel != NULL); 440 set = TAILQ_FIRST(&channel->reqs); 441 if (!set) { 442 return NULL; 443 } 444 TAILQ_REMOVE(&channel->reqs, set, link); 445 446 set->cpl = *cpl; 447 set->channel = channel; 448 set->ext_io_opts = NULL; 449 450 args = &set->u.user_op; 451 452 args->type = op_type; 453 args->iovcnt = iovcnt; 454 args->blob = blob; 455 args->offset = offset; 456 args->length = length; 457 args->payload = payload; 458 459 return (spdk_bs_user_op_t *)set; 460 } 461 462 void 463 bs_user_op_execute(spdk_bs_user_op_t *op) 464 { 465 struct spdk_bs_request_set *set; 466 struct spdk_bs_user_op_args *args; 467 struct spdk_io_channel *ch; 468 469 set = (struct spdk_bs_request_set *)op; 470 args = &set->u.user_op; 471 ch = spdk_io_channel_from_ctx(set->channel); 472 473 switch (args->type) { 474 case SPDK_BLOB_READ: 475 spdk_blob_io_read(args->blob, ch, args->payload, args->offset, args->length, 476 set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg); 477 break; 478 case SPDK_BLOB_WRITE: 479 spdk_blob_io_write(args->blob, ch, args->payload, args->offset, args->length, 480 set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg); 481 break; 482 case SPDK_BLOB_UNMAP: 483 spdk_blob_io_unmap(args->blob, ch, args->offset, args->length, 484 set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg); 485 break; 486 case SPDK_BLOB_WRITE_ZEROES: 487 spdk_blob_io_write_zeroes(args->blob, ch, args->offset, args->length, 488 set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg); 489 break; 490 case SPDK_BLOB_READV: 491 spdk_blob_io_readv_ext(args->blob, ch, args->payload, args->iovcnt, 492 args->offset, args->length, 493 set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg, 494 set->ext_io_opts); 495 break; 496 case SPDK_BLOB_WRITEV: 497 spdk_blob_io_writev_ext(args->blob, ch, args->payload, args->iovcnt, 498 args->offset, args->length, 499 set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg, 500 set->ext_io_opts); 501 break; 502 } 503 TAILQ_INSERT_TAIL(&set->channel->reqs, set, link); 504 } 505 506 void 507 bs_user_op_abort(spdk_bs_user_op_t *op, int bserrno) 508 { 509 struct spdk_bs_request_set *set; 510 511 set = (struct spdk_bs_request_set *)op; 512 513 set->cpl.u.blob_basic.cb_fn(set->cpl.u.blob_basic.cb_arg, bserrno); 514 TAILQ_INSERT_TAIL(&set->channel->reqs, set, link); 515 } 516 517 SPDK_LOG_REGISTER_COMPONENT(blob_rw) 518