/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "blobstore.h"
#include "request.h"

#include "spdk/thread.h"
#include "spdk/queue.h"

#include "spdk/log.h"

void
bs_call_cpl(struct spdk_bs_cpl *cpl, int bserrno)
{
	switch (cpl->type) {
	case SPDK_BS_CPL_TYPE_BS_BASIC:
		cpl->u.bs_basic.cb_fn(cpl->u.bs_basic.cb_arg,
				      bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BS_HANDLE:
		cpl->u.bs_handle.cb_fn(cpl->u.bs_handle.cb_arg,
				       bserrno == 0 ? cpl->u.bs_handle.bs : NULL,
				       bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg,
					bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOBID:
		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg,
				    bserrno == 0 ? cpl->u.blobid.blobid : SPDK_BLOBID_INVALID,
				    bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOB_HANDLE:
		cpl->u.blob_handle.cb_fn(cpl->u.blob_handle.cb_arg,
					 bserrno == 0 ? cpl->u.blob_handle.blob : NULL,
					 bserrno);
		break;
	case SPDK_BS_CPL_TYPE_NESTED_SEQUENCE:
		cpl->u.nested_seq.cb_fn(cpl->u.nested_seq.cb_arg,
					cpl->u.nested_seq.parent,
					bserrno);
		break;
	case SPDK_BS_CPL_TYPE_NONE:
		/* this completion's callback is handled elsewhere */
		break;
	}
}

static void
bs_request_set_complete(struct spdk_bs_request_set *set)
{
	struct spdk_bs_cpl cpl = set->cpl;
	int bserrno = set->bserrno;

	TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);

	bs_call_cpl(&cpl, bserrno);
}

static void
bs_sequence_completion(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
	struct spdk_bs_request_set *set = cb_arg;

	set->bserrno = bserrno;
	set->u.sequence.cb_fn((spdk_bs_sequence_t *)set, set->u.sequence.cb_arg, bserrno);
}

spdk_bs_sequence_t *
bs_sequence_start(struct spdk_io_channel *_channel,
		  struct spdk_bs_cpl *cpl)
{
	struct spdk_bs_channel *channel;
	struct spdk_bs_request_set *set;

	channel = spdk_io_channel_get_ctx(_channel);
	assert(channel != NULL);
	set = TAILQ_FIRST(&channel->reqs);
	if (!set) {
		return NULL;
	}
	TAILQ_REMOVE(&channel->reqs, set, link);

	set->cpl = *cpl;
	set->bserrno = 0;
	set->channel = channel;

	set->cb_args.cb_fn = bs_sequence_completion;
	set->cb_args.cb_arg = set;
	set->cb_args.channel = channel->dev_channel;
	set->ext_io_opts = NULL;

	return (spdk_bs_sequence_t *)set;
}
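/*
 * Typical caller pattern for a sequence (illustrative sketch only; the
 * callback, context, and buffer names below are hypothetical and not part
 * of this file):
 *
 *	struct spdk_bs_cpl cpl = {
 *		.type = SPDK_BS_CPL_TYPE_BLOB_BASIC,
 *		.u.blob_basic.cb_fn = user_done_cb,
 *		.u.blob_basic.cb_arg = user_ctx,
 *	};
 *	spdk_bs_sequence_t *seq = bs_sequence_start(io_channel, &cpl);
 *	if (seq == NULL) {
 *		... the per-channel request pool is exhausted ...
 *	}
 *	bs_sequence_read_dev(seq, buf, lba, lba_count, step_done, seq);
 *
 * step_done() may queue further I/O on the same sequence and eventually
 * calls bs_sequence_finish(), which fires the stored cpl and returns the
 * request set to the channel's free list.
 */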
void
bs_sequence_read_bs_dev(spdk_bs_sequence_t *seq, struct spdk_bs_dev *bs_dev,
			void *payload, uint64_t lba, uint32_t lba_count,
			spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	bs_dev->read(bs_dev, spdk_io_channel_from_ctx(channel), payload, lba, lba_count, &set->cb_args);
}

void
bs_sequence_read_dev(spdk_bs_sequence_t *seq, void *payload,
		     uint64_t lba, uint32_t lba_count,
		     spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	channel->dev->read(channel->dev, channel->dev_channel, payload, lba, lba_count, &set->cb_args);
}

void
bs_sequence_write_dev(spdk_bs_sequence_t *seq, void *payload,
		      uint64_t lba, uint32_t lba_count,
		      spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks to LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	channel->dev->write(channel->dev, channel->dev_channel, payload, lba, lba_count,
			    &set->cb_args);
}

void
bs_sequence_readv_bs_dev(spdk_bs_sequence_t *seq, struct spdk_bs_dev *bs_dev,
			 struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
			 spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	if (set->ext_io_opts) {
		assert(bs_dev->readv_ext);
		bs_dev->readv_ext(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count,
				  &set->cb_args, set->ext_io_opts);
	} else {
		bs_dev->readv(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count,
			      &set->cb_args);
	}
}

void
bs_sequence_readv_dev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
		      uint64_t lba, uint32_t lba_count, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;
	if (set->ext_io_opts) {
		assert(channel->dev->readv_ext);
		channel->dev->readv_ext(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
					&set->cb_args, set->ext_io_opts);
	} else {
		channel->dev->readv(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count, &set->cb_args);
	}
}

void
bs_sequence_writev_dev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
		       uint64_t lba, uint32_t lba_count,
		       spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks to LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	if (set->ext_io_opts) {
		assert(channel->dev->writev_ext);
		channel->dev->writev_ext(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
					 &set->cb_args, set->ext_io_opts);
	} else {
		channel->dev->writev(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
				     &set->cb_args);
	}
}

void
bs_sequence_write_zeroes_dev(spdk_bs_sequence_t *seq,
			     uint64_t lba, uint64_t lba_count,
			     spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Writing zeroes to %" PRIu64 " blocks at LBA %" PRIu64 "\n",
		      lba_count, lba);

	set->u.sequence.cb_fn = cb_fn;
	set->u.sequence.cb_arg = cb_arg;

	channel->dev->write_zeroes(channel->dev, channel->dev_channel, lba, lba_count,
				   &set->cb_args);
}

void
bs_sequence_finish(spdk_bs_sequence_t *seq, int bserrno)
{
	if (bserrno != 0) {
		seq->bserrno = bserrno;
	}
	bs_request_set_complete((struct spdk_bs_request_set *)seq);
}

void
bs_user_op_sequence_finish(void *cb_arg, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	bs_sequence_finish(seq, bserrno);
}
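/*
 * Batches issue several device operations in parallel against a single
 * request set: each bs_batch_*_dev() call increments
 * set->u.batch.outstanding_ops, and the batch completes only once every
 * operation has called back *and* bs_batch_close() has marked the batch
 * closed. A non-zero bserrno from any operation is recorded in set->bserrno.
 * Illustrative sketch only (the buffer and LBA names are hypothetical):
 *
 *	spdk_bs_batch_t *batch = bs_batch_open(io_channel, &cpl);
 *	bs_batch_write_dev(batch, buf_a, lba_a, lba_count_a);
 *	bs_batch_write_zeroes_dev(batch, lba_b, lba_count_b);
 *	bs_batch_close(batch);
 *
 * When u.batch.cb_fn is set (see bs_sequence_to_batch()), completion is
 * chained back into that sequence callback; otherwise the stored cpl is
 * invoked and the request set is returned to the channel's free list.
 */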
static void
bs_batch_completion(struct spdk_io_channel *_channel,
		    void *cb_arg, int bserrno)
{
	struct spdk_bs_request_set *set = cb_arg;

	set->u.batch.outstanding_ops--;
	if (bserrno != 0) {
		set->bserrno = bserrno;
	}

	if (set->u.batch.outstanding_ops == 0 && set->u.batch.batch_closed) {
		if (set->u.batch.cb_fn) {
			set->cb_args.cb_fn = bs_sequence_completion;
			set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, bserrno);
		} else {
			bs_request_set_complete(set);
		}
	}
}

spdk_bs_batch_t *
bs_batch_open(struct spdk_io_channel *_channel,
	      struct spdk_bs_cpl *cpl)
{
	struct spdk_bs_channel *channel;
	struct spdk_bs_request_set *set;

	channel = spdk_io_channel_get_ctx(_channel);
	assert(channel != NULL);
	set = TAILQ_FIRST(&channel->reqs);
	if (!set) {
		return NULL;
	}
	TAILQ_REMOVE(&channel->reqs, set, link);

	set->cpl = *cpl;
	set->bserrno = 0;
	set->channel = channel;

	set->u.batch.cb_fn = NULL;
	set->u.batch.cb_arg = NULL;
	set->u.batch.outstanding_ops = 0;
	set->u.batch.batch_closed = 0;

	set->cb_args.cb_fn = bs_batch_completion;
	set->cb_args.cb_arg = set;
	set->cb_args.channel = channel->dev_channel;

	return (spdk_bs_batch_t *)set;
}

void
bs_batch_read_bs_dev(spdk_bs_batch_t *batch, struct spdk_bs_dev *bs_dev,
		     void *payload, uint64_t lba, uint32_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.batch.outstanding_ops++;
	bs_dev->read(bs_dev, spdk_io_channel_from_ctx(channel), payload, lba, lba_count, &set->cb_args);
}

void
bs_batch_read_dev(spdk_bs_batch_t *batch, void *payload,
		  uint64_t lba, uint32_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Reading %" PRIu32 " blocks from LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.batch.outstanding_ops++;
	channel->dev->read(channel->dev, channel->dev_channel, payload, lba, lba_count, &set->cb_args);
}

void
bs_batch_write_dev(spdk_bs_batch_t *batch, void *payload,
		   uint64_t lba, uint32_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Writing %" PRIu32 " blocks to LBA %" PRIu64 "\n", lba_count, lba);

	set->u.batch.outstanding_ops++;
	channel->dev->write(channel->dev, channel->dev_channel, payload, lba, lba_count,
			    &set->cb_args);
}

void
bs_batch_unmap_dev(spdk_bs_batch_t *batch,
		   uint64_t lba, uint64_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Unmapping %" PRIu64 " blocks at LBA %" PRIu64 "\n", lba_count,
		      lba);

	set->u.batch.outstanding_ops++;
	channel->dev->unmap(channel->dev, channel->dev_channel, lba, lba_count,
			    &set->cb_args);
}

void
bs_batch_write_zeroes_dev(spdk_bs_batch_t *batch,
			  uint64_t lba, uint64_t lba_count)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
	struct spdk_bs_channel *channel = set->channel;

	SPDK_DEBUGLOG(blob_rw, "Zeroing %" PRIu64 " blocks at LBA %" PRIu64 "\n", lba_count, lba);

	set->u.batch.outstanding_ops++;
	channel->dev->write_zeroes(channel->dev, channel->dev_channel, lba, lba_count,
				   &set->cb_args);
}

void
bs_batch_close(spdk_bs_batch_t *batch)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;

	set->u.batch.batch_closed = 1;

	if (set->u.batch.outstanding_ops == 0) {
		if (set->u.batch.cb_fn) {
			set->cb_args.cb_fn = bs_sequence_completion;
			set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, set->bserrno);
		} else {
			bs_request_set_complete(set);
		}
	}
}
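/*
 * Convert an in-flight sequence into a batch in place. The same request set
 * is reused: the batch bookkeeping is reset, device completions are routed
 * through bs_batch_completion(), and once every queued operation has
 * completed and bs_batch_close() has been called, the supplied cb_fn is
 * invoked as an ordinary sequence step so the caller can continue (or
 * finish) the original sequence.
 */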
spdk_bs_batch_t *
bs_sequence_to_batch(spdk_bs_sequence_t *seq, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;

	set->u.batch.cb_fn = cb_fn;
	set->u.batch.cb_arg = cb_arg;
	set->u.batch.outstanding_ops = 0;
	set->u.batch.batch_closed = 0;

	set->cb_args.cb_fn = bs_batch_completion;

	return set;
}
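/*
 * User operations capture a user-facing blob I/O request (read, write,
 * readv/writev, unmap, write_zeroes) into a request set so it can be queued
 * and replayed later: bs_user_op_execute() re-submits it through the public
 * spdk_blob_io_*() API, while bs_user_op_abort() fails it immediately with
 * the given bserrno. Either path returns the set to the channel's free list.
 */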
spdk_bs_user_op_t *
bs_user_op_alloc(struct spdk_io_channel *_channel, struct spdk_bs_cpl *cpl,
		 enum spdk_blob_op_type op_type, struct spdk_blob *blob,
		 void *payload, int iovcnt, uint64_t offset, uint64_t length)
{
	struct spdk_bs_channel *channel;
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;

	channel = spdk_io_channel_get_ctx(_channel);
	assert(channel != NULL);
	set = TAILQ_FIRST(&channel->reqs);
	if (!set) {
		return NULL;
	}
	TAILQ_REMOVE(&channel->reqs, set, link);

	set->cpl = *cpl;
	set->channel = channel;
	set->ext_io_opts = NULL;

	args = &set->u.user_op;

	args->type = op_type;
	args->iovcnt = iovcnt;
	args->blob = blob;
	args->offset = offset;
	args->length = length;
	args->payload = payload;

	return (spdk_bs_user_op_t *)set;
}

void
bs_user_op_execute(spdk_bs_user_op_t *op)
{
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	struct spdk_io_channel *ch;

	set = (struct spdk_bs_request_set *)op;
	args = &set->u.user_op;
	ch = spdk_io_channel_from_ctx(set->channel);

	switch (args->type) {
	case SPDK_BLOB_READ:
		spdk_blob_io_read(args->blob, ch, args->payload, args->offset, args->length,
				  set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
		break;
	case SPDK_BLOB_WRITE:
		spdk_blob_io_write(args->blob, ch, args->payload, args->offset, args->length,
				   set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
		break;
	case SPDK_BLOB_UNMAP:
		spdk_blob_io_unmap(args->blob, ch, args->offset, args->length,
				   set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
		break;
	case SPDK_BLOB_WRITE_ZEROES:
		spdk_blob_io_write_zeroes(args->blob, ch, args->offset, args->length,
					  set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
		break;
	case SPDK_BLOB_READV:
		spdk_blob_io_readv_ext(args->blob, ch, args->payload, args->iovcnt,
				       args->offset, args->length,
				       set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg,
				       set->ext_io_opts);
		break;
	case SPDK_BLOB_WRITEV:
		spdk_blob_io_writev_ext(args->blob, ch, args->payload, args->iovcnt,
					args->offset, args->length,
					set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg,
					set->ext_io_opts);
		break;
	}
	TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);
}

void
bs_user_op_abort(spdk_bs_user_op_t *op, int bserrno)
{
	struct spdk_bs_request_set *set;

	set = (struct spdk_bs_request_set *)op;

	set->cpl.u.blob_basic.cb_fn(set->cpl.u.blob_basic.cb_arg, bserrno);
	TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);
}

SPDK_LOG_REGISTER_COMPONENT(blob_rw)