Lines matching defs:io

77 #define PRIV(io)	\
78 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
79 #define ARGS(io) \
80 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
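The two casts above are the backbone of the backend's per-I/O state: ARGS(io) is the LBA/length/flags that CTL decoded from the command, and PRIV(io)->len is the backend's running count of blocks already handled. Below is a minimal, self-contained sketch of that pattern; the struct layouts, slot numbers, and private-area size are simplified assumptions for illustration, not the definitions from the real CTL headers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ctl_ptr_len_flags { void *ptr; uint32_t len; uint32_t flags; };    /* assumed layout */
struct ctl_lba_len_flags { uint64_t lba; uint32_t len; uint32_t flags; }; /* assumed layout */

union ctl_priv { void *ptrs[2]; uint64_t ints[2]; };	/* assumed: big enough for either struct */

#define CTL_PRIV_LBA_LEN 0	/* slot numbers are assumptions */
#define CTL_PRIV_BACKEND 1

union ctl_io {
	struct { union ctl_priv ctl_private[2]; } io_hdr;
};

#define PRIV(io) ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io) ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

int
main(void)
{
	union ctl_io io;

	memset(&io, 0, sizeof(io));
	ARGS(&io)->lba = 100;	/* request covers LBAs 100..131 */
	ARGS(&io)->len = 32;
	PRIV(&io)->len = 0;	/* progress counter, reset in the submit path */

	PRIV(&io)->len += 8;	/* pretend one 8-block chunk has completed */
	printf("next chunk starts at LBA %ju, %u blocks remain\n",
	    (uintmax_t)(ARGS(&io)->lba + PRIV(&io)->len),
	    ARGS(&io)->len - PRIV(&io)->len);
	return (0);
}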
138 static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr);
139 static void ctl_backend_ramdisk_compare(union ctl_io *io);
140 static void ctl_backend_ramdisk_rw(union ctl_io *io);
141 static int ctl_backend_ramdisk_submit(union ctl_io *io);
143 static int ctl_backend_ramdisk_config_read(union ctl_io *io);
144 static int ctl_backend_ramdisk_config_write(union ctl_io *io);
362 ctl_backend_ramdisk_cmp(union ctl_io *io)
364 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
370 lbas = ctl_kern_data_len(io) / cbe_lun->blocksize;
371 lba = ARGS(io)->lba + PRIV(io)->len - lbas;
378 res = cmp(ctl_kern_data_ptr(io) + off, page,
384 free(io->scsiio.kern_data_ptr, M_RAMDISK);
386 off += ctl_kern_rel_offset(io) - ctl_kern_data_len(io);
387 ctl_io_set_compare_failure(io, off);
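The compare loop above relies on a byte-compare helper (not part of the excerpt) that returns how many leading bytes of the two buffers agree; when that count falls short of a full block, lines 386-387 turn it into an absolute offset within the request and fail the compare at that offset. A sketch of such a helper under that assumption; the name cmp_prefix is hypothetical.

#include <stdint.h>
#include <stdio.h>

/*
 * Returns the number of leading bytes that are equal, i.e. the offset of
 * the first mismatch (== size when the buffers match completely).
 */
static unsigned int
cmp_prefix(const uint8_t *a, const uint8_t *b, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (a[i] != b[i])
			break;
	return (i);
}

int
main(void)
{
	uint8_t x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 9, 4 };

	printf("first mismatch at byte %u\n", cmp_prefix(x, y, sizeof(x)));
	return (0);
}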
394 ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr)
397 (struct ctl_be_ramdisk_lun *)CTL_BACKEND_LUN(io);
400 if (ctl_kern_sg_entries(io) > 0)
401 free(ctl_kern_data_ptr(io), M_RAMDISK);
402 ctl_add_kern_rel_offset(io, ctl_kern_data_len(io));
403 if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
404 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) {
405 if (ARGS(io)->flags & CTL_LLF_COMPARE) {
407 if (ctl_backend_ramdisk_cmp(io))
410 if (ARGS(io)->len > PRIV(io)->len) {
413 &io->io_hdr, links);
419 ctl_io_set_success(io);
422 ctl_data_submit_done(io);
427 ctl_backend_ramdisk_compare(union ctl_io *io)
429 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
432 lbas = ARGS(io)->len - PRIV(io)->len;
436 ctl_set_be_move_done(io, ctl_backend_ramdisk_move_done);
437 ctl_set_kern_data_ptr(io, malloc(len, M_RAMDISK, M_WAITOK));
438 ctl_set_kern_data_len(io, len);
439 ctl_set_kern_sg_entries(io, 0);
440 io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
441 PRIV(io)->len += lbas;
442 ctl_datamove(io);
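Lines 432 and 441 are the two halves of the chunking scheme this backend uses: the next chunk is whatever remains of the request, capped to a fixed buffer size, and PRIV(io)->len is advanced before the data movement starts, so that move_done (line 410) can tell whether another chunk is needed. A stand-alone illustration of that arithmetic follows; the 128 KiB cap is an assumption, not taken from the excerpt.

#include <stdint.h>
#include <stdio.h>

#define CHUNK_BYTES	131072		/* assumed per-chunk cap, 128 KiB */

int
main(void)
{
	uint32_t blocksize = 512;
	uint32_t req_len = 1000;	/* ARGS(io)->len: blocks in the request */
	uint32_t done = 0;		/* PRIV(io)->len: blocks already handled */

	while (done < req_len) {
		uint32_t lbas = req_len - done;		/* cf. line 432 */
		if (lbas > CHUNK_BYTES / blocksize)
			lbas = CHUNK_BYTES / blocksize;
		done += lbas;				/* cf. line 441 */
		printf("chunk of %u blocks, %u/%u done\n", lbas, done, req_len);
	}
	return (0);
}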
446 ctl_backend_ramdisk_rw(union ctl_io *io)
448 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
456 lba = ARGS(io)->lba + PRIV(io)->len;
458 lbas = ARGS(io)->len - PRIV(io)->len;
462 op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
466 ctl_set_kern_data_ptr(io, sg_entries);
474 ctl_io_set_space_alloc_fail(io);
475 ctl_data_submit_done(io);
489 ctl_set_kern_data_ptr(io, page + off);
492 ctl_set_be_move_done(io, ctl_backend_ramdisk_move_done);
493 ctl_set_kern_data_len(io, lbas * cbe_lun->blocksize);
494 ctl_set_kern_sg_entries(io, sgs);
495 io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
496 PRIV(io)->len += lbas;
497 if ((ARGS(io)->flags & CTL_LLF_READ) &&
498 ARGS(io)->len <= PRIV(io)->len) {
499 ctl_io_set_success(io);
501 ctl_serseq_done(io);
503 ctl_datamove(io);
507 ctl_backend_ramdisk_submit(union ctl_io *io)
509 struct ctl_lba_len_flags *lbalen = ARGS(io);
512 ctl_io_set_success(io);
513 ctl_data_submit_done(io);
516 PRIV(io)->len = 0;
518 ctl_backend_ramdisk_compare(io);
520 ctl_backend_ramdisk_rw(io);
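Taken together, lines 509-520 form a small dispatcher: some requests are completed at once with no data movement, otherwise the progress counter is reset and the I/O is routed to either the compare or the read/write path. A hedged reconstruction with mock types follows; the elided guard before lines 512-513 is assumed here to be a verify-style request, which a ramdisk can complete without touching data.

#include <stdint.h>
#include <stdio.h>

#define LLF_COMPARE	0x01	/* stand-ins for the CTL_LLF_* flags */
#define LLF_VERIFY	0x02	/* assumed trigger for the early success */

struct mock_io {
	uint32_t len;		/* ARGS(io)->len */
	uint32_t flags;		/* ARGS(io)->flags */
	uint32_t priv_len;	/* PRIV(io)->len */
};

static void backend_compare(struct mock_io *io) { (void)io; printf("compare path\n"); }
static void backend_rw(struct mock_io *io)      { (void)io; printf("read/write path\n"); }

/* Rough reconstruction of the shape of ctl_backend_ramdisk_submit(). */
static int
backend_submit(struct mock_io *io)
{
	if (io->flags & LLF_VERIFY) {		/* assumed guard, cf. lines 512-513 */
		printf("verify: success without data movement\n");
		return (0);
	}
	io->priv_len = 0;			/* cf. line 516 */
	if (io->flags & LLF_COMPARE)
		backend_compare(io);		/* cf. line 518 */
	else
		backend_rw(io);			/* cf. line 520 */
	return (0);
}

int
main(void)
{
	struct mock_io verify  = { .len = 16, .flags = LLF_VERIFY };
	struct mock_io compare = { .len = 16, .flags = LLF_COMPARE };
	struct mock_io rw      = { .len = 16, .flags = 0 };

	backend_submit(&verify);
	backend_submit(&compare);
	backend_submit(&rw);
	return (0);
}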
528 union ctl_io *io;
533 io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
534 if (io != NULL) {
537 if (ARGS(io)->flags & CTL_LLF_COMPARE)
538 ctl_backend_ramdisk_compare(io);
540 ctl_backend_ramdisk_rw(io);
555 ctl_backend_ramdisk_gls(union ctl_io *io)
557 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
563 data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
564 scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
565 lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
568 ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
575 ctl_config_read_done(io);
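Lines 565 and 568 show the addressing scheme used throughout the backend: a backing page covers 2^pblockexp LBAs, so lba >> pblockexp selects the page and lba & ~(UINT_MAX << pblockexp) is the block offset inside it. A small worked example; the pblockexp value is made up.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned int pblockexp = 3;	/* assumed: 2^3 = 8 LBAs per backing page */
	uint64_t lba = 100;

	uint64_t page = lba >> pblockexp;			/* cf. line 568 */
	unsigned int lbaoff = lba & ~(UINT_MAX << pblockexp);	/* cf. line 565 */

	printf("LBA %ju lives in page %ju at block offset %u\n",
	    (uintmax_t)lba, (uintmax_t)page, lbaoff);
	return (0);
}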
580 ctl_backend_ramdisk_scsi_config_read(union ctl_io *io)
584 switch (io->scsiio.cdb[0]) {
586 if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
587 retval = ctl_backend_ramdisk_gls(io);
590 ctl_set_invalid_field(&io->scsiio,
596 ctl_config_read_done(io);
600 ctl_set_invalid_opcode(&io->scsiio);
601 ctl_config_read_done(io);
609 ramdisk_namespace_data(union ctl_io *io)
611 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
615 if (io->nvmeio.kern_data_len != sizeof(struct nvme_namespace_data) ||
616 io->nvmeio.kern_sg_entries != 0)
619 nsdata = (struct nvme_namespace_data *)io->nvmeio.kern_data_ptr;
634 ctl_config_read_done(io);
639 ramdisk_nvme_ids(union ctl_io *io)
641 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
643 if (io->nvmeio.kern_data_len != 4096 || io->nvmeio.kern_sg_entries != 0)
646 ctl_lun_nvme_ids(cbe_lun, io->nvmeio.kern_data_ptr);
647 ctl_config_read_done(io);
652 ctl_backend_ramdisk_nvme_config_read(union ctl_io *io)
654 switch (io->nvmeio.cmd.opc) {
659 cns = le32toh(io->nvmeio.cmd.cdw10) & 0xff;
662 return (ramdisk_namespace_data(io));
664 return (ramdisk_nvme_ids(io));
666 ctl_nvme_set_invalid_field(&io->nvmeio);
667 ctl_config_read_done(io);
672 ctl_nvme_set_invalid_opcode(&io->nvmeio);
673 ctl_config_read_done(io);
679 ctl_backend_ramdisk_config_read(union ctl_io *io)
681 switch (io->io_hdr.io_type) {
683 return (ctl_backend_ramdisk_scsi_config_read(io));
685 return (ctl_backend_ramdisk_nvme_config_read(io));
734 ctl_backend_ramdisk_ws(union ctl_io *io)
736 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
738 struct ctl_lba_len_flags *lbalen = ARGS(io);
743 CTL_IO_ASSERT(io, SCSI);
746 ctl_set_invalid_field(&io->scsiio,
752 ctl_config_write_done(io);
758 ctl_set_success(&io->scsiio);
759 ctl_config_write_done(io);
767 ctl_set_space_alloc_fail(&io->scsiio);
768 ctl_data_submit_done(io);
776 memcpy(page, io->scsiio.kern_data_ptr,
782 ctl_set_success(&io->scsiio);
783 ctl_config_write_done(io);
787 ctl_backend_ramdisk_unmap(union ctl_io *io)
789 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
790 struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
793 CTL_IO_ASSERT(io, SCSI);
796 ctl_set_invalid_field(&io->scsiio,
802 ctl_config_write_done(io);
814 ctl_set_success(&io->scsiio);
815 ctl_config_write_done(io);
819 ctl_backend_ramdisk_scsi_config_write(union ctl_io *io)
821 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
824 switch (io->scsiio.cdb[0]) {
828 ctl_set_success(&io->scsiio);
829 ctl_config_write_done(io);
834 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
836 ctl_set_success(&io->scsiio);
837 ctl_config_write_done(io);
849 ctl_set_success(&io->scsiio);
850 ctl_config_write_done(io);
854 ctl_set_success(&io->scsiio);
855 ctl_config_write_done(io);
859 ctl_backend_ramdisk_ws(io);
862 ctl_backend_ramdisk_unmap(io);
865 ctl_set_invalid_opcode(&io->scsiio);
866 ctl_config_write_done(io);
875 ctl_backend_ramdisk_wu(union ctl_io *io)
877 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
878 struct ctl_lba_len_flags *lbalen = ARGS(io);
880 CTL_IO_ASSERT(io, NVME);
887 ctl_nvme_set_success(&io->nvmeio);
888 ctl_config_write_done(io);
892 ctl_backend_ramdisk_wz(union ctl_io *io)
894 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
896 struct ctl_lba_len_flags *lbalen = ARGS(io);
901 CTL_IO_ASSERT(io, NVME);
903 if ((le32toh(io->nvmeio.cmd.cdw12) & (1U << 25)) != 0) {
906 ctl_nvme_set_success(&io->nvmeio);
907 ctl_config_write_done(io);
915 ctl_nvme_set_space_alloc_fail(&io->nvmeio);
916 ctl_data_submit_done(io);
923 ctl_nvme_set_success(&io->nvmeio);
924 ctl_config_write_done(io);
928 ctl_backend_ramdisk_dsm(union ctl_io *io)
930 struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
936 CTL_IO_ASSERT(io, NVME);
938 ranges = le32toh(io->nvmeio.cmd.cdw10) & 0xff;
939 r = (struct nvme_dsm_range *)io->nvmeio.kern_data_ptr;
947 ctl_nvme_set_success(&io->nvmeio);
948 ctl_config_write_done(io);
952 ctl_backend_ramdisk_nvme_config_write(union ctl_io *io)
954 switch (io->nvmeio.cmd.opc) {
957 ctl_nvme_set_success(&io->nvmeio);
958 ctl_config_write_done(io);
961 ctl_backend_ramdisk_wu(io);
964 ctl_backend_ramdisk_wz(io);
967 ctl_backend_ramdisk_dsm(io);
970 ctl_nvme_set_invalid_opcode(&io->nvmeio);
971 ctl_config_write_done(io);
978 ctl_backend_ramdisk_config_write(union ctl_io *io)
980 switch (io->io_hdr.io_type) {
982 return (ctl_backend_ramdisk_scsi_config_write(io));
984 return (ctl_backend_ramdisk_nvme_config_write(io));
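Both config_read (lines 679-685) and config_write (lines 978-984) end the same way: a switch on io_hdr.io_type routes the request to a SCSI or an NVMe handler, since this backend serves both transports. A minimal stand-alone sketch of that routing; the types and names below are mocks, not the CTL definitions.

#include <stdio.h>

enum io_type { IO_TYPE_SCSI, IO_TYPE_NVME };	/* stand-ins for the CTL io_type values */

struct mock_io { enum io_type io_type; };

static int scsi_config_write(struct mock_io *io) { (void)io; printf("SCSI handler\n"); return (0); }
static int nvme_config_write(struct mock_io *io) { (void)io; printf("NVMe handler\n"); return (0); }

/* Mirrors the shape of the dispatch in ctl_backend_ramdisk_config_write(). */
static int
config_write(struct mock_io *io)
{
	switch (io->io_type) {
	case IO_TYPE_SCSI:
		return (scsi_config_write(io));
	case IO_TYPE_NVME:
		return (nvme_config_write(io));
	default:
		return (-1);	/* unreachable with the two mock types above */
	}
}

int
main(void)
{
	struct mock_io scsi = { IO_TYPE_SCSI }, nvme = { IO_TYPE_NVME };

	config_write(&scsi);
	config_write(&nvme);
	return (0);
}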