/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/conf.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvme.h"
#include "spdk/likely.h"
#include "spdk/file.h"

static const uint8_t *g_data;
static bool g_trid_specified = false;
static char *g_artifact_prefix;
static int32_t g_time_in_sec = 10;
static char *g_corpus_dir;
static uint8_t *g_repro_data;
static size_t g_repro_size;
static pthread_t g_fuzz_td;
static pthread_t g_reactor_td;
static bool g_in_fuzzer;

#define MAX_COMMANDS 5

struct fuzz_command {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
};

static struct fuzz_command g_cmds[MAX_COMMANDS];

typedef void (*fuzz_build_cmd_fn)(struct fuzz_command *cmd);

struct fuzz_type {
	fuzz_build_cmd_fn fn;
	uint32_t bytes_per_cmd;
	bool is_admin;
};

static void
fuzz_admin_command(struct fuzz_command *cmd)
{
	memcpy(&cmd->cmd, g_data, sizeof(cmd->cmd));
	g_data += sizeof(cmd->cmd);

	/* ASYNC_EVENT_REQUEST won't complete, so pick a different opcode. */
	if (cmd->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
		cmd->cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	}

	/* NVME_OPC_FABRIC is special for fabric transport, so pick a different opcode. */
	if (cmd->cmd.opc == SPDK_NVME_OPC_FABRIC) {
		cmd->cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	}

	/* Fuzz a normal operation, so set a zero value in the Fused field. */
	cmd->cmd.fuse = 0;
}
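
/* Each builder below crafts one specific admin command, consuming a fixed
 * number of bytes from the fuzz input (g_data). The number of bytes each
 * builder consumes is recorded in g_fuzzers[].bytes_per_cmd further down.
 */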

static void
fuzz_admin_get_log_page_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));

	cmd->cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;

	/* Only fuzz some of the more interesting parts of the GET_LOG_PAGE command. */

	cmd->cmd.cdw10_bits.get_log_page.numdl = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];
	cmd->cmd.cdw10_bits.get_log_page.lid = g_data[2];
	cmd->cmd.cdw10_bits.get_log_page.lsp = g_data[3] & (0x60 >> 5);
	cmd->cmd.cdw10_bits.get_log_page.rae = g_data[3] & (0x80 >> 7);

	cmd->cmd.cdw11_bits.get_log_page.numdu = g_data[3] & (0x18 >> 3);

	/* Log Page Offset Lower */
	cmd->cmd.cdw12 = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];

	/* Offset Type */
	cmd->cmd.cdw14 = g_data[3] & (0x01 >> 0);

	/* Log Page Offset Upper */
	cmd->cmd.cdw13 = g_data[3] & (0x06 >> 1);

	g_data += 6;
}

static void
fuzz_admin_identify_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));

	cmd->cmd.opc = SPDK_NVME_OPC_IDENTIFY;

	cmd->cmd.cdw10_bits.identify.cns = g_data[0];
	cmd->cmd.cdw10_bits.identify.cntid = ((uint16_t)g_data[1] << 8) + (uint16_t)g_data[2];

	cmd->cmd.cdw11_bits.identify.nvmsetid = ((uint16_t)g_data[3] << 8) + (uint16_t)g_data[4];
	cmd->cmd.cdw11_bits.identify.csi = g_data[5];

	/* UUID index, bits 0-6 are used */
	cmd->cmd.cdw14 = (g_data[6] & 0x7f);

	g_data += 7;
}

static void
fuzz_admin_abort_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_ABORT;

	cmd->cmd.cdw10_bits.abort.sqid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];
	cmd->cmd.cdw10_bits.abort.cid = ((uint16_t)g_data[2] << 8) + (uint16_t)g_data[3];

	g_data += 4;
}

static void
fuzz_admin_create_io_completion_queue_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_CREATE_IO_CQ;

	cmd->cmd.cdw10_bits.create_io_q.qid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];
	cmd->cmd.cdw10_bits.create_io_q.qsize = ((uint16_t)g_data[2] << 8) + (uint16_t)g_data[3];

	cmd->cmd.cdw11_bits.create_io_cq.iv = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
	cmd->cmd.cdw11_bits.create_io_cq.pc = (g_data[6] >> 7) & 0x01;
	cmd->cmd.cdw11_bits.create_io_cq.ien = (g_data[6] >> 6) & 0x01;

	g_data += 7;
}

static void
fuzz_admin_create_io_submission_queue_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_CREATE_IO_SQ;

	cmd->cmd.cdw10_bits.create_io_q.qid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];
	cmd->cmd.cdw10_bits.create_io_q.qsize = ((uint16_t)g_data[2] << 8) + (uint16_t)g_data[3];

	cmd->cmd.cdw11_bits.create_io_sq.cqid = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
	cmd->cmd.cdw11_bits.create_io_sq.qprio = (g_data[6] >> 6) & 0x03;
	cmd->cmd.cdw11_bits.create_io_sq.pc = (g_data[6] >> 5) & 0x01;

	/* NVM Set Identifier */
	cmd->cmd.cdw12 = ((uint16_t)g_data[7] << 8) + (uint16_t)g_data[8];

	g_data += 9;
}

static void
fuzz_admin_delete_io_completion_queue_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;

	cmd->cmd.cdw10_bits.delete_io_q.qid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];

	g_data += 2;
}

static void
fuzz_admin_delete_io_submission_queue_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;

	cmd->cmd.cdw10_bits.delete_io_q.qid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];

	g_data += 2;
}

static void
fuzz_admin_namespace_attachment_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_NS_ATTACHMENT;

	cmd->cmd.cdw10_bits.ns_attach.sel = (g_data[0] >> 4) & 0x0f;

	g_data += 1;
}

static void
fuzz_admin_namespace_management_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_NS_MANAGEMENT;

	cmd->cmd.cdw10_bits.ns_manage.sel = (g_data[0] >> 4) & 0x0f;

	g_data += 1;
}

static void
fuzz_admin_security_receive_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_SECURITY_RECEIVE;

	cmd->cmd.cdw10_bits.sec_send_recv.secp = g_data[0];
	cmd->cmd.cdw10_bits.sec_send_recv.spsp1 = g_data[1];
	cmd->cmd.cdw10_bits.sec_send_recv.spsp0 = g_data[2];
	cmd->cmd.cdw10_bits.sec_send_recv.nssf = g_data[3];

	/* Allocation Length(AL) */
	cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
			 ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];

	g_data += 8;
}

static void
fuzz_admin_security_send_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_SECURITY_SEND;

	cmd->cmd.cdw10_bits.sec_send_recv.secp = g_data[0];
	cmd->cmd.cdw10_bits.sec_send_recv.spsp1 = g_data[1];
	cmd->cmd.cdw10_bits.sec_send_recv.spsp0 = g_data[2];
	cmd->cmd.cdw10_bits.sec_send_recv.nssf = g_data[3];

	/* Transfer Length(TL) */
	cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
			 ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];

	g_data += 8;
}

static void
fuzz_admin_directive_send_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_DIRECTIVE_SEND;

	cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
			 ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];

	cmd->cmd.cdw11_bits.directive.dspec = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
	cmd->cmd.cdw11_bits.directive.dtype = g_data[6];
	cmd->cmd.cdw11_bits.directive.doper = g_data[7];

	g_data += 8;
}

static void
fuzz_admin_directive_receive_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_DIRECTIVE_RECEIVE;

	cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
			 ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];

	cmd->cmd.cdw11_bits.directive.dspec = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
	cmd->cmd.cdw11_bits.directive.dtype = g_data[6];
	cmd->cmd.cdw11_bits.directive.doper = g_data[7];

	g_data += 8;
}
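
/* The feat_* helpers below fill in the feature-specific CDW11 fields for one
 * Feature Identifier each. They are shared by the Set Features and Get Features
 * builders and read their input from g_data[2] through g_data[5].
 */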

static void
feat_arbitration(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_arbitration.bits.hpw = g_data[2];
	cmd->cmd.cdw11_bits.feat_arbitration.bits.mpw = g_data[3];
	cmd->cmd.cdw11_bits.feat_arbitration.bits.lpw = g_data[4];
	cmd->cmd.cdw11_bits.feat_arbitration.bits.ab = g_data[5] & 0x07;
}

static void
feat_power_management(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_power_management.bits.wh = g_data[2] & 0x07;
	cmd->cmd.cdw11_bits.feat_power_management.bits.ps = (g_data[2] >> 3) & 0x1f;
}

static void
feat_lba_range_type(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_lba_range_type.bits.num = (g_data[2] >> 2) & 0x3f;
}

static void
feat_temperature_threshold(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_temp_threshold.bits.thsel = g_data[2] & 0x03;
	cmd->cmd.cdw11_bits.feat_temp_threshold.bits.tmpsel = (g_data[2] >> 2) & 0x0f;
	cmd->cmd.cdw11_bits.feat_temp_threshold.bits.tmpth = ((uint16_t)g_data[3] << 8) +
			(uint16_t)g_data[4];
}

static void
feat_error_recover(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_error_recovery.bits.dulbe = g_data[2] & 0x01;
	cmd->cmd.cdw11_bits.feat_error_recovery.bits.tler = ((uint16_t)g_data[3] << 8) +
			(uint16_t)g_data[4];
}

static void
feat_volatile_write_cache(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_volatile_write_cache.bits.wce = g_data[2] & 0x01;
}

static void
feat_number_of_queues(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_num_of_queues.bits.ncqr = ((uint16_t)g_data[2] << 8) + (uint16_t)g_data[3];
	cmd->cmd.cdw11_bits.feat_num_of_queues.bits.nsqr = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
}

static void
feat_interrupt_coalescing(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_interrupt_coalescing.bits.time = g_data[2];
	cmd->cmd.cdw11_bits.feat_interrupt_coalescing.bits.thr = g_data[3];
}

static void
feat_interrupt_vector_configuration(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_interrupt_vector_configuration.bits.cd = g_data[2] & 0x01;
	cmd->cmd.cdw11_bits.feat_interrupt_vector_configuration.bits.iv = ((uint16_t)g_data[3] << 8) +
			(uint16_t)g_data[4];
}

static void
feat_write_atomicity(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_write_atomicity.bits.dn = g_data[2] & 0x01;
}

static void
feat_async_event_cfg(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.ana_change_notice = g_data[2] & 0x01;
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.discovery_log_change_notice = (g_data[2] >> 1) & 0x01;
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.fw_activation_notice = (g_data[2] >> 2) & 0x01;
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.ns_attr_notice = (g_data[2] >> 3) & 0x01;
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.telemetry_log_notice = (g_data[2] >> 4) & 0x01;

	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.available_spare = g_data[3] & 0x01;
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.device_reliability =
			(g_data[3] >> 1) & 0x01;
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.read_only = (g_data[3] >> 2) & 0x01;
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.temperature = (g_data[3] >> 3) & 0x01;
	cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.volatile_memory_backup =
			(g_data[3] >> 4) & 0x01;
}

static void
feat_keep_alive_timer(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_keep_alive_timer.bits.kato = ((uint32_t)g_data[2] << 24) +
			((uint32_t)g_data[3] << 16) +
			((uint32_t)g_data[4] << 8) + (uint32_t)g_data[5];
}

static void
feat_host_identifier(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_host_identifier.bits.exhid = g_data[2] & 0x01;
}

static void
feat_rsv_notification_mask(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_rsv_notification_mask.bits.regpre = g_data[2] & 0x01;
	cmd->cmd.cdw11_bits.feat_rsv_notification_mask.bits.respre = (g_data[2] >> 1) & 0x01;
	cmd->cmd.cdw11_bits.feat_rsv_notification_mask.bits.resrel = (g_data[2] >> 2) & 0x01;
}

static void
feat_rsv_persistence(struct fuzz_command *cmd)
{
	cmd->cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = g_data[2] & 0x01;
}

static void
fuzz_admin_set_features_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_SET_FEATURES;

	cmd->cmd.cdw10_bits.set_features.fid = g_data[0];
	cmd->cmd.cdw10_bits.set_features.sv = (g_data[1] >> 7) & 0x01;

	switch (cmd->cmd.cdw10_bits.set_features.fid) {
	case SPDK_NVME_FEAT_ARBITRATION:
		feat_arbitration(cmd);
		break;
	case SPDK_NVME_FEAT_POWER_MANAGEMENT:
		feat_power_management(cmd);
		break;
	case SPDK_NVME_FEAT_LBA_RANGE_TYPE:
		feat_lba_range_type(cmd);
		break;
	case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
		feat_temperature_threshold(cmd);
		break;
	case SPDK_NVME_FEAT_ERROR_RECOVERY:
		feat_error_recover(cmd);
		break;
	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
		feat_volatile_write_cache(cmd);
		break;
	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		feat_number_of_queues(cmd);
		break;
	case SPDK_NVME_FEAT_INTERRUPT_COALESCING:
		feat_interrupt_coalescing(cmd);
		break;
	case SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
		feat_interrupt_vector_configuration(cmd);
		break;
	case SPDK_NVME_FEAT_WRITE_ATOMICITY:
		feat_write_atomicity(cmd);
		break;
	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		feat_async_event_cfg(cmd);
		break;
	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		feat_keep_alive_timer(cmd);
		break;
	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
		feat_host_identifier(cmd);
		break;
	case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
		feat_rsv_notification_mask(cmd);
		break;
	case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
		feat_rsv_persistence(cmd);
		break;

	default:
		break;
	}

	/* Use g_data[2] through g_data[5] for feature-specific bits,
	 * and set g_data[6] for cdw14 every iteration.
	 * UUID index, bits 0-6 are used.
	 */
	cmd->cmd.cdw14 = (g_data[6] & 0x7f);

	g_data += 7;
}

static void
fuzz_admin_get_features_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	cmd->cmd.cdw10_bits.get_features.fid = g_data[0];
	cmd->cmd.cdw10_bits.get_features.sel = (g_data[1] >> 5) & 0x07;

	switch (cmd->cmd.cdw10_bits.set_features.fid) {
	case SPDK_NVME_FEAT_ARBITRATION:
		feat_arbitration(cmd);
		break;
	case SPDK_NVME_FEAT_POWER_MANAGEMENT:
		feat_power_management(cmd);
		break;
	case SPDK_NVME_FEAT_LBA_RANGE_TYPE:
		feat_lba_range_type(cmd);
		break;
	case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
		feat_temperature_threshold(cmd);
		break;
	case SPDK_NVME_FEAT_ERROR_RECOVERY:
		feat_error_recover(cmd);
		break;
	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
		feat_volatile_write_cache(cmd);
		break;
	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		feat_number_of_queues(cmd);
		break;
	case SPDK_NVME_FEAT_INTERRUPT_COALESCING:
		feat_interrupt_coalescing(cmd);
		break;
	case SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
		feat_interrupt_vector_configuration(cmd);
		break;
	case SPDK_NVME_FEAT_WRITE_ATOMICITY:
		feat_write_atomicity(cmd);
		break;
	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		feat_async_event_cfg(cmd);
		break;
	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		feat_keep_alive_timer(cmd);
		break;

	default:
		break;
	}

	/* Use g_data[2] through g_data[5] for feature-specific bits,
	 * and set g_data[6] for cdw14 every iteration.
	 * UUID index, bits 0-6 are used.
	 */
	cmd->cmd.cdw14 = (g_data[6] & 0x7f);

	g_data += 7;
}
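
/* NVM (I/O) command builders. Unlike the admin builders above, these commands
 * are submitted on the I/O qpair (see g_fuzzers[].is_admin and run_cmds()).
 */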

static void
fuzz_nvm_read_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_READ;

	cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
			 ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
	cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
			 ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
	cmd->cmd.cdw12 = ((uint32_t)g_data[8] << 24) + ((uint32_t)g_data[9] << 16) +
			 ((uint32_t)g_data[10] << 8) + (uint32_t)g_data[11];
	cmd->cmd.cdw13 = g_data[12];
	cmd->cmd.cdw14 = ((uint32_t)g_data[13] << 24) + ((uint32_t)g_data[14] << 16) +
			 ((uint32_t)g_data[15] << 8) + (uint32_t)g_data[16];
	cmd->cmd.cdw15 = ((uint32_t)g_data[17] << 24) + ((uint32_t)g_data[18] << 16) +
			 ((uint32_t)g_data[19] << 8) + (uint32_t)g_data[20];

	g_data += 21;
}

static void
fuzz_nvm_write_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_WRITE;

	cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
			 ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
	cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
			 ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
	cmd->cmd.cdw12 = ((uint32_t)g_data[8] << 24) + ((uint32_t)g_data[9] << 16) +
			 ((uint32_t)g_data[10] << 8) + (uint32_t)g_data[11];
	cmd->cmd.cdw13 = ((uint32_t)g_data[12] << 24) + ((uint32_t)g_data[13] << 16) +
			 ((uint32_t)g_data[14] << 8) + (uint32_t)g_data[15];
	cmd->cmd.cdw14 = ((uint32_t)g_data[16] << 24) + ((uint32_t)g_data[17] << 16) +
			 ((uint32_t)g_data[18] << 8) + (uint32_t)g_data[19];
	cmd->cmd.cdw15 = ((uint32_t)g_data[20] << 24) + ((uint32_t)g_data[21] << 16) +
			 ((uint32_t)g_data[22] << 8) + (uint32_t)g_data[23];

	g_data += 24;
}

static void
fuzz_nvm_write_zeroes_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_WRITE_ZEROES;

	cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
			 ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
	cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
			 ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
	cmd->cmd.cdw12 = ((uint32_t)g_data[8] << 24) + ((uint32_t)g_data[9] << 16) +
			 ((uint32_t)g_data[10] << 8) + (uint32_t)g_data[11];
	cmd->cmd.cdw14 = ((uint32_t)g_data[12] << 24) + ((uint32_t)g_data[13] << 16) +
			 ((uint32_t)g_data[14] << 8) + (uint32_t)g_data[15];
	cmd->cmd.cdw15 = ((uint32_t)g_data[16] << 24) + ((uint32_t)g_data[17] << 16) +
			 ((uint32_t)g_data[18] << 8) + (uint32_t)g_data[19];

	g_data += 20;
}

static void
fuzz_nvm_write_uncorrectable_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;

	cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
			 ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
	cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
			 ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
	cmd->cmd.cdw12 = (g_data[8] << 8) + g_data[9];

	g_data += 10;
}
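
/* The reservation commands below also build a small data payload in cmd->buf
 * and record its size in cmd->len, so run_cmds() passes it along with the
 * command to spdk_nvme_ctrlr_cmd_io_raw().
 */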

static void
fuzz_nvm_reservation_acquire_command(struct fuzz_command *cmd)
{
	struct spdk_nvme_reservation_acquire_data *payload = cmd->buf;
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;

	cmd->cmd.cdw10_bits.resv_acquire.rtype = g_data[0];
	cmd->cmd.cdw10_bits.resv_acquire.iekey = (g_data[1] >> 7) & 0x01;
	cmd->cmd.cdw10_bits.resv_acquire.racqa = (g_data[1] >> 4) & 0x07;

	payload->crkey = ((uint64_t)g_data[2] << 56) + ((uint64_t)g_data[3] << 48) +
			 ((uint64_t)g_data[4] << 40) + ((uint64_t)g_data[5] << 32) +
			 ((uint64_t)g_data[6] << 24) + ((uint64_t)g_data[7] << 16) +
			 ((uint64_t)g_data[8] << 8) + (uint64_t)g_data[9];

	payload->prkey = ((uint64_t)g_data[10] << 56) + ((uint64_t)g_data[11] << 48) +
			 ((uint64_t)g_data[12] << 40) + ((uint64_t)g_data[13] << 32) +
			 ((uint64_t)g_data[14] << 24) + ((uint64_t)g_data[15] << 16) +
			 ((uint64_t)g_data[16] << 8) + (uint64_t)g_data[17];

	cmd->len = sizeof(struct spdk_nvme_reservation_acquire_data);

	g_data += 18;
}

static void
fuzz_nvm_reservation_release_command(struct fuzz_command *cmd)
{
	struct spdk_nvme_reservation_key_data *payload = cmd->buf;
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;

	cmd->cmd.cdw10_bits.resv_release.rtype = g_data[0];
	cmd->cmd.cdw10_bits.resv_release.iekey = (g_data[1] >> 7) & 0x01;
	cmd->cmd.cdw10_bits.resv_release.rrela = (g_data[1] >> 4) & 0x07;

	payload->crkey = ((uint64_t)g_data[2] << 56) + ((uint64_t)g_data[3] << 48) +
			 ((uint64_t)g_data[4] << 40) + ((uint64_t)g_data[5] << 32) +
			 ((uint64_t)g_data[6] << 24) + ((uint64_t)g_data[7] << 16) +
			 ((uint64_t)g_data[8] << 8) + (uint64_t)g_data[9];

	cmd->len = sizeof(struct spdk_nvme_reservation_key_data);

	g_data += 10;
}

static void
fuzz_nvm_reservation_register_command(struct fuzz_command *cmd)
{
	struct spdk_nvme_reservation_register_data *payload = cmd->buf;
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_RESERVATION_REGISTER;

	cmd->cmd.cdw10_bits.resv_register.cptpl = (g_data[0] >> 6) & 0x03;
	cmd->cmd.cdw10_bits.resv_register.iekey = (g_data[0] >> 5) & 0x01;
	cmd->cmd.cdw10_bits.resv_register.rrega = (g_data[0] >> 2) & 0x07;

	payload->crkey = ((uint64_t)g_data[1] << 56) + ((uint64_t)g_data[2] << 48) +
			 ((uint64_t)g_data[3] << 40) + ((uint64_t)g_data[4] << 32) +
			 ((uint64_t)g_data[5] << 24) + ((uint64_t)g_data[6] << 16) +
			 ((uint64_t)g_data[7] << 8) + (uint64_t)g_data[8];

	payload->nrkey = ((uint64_t)g_data[9] << 56) + ((uint64_t)g_data[10] << 48) +
			 ((uint64_t)g_data[11] << 40) + ((uint64_t)g_data[12] << 32) +
			 ((uint64_t)g_data[13] << 24) + ((uint64_t)g_data[14] << 16) +
			 ((uint64_t)g_data[15] << 8) + (uint64_t)g_data[16];

	cmd->len = sizeof(struct spdk_nvme_reservation_register_data);

	g_data += 17;
}

static void
fuzz_nvm_reservation_report_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_RESERVATION_REPORT;

	cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
			 ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];

	cmd->cmd.cdw11_bits.resv_report.eds = (g_data[4] >> 7) & 0x01;

	g_data += 5;
}

static void
fuzz_nvm_compare_command(struct fuzz_command *cmd)
{
	memset(&cmd->cmd, 0, sizeof(cmd->cmd));
	cmd->cmd.opc = SPDK_NVME_OPC_COMPARE;

	cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
			 ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
	cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
			 ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
	cmd->cmd.cdw12 = ((uint32_t)g_data[8] << 24) + ((uint32_t)g_data[9] << 16) +
			 ((uint32_t)g_data[10] << 8) + (uint32_t)g_data[11];
	cmd->cmd.cdw14 = ((uint32_t)g_data[12] << 24) + ((uint32_t)g_data[13] << 16) +
			 ((uint32_t)g_data[14] << 8) + (uint32_t)g_data[15];
	cmd->cmd.cdw15 = ((uint32_t)g_data[16] << 24) + ((uint32_t)g_data[17] << 16) +
			 ((uint32_t)g_data[18] << 8) + (uint32_t)g_data[19];

	g_data += 20;
}
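
/* One entry per command builder. bytes_per_cmd is the number of input bytes the
 * builder consumes; start_fuzzer() uses it to size libFuzzer's -max_len as
 * MAX_COMMANDS * bytes_per_cmd. The table is NULL-terminated, and NUM_FUZZERS
 * excludes the terminator.
 */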
static struct fuzz_type g_fuzzers[] = {
	{ .fn = fuzz_admin_command, .bytes_per_cmd = sizeof(struct spdk_nvme_cmd), .is_admin = true},
	{ .fn = fuzz_admin_get_log_page_command, .bytes_per_cmd = 6, .is_admin = true},
	{ .fn = fuzz_admin_identify_command, .bytes_per_cmd = 7, .is_admin = true},
	{ .fn = fuzz_admin_abort_command, .bytes_per_cmd = 4, .is_admin = true},
	{ .fn = fuzz_admin_create_io_completion_queue_command, .bytes_per_cmd = 7, .is_admin = true},
	{ .fn = fuzz_admin_create_io_submission_queue_command, .bytes_per_cmd = 9, .is_admin = true},
	{ .fn = fuzz_admin_delete_io_completion_queue_command, .bytes_per_cmd = 2, .is_admin = true},
	{ .fn = fuzz_admin_delete_io_submission_queue_command, .bytes_per_cmd = 2, .is_admin = true},
	{ .fn = fuzz_admin_namespace_attachment_command, .bytes_per_cmd = 1, .is_admin = true},
	{ .fn = fuzz_admin_namespace_management_command, .bytes_per_cmd = 1, .is_admin = true},
	{ .fn = fuzz_admin_security_receive_command, .bytes_per_cmd = 8, .is_admin = true},
	{ .fn = fuzz_admin_security_send_command, .bytes_per_cmd = 8, .is_admin = true},
	{ .fn = fuzz_admin_directive_send_command, .bytes_per_cmd = 8, .is_admin = true},
	{ .fn = fuzz_admin_directive_receive_command, .bytes_per_cmd = 8, .is_admin = true},
	{ .fn = fuzz_admin_set_features_command, .bytes_per_cmd = 7, .is_admin = true},
	{ .fn = fuzz_admin_get_features_command, .bytes_per_cmd = 7, .is_admin = true},
	{ .fn = fuzz_nvm_read_command, .bytes_per_cmd = 21, .is_admin = false},
	{ .fn = fuzz_nvm_write_command, .bytes_per_cmd = 24, .is_admin = false},
	{ .fn = fuzz_nvm_write_zeroes_command, .bytes_per_cmd = 20, .is_admin = false},
	{ .fn = fuzz_nvm_write_uncorrectable_command, .bytes_per_cmd = 10, .is_admin = false},
	{ .fn = fuzz_nvm_reservation_acquire_command, .bytes_per_cmd = 18, .is_admin = false},
	{ .fn = fuzz_nvm_reservation_release_command, .bytes_per_cmd = 10, .is_admin = false},
	{ .fn = fuzz_nvm_reservation_register_command, .bytes_per_cmd = 17, .is_admin = false},
	{ .fn = fuzz_nvm_reservation_report_command, .bytes_per_cmd = 5, .is_admin = false},
	{ .fn = fuzz_nvm_compare_command, .bytes_per_cmd = 20, .is_admin = false},
	{ .fn = NULL, .bytes_per_cmd = 0, .is_admin = 0}
};

#define NUM_FUZZERS (SPDK_COUNTOF(g_fuzzers) - 1)

static struct fuzz_type *g_fuzzer;

struct spdk_nvme_transport_id g_trid;
static struct spdk_nvme_ctrlr *g_ctrlr;
static struct spdk_nvme_qpair *g_io_qpair;

static void
nvme_fuzz_cpl_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	int *outstanding = cb_arg;

	assert(*outstanding > 0);
	(*outstanding)--;
}
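
/* Build queue_depth commands from the current input and submit them all, then
 * poll both the I/O qpair and the admin queue until every outstanding command
 * has completed.
 */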
static int
run_cmds(uint32_t queue_depth)
{
	int rc, outstanding = 0;
	uint32_t i;

	for (i = 0; i < queue_depth; i++) {
		struct fuzz_command *cmd = &g_cmds[i];

		g_fuzzer->fn(cmd);
		outstanding++;
		if (g_fuzzer->is_admin) {
			rc = spdk_nvme_ctrlr_cmd_admin_raw(g_ctrlr, &cmd->cmd, cmd->buf, cmd->len, nvme_fuzz_cpl_cb,
							   &outstanding);
		} else {
			rc = spdk_nvme_ctrlr_cmd_io_raw(g_ctrlr, g_io_qpair, &cmd->cmd, cmd->buf, cmd->len,
							nvme_fuzz_cpl_cb, &outstanding);
		}
		if (rc) {
			return rc;
		}
	}

	while (outstanding > 0) {
		spdk_nvme_qpair_process_completions(g_io_qpair, 0);
		spdk_nvme_ctrlr_process_admin_completions(g_ctrlr);
	}
	return 0;
}

static int
TestOneInput(const uint8_t *data, size_t size)
{
	int ret = 0;
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;

	if (size < g_fuzzer->bytes_per_cmd) {
		return -1;
	}

	g_ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	if (g_ctrlr == NULL) {
		fprintf(stderr, "spdk_nvme_connect() failed for transport address '%s'\n",
			g_trid.traddr);
		spdk_app_stop(-1);
		return -1;
	}

	g_io_qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ctrlr, NULL, 0);
	if (g_io_qpair == NULL) {
		fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair failed\n");
		ret = -1;
		goto detach_ctrlr;
	}

	g_data = data;

	run_cmds(size / g_fuzzer->bytes_per_cmd);
	spdk_nvme_ctrlr_free_io_qpair(g_io_qpair);
detach_ctrlr:
	spdk_nvme_detach_async(g_ctrlr, &detach_ctx);

	if (detach_ctx) {
		spdk_nvme_detach_poll(detach_ctx);
	}
	if (ret < 0) {
		spdk_app_stop(ret);
	}

	return ret;
}

int LLVMFuzzerRunDriver(int *argc, char ***argv, int (*UserCb)(const uint8_t *Data, size_t Size));

static void
exit_handler(void)
{
	if (g_in_fuzzer) {
		spdk_app_stop(0);
		pthread_join(g_reactor_td, NULL);
	}
}
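
/* Runs on a dedicated thread. Builds a libFuzzer argv (-artifact_prefix,
 * -max_len, -max_total_time and the corpus directory) and hands control to
 * LLVMFuzzerRunDriver(), or replays a single input when a reproduction file
 * was supplied with -N.
 */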
static void *
start_fuzzer(void *ctx)
{
	char *_argv[] = {
		"spdk",
		"-len_control=0",
		"-detect_leaks=1",
		NULL,
		NULL,
		NULL,
		NULL
	};
	char time_str[128];
	char prefix[PATH_MAX];
	char len_str[128];
	char **argv = _argv;
	int argc = SPDK_COUNTOF(_argv);
	uint32_t len;
	int rc;

	spdk_unaffinitize_thread();
	snprintf(prefix, sizeof(prefix), "-artifact_prefix=%s", g_artifact_prefix);
	argv[argc - 4] = prefix;
	len = MAX_COMMANDS * g_fuzzer->bytes_per_cmd;
	snprintf(len_str, sizeof(len_str), "-max_len=%d", len);
	argv[argc - 3] = len_str;
	snprintf(time_str, sizeof(time_str), "-max_total_time=%d", g_time_in_sec);
	argv[argc - 2] = time_str;
	argv[argc - 1] = g_corpus_dir;

	g_in_fuzzer = true;
	atexit(exit_handler);

	free(g_artifact_prefix);

	if (g_repro_data) {
		printf("Running single test based on reproduction data file.\n");
		rc = TestOneInput(g_repro_data, g_repro_size);
		printf("Done.\n");
	} else {
		rc = LLVMFuzzerRunDriver(&argc, &argv, TestOneInput);
		/* TODO: in the normal case, LLVMFuzzerRunDriver never returns - it calls exit()
		 * directly and we never get here. But this behavior isn't really documented
		 * anywhere by LLVM, so call spdk_app_stop(0) if it does return, which will
		 * result in the app exiting like a normal SPDK application (spdk_app_start()
		 * returns to main()).
		 */
	}
	g_in_fuzzer = false;
	spdk_app_stop(rc);

	return NULL;
}

static void
begin_fuzz(void *ctx)
{
	int i;

	g_reactor_td = pthread_self();

	for (i = 0; i < MAX_COMMANDS; i++) {
		g_cmds[i].buf = spdk_malloc(4096, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		assert(g_cmds[i].buf);
		g_cmds[i].len = 4096;
	}

	pthread_create(&g_fuzz_td, NULL, start_fuzzer, NULL);
}

static void
nvme_fuzz_usage(void)
{
	fprintf(stderr, " -D    Path of corpus directory.\n");
	fprintf(stderr, " -F    Transport ID for subsystem that should be fuzzed.\n");
	fprintf(stderr, " -N    Name of reproduction data file.\n");
	fprintf(stderr, " -P    Provide a prefix to use when saving artifacts.\n");
	fprintf(stderr, " -t    Time to run fuzz tests (in seconds). Default: 10\n");
	fprintf(stderr, " -Z    Fuzzer to run (0 to %lu)\n", NUM_FUZZERS - 1);
}

static int
nvme_fuzz_parse(int ch, char *arg)
{
	long long tmp;
	int rc;

	switch (ch) {
	case 'D':
		g_corpus_dir = strdup(optarg);
		break;
	case 'F':
		if (g_trid_specified) {
			fprintf(stderr, "Can only specify one trid\n");
			return -1;
		}
		g_trid_specified = true;
		rc = spdk_nvme_transport_id_parse(&g_trid, optarg);
		if (rc < 0) {
			fprintf(stderr, "failed to parse transport ID: %s\n", optarg);
			return -1;
		}
		break;
	case 'N':
		g_repro_data = spdk_posix_file_load_from_name(optarg, &g_repro_size);
		if (g_repro_data == NULL) {
			fprintf(stderr, "could not load data for file %s\n", optarg);
			return -1;
		}
		break;
	case 'P':
		g_artifact_prefix = strdup(optarg);
		if (!g_artifact_prefix) {
			fprintf(stderr, "cannot strdup: %s\n", optarg);
			return -ENOMEM;
		}
		break;
	case 't':
	case 'Z':
		tmp = spdk_strtoll(optarg, 10);
		if (tmp < 0 || tmp >= INT_MAX) {
			fprintf(stderr, "Invalid value '%s' for option -%c.\n", optarg, ch);
			return -EINVAL;
		}
		switch (ch) {
		case 't':
			g_time_in_sec = tmp;
			break;
		case 'Z':
			if ((unsigned long)tmp >= NUM_FUZZERS) {
				fprintf(stderr, "Invalid fuzz type %lld (max %lu)\n", tmp, NUM_FUZZERS - 1);
				return -EINVAL;
			}
			g_fuzzer = &g_fuzzers[tmp];
			break;
		}
		break;
	case '?':
	default:
		return -EINVAL;
	}
	return 0;
}

static void
fuzz_shutdown(void)
{
	/* If the user terminates the fuzzer prematurely, it is likely due
	 * to an input hang. So raise a SIGSEGV signal which will cause the
	 * fuzzer to generate a crash file for the last input.
	 *
	 * Note that the fuzzer will always generate a crash file, even if
	 * we get our TestOneInput() function (which is called by the fuzzer)
	 * to pthread_exit(). So just doing the SIGSEGV here in all cases is
	 * simpler than trying to differentiate between hung inputs and
	 * an impatient user.
	 */
	pthread_kill(g_fuzz_td, SIGSEGV);
}
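
/* Example invocation (the binary path and the target transport ID are
 * illustrative only; -D, -F and -Z are the required options, see
 * nvme_fuzz_usage()):
 *
 *   ./llvm_nvme_fuzz -D ./corpus -t 30 -Z 0 -F 'trtype:PCIe traddr:0000:04:00.0'
 */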
int
main(int argc, char **argv)
{
	struct spdk_app_opts opts = {};
	int rc;

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "nvme_fuzz";
	opts.shutdown_cb = fuzz_shutdown;
	opts.rpc_addr = NULL;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "D:F:N:P:t:Z:", NULL, nvme_fuzz_parse,
				      nvme_fuzz_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	if (!g_corpus_dir) {
		fprintf(stderr, "Must specify corpus dir with -D option\n");
		return -1;
	}

	if (!g_trid_specified) {
		fprintf(stderr, "Must specify trid with -F option\n");
		return -1;
	}

	if (!g_fuzzer) {
		fprintf(stderr, "Must specify fuzzer with -Z option\n");
		return -1;
	}

	rc = spdk_app_start(&opts, begin_fuzz, NULL);

	spdk_app_fini();
	return rc;
}