/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/conf.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvme.h"
#include "spdk/likely.h"
#include "spdk/file.h"

static const uint8_t *g_data;
static bool g_trid_specified = false;
static int32_t g_time_in_sec = 10;
static char *g_corpus_dir;
static uint8_t *g_repro_data;
static size_t g_repro_size;
static pthread_t g_fuzz_td;
static pthread_t g_reactor_td;
static bool g_in_fuzzer;

#define MAX_COMMANDS 5

struct fuzz_command {
        struct spdk_nvme_cmd cmd;
        void *buf;
        uint32_t len;
};

static struct fuzz_command g_cmds[MAX_COMMANDS];

typedef void (*fuzz_build_cmd_fn)(struct fuzz_command *cmd);

struct fuzz_type {
        fuzz_build_cmd_fn fn;
        uint32_t bytes_per_cmd;
        bool is_admin;
};
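
/*
 * Each builder below consumes a fixed number of bytes from the fuzz input
 * (g_data) and turns them into one NVMe command. The per-builder byte count
 * is recorded as bytes_per_cmd in the g_fuzzers table, so the libFuzzer
 * input length can be capped at MAX_COMMANDS * bytes_per_cmd
 * (see start_fuzzer()).
 */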
static void
fuzz_admin_command(struct fuzz_command *cmd)
{
        memcpy(&cmd->cmd, g_data, sizeof(cmd->cmd));
        g_data += sizeof(cmd->cmd);

        /* ASYNC_EVENT_REQUEST won't complete, so pick a different opcode. */
        if (cmd->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
                cmd->cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
        }

        /* NVME_OPC_FABRIC is special for fabric transport, so pick a different opcode. */
        if (cmd->cmd.opc == SPDK_NVME_OPC_FABRIC) {
                cmd->cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
        }

        /* Fuzz a normal operation, so set a zero value in the Fused field. */
        cmd->cmd.fuse = 0;
}

static void
fuzz_admin_get_log_page_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));

        cmd->cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;

        /* Only fuzz some of the more interesting parts of the GET_LOG_PAGE command. */

        cmd->cmd.cdw10_bits.get_log_page.numdl = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];
        cmd->cmd.cdw10_bits.get_log_page.lid = g_data[2];
        cmd->cmd.cdw10_bits.get_log_page.lsp = g_data[3] & (0x60 >> 5);
        cmd->cmd.cdw10_bits.get_log_page.rae = g_data[3] & (0x80 >> 7);

        cmd->cmd.cdw11_bits.get_log_page.numdu = g_data[3] & (0x18 >> 3);

        /* Log Page Offset Lower */
        cmd->cmd.cdw12 = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];

        /* Offset Type */
        cmd->cmd.cdw14 = g_data[3] & (0x01 >> 0);

        /* Log Page Offset Upper */
        cmd->cmd.cdw13 = g_data[3] & (0x06 >> 1);

        g_data += 6;
}

static void
fuzz_admin_identify_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));

        cmd->cmd.opc = SPDK_NVME_OPC_IDENTIFY;

        cmd->cmd.cdw10_bits.identify.cns = g_data[0];
        cmd->cmd.cdw10_bits.identify.cntid = ((uint16_t)g_data[1] << 8) + (uint16_t)g_data[2];

        cmd->cmd.cdw11_bits.identify.nvmsetid = ((uint16_t)g_data[3] << 8) + (uint16_t)g_data[4];
        cmd->cmd.cdw11_bits.identify.csi = g_data[5];

        /* UUID index, bits 0-6 are used */
        cmd->cmd.cdw14 = (g_data[6] & 0x7f);

        g_data += 7;
}

static void
fuzz_admin_abort_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_ABORT;

        cmd->cmd.cdw10_bits.abort.sqid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];
        cmd->cmd.cdw10_bits.abort.cid = ((uint16_t)g_data[2] << 8) + (uint16_t)g_data[3];

        g_data += 4;
}

static void
fuzz_admin_create_io_completion_queue_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_CREATE_IO_CQ;

        cmd->cmd.cdw10_bits.create_io_q.qid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];
        cmd->cmd.cdw10_bits.create_io_q.qsize = ((uint16_t)g_data[2] << 8) + (uint16_t)g_data[3];

        cmd->cmd.cdw11_bits.create_io_cq.iv = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
        cmd->cmd.cdw11_bits.create_io_cq.pc = (g_data[6] >> 7) & 0x01;
        cmd->cmd.cdw11_bits.create_io_cq.ien = (g_data[6] >> 6) & 0x01;

        g_data += 7;
}

static void
fuzz_admin_create_io_submission_queue_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_CREATE_IO_SQ;

        cmd->cmd.cdw10_bits.create_io_q.qid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];
        cmd->cmd.cdw10_bits.create_io_q.qsize = ((uint16_t)g_data[2] << 8) + (uint16_t)g_data[3];

        cmd->cmd.cdw11_bits.create_io_sq.cqid = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
        cmd->cmd.cdw11_bits.create_io_sq.qprio = (g_data[6] >> 6) & 0x03;
        cmd->cmd.cdw11_bits.create_io_sq.pc = (g_data[6] >> 5) & 0x01;

        /* NVM Set Identifier */
        cmd->cmd.cdw12 = ((uint16_t)g_data[7] << 8) + (uint16_t)g_data[8];

        g_data += 9;
}

static void
fuzz_admin_delete_io_completion_queue_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;

        cmd->cmd.cdw10_bits.delete_io_q.qid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];

        g_data += 2;
}

static void
fuzz_admin_delete_io_submission_queue_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;

        cmd->cmd.cdw10_bits.delete_io_q.qid = ((uint16_t)g_data[0] << 8) + (uint16_t)g_data[1];

        g_data += 2;
}

static void
fuzz_admin_namespace_attachment_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_NS_ATTACHMENT;

        cmd->cmd.cdw10_bits.ns_attach.sel = (g_data[0] >> 4) & 0x0f;

        g_data += 1;
}

static void
fuzz_admin_namespace_management_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_NS_MANAGEMENT;

        cmd->cmd.cdw10_bits.ns_manage.sel = (g_data[0] >> 4) & 0x0f;

        g_data += 1;
}

static void
fuzz_admin_security_receive_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_SECURITY_RECEIVE;

        cmd->cmd.cdw10_bits.sec_send_recv.secp = g_data[0];
        cmd->cmd.cdw10_bits.sec_send_recv.spsp1 = g_data[1];
        cmd->cmd.cdw10_bits.sec_send_recv.spsp0 = g_data[2];
        cmd->cmd.cdw10_bits.sec_send_recv.nssf = g_data[3];

        /* Allocation Length (AL) */
        cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
                         ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];

        g_data += 8;
}

static void
fuzz_admin_security_send_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_SECURITY_SEND;

        cmd->cmd.cdw10_bits.sec_send_recv.secp = g_data[0];
        cmd->cmd.cdw10_bits.sec_send_recv.spsp1 = g_data[1];
        cmd->cmd.cdw10_bits.sec_send_recv.spsp0 = g_data[2];
        cmd->cmd.cdw10_bits.sec_send_recv.nssf = g_data[3];

        /* Transfer Length (TL) */
        cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
                         ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];

        g_data += 8;
}

static void
fuzz_admin_directive_send_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_DIRECTIVE_SEND;

        cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
                         ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];

        cmd->cmd.cdw11_bits.directive.dspec = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
        cmd->cmd.cdw11_bits.directive.dtype = g_data[6];
        cmd->cmd.cdw11_bits.directive.doper = g_data[7];

        g_data += 8;
}

static void
fuzz_admin_directive_receive_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_DIRECTIVE_RECEIVE;

        cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
                         ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];

        cmd->cmd.cdw11_bits.directive.dspec = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
        cmd->cmd.cdw11_bits.directive.dtype = g_data[6];
        cmd->cmd.cdw11_bits.directive.doper = g_data[7];

        g_data += 8;
}
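
/*
 * Helpers that fill the feature-specific fields of cdw11 for the
 * SET_FEATURES/GET_FEATURES builders further below. The callers consume
 * g_data[0] and g_data[1] for the feature ID and save/select bits, so these
 * helpers start reading at g_data[2].
 */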
static void
feat_arbitration(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_arbitration.bits.hpw = g_data[2];
        cmd->cmd.cdw11_bits.feat_arbitration.bits.mpw = g_data[3];
        cmd->cmd.cdw11_bits.feat_arbitration.bits.lpw = g_data[4];
        cmd->cmd.cdw11_bits.feat_arbitration.bits.ab = g_data[5] & 0x07;
}

static void
feat_power_management(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_power_management.bits.wh = g_data[2] & 0x07;
        cmd->cmd.cdw11_bits.feat_power_management.bits.ps = (g_data[2] >> 3) & 0x1f;
}

static void
feat_lba_range_type(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_lba_range_type.bits.num = (g_data[2] >> 2) & 0x3f;
}

static void
feat_temperature_threshold(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_temp_threshold.bits.thsel = g_data[2] & 0x03;
        cmd->cmd.cdw11_bits.feat_temp_threshold.bits.tmpsel = (g_data[2] >> 2) & 0x0f;
        cmd->cmd.cdw11_bits.feat_temp_threshold.bits.tmpth = ((uint16_t)g_data[3] << 8) +
                        (uint16_t)g_data[4];
}

static void
feat_error_recover(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_error_recovery.bits.dulbe = g_data[2] & 0x01;
        cmd->cmd.cdw11_bits.feat_error_recovery.bits.tler = ((uint16_t)g_data[3] << 8) +
                        (uint16_t)g_data[4];
}

static void
feat_volatile_write_cache(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_volatile_write_cache.bits.wce = g_data[2] & 0x01;
}

static void
feat_number_of_queues(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_num_of_queues.bits.ncqr = ((uint16_t)g_data[2] << 8) + (uint16_t)g_data[3];
        cmd->cmd.cdw11_bits.feat_num_of_queues.bits.nsqr = ((uint16_t)g_data[4] << 8) + (uint16_t)g_data[5];
}

static void
feat_interrupt_coalescing(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_interrupt_coalescing.bits.time = g_data[2];
        cmd->cmd.cdw11_bits.feat_interrupt_coalescing.bits.thr = g_data[3];
}

static void
feat_interrupt_vector_configuration(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_interrupt_vector_configuration.bits.cd = g_data[2] & 0x01;
        cmd->cmd.cdw11_bits.feat_interrupt_vector_configuration.bits.iv = ((uint16_t)g_data[3] << 8) +
                        (uint16_t)g_data[4];
}

static void
feat_write_atomicity(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_write_atomicity.bits.dn = g_data[2] & 0x01;
}

static void
feat_async_event_cfg(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.ana_change_notice = g_data[2] & 0x01;
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.discovery_log_change_notice = (g_data[2] >> 1) & 0x01;
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.fw_activation_notice = (g_data[2] >> 2) & 0x01;
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.ns_attr_notice = (g_data[2] >> 3) & 0x01;
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.telemetry_log_notice = (g_data[2] >> 4) & 0x01;

        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.available_spare = g_data[3] & 0x01;
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.device_reliability =
                        (g_data[3] >> 1) & 0x01;
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.read_only = (g_data[3] >> 2) & 0x01;
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.temperature = (g_data[3] >> 3) & 0x01;
        cmd->cmd.cdw11_bits.feat_async_event_cfg.bits.crit_warn.bits.volatile_memory_backup =
                        (g_data[3] >> 4) & 0x01;
}

static void
feat_keep_alive_timer(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_keep_alive_timer.bits.kato = ((uint32_t)g_data[2] << 24) +
                        ((uint32_t)g_data[3] << 16) + ((uint32_t)g_data[4] << 8) + (uint32_t)g_data[5];
}

static void
feat_host_identifier(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_host_identifier.bits.exhid = g_data[2] & 0x01;
}

static void
feat_rsv_notification_mask(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_rsv_notification_mask.bits.regpre = g_data[2] & 0x01;
        cmd->cmd.cdw11_bits.feat_rsv_notification_mask.bits.respre = (g_data[2] >> 1) & 0x01;
        cmd->cmd.cdw11_bits.feat_rsv_notification_mask.bits.resrel = (g_data[2] >> 2) & 0x01;
}

static void
feat_rsv_persistence(struct fuzz_command *cmd)
{
        cmd->cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = g_data[2] & 0x01;
}

static void
fuzz_admin_set_features_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_SET_FEATURES;

        cmd->cmd.cdw10_bits.set_features.fid = g_data[0];
        cmd->cmd.cdw10_bits.set_features.sv = (g_data[1] >> 7) & 0x01;

        switch (cmd->cmd.cdw10_bits.set_features.fid) {
        case SPDK_NVME_FEAT_ARBITRATION:
                feat_arbitration(cmd);
                break;
        case SPDK_NVME_FEAT_POWER_MANAGEMENT:
                feat_power_management(cmd);
                break;
        case SPDK_NVME_FEAT_LBA_RANGE_TYPE:
                feat_lba_range_type(cmd);
                break;
        case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
                feat_temperature_threshold(cmd);
                break;
        case SPDK_NVME_FEAT_ERROR_RECOVERY:
                feat_error_recover(cmd);
                break;
        case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
                feat_volatile_write_cache(cmd);
                break;
        case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
                feat_number_of_queues(cmd);
                break;
        case SPDK_NVME_FEAT_INTERRUPT_COALESCING:
                feat_interrupt_coalescing(cmd);
                break;
        case SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
                feat_interrupt_vector_configuration(cmd);
                break;
        case SPDK_NVME_FEAT_WRITE_ATOMICITY:
                feat_write_atomicity(cmd);
                break;
        case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
                feat_async_event_cfg(cmd);
                break;
        case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
                feat_keep_alive_timer(cmd);
                break;
        case SPDK_NVME_FEAT_HOST_IDENTIFIER:
                feat_host_identifier(cmd);
                break;
        case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
                feat_rsv_notification_mask(cmd);
                break;
        case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
                feat_rsv_persistence(cmd);
                break;
        default:
                break;
        }

        /* g_data[2] through g_data[5] are consumed by the feature-specific helpers
         * above. g_data[6] always sets cdw14 (UUID index; bits 0-6 are used).
         */
        cmd->cmd.cdw14 = (g_data[6] & 0x7f);

        g_data += 7;
}

static void
fuzz_admin_get_features_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

        cmd->cmd.cdw10_bits.get_features.fid = g_data[0];
        cmd->cmd.cdw10_bits.get_features.sel = (g_data[1] >> 5) & 0x07;

        switch (cmd->cmd.cdw10_bits.set_features.fid) {
        case SPDK_NVME_FEAT_ARBITRATION:
                feat_arbitration(cmd);
                break;
        case SPDK_NVME_FEAT_POWER_MANAGEMENT:
                feat_power_management(cmd);
                break;
        case SPDK_NVME_FEAT_LBA_RANGE_TYPE:
                feat_lba_range_type(cmd);
                break;
        case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
                feat_temperature_threshold(cmd);
                break;
        case SPDK_NVME_FEAT_ERROR_RECOVERY:
                feat_error_recover(cmd);
                break;
        case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
                feat_volatile_write_cache(cmd);
                break;
        case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
                feat_number_of_queues(cmd);
                break;
        case SPDK_NVME_FEAT_INTERRUPT_COALESCING:
                feat_interrupt_coalescing(cmd);
                break;
        case SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
                feat_interrupt_vector_configuration(cmd);
                break;
        case SPDK_NVME_FEAT_WRITE_ATOMICITY:
                feat_write_atomicity(cmd);
                break;
        case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
                feat_async_event_cfg(cmd);
                break;
        case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
                feat_keep_alive_timer(cmd);
                break;
        default:
                break;
        }

        /* g_data[2] through g_data[5] are consumed by the feature-specific helpers
         * above. g_data[6] always sets cdw14 (UUID index; bits 0-6 are used).
         */
        cmd->cmd.cdw14 = (g_data[6] & 0x7f);

        g_data += 7;
}
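
/*
 * Builders for NVM (I/O) commands. These are marked is_admin = false in
 * g_fuzzers and are submitted to g_io_qpair rather than the admin queue.
 */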
static void
fuzz_nvm_read_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_READ;

        cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
                         ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
        cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
                         ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
        cmd->cmd.cdw12 = ((uint32_t)g_data[8] << 24) + ((uint32_t)g_data[9] << 16) +
                         ((uint32_t)g_data[10] << 8) + (uint32_t)g_data[11];
        cmd->cmd.cdw13 = g_data[12];
        cmd->cmd.cdw14 = ((uint32_t)g_data[13] << 24) + ((uint32_t)g_data[14] << 16) +
                         ((uint32_t)g_data[15] << 8) + (uint32_t)g_data[16];
        cmd->cmd.cdw15 = ((uint32_t)g_data[17] << 24) + ((uint32_t)g_data[18] << 16) +
                         ((uint32_t)g_data[19] << 8) + (uint32_t)g_data[20];

        g_data += 21;
}

static void
fuzz_nvm_write_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_WRITE;

        cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
                         ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
        cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
                         ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
        cmd->cmd.cdw12 = ((uint32_t)g_data[8] << 24) + ((uint32_t)g_data[9] << 16) +
                         ((uint32_t)g_data[10] << 8) + (uint32_t)g_data[11];
        cmd->cmd.cdw13 = ((uint32_t)g_data[12] << 24) + ((uint32_t)g_data[13] << 16) +
                         ((uint32_t)g_data[14] << 8) + (uint32_t)g_data[15];
        cmd->cmd.cdw14 = ((uint32_t)g_data[16] << 24) + ((uint32_t)g_data[17] << 16) +
                         ((uint32_t)g_data[18] << 8) + (uint32_t)g_data[19];
        cmd->cmd.cdw15 = ((uint32_t)g_data[20] << 24) + ((uint32_t)g_data[21] << 16) +
                         ((uint32_t)g_data[22] << 8) + (uint32_t)g_data[23];

        g_data += 24;
}

static void
fuzz_nvm_write_zeroes_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_WRITE_ZEROES;

        cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
                         ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
        cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
                         ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
        cmd->cmd.cdw12 = ((uint32_t)g_data[8] << 24) + ((uint32_t)g_data[9] << 16) +
                         ((uint32_t)g_data[10] << 8) + (uint32_t)g_data[11];
        cmd->cmd.cdw14 = ((uint32_t)g_data[12] << 24) + ((uint32_t)g_data[13] << 16) +
                         ((uint32_t)g_data[14] << 8) + (uint32_t)g_data[15];
        cmd->cmd.cdw15 = ((uint32_t)g_data[16] << 24) + ((uint32_t)g_data[17] << 16) +
                         ((uint32_t)g_data[18] << 8) + (uint32_t)g_data[19];

        g_data += 20;
}

static void
fuzz_nvm_write_uncorrectable_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;

        cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
                         ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
        cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
                         ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
        cmd->cmd.cdw12 = (g_data[8] << 8) + g_data[9];

        g_data += 10;
}

static void
fuzz_nvm_reservation_acquire_command(struct fuzz_command *cmd)
{
        struct spdk_nvme_reservation_acquire_data *payload = cmd->buf;

        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;

        cmd->cmd.cdw10_bits.resv_acquire.rtype = g_data[0];
        cmd->cmd.cdw10_bits.resv_acquire.iekey = (g_data[1] >> 7) & 0x01;
        cmd->cmd.cdw10_bits.resv_acquire.racqa = (g_data[1] >> 4) & 0x07;

        payload->crkey = ((uint64_t)g_data[2] << 56) + ((uint64_t)g_data[3] << 48) +
                         ((uint64_t)g_data[4] << 40) + ((uint64_t)g_data[5] << 32) +
                         ((uint64_t)g_data[6] << 24) + ((uint64_t)g_data[7] << 16) +
                         ((uint64_t)g_data[8] << 8) + (uint64_t)g_data[9];

        payload->prkey = ((uint64_t)g_data[10] << 56) + ((uint64_t)g_data[11] << 48) +
                         ((uint64_t)g_data[12] << 40) + ((uint64_t)g_data[13] << 32) +
                         ((uint64_t)g_data[14] << 24) + ((uint64_t)g_data[15] << 16) +
                         ((uint64_t)g_data[16] << 8) + (uint64_t)g_data[17];

        cmd->len = sizeof(struct spdk_nvme_reservation_acquire_data);

        g_data += 18;
}

static void
fuzz_nvm_reservation_release_command(struct fuzz_command *cmd)
{
        struct spdk_nvme_reservation_key_data *payload = cmd->buf;

        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;

        cmd->cmd.cdw10_bits.resv_release.rtype = g_data[0];
        cmd->cmd.cdw10_bits.resv_release.iekey = (g_data[1] >> 7) & 0x01;
        cmd->cmd.cdw10_bits.resv_release.rrela = (g_data[1] >> 4) & 0x07;

        payload->crkey = ((uint64_t)g_data[2] << 56) + ((uint64_t)g_data[3] << 48) +
                         ((uint64_t)g_data[4] << 40) + ((uint64_t)g_data[5] << 32) +
                         ((uint64_t)g_data[6] << 24) + ((uint64_t)g_data[7] << 16) +
                         ((uint64_t)g_data[8] << 8) + (uint64_t)g_data[9];

        cmd->len = sizeof(struct spdk_nvme_reservation_key_data);

        g_data += 10;
}

static void
fuzz_nvm_reservation_register_command(struct fuzz_command *cmd)
{
        struct spdk_nvme_reservation_register_data *payload = cmd->buf;

        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_RESERVATION_REGISTER;

        cmd->cmd.cdw10_bits.resv_register.cptpl = (g_data[0] >> 6) & 0x03;
        cmd->cmd.cdw10_bits.resv_register.iekey = (g_data[0] >> 5) & 0x01;
        cmd->cmd.cdw10_bits.resv_register.rrega = (g_data[0] >> 2) & 0x07;

        payload->crkey = ((uint64_t)g_data[1] << 56) + ((uint64_t)g_data[2] << 48) +
                         ((uint64_t)g_data[3] << 40) + ((uint64_t)g_data[4] << 32) +
                         ((uint64_t)g_data[5] << 24) + ((uint64_t)g_data[6] << 16) +
                         ((uint64_t)g_data[7] << 8) + (uint64_t)g_data[8];

        payload->nrkey = ((uint64_t)g_data[9] << 56) + ((uint64_t)g_data[10] << 48) +
                         ((uint64_t)g_data[11] << 40) + ((uint64_t)g_data[12] << 32) +
                         ((uint64_t)g_data[13] << 24) + ((uint64_t)g_data[14] << 16) +
                         ((uint64_t)g_data[15] << 8) + (uint64_t)g_data[16];

        cmd->len = sizeof(struct spdk_nvme_reservation_register_data);

        g_data += 17;
}

static void
fuzz_nvm_reservation_report_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_RESERVATION_REPORT;

        cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
                         ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];

        cmd->cmd.cdw11_bits.resv_report.eds = (g_data[4] >> 7) & 0x01;

        g_data += 5;
}

static void
fuzz_nvm_compare_command(struct fuzz_command *cmd)
{
        memset(&cmd->cmd, 0, sizeof(cmd->cmd));
        cmd->cmd.opc = SPDK_NVME_OPC_COMPARE;

        cmd->cmd.cdw10 = ((uint32_t)g_data[0] << 24) + ((uint32_t)g_data[1] << 16) +
                         ((uint32_t)g_data[2] << 8) + (uint32_t)g_data[3];
        cmd->cmd.cdw11 = ((uint32_t)g_data[4] << 24) + ((uint32_t)g_data[5] << 16) +
                         ((uint32_t)g_data[6] << 8) + (uint32_t)g_data[7];
        cmd->cmd.cdw12 = ((uint32_t)g_data[8] << 24) + ((uint32_t)g_data[9] << 16) +
                         ((uint32_t)g_data[10] << 8) + (uint32_t)g_data[11];
        cmd->cmd.cdw14 = ((uint32_t)g_data[12] << 24) + ((uint32_t)g_data[13] << 16) +
                         ((uint32_t)g_data[14] << 8) + (uint32_t)g_data[15];
        cmd->cmd.cdw15 = ((uint32_t)g_data[16] << 24) + ((uint32_t)g_data[17] << 16) +
                         ((uint32_t)g_data[18] << 8) + (uint32_t)g_data[19];

        g_data += 20;
}
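
/*
 * Registry of the available fuzzers, selected at run time with the -Z option.
 * The table is NULL-terminated, so NUM_FUZZERS excludes the sentinel entry.
 */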
static struct fuzz_type g_fuzzers[] = {
        { .fn = fuzz_admin_command, .bytes_per_cmd = sizeof(struct spdk_nvme_cmd), .is_admin = true},
        { .fn = fuzz_admin_get_log_page_command, .bytes_per_cmd = 6, .is_admin = true},
        { .fn = fuzz_admin_identify_command, .bytes_per_cmd = 7, .is_admin = true},
        { .fn = fuzz_admin_abort_command, .bytes_per_cmd = 4, .is_admin = true},
        { .fn = fuzz_admin_create_io_completion_queue_command, .bytes_per_cmd = 7, .is_admin = true},
        { .fn = fuzz_admin_create_io_submission_queue_command, .bytes_per_cmd = 9, .is_admin = true},
        { .fn = fuzz_admin_delete_io_completion_queue_command, .bytes_per_cmd = 2, .is_admin = true},
        { .fn = fuzz_admin_delete_io_submission_queue_command, .bytes_per_cmd = 2, .is_admin = true},
        { .fn = fuzz_admin_namespace_attachment_command, .bytes_per_cmd = 1, .is_admin = true},
        { .fn = fuzz_admin_namespace_management_command, .bytes_per_cmd = 1, .is_admin = true},
        { .fn = fuzz_admin_security_receive_command, .bytes_per_cmd = 8, .is_admin = true},
        { .fn = fuzz_admin_security_send_command, .bytes_per_cmd = 8, .is_admin = true},
        { .fn = fuzz_admin_directive_send_command, .bytes_per_cmd = 8, .is_admin = true},
        { .fn = fuzz_admin_directive_receive_command, .bytes_per_cmd = 8, .is_admin = true},
        { .fn = fuzz_admin_set_features_command, .bytes_per_cmd = 7, .is_admin = true},
        { .fn = fuzz_admin_get_features_command, .bytes_per_cmd = 7, .is_admin = true},
        { .fn = fuzz_nvm_read_command, .bytes_per_cmd = 21, .is_admin = false},
        { .fn = fuzz_nvm_write_command, .bytes_per_cmd = 24, .is_admin = false},
        { .fn = fuzz_nvm_write_zeroes_command, .bytes_per_cmd = 20, .is_admin = false},
        { .fn = fuzz_nvm_write_uncorrectable_command, .bytes_per_cmd = 10, .is_admin = false},
        { .fn = fuzz_nvm_reservation_acquire_command, .bytes_per_cmd = 18, .is_admin = false},
        { .fn = fuzz_nvm_reservation_release_command, .bytes_per_cmd = 10, .is_admin = false},
        { .fn = fuzz_nvm_reservation_register_command, .bytes_per_cmd = 17, .is_admin = false},
        { .fn = fuzz_nvm_reservation_report_command, .bytes_per_cmd = 5, .is_admin = false},
        { .fn = fuzz_nvm_compare_command, .bytes_per_cmd = 20, .is_admin = false},
        { .fn = NULL, .bytes_per_cmd = 0, .is_admin = 0}
};

#define NUM_FUZZERS (SPDK_COUNTOF(g_fuzzers) - 1)

static struct fuzz_type *g_fuzzer;
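
/* Target state shared by the fuzzer thread: the transport ID given with -F,
 * plus the controller and I/O qpair that TestOneInput() sets up per input.
 */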
struct spdk_nvme_transport_id g_trid;
static struct spdk_nvme_ctrlr *g_ctrlr;
static struct spdk_nvme_qpair *g_io_qpair;

static void
nvme_fuzz_cpl_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
        int *outstanding = cb_arg;

        assert(*outstanding > 0);
        (*outstanding)--;
}

static int
run_cmds(uint32_t queue_depth)
{
        int rc, outstanding = 0;
        uint32_t i;

        for (i = 0; i < queue_depth; i++) {
                struct fuzz_command *cmd = &g_cmds[i];

                g_fuzzer->fn(cmd);
                outstanding++;
                if (g_fuzzer->is_admin) {
                        rc = spdk_nvme_ctrlr_cmd_admin_raw(g_ctrlr, &cmd->cmd, cmd->buf, cmd->len,
                                                           nvme_fuzz_cpl_cb, &outstanding);
                } else {
                        rc = spdk_nvme_ctrlr_cmd_io_raw(g_ctrlr, g_io_qpair, &cmd->cmd, cmd->buf, cmd->len,
                                                        nvme_fuzz_cpl_cb, &outstanding);
                }
                if (rc) {
                        return rc;
                }
        }

        while (outstanding > 0) {
                spdk_nvme_qpair_process_completions(g_io_qpair, 0);
                spdk_nvme_ctrlr_process_admin_completions(g_ctrlr);
        }
        return 0;
}

static int
TestOneInput(const uint8_t *data, size_t size)
{
        struct spdk_nvme_detach_ctx *detach_ctx = NULL;

        g_ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
        if (g_ctrlr == NULL) {
                fprintf(stderr, "spdk_nvme_connect() failed for transport address '%s'\n",
                        g_trid.traddr);
                spdk_app_stop(-1);
        }

        g_io_qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ctrlr, NULL, 0);
        if (g_io_qpair == NULL) {
                fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair failed\n");
                spdk_app_stop(-1);
        }

        g_data = data;

        run_cmds(size / g_fuzzer->bytes_per_cmd);
        spdk_nvme_ctrlr_free_io_qpair(g_io_qpair);
        spdk_nvme_detach_async(g_ctrlr, &detach_ctx);

        if (detach_ctx) {
                spdk_nvme_detach_poll(detach_ctx);
        }

        return 0;
}
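
/*
 * libFuzzer's driver entry point. It is invoked from a dedicated thread
 * (see start_fuzzer()) so the SPDK reactor thread remains free to process
 * events while the fuzzer repeatedly calls TestOneInput().
 */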
int LLVMFuzzerRunDriver(int *argc, char ***argv, int (*UserCb)(const uint8_t *Data, size_t Size));

static void
exit_handler(void)
{
        if (g_in_fuzzer) {
                spdk_app_stop(0);
                pthread_join(g_reactor_td, NULL);
        }
}

static void *
start_fuzzer(void *ctx)
{
        char *_argv[] = {
                "spdk",
                "-len_control=0",
                "-detect_leaks=1",
                NULL,
                NULL,
                NULL
        };
        char time_str[128];
        char len_str[128];
        char **argv = _argv;
        int argc = SPDK_COUNTOF(_argv);
        uint32_t len;

        spdk_unaffinitize_thread();
        len = MAX_COMMANDS * g_fuzzer->bytes_per_cmd;
        snprintf(len_str, sizeof(len_str), "-max_len=%d", len);
        argv[argc - 3] = len_str;
        snprintf(time_str, sizeof(time_str), "-max_total_time=%d", g_time_in_sec);
        argv[argc - 2] = time_str;
        argv[argc - 1] = g_corpus_dir;

        g_in_fuzzer = true;
        atexit(exit_handler);
        if (g_repro_data) {
                printf("Running single test based on reproduction data file.\n");
                TestOneInput(g_repro_data, g_repro_size);
                printf("Done.\n");
        } else {
                LLVMFuzzerRunDriver(&argc, &argv, TestOneInput);
                /* TODO: in the normal case, LLVMFuzzerRunDriver never returns - it calls exit()
                 * directly and we never get here. But this behavior isn't really documented
                 * anywhere by LLVM, so call spdk_app_stop(0) if it does return, which will
                 * result in the app exiting like a normal SPDK application (spdk_app_start()
                 * returns to main()).
                 */
        }
        g_in_fuzzer = false;
        spdk_app_stop(0);

        return NULL;
}

static void
begin_fuzz(void *ctx)
{
        int i;

        g_reactor_td = pthread_self();

        for (i = 0; i < MAX_COMMANDS; i++) {
                g_cmds[i].buf = spdk_malloc(4096, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
                assert(g_cmds[i].buf);
                g_cmds[i].len = 4096;
        }

        pthread_create(&g_fuzz_td, NULL, start_fuzzer, NULL);
}

static void
nvme_fuzz_usage(void)
{
        fprintf(stderr, " -D Path of corpus directory.\n");
        fprintf(stderr, " -F Transport ID for subsystem that should be fuzzed.\n");
        fprintf(stderr, " -N Name of reproduction data file.\n");
        fprintf(stderr, " -t Time to run fuzz tests (in seconds). Default: 10\n");
        fprintf(stderr, " -Z Fuzzer to run (0 to %lu)\n", NUM_FUZZERS - 1);
}

static int
nvme_fuzz_parse(int ch, char *arg)
{
        long long tmp;
        int rc;
        FILE *repro_file;

        switch (ch) {
        case 'D':
                g_corpus_dir = strdup(optarg);
                break;
        case 'F':
                if (g_trid_specified) {
                        fprintf(stderr, "Can only specify one trid\n");
                        return -1;
                }
                g_trid_specified = true;
                rc = spdk_nvme_transport_id_parse(&g_trid, optarg);
                if (rc < 0) {
                        fprintf(stderr, "failed to parse transport ID: %s\n", optarg);
                        return -1;
                }
                break;
        case 'N':
                repro_file = fopen(optarg, "r");
                if (repro_file == NULL) {
                        fprintf(stderr, "could not open %s: %s\n", optarg, spdk_strerror(errno));
                        return -1;
                }
                g_repro_data = spdk_posix_file_load(repro_file, &g_repro_size);
                if (g_repro_data == NULL) {
                        fprintf(stderr, "could not load data for file %s\n", optarg);
                        return -1;
                }
                break;
        case 't':
        case 'Z':
                tmp = spdk_strtoll(optarg, 10);
                if (tmp < 0 || tmp >= INT_MAX) {
                        fprintf(stderr, "Invalid value '%s' for option -%c.\n", optarg, ch);
                        return -EINVAL;
                }
                switch (ch) {
                case 't':
                        g_time_in_sec = tmp;
                        break;
                case 'Z':
                        if ((unsigned long)tmp >= NUM_FUZZERS) {
                                fprintf(stderr, "Invalid fuzz type %lld (max %lu)\n", tmp, NUM_FUZZERS - 1);
                                return -EINVAL;
                        }
                        g_fuzzer = &g_fuzzers[tmp];
                        break;
                }
                break;
        case '?':
        default:
                return -EINVAL;
        }
        return 0;
}

static void
fuzz_shutdown(void)
{
        /* If the user terminates the fuzzer prematurely, it is likely due
         * to an input hang. So raise a SIGSEGV signal which will cause the
         * fuzzer to generate a crash file for the last input.
         *
         * Note that the fuzzer will always generate a crash file, even if
         * we get our TestOneInput() function (which is called by the fuzzer)
         * to pthread_exit(). So just doing the SIGSEGV here in all cases is
         * simpler than trying to differentiate between hung inputs and
         * an impatient user.
         */
        pthread_kill(g_fuzz_td, SIGSEGV);
}
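
/*
 * Example invocation (the binary name, corpus directory, and transport ID
 * below are illustrative placeholders, not values taken from this file):
 *
 *   ./nvme_fuzz -D ./corpus -t 30 -Z 2 \
 *       -F 'trtype:tcp adrfam:IPv4 traddr:127.0.0.1 trsvcid:4420 subnqn:nqn.2016-06.io.spdk:cnode1'
 */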
int
main(int argc, char **argv)
{
        struct spdk_app_opts opts = {};
        int rc;

        spdk_app_opts_init(&opts, sizeof(opts));
        opts.name = "nvme_fuzz";
        opts.shutdown_cb = fuzz_shutdown;

        if ((rc = spdk_app_parse_args(argc, argv, &opts, "D:F:N:t:Z:", NULL, nvme_fuzz_parse,
                                      nvme_fuzz_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
                return rc;
        }

        if (!g_corpus_dir) {
                fprintf(stderr, "Must specify corpus dir with -D option\n");
                return -1;
        }

        if (!g_trid_specified) {
                fprintf(stderr, "Must specify trid with -F option\n");
                return -1;
        }

        if (!g_fuzzer) {
                fprintf(stderr, "Must specify fuzzer with -Z option\n");
                return -1;
        }

        rc = spdk_app_start(&opts, begin_fuzz, NULL);

        spdk_app_fini();
        return rc;
}