/spdk/python/spdk/spdkcli/
  ui_node_nvmf.py
    31: def ui_command_create(self, trtype, max_queue_depth=None, max_io_qpairs_per_ctrlr=None,  [argument]
    44: max_queue_depth = self.ui_eval_param(max_queue_depth, "number", None)
    52: max_queue_depth=max_queue_depth,
/spdk/lib/nvmf/
  rdma.c
    269: uint32_t max_queue_depth;
    275: /* Array of size "max_queue_depth" containing RDMA requests. */
    278: /* Array of size "max_queue_depth" containing RDMA recvs. */
    281: /* Array of size "max_queue_depth" containing 64 byte capsules
    286: /* Array of size "max_queue_depth" containing 16 byte completions
    291: /* Array of size "max_queue_depth * InCapsuleDataSize" containing
    326: uint16_t max_queue_depth;
    335: * recv queue. Should not exceed device->attr.max_queue_depth.
    650: for (i = 0; i < rqpair->max_queue_depth; i++) {  [in nvmf_rdma_dump_qpair_contents()]
    688: resources->reqs = spdk_zmalloc(opts->max_queue_depth * sizeo…  [in nvmf_rdma_resources_create()]
    267: uint32_t max_queue_depth;  [member]
    327: uint16_t max_queue_depth;  [member]
    1281: uint16_t max_queue_depth;  [local in nvmf_rdma_connect()]
    [all...]
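The rdma.c hits show how central max_queue_depth is to the RDMA transport: every per-queue resource array (requests, recvs, 64-byte command capsules, 16-byte completions, in-capsule data buffers) is sized by it. A minimal, illustrative sketch of that sizing pattern follows; the struct name, field names, and element sizes are assumptions, not the SPDK definitions referenced above.

    /* Illustrative only: element sizes and names are placeholders, not SPDK types. */
    #include <stdint.h>
    #include <stdlib.h>

    struct example_rdma_resources {
        uint32_t max_queue_depth;
        void *reqs;   /* max_queue_depth request contexts */
        void *recvs;  /* max_queue_depth receive contexts */
        void *cmds;   /* max_queue_depth 64-byte command capsules */
        void *cpls;   /* max_queue_depth 16-byte completions */
        void *bufs;   /* max_queue_depth * in_capsule_data_size bytes of data */
    };

    static struct example_rdma_resources *
    example_resources_create(uint32_t max_queue_depth, uint32_t in_capsule_data_size)
    {
        struct example_rdma_resources *r = calloc(1, sizeof(*r));

        if (r == NULL) {
            return NULL;
        }
        r->max_queue_depth = max_queue_depth;
        r->reqs  = calloc(max_queue_depth, 128);                  /* placeholder request size */
        r->recvs = calloc(max_queue_depth, 64);                   /* placeholder recv size */
        r->cmds  = calloc(max_queue_depth, 64);                   /* NVMe command capsule */
        r->cpls  = calloc(max_queue_depth, 16);                   /* NVMe completion entry */
        r->bufs  = calloc(max_queue_depth, in_capsule_data_size); /* in-capsule data */
        if (!r->reqs || !r->recvs || !r->cmds || !r->cpls || !r->bufs) {
            free(r->reqs); free(r->recvs); free(r->cmds); free(r->cpls); free(r->bufs);
            free(r);
            return NULL;
        }
        return r;
    }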
  transport.c
    80: spdk_json_write_named_uint32(w, "max_queue_depth", opts->max_queue_depth);  [in nvmf_transport_dump_opts()]
    140: SET_FIELD(max_queue_depth);  [in nvmf_transport_opts_copy()]
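The transport.c hits are the generic opts plumbing: the value is written out when transport opts are dumped as JSON and copied field-by-field when an opts struct is duplicated. Below is a rough sketch of a size-guarded SET_FIELD-style copy; the struct is an invented stand-in for the real transport opts, and the guard condition is an assumption about how such copies are typically kept compatible with callers built against older, shorter opts structs.

    #include <stddef.h>
    #include <stdint.h>

    struct example_transport_opts {        /* hypothetical trimmed-down opts */
        size_t   opts_size;                /* size the caller was compiled against */
        uint16_t max_queue_depth;
        uint32_t max_io_size;
    };

    /* Copy a field only if it fits inside the caller-provided struct size. */
    #define SET_FIELD(field)                                                            \
        if (offsetof(struct example_transport_opts, field) + sizeof(src->field) <=      \
            src->opts_size) {                                                           \
            dst->field = src->field;                                                    \
        }

    static void
    example_opts_copy(struct example_transport_opts *dst,
                      const struct example_transport_opts *src)
    {
        SET_FIELD(max_queue_depth);
        SET_FIELD(max_io_size);
        dst->opts_size = src->opts_size;
    }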
  tcp.c
    766: opts->max_queue_depth,  [in nvmf_tcp_create()]
    810: * We will not check SPDK_NVMF_TCP_MAX_IO_QUEUE_DEPTH, because max_queue_depth is 16bits and always not larger than 64k. */  [in nvmf_tcp_canon_listen_trid()]
    811: if (opts->max_queue_depth < SPDK_NVMF_TCP_MIN_IO_QUEUE_DEPTH) {  [in nvmf_tcp_canon_listen_trid()]
    812: SPDK_WARNLOG("TCP param max_queue_depth %u can't be smaller than %u or larger than %u. Using default value %u\n",  [in nvmf_tcp_canon_listen_trid()]
    813: opts->max_queue_depth, SPDK_NVMF_TCP_MIN_IO_QUEUE_DEPTH,  [in nvmf_tcp_canon_listen_trid()]
    815: opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH;  [in nvmf_tcp_canon_listen_trid()]
    1364: tqpair->resource_count = opts->max_queue_depth;  [in nvmf_tcp_qpair_sock_init()]
    3919: opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH;
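The tcp.c matches show the TCP transport validating the option at create time: a value below the minimum is warned about and replaced by the default, and no upper-bound check is needed because the field is 16 bits wide and can never exceed 64K. A simplified sketch of that validation; the constant names and values below are assumptions standing in for the SPDK_NVMF_TCP_* macros.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values; the real limits live in the SPDK TCP transport code. */
    #define EXAMPLE_TCP_MIN_IO_QUEUE_DEPTH          2u
    #define EXAMPLE_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH  128u

    static void
    example_tcp_check_max_queue_depth(uint16_t *max_queue_depth)
    {
        /* No upper bound is enforced: a 16-bit value cannot exceed 64K. */
        if (*max_queue_depth < EXAMPLE_TCP_MIN_IO_QUEUE_DEPTH) {
            fprintf(stderr,
                    "max_queue_depth %u is below the minimum %u; using default %u\n",
                    (unsigned int)*max_queue_depth,
                    EXAMPLE_TCP_MIN_IO_QUEUE_DEPTH,
                    EXAMPLE_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
            *max_queue_depth = EXAMPLE_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH;
        }
    }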
  fc.c
    331: qd = fc_conn->max_queue_depth * 2;  [in nvmf_fc_create_conn_reqpool()]
    334: fc_conn->pool_memory = calloc((fc_conn->max_queue_depth * 2),  [in nvmf_fc_create_conn_reqpool()]
    1930: opts->max_queue_depth = SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;  [in nvmf_fc_opts_init()]
    1950: opts->max_queue_depth,  [in nvmf_fc_create()]
    2157: fc_conn->max_queue_depth)) {  [in nvmf_fc_poll_group_add()]
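The fc.c entries size the per-connection request pool at twice the connection's max_queue_depth (the "* 2" factor visible in the snippets). A bare-bones sketch of that sizing; the request struct below is an assumed placeholder for the real per-request context.

    #include <stdint.h>
    #include <stdlib.h>

    struct example_fc_request {
        uint8_t payload[256];   /* placeholder size for one request context */
    };

    static void *
    example_fc_create_conn_reqpool(uint16_t max_queue_depth, size_t *pool_entries)
    {
        /* The pool holds twice the queue depth, matching the factor used above. */
        *pool_entries = (size_t)max_queue_depth * 2;
        return calloc(*pool_entries, sizeof(struct example_fc_request));
    }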
  nvmf_fc.h
    207: uint16_t max_queue_depth;
    209: uint16_t max_queue_depth;  [member]
  fc_ls.c
    329: fc_conn->max_queue_depth = sq_size + 1;  [in nvmf_fc_ls_new_connection()]
    1061: from_be16(&rqst->connect_cmd.sqsize) > transport->opts.max_queue_depth) {  [in nvmf_fc_ls_process_cioc()]
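fc_ls.c ties the option to the NVMe-oF Connect parameters: SQSIZE is a zero-based value, so an accepted connection gets a queue depth of sq_size + 1, and a Create I/O Connection request whose SQSIZE exceeds the transport's max_queue_depth is rejected. A small sketch of that logic under those assumptions, with invented function names:

    #include <stdbool.h>
    #include <stdint.h>

    /* sqsize is the zero-based value carried in the Connect command. */
    static bool
    example_fc_accept_io_connection(uint16_t sqsize, uint16_t transport_max_queue_depth,
                                    uint16_t *conn_queue_depth)
    {
        if (sqsize > transport_max_queue_depth) {
            return false;                           /* reject the Create I/O Connection */
        }
        *conn_queue_depth = (uint16_t)(sqsize + 1); /* convert zero-based SQSIZE to a depth */
        return true;
    }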
  ctrlr.c
    542: ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth -  [in nvmf_ctrlr_create()]
    886: * strictly less than max_aq_depth (admin queues) or max_queue_depth (io queues).  [in _nvmf_ctrlr_connect()]
    901: } else if (cmd->sqsize >= transport->opts.max_queue_depth) {  [in _nvmf_ctrlr_connect()]
    903: cmd->sqsize, transport->opts.max_queue_depth - 1);  [in _nvmf_ctrlr_connect()]
    2952: cdata->maxcmd = transport->opts.max_queue_depth;  [in nvmf_ns_identify_iocs_zns()]
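In ctrlr.c the option feeds the controller-visible limits: CAP.MQES (a zero-based field) is derived from max_queue_depth, a Connect for an I/O queue must carry a zero-based SQSIZE strictly less than max_queue_depth, and Identify data reports the value as MAXCMD. A compact sketch of the first two uses, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* CAP.MQES is zero-based, so a transport allowing N entries reports N - 1. */
    static uint16_t
    example_cap_mqes(uint16_t max_queue_depth)
    {
        return (uint16_t)(max_queue_depth - 1);
    }

    /* An I/O queue Connect carries a zero-based SQSIZE; it must be non-zero and
     * strictly below max_queue_depth, i.e. at most max_queue_depth - 1. */
    static bool
    example_io_connect_sqsize_ok(uint16_t sqsize, uint16_t max_queue_depth)
    {
        return sqsize != 0 && sqsize < max_queue_depth;
    }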
  nvmf_rpc.c
    2470: "max_queue_depth", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_queue_depth),  [in rpc_nvmf_create_transport()]
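nvmf_rpc.c registers the parameter in the table-driven JSON decoder for the create_transport RPC: the JSON key is paired with the offset of opts.max_queue_depth inside the RPC context, so the decoded value lands directly in the transport opts. The following is a generic sketch of that offset-based pattern, not the SPDK JSON API; every name in it is invented.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct example_rpc_ctx {
        struct {
            uint16_t max_queue_depth;
        } opts;
    };

    struct example_decoder {
        const char *name;    /* JSON key in the RPC request */
        size_t      offset;  /* where the decoded value lands in the context */
    };

    static const struct example_decoder example_decoders[] = {
        {"max_queue_depth", offsetof(struct example_rpc_ctx, opts.max_queue_depth)},
    };

    /* Store a decoded 16-bit value into the context member named by `key`. */
    static void
    example_set_u16(struct example_rpc_ctx *ctx, const char *key, uint16_t value)
    {
        size_t i;

        for (i = 0; i < sizeof(example_decoders) / sizeof(example_decoders[0]); i++) {
            if (strcmp(example_decoders[i].name, key) == 0) {
                memcpy((uint8_t *)ctx + example_decoders[i].offset, &value, sizeof(value));
                return;
            }
        }
    }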
  vfio_user.c
    5922: opts->max_queue_depth = NVMF_VFIO_USER_DEFAULT_MAX_QUEUE_DEPTH;
/spdk/python/spdk/rpc/
  iscsi.py
    17: max_queue_depth=None,  [argument]
    79: if max_queue_depth:
    80: params['max_queue_depth'] = max_queue_depth
/spdk/test/unit/lib/nvmf/fc_ls.c/
  fc_ls_ut.c
    55: .max_queue_depth = 128,
    133: fc_conn->max_queue_depth)) {  [in spdk_nvmf_tgt_new_qpair()]
    306: to_be16(&cc_rqst.connect_cmd.ersp_ratio, (g_nvmf_transport.opts.max_queue_depth / 2));  [in run_create_conn_test()]
    307: to_be16(&cc_rqst.connect_cmd.sqsize, g_nvmf_transport.opts.max_queue_depth - 1);  [in run_create_conn_test()]
    812: g_nvmf_transport_opts.max_queue_depth);  [in usage()]
    842: g_nvmf_transport_opts.max_queue_depth = (uint16_t)val;  [in main()]
/spdk/test/spdkcli/match_files/
  spdkcli_iscsi.test.match
    22: …| o- max_queue_depth: 64 ...............................................…
/spdk/test/unit/lib/nvmf/tcp.c/
  tcp_ut.c
    435: opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;  [in test_nvmf_tcp_create()]
    448: CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);  [in test_nvmf_tcp_create()]
    457: opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;  [in test_nvmf_tcp_destroy()]
    470: CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);  [in test_nvmf_tcp_destroy()]
    479: opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;  [in test_nvmf_tcp_destroy()]
    512: opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;  [in test_nvmf_tcp_poll_group_create()]
    564: opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;  [in test_nvmf_tcp_send_c2h_data()]
    832: CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);  [in test_nvmf_tcp_qpair_init_mem_resource()]
    1350: opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;  [in test_nvmf_tcp_tls_add_remove_credentials()]
/spdk/test/unit/lib/nvmf/fc.c/
  fc_ut.c
    248: CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);  [in create_transport_test()]
/spdk/test/unit/lib/nvmf/rdma.c/
  rdma_ut.c
    18: .max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
    1227: CU_ASSERT(opts.max_queue_depth == SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH);  [in test_nvmf_rdma_opts_init()]
    1282: opts.max_queue_depth = DEPTH;  [in test_nvmf_rdma_resources_create()]
    1383: rqpair.max_queue_depth = 2;  [in test_nvmf_rdma_resize_cq()]
    1389: CU_ASSERT(rpoller.required_num_wr == 10 + MAX_WR_PER_QP(rqpair.max_queue_depth));  [in test_nvmf_rdma_resize_cq()]
    1435: CU_ASSERT(rpoller.required_num_wr == 18 + MAX_WR_PER_QP(rqpair.max_queue_depth));  [in main()]
/spdk/test/unit/lib/nvmf/transport.c/
  transport_ut.c
    19: .max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
/spdk/scripts/perf/nvmf/
  README.md
    176: - max_queue_depth - int, max number of outstanding I/O per queue. Default: 128.
  run_nvmf.py
    1174: ConfigField(name='max_queue_depth', default=128),
    1240: "max_queue_depth": self.max_queue_depth,
/spdk/test/unit/lib/nvmf/ctrlr.c/
  ctrlr_ut.c
    492: transport.opts.max_queue_depth = 64;  [in test_connect()]
    681: /* Invalid I/O sqsize > max_queue_depth */  [in test_connect()]
    2598: transport.opts.max_queue_depth = 64;  [in test_nvmf_ctrlr_use_zcopy()]
/spdk/scripts/
  rpc.py
    1426: max_queue_depth=args.max_queue_depth,