
Searched refs:queue (Results 1 – 25 of 31) sorted by relevance


/spdk/scripts/
prep_benchmarks.sh
38 for queue in /sys/block/nvme*n*/queue; do
39 if [ -f "$queue/nomerges" ]; then
40 echo "1" > $queue/nomerges
43 if [ -f "$queue/io_poll" ]; then
44 echo "1" > $queue/io_poll
47 if [ -f "$queue/io_poll_delay" ]; then
48 echo "-1" > $queue/io_poll_delay
fio-wrapper
125 if [[ -e /sys/block/$dev/queue/scheduler ]]; then
/spdk/doc/
nvme_spec.md
12 queue pairs in host memory. The term "host" is used a lot, so to clarify that's
13 the system that the NVMe SSD is plugged into. A queue pair consists of two
14 queues - a submission queue and a completion queue. These queues are more
16 queue is an array of 64 byte command structures, plus 2 integers (head and tail
17 indices). The completion queue is similarly an array of 16 byte completion
22 it into the submission queue at the current location of the submission queue
23 tail index, and then writing the new index of the submission queue tail to the
24 submission queue tail doorbell register. It's actually valid to copy a whole set
38 The completion queue works similarly, but the device is instead the one writing
40 and 1 on each loop through the entire ring. When a queue pair is set up to
[all …]
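
The nvme_spec.md excerpt above describes the core ring mechanics: a submission queue of 64-byte command slots indexed by a host-owned tail, published to the device by a doorbell write. A minimal sketch of that flow, with hypothetical names (sq_t, sq_submit) rather than SPDK's actual structures:

```c
/* Illustrative only: a bare-bones NVMe submission-queue ring as described
 * in the excerpt above. sq_t/sq_submit are hypothetical, not SPDK API. */
#include <stdint.h>
#include <string.h>

#define SQ_DEPTH 64

struct nvme_cmd { uint8_t bytes[64]; };   /* 64-byte command structure */

typedef struct {
	struct nvme_cmd    slots[SQ_DEPTH];   /* the ring itself, in host memory */
	uint16_t           tail;              /* host-owned tail index */
	volatile uint32_t *tail_doorbell;     /* MMIO register on the device */
} sq_t;

/* Copy a command into the ring at the current tail, advance the tail
 * (wrapping at the end of the ring), then write the new tail to the
 * doorbell so the device knows a command is ready to fetch. */
static void
sq_submit(sq_t *sq, const struct nvme_cmd *cmd)
{
	memcpy(&sq->slots[sq->tail], cmd, sizeof(*cmd));
	sq->tail = (uint16_t)((sq->tail + 1) % SQ_DEPTH);
	*sq->tail_doorbell = sq->tail;
}
```

As line 24 of the excerpt notes, it is also valid to copy a whole batch of commands into the ring and publish them with a single doorbell write.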
userspace.md
54 to initialize the device, create queue pairs, and ultimately send I/O.
66 memory needs to be read (no MMIO) to check a queue pair for a bit flip and
82 correct hardware queue for whatever core the current thread happens to be
83 running on. Often, they'll need to either acquire a lock around the queue or
86 from older hardware interfaces that only had a single queue or no queue at
93 that a hardware queue is only ever accessed from one thread at a time. In
94 practice, applications assign one hardware queue to each thread (as opposed to
95 one hardware queue per core in kernel drivers). This guarantees that the thread
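
The userspace.md excerpt describes SPDK's one-queue-pair-per-thread convention, which makes lockless submission safe. A sketch of that pattern against the public SPDK NVMe driver API (the surrounding function and its error-free happy path are illustrative):

```c
/* Sketch of the one-qpair-per-thread pattern from the excerpt above,
 * using the public SPDK NVMe driver API. Error handling omitted; buf
 * must be DMA-able memory (e.g. from spdk_dma_malloc()). */
#include "spdk/nvme.h"

static void
io_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	/* Runs on the owning thread, inside process_completions(). */
}

void
per_thread_io(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns, void *buf)
{
	/* Each thread allocates its own queue pair; since no other thread
	 * ever touches it, submission and completion need no locks. */
	struct spdk_nvme_qpair *qpair =
		spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);

	spdk_nvme_ns_cmd_read(ns, qpair, buf, 0 /* LBA */, 1 /* LBA count */,
			      io_done, NULL, 0);

	/* The thread that owns the qpair is also the one that polls it. */
	while (spdk_nvme_qpair_process_completions(qpair, 0) == 0) {
		;
	}

	spdk_nvme_ctrlr_free_io_qpair(qpair);
}
```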
nvmf_tgt_pg.md
43 `struct spdk_nvmf_qpair`: An NVMe-oF queue pair, as defined by the NVMe-oF
163 transport allocates a single RDMA completion queue per poll group. All new
165 queues, but share this common completion queue. This allows the poll group to
166 poll a single queue for incoming messages instead of iterating through each
177 containing an NVMe completion. This is problematic at full queue depth because
181 queue for a SEND acknowledgement before they can acquire a full rdma_request
184 Further, RDMA NICs expose different queue depths for READ/WRITE operations
186 queue depth based on SEND/RECV operation limits and will queue in software as
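
The design the nvmf_tgt_pg.md excerpt describes, one shared RDMA completion queue per poll group, reduces to polling a single ibv_cq for all connections. A sketch in plain libibverbs, not SPDK's actual transport code:

```c
/* Sketch of the shared-completion-queue idea from the excerpt above: the
 * poll group polls one ibv_cq that all of its queue pairs post to, rather
 * than iterating over one CQ per queue pair. */
#include <infiniband/verbs.h>

#define BATCH 32

int
poll_group_poll(struct ibv_cq *shared_cq)
{
	struct ibv_wc wc[BATCH];
	int n = ibv_poll_cq(shared_cq, BATCH, wc);

	for (int i = 0; i < n; i++) {
		/* wc[i].wr_id identifies the request, and through it the
		 * owning queue pair, so one poll serves every connection. */
	}
	return n;
}
```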
idxd.md
22 that SPDK will use work queue(s) surfaced by the driver. Passing in `false` means
event.md
38 from a queue. Each event consists of a bundled function pointer and its
50 Each reactor has a lock-free queue for incoming events to that core, and
51 threads from any core may insert events into the queue of any other core. The
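
The event.md excerpt describes an event as a function pointer bundled with its arguments, pushed onto another reactor's lock-free queue. The public SPDK event API expresses exactly that; a minimal sketch:

```c
/* Sketch of cross-core event passing as described in the excerpt above,
 * using the public SPDK event API. */
#include "spdk/event.h"

static void
hello_fn(void *arg1, void *arg2)
{
	/* Runs on the target core when its reactor dequeues the event. */
}

void
send_event_to_core(uint32_t target_lcore)
{
	struct spdk_event *ev = spdk_event_allocate(target_lcore, hello_fn,
						    NULL, NULL);

	/* Inserts into the target reactor's lock-free event ring; safe to
	 * call from any core. */
	spdk_event_call(ev);
}
```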
iscsi.md
170 echo noop > /sys/block/sdc/queue/scheduler
176 echo "2" > /sys/block/sdc/queue/nomerges
179 Increase requests for block queue
182 echo "1024" > /sys/block/sdc/queue/nr_requests
bdev_pg.md
25 - Automatic queueing of I/O requests in response to queue full or out-of-memory conditions
144 module-specific. For example, NVMe devices will delete all queue pairs,
145 perform an NVMe reset, then recreate the queue pairs and continue. Most
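
The "automatic queueing ... in response to queue full or out-of-memory conditions" bullet from bdev_pg.md corresponds to the spdk_bdev_queue_io_wait() retry pattern. A sketch, assuming a caller-owned context struct (my_io is illustrative):

```c
/* Sketch of retry-on-ENOMEM queueing per the bdev_pg.md excerpt above.
 * The my_io struct is ours; the wait-entry API is public SPDK. */
#include <errno.h>
#include "spdk/bdev.h"

struct my_io {
	struct spdk_bdev_desc          *desc;
	struct spdk_io_channel         *ch;
	void                           *buf;
	struct spdk_bdev_io_wait_entry  wait;
};

static void
read_done(struct spdk_bdev_io *bdev_io, bool success, void *ctx)
{
	spdk_bdev_free_io(bdev_io);
}

static void
submit_read(void *ctx)
{
	struct my_io *io = ctx;
	int rc = spdk_bdev_read(io->desc, io->ch, io->buf, 0, 4096,
				read_done, io);

	if (rc == -ENOMEM) {
		/* No resources right now: park a wait entry and let the
		 * bdev layer call submit_read() again when one frees up. */
		io->wait.bdev   = spdk_bdev_desc_get_bdev(io->desc);
		io->wait.cb_fn  = submit_read;
		io->wait.cb_arg = io;
		spdk_bdev_queue_io_wait(io->wait.bdev, io->ch, &io->wait);
	}
}
```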
overview.md
85 - `queue.h` and `queue_extras.h`
nvme_multipath.md
69 The active-active policy uses the round-robin algorithm or the minimum queue depth algorithm.
70 The round-robin algorithm submits an I/O to each I/O path in circular order. The minimum queue depth
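
The two active-active policies named in the nvme_multipath.md excerpt reduce to simple selection functions. An illustrative sketch, not SPDK's implementation (the path struct is hypothetical):

```c
/* Illustrative path-selection logic for the two policies named above. */
#include <stddef.h>
#include <stdint.h>

struct path { uint32_t outstanding; };   /* outstanding I/Os on this path */

/* Round-robin: hand out paths in circular order. */
static size_t
pick_round_robin(size_t npaths, size_t *next)
{
	size_t i = *next;

	*next = (*next + 1) % npaths;
	return i;
}

/* Minimum queue depth: pick the path with the fewest outstanding I/Os. */
static size_t
pick_min_qd(const struct path *paths, size_t npaths)
{
	size_t best = 0;

	for (size_t i = 1; i < npaths; i++) {
		if (paths[i].outstanding < paths[best].outstanding) {
			best = i;
		}
	}
	return best;
}
```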
vhost_processing.md
116 *ENABLE* message for each extra queue it wants to be polled. Other queues are
/spdk/module/bdev/xnvme/
bdev_xnvme.c
26 struct xnvme_queue *queue; member
192 struct xnvme_cmd_ctx *ctx = xnvme_queue_get_cmd_ctx(xnvme_ch->queue); in _xnvme_submit_request()
219 xnvme_queue_put_cmd_ctx(xnvme_ch->queue, ctx); in bdev_xnvme_submit_request()
227 xnvme_queue_put_cmd_ctx(xnvme_ch->queue, ctx); in bdev_xnvme_submit_request()
246 /* Submission failed: queue is full or no memory => Queue the I/O in bdev layer */ in bdev_xnvme_free()
250 SPDK_WARNLOG("Start to queue I/O for xnvme bdev\n"); in bdev_xnvme_free()
252 xnvme_queue_put_cmd_ctx(xnvme_ch->queue, ctx); in bdev_xnvme_free()
256 /* Submission failed: unexpected error, put the command-context back in the queue */
260 xnvme_queue_put_cmd_ctx(xnvme_ch->queue, ctx); in bdev_xnvme_cmd_cb()
272 xnvme_queue_put_cmd_ctx(xnvme_ch->queue, xnvme_queue_get_cmd_ct in bdev_xnvme_cmd_cb()
[all …]
/spdk/test/iscsi_tgt/perf/
iscsi_initiator.sh
33 echo noop > /sys/block/${disks[i]}/queue/scheduler
34 echo "2" > /sys/block/${disks[i]}/queue/nomerges
35 echo "1024" > /sys/block/${disks[i]}/queue/nr_requests
/spdk/module/bdev/ocf/
ctx.h
31 /* Thread safe queue creation and deletion
33 int vbdev_ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue, const struct ocf_queue_ops *ops);
34 int vbdev_ocf_queue_create_mngt(ocf_cache_t cache, ocf_queue_t *queue,
36 void vbdev_ocf_queue_put(ocf_queue_t queue);
ctx.c
270 vbdev_ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue, const struct ocf_queue_ops *ops) in vbdev_ocf_queue_create() argument
273 return ocf_queue_create(cache, queue, ops); in vbdev_ocf_queue_create()
277 vbdev_ocf_queue_create_mngt(ocf_cache_t cache, ocf_queue_t *queue, const struct ocf_queue_ops *ops) in vbdev_ocf_queue_create_mngt()
279 return ocf_queue_create_mngt(cache, queue, ops); in vbdev_ocf_queue_create()
283 vbdev_ocf_queue_put(ocf_queue_t queue) in vbdev_ocf_queue_put()
285 ocf_queue_put(queue); in vbdev_ocf_queue_put()
282 vbdev_ocf_queue_put(ocf_queue_t queue) vbdev_ocf_queue_put() argument
vbdev_ocf.h
22 struct ocf_queue *queue; member
vbdev_ocf.c
619 io = ocf_volume_new_io(ocf_core_get_front_volume(vbdev->ocf_core), qctx->queue, offset, len, dir, 0, in io_handle()
774 /* Poller function for the OCF queue
780 uint32_t iono = ocf_queue_pending_io(qctx->queue); in queue_poll()
784 ocf_queue_run_single(qctx->queue); in queue_poll()
801 /* OCF queue deinitialization
818 /* Queue ops is an interface for running queue thread
819 * stop() operation in called just before queue gets destroyed */
835 rc = vbdev_ocf_queue_create(vbdev->ocf_cache, &qctx->queue, &queue_ops); in io_device_create_cb()
840 ocf_queue_set_priv(qctx->queue, qctx); in io_device_create_cb()
851 * Put OCF queue an
[all …]
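
The vbdev_ocf.c matches above outline the queue poller: check ocf_queue_pending_io(), then drain work with ocf_queue_run_single(). A condensed sketch of that loop (the queue_ctx struct and batch cap are illustrative):

```c
/* Sketch of the OCF queue poller pattern visible in the matches above. */
#include <stdint.h>
#include "ocf/ocf.h"

#define MAX_PER_POLL 32

struct queue_ctx { ocf_queue_t queue; };

static int
queue_poll_sketch(void *arg)
{
	struct queue_ctx *qctx = arg;
	uint32_t pending = ocf_queue_pending_io(qctx->queue);
	uint32_t max = pending > MAX_PER_POLL ? MAX_PER_POLL : pending;

	for (uint32_t i = 0; i < max; i++) {
		ocf_queue_run_single(qctx->queue);  /* run one queued OCF I/O */
	}
	return pending > 0;   /* busy if there was anything to do */
}
```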
/spdk/lib/thread/
iobuf.c
388 cache->small.queue = &ch_node->small_queue; in spdk_iobuf_channel_init()
389 cache->large.queue = &ch_node->large_queue;
514 /* Make sure none of the wait queue entries are coming from this module */ in spdk_iobuf_entry_abort()
515 STAILQ_FOREACH(entry, cache->small.queue, stailq) { in spdk_iobuf_entry_abort()
518 STAILQ_FOREACH(entry, cache->large.queue, stailq) {
613 STAILQ_FOREACH_SAFE(entry, pool->queue, stailq, tmp) { in spdk_iobuf_put()
669 STAILQ_FOREACH(e, pool->queue, stailq) { in spdk_iobuf_get_stats()
671 STAILQ_REMOVE(pool->queue, entry, spdk_iobuf_entry, stailq); in spdk_iobuf_get_stats()
725 STAILQ_INSERT_TAIL(pool->queue, entry, stailq);
772 if (STAILQ_EMPTY(pool->queue)) {
[all …]
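
The iobuf.c matches show the wait queues are plain sys/queue.h STAILQs (the same `queue.h` headers matched in overview.md above). A generic sketch of that FIFO wait-queue shape, with an illustrative entry type:

```c
/* Sketch of a STAILQ-based FIFO wait queue like the one in iobuf.c:
 * requests that could not get a buffer are appended and woken in order. */
#include <stddef.h>
#include <sys/queue.h>

struct wait_entry {
	void (*cb)(void *ctx);            /* resume the stalled request */
	void *ctx;
	STAILQ_ENTRY(wait_entry) stailq;  /* intrusive linkage, no allocation */
};

STAILQ_HEAD(wait_queue, wait_entry);

static void
wait_enqueue(struct wait_queue *q, struct wait_entry *e)
{
	STAILQ_INSERT_TAIL(q, e, stailq);
}

/* Called when a buffer is freed: wake the oldest waiter, if any. */
static void
wait_wake_one(struct wait_queue *q)
{
	struct wait_entry *e = STAILQ_FIRST(q);

	if (e != NULL) {
		STAILQ_REMOVE_HEAD(q, stailq);
		e->cb(e->ctx);
	}
}
```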
/spdk/test/nvme/perf/
run_perf.sh
248 echo -1 > /sys/block/$disk/queue/io_poll_delay
252 echo 0 > /sys/block/$disk/queue/io_poll_delay
264 sysfs=/sys/block/$disk/queue
274 sysfs=/sys/block/$disk/queue
437 sysfs=/sys/block/$disk/queue
/spdk/module/bdev/daos/
bdev_daos.c
65 daos_handle_t queue; member
252 if ((rc = daos_event_init(&task->ev, ch->queue, NULL))) { in bdev_daos_writev()
298 if ((rc = daos_event_init(&task->ev, ch->queue, NULL))) { in bdev_daos_readv()
354 int io_inflight = daos_eq_query(dch->queue, DAOS_EQR_WAITING, 0, NULL); in _bdev_daos_get_io_inflight()
503 int rc = daos_eq_poll(ch->queue, 0, DAOS_EQ_NOWAIT, in bdev_daos_channel_poll()
628 if ((rc = daos_eq_create(&ch->queue))) { in bdev_daos_io_channel_setup_daos()
679 if ((rc = daos_eq_destroy(ch->queue, DAOS_EQ_DESTROY_FORCE))) { in bdev_daos_io_channel_destroy_cb()
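
The bdev_daos.c matches trace one DAOS event queue per I/O channel. A condensed sketch of that lifecycle, based on the calls shown above (error handling trimmed; assumes daos_init() has already run):

```c
/* Sketch of the per-channel DAOS event-queue lifecycle from the matches
 * above: create the queue, bind an I/O's event to it, poll non-blocking,
 * and force-destroy on channel teardown. */
#include <daos.h>

#define MAX_EVENTS 64

void
daos_queue_lifecycle(void)
{
	daos_handle_t eq;
	daos_event_t  ev;
	daos_event_t *done[MAX_EVENTS];

	daos_eq_create(&eq);                 /* per-channel event queue */
	daos_event_init(&ev, eq, NULL);      /* bind one I/O's event to it */

	/* ... submit an async DAOS operation that takes &ev ... */

	/* Non-blocking poll, as in bdev_daos_channel_poll() above. */
	daos_eq_poll(eq, 0, DAOS_EQ_NOWAIT, MAX_EVENTS, done);

	daos_eq_destroy(eq, DAOS_EQ_DESTROY_FORCE);
}
```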
/spdk/test/app/fuzz/vhost_fuzz/
README.md
7 queue or the scsi admin queue. Please see the NVMe fuzzer readme for information
/spdk/test/nvme/overhead/
README
3 It runs a random read, queue depth = 1 workload to a single device,
/spdk/test/app/fuzz/nvme_fuzz/
README.md
8 submit commands to that thread at a set queue depth (currently 128 for I/O, 16 for Admin). The
/spdk/scripts/perf/nvmf/
README.md
176 - max_queue_depth - int, max number of outstanding I/O per queue. Default: 128.
179 - num_cqe - int, number of completion queue entries. See doc/json_rpc.md
351 #### Important note about queue depth parameter
