Lines Matching defs:vq

73 struct vhost_blk_queue *vq = task->vq;
74 struct vring_used *used = vq->vring.used;
77 vq->id, task->req_idx);
86 used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
87 used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
93 vq->id, used->idx, task->req_idx);
98 rte_vhost_vring_call(task->ctrlr->vid, vq->id);
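Lines 73-98 above are the split-ring completion path: the finished chain's head index and written byte count go into the next used-ring slot, the free-running used index is bumped, and the guest is kicked through rte_vhost_vring_call(). A minimal standalone sketch of that bookkeeping, using the standard virtio split-ring layout rather than the example's DPDK types:

#include <stdint.h>

/* Standard virtio used-ring element: id of the descriptor-chain
 * head plus the number of bytes the device wrote into the chain. */
struct used_elem { uint32_t id; uint32_t len; };
struct used_ring { uint16_t flags; uint16_t idx; struct used_elem ring[]; };

/* Publish one completion.  The ring size is a power of two, so
 * masking the free-running idx picks the right slot; real code
 * also needs a store barrier before the idx update so the guest
 * never observes the index ahead of the element. */
static void
put_used(struct used_ring *used, uint16_t size, uint16_t head, uint32_t written)
{
        used->ring[used->idx & (size - 1)].id  = head;
        used->ring[used->idx & (size - 1)].len = written;
        used->idx++;
        /* the example then notifies via rte_vhost_vring_call() */
}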
104 struct vhost_blk_queue *vq = task->vq;
107 rte_vhost_set_last_inflight_io_packed(task->ctrlr->vid, vq->id,
110 desc = &vq->vring.desc_packed[vq->last_used_idx];
115 if (vq->used_wrap_counter)
121 rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
124 vq->last_used_idx += task->chain_num;
125 if (vq->last_used_idx >= vq->vring.size) {
126 vq->last_used_idx -= vq->vring.size;
127 vq->used_wrap_counter = !vq->used_wrap_counter;
133 rte_vhost_vring_call(task->ctrlr->vid, vq->id);
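The packed-ring completion at lines 104-133 writes the used element back in place: the in-flight slot is first recorded via rte_vhost_set_last_inflight_io_packed(), the descriptor at last_used_idx gets its AVAIL/USED flag bits set to match the device's used wrap counter, the inflight entry is cleared with rte_vhost_clr_inflight_desc_packed(), and last_used_idx advances by the chain length, flipping the wrap counter when it walks off the ring. A sketch of just the flag and index arithmetic (illustrative types, not the DPDK structs):

#include <stdbool.h>
#include <stdint.h>

#define F_AVAIL (1u << 7)       /* VIRTQ_DESC_F_AVAIL */
#define F_USED  (1u << 15)      /* VIRTQ_DESC_F_USED  */

struct pq {                     /* the slice of vhost_blk_queue we need */
        uint16_t last_used_idx;
        uint16_t size;
        bool     used_wrap_counter;
};

/* A packed descriptor is marked used by making both flag bits equal
 * to the device's current used wrap counter. */
static uint16_t
used_flags(const struct pq *q)
{
        return q->used_wrap_counter ? (F_AVAIL | F_USED) : 0;
}

/* Step past a completed chain of chain_num descriptors, flipping
 * the wrap counter whenever the index passes the end of the ring. */
static void
advance_used(struct pq *q, uint16_t chain_num)
{
        q->last_used_idx += chain_num;
        if (q->last_used_idx >= q->size) {
                q->last_used_idx -= q->size;
                q->used_wrap_counter = !q->used_wrap_counter;
        }
}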
179 vring_get_next_desc(struct vhost_blk_queue *vq, struct vring_desc *desc)
182 return &vq->vring.desc[desc->next];
188 vring_get_next_desc_packed(struct vhost_blk_queue *vq, uint16_t *req_idx)
190 if (descriptor_has_next_packed(&vq->vring.desc_packed[*req_idx])) {
191 *req_idx = (*req_idx + 1) % vq->vring.size;
192 return &vq->vring.desc_packed[*req_idx];
199 vring_get_next_inflight_desc(struct vhost_blk_queue *vq,
203 return &vq->inflight_ring.inflight_packed->desc[desc->next];
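Lines 179-203 are the three "next descriptor" helpers, one per ring flavor: a split chain is a linked list threaded through desc->next, a packed chain is a run of adjacent slots ending at a descriptor without the NEXT flag, and an inflight chain links through the shadow copy kept in the inflight region. The first two can be sketched standalone (struct and macro names here are illustrative; the layouts follow the virtio spec):

#include <stddef.h>
#include <stdint.h>

#define F_NEXT 1u               /* VRING_DESC_F_NEXT */

struct split_desc  { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };
struct packed_desc { uint64_t addr; uint32_t len; uint16_t id; uint16_t flags; };

/* Split ring: follow the explicit next link. */
static struct split_desc *
next_split(struct split_desc *table, struct split_desc *d)
{
        return (d->flags & F_NEXT) ? &table[d->next] : NULL;
}

/* Packed ring: the next descriptor of a chain is simply the next
 * slot, modulo the ring size. */
static struct packed_desc *
next_packed(struct packed_desc *table, uint16_t size, uint16_t *idx)
{
        if (!(table[*idx].flags & F_NEXT))
                return NULL;
        *idx = (*idx + 1) % size;
        return &table[*idx];
}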
210 struct vhost_blk_queue *vq, uint16_t req_idx,
214 struct vring_desc *desc = &vq->vring.desc[req_idx];
233 desc = vring_get_next_desc(vq, desc);
241 struct vhost_blk_queue *vq, uint16_t req_idx,
245 struct vring_packed_desc *desc = &vq->vring.desc_packed[req_idx];
264 desc = vring_get_next_desc_packed(vq, &req_idx);
272 struct vhost_blk_queue *vq, uint16_t req_idx,
279 inflight_vq = &vq->inflight_ring;
299 desc = vring_get_next_inflight_desc(vq, desc);
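The three setup_iovs_from_* variants at lines 210-299 all do the same job over different chain encodings: walk the chain and turn each guest buffer into a host iovec for the eventual read or write. The fragments at lines 310-334 below then dispatch between them based on vq->packed_ring and on whether the request is an inflight replay. A sketch of the split-ring walk, reusing split_desc/next_split from the previous sketch; gpa_to_vva() is a hypothetical stand-in for the vhost library's guest-physical to host-virtual translation:

#include <sys/uio.h>

/* Hypothetical translation hook; the real example resolves guest
 * addresses through the vhost memory map. */
void *gpa_to_vva(uint64_t gpa, uint32_t len);

/* Fill one iovec per descriptor.  Returns the iovec count, or -1
 * if translation fails or the chain overruns the caller's array. */
static int
chain_to_iovs(struct split_desc *table, uint16_t head,
              struct iovec *iovs, int max)
{
        struct split_desc *d = &table[head];
        int n = 0;

        do {
                if (n == max)
                        return -1;
                iovs[n].iov_base = gpa_to_vva(d->addr, d->len);
                if (iovs[n].iov_base == NULL)
                        return -1;
                iovs[n].iov_len = d->len;
                n++;
        } while ((d = next_split(table, d)) != NULL);

        return n;
}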
310 if (task->vq->packed_ring) {
314 inflight_ring = &task->vq->inflight_ring;
319 if (setup_iovs_from_inflight_desc(task->ctrlr, task->vq,
326 if (setup_iovs_from_descs_packed(task->ctrlr, task->vq,
334 if (setup_iovs_from_descs_split(task->ctrlr, task->vq,
360 if (task->vq->packed_ring)
376 submit_inflight_vq(struct vhost_blk_queue *vq)
382 inflight_ring = &vq->inflight_ring;
398 if (vq->packed_ring) {
404 task = &vq->tasks[task_idx];
411 vq->last_avail_idx += desc[desc_idx].num;
412 if (vq->last_avail_idx >= vq->vring.size) {
413 vq->last_avail_idx -= vq->vring.size;
414 vq->avail_wrap_counter =
415 !vq->avail_wrap_counter;
421 task = &vq->tasks[desc_idx];
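submit_inflight_vq() (lines 376-421) handles reconnect: the shared inflight region remembers which descriptors had been fetched but not completed when the backend last went down, and the vhost library hands these back for resubmission. Each one is replayed, and for a packed ring last_avail_idx must additionally skip the slots each chain occupied, flipping the avail wrap counter on wrap-around, as lines 411-415 show. A conceptual sketch that flattens this to a plain scan; every field name here is invented:

#include <stdbool.h>
#include <stdint.h>

struct inflight_entry {         /* invented; stands in for the
                                 * library's inflight bookkeeping */
        uint8_t  inflight;      /* fetched but never completed */
        uint16_t num;           /* descriptors in the chain */
};

static void
resubmit_inflight(struct inflight_entry *tbl, uint16_t size,
                  uint16_t *last_avail, bool *wrap,
                  void (*submit)(uint16_t slot))
{
        for (uint16_t i = 0; i < size; i++) {
                if (!tbl[i].inflight)
                        continue;
                /* packed ring: jump over the whole chain */
                *last_avail += tbl[i].num;
                if (*last_avail >= size) {
                        *last_avail -= size;
                        *wrap = !*wrap;
                }
                submit(i);
        }
}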
433 vhost_blk_vq_get_desc_chain_buffer_id(struct vhost_blk_queue *vq,
436 struct vring_packed_desc *desc = &vq->vring.desc_packed[
437 vq->last_avail_idx];
439 *req_head = vq->last_avail_idx;
443 vq->last_avail_idx = (vq->last_avail_idx + 1) % vq->vring.size;
444 desc = &vq->vring.desc_packed[vq->last_avail_idx];
449 vq->last_avail_idx = (vq->last_avail_idx + 1) % vq->vring.size;
450 if (vq->last_avail_idx < *req_head)
451 vq->avail_wrap_counter = !vq->avail_wrap_counter;
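vhost_blk_vq_get_desc_chain_buffer_id() (lines 433-451) consumes one chain from the packed avail ring: it records the head slot and chain length, advances last_avail_idx past every descriptor, and returns the buffer id, which packed rings carry in the last descriptor of a chain. The wrap test on line 450 works because the index can only become smaller than the head by wrapping through zero. A sketch reusing packed_desc/F_NEXT from above:

static uint16_t
pop_chain(struct packed_desc *table, uint16_t size,
          uint16_t *last_avail, bool *wrap,
          uint16_t *head, uint16_t *chain_num)
{
        struct packed_desc *d = &table[*last_avail];

        *head = *last_avail;
        *chain_num = 1;
        while (d->flags & F_NEXT) {
                *last_avail = (*last_avail + 1) % size;
                d = &table[*last_avail];
                (*chain_num)++;
        }
        *last_avail = (*last_avail + 1) % size;  /* step past the tail */
        if (*last_avail < *head)                 /* wrapped through 0 */
                *wrap = !*wrap;
        return d->id;   /* buffer id lives in the tail descriptor */
}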
457 vq_get_desc_idx(struct vhost_blk_queue *vq)
462 last_avail_idx = vq->last_avail_idx & (vq->vring.size - 1);
463 desc_idx = vq->vring.avail->ring[last_avail_idx];
464 vq->last_avail_idx++;
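vq_get_desc_idx() (lines 457-464) is the split-ring equivalent: the avail ring is a queue of chain-head indices published by the guest, and last_avail_idx is the backend's private, free-running read cursor, masked on each access:

static uint16_t
pop_avail(const uint16_t *avail_ring, uint16_t size, uint16_t *last_avail)
{
        uint16_t head = avail_ring[*last_avail & (size - 1)];

        (*last_avail)++;        /* free-running; only masked on use */
        return head;
}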
470 vhost_blk_vq_is_avail(struct vhost_blk_queue *vq)
472 if (vq->packed_ring) {
473 uint16_t flags = vq->vring.desc_packed[
474 vq->last_avail_idx].flags;
475 bool avail_wrap_counter = vq->avail_wrap_counter;
480 if (vq->vring.avail->idx != vq->last_avail_idx)
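vhost_blk_vq_is_avail() (lines 470-480) holds the two availability tests: a packed descriptor is available when its AVAIL flag bit equals the driver's wrap counter and its USED bit does not, while on a split ring it suffices to compare the guest's published avail->idx with the backend cursor. The packed test, with the flag macros from earlier:

static bool
packed_desc_avail(uint16_t flags, bool wrap)
{
        return !!(flags & F_AVAIL) == wrap &&
               !!(flags & F_USED)  != wrap;
}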
488 process_vq(struct vhost_blk_queue *vq)
492 if (vq->packed_ring) {
493 while (vhost_blk_vq_is_avail(vq)) {
496 task_idx = vhost_blk_vq_get_desc_chain_buffer_id(vq,
498 task = &vq->tasks[task_idx];
504 last_idx = (req_idx + chain_num - 1) % vq->vring.size;
507 vq->id,
515 while (vhost_blk_vq_is_avail(vq)) {
518 desc_idx = vq_get_desc_idx(vq);
519 task = &vq->tasks[desc_idx];
523 vq->id,
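process_vq() (lines 488-523) is the poll loop tying the pieces together: while descriptors are available, pop the next chain (packed) or head index (split), look up the pre-allocated task for that id, and service it. Composed from the sketches above:

static void
drain_packed(struct packed_desc *table, uint16_t size,
             uint16_t *last_avail, bool *wrap,
             void (*handle)(uint16_t head, uint16_t num, uint16_t id))
{
        while (packed_desc_avail(table[*last_avail].flags, *wrap)) {
                uint16_t head, num;
                uint16_t id = pop_chain(table, size, last_avail, wrap,
                                        &head, &num);
                handle(head, num, id);
        }
}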
560 struct vhost_blk_queue *vq;
564 vq = &ctrlr->queues[i];
566 vq->tasks = rte_zmalloc(NULL,
567 sizeof(struct vhost_blk_task) * vq->vring.size, 0);
568 if (!vq->tasks) {
573 for (j = 0; j < vq->vring.size; j++) {
574 vq->tasks[j].req_idx = j;
575 vq->tasks[j].ctrlr = ctrlr;
576 vq->tasks[j].vq = vq;
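The allocation loop at lines 560-576 builds one task per ring slot up front, so the hot path never allocates: the slot index doubles as the request id, giving O(1) task lookup from the id returned by the avail-side helpers. A standalone version, with plain calloc() standing in for the example's rte_zmalloc() and the task struct trimmed to the fields the listing shows:

#include <stdint.h>
#include <stdlib.h>

struct task { uint16_t req_idx; void *ctrlr; void *vq; };

static struct task *
alloc_tasks(uint16_t ring_size, void *ctrlr, void *vq)
{
        struct task *t = calloc(ring_size, sizeof(*t));

        if (t == NULL)
                return NULL;
        for (uint16_t i = 0; i < ring_size; i++) {
                t[i].req_idx = i;       /* slot index == request id */
                t[i].ctrlr = ctrlr;
                t[i].vq = vq;
        }
        return t;
}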
596 struct vhost_blk_queue *vq;
638 vq = &ctrlr->queues[i];
639 vq->id = i;
642 &vq->vring) == 0);
644 &vq->last_avail_idx,
645 &vq->last_used_idx) == 0);
650 &vq->inflight_ring) == 0);
656 &vq->last_avail_idx,
657 &vq->last_used_idx) == 0);
659 vq->avail_wrap_counter = vq->last_avail_idx &
661 vq->last_avail_idx = vq->last_avail_idx &
663 vq->used_wrap_counter = vq->last_used_idx &
665 vq->last_used_idx = vq->last_used_idx &
669 vq->packed_ring = packed_ring;
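Queue start (lines 596-669) fetches the vring, the inflight region, and the saved ring base from the vhost library. For a packed ring the vhost-user protocol folds each wrap counter into bit 15 of the corresponding 16-bit index, which is what the masking at lines 659-665 unpacks. As a sketch:

/* Split a transported ring base into index and wrap counter; for
 * packed rings vhost-user carries the counter in bit 15. */
static void
decode_base(uint16_t base, uint16_t *idx, bool *wrap)
{
        *wrap = (base & (1u << 15)) != 0;
        *idx  = base & 0x7fff;
}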
697 struct vhost_blk_queue *vq;
720 vq = &ctrlr->queues[i];
721 if (vq->packed_ring) {
722 vq->last_avail_idx |= (vq->avail_wrap_counter <<
724 vq->last_used_idx |= (vq->used_wrap_counter <<
729 vq->last_avail_idx,
730 vq->last_used_idx);
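On stop (lines 697-730) the transformation runs in reverse: the wrap counters are folded back into bit 15 of last_avail_idx/last_used_idx before the bases are handed back to the vhost library, so a later restart can decode them again:

static uint16_t
encode_base(uint16_t idx, bool wrap)
{
        return (uint16_t)(idx | ((uint16_t)wrap << 15));
}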