Lines Matching defs:queue
246 * Otherwise the queue overflow detection mechanism does
341 struct gdma_queue *queue)
347 if (queue->type != GDMA_EQ)
353 req.hdr.dev_id = queue->gdma_dev->dev_id;
354 req.type = queue->type;
355 req.pdid = queue->gdma_dev->pdid;
356 req.doolbell_id = queue->gdma_dev->doorbell;
357 req.gdma_region = queue->mem_info.dma_region_handle;
358 req.queue_size = queue->queue_size;
359 req.log2_throttle_limit = queue->eq.log2_throttle_limit;
360 req.eq_pci_msix_index = queue->eq.msix_index;
365 "Failed to create queue: %d, 0x%x\n",
370 queue->id = resp.queue_index;
371 queue->eq.disable_needed = true;
372 queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
377 int mana_gd_disable_queue(struct gdma_queue *queue)
379 struct gdma_context *gc = queue->gdma_dev->gdma_context;
384 if (queue->type != GDMA_EQ)
385 mana_warn(NULL, "Not event queue type 0x%x\n",
386 queue->type);
391 req.hdr.dev_id = queue->gdma_dev->dev_id;
392 req.type = queue->type;
393 req.queue_index = queue->id;
399 "Failed to disable queue: %d, 0x%x\n", err,
454 mana_warn(NULL, "Invalid queue type 0x%x\n", q_type);
471 mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
473 mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
474 queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
629 mana_gd_register_irq(struct gdma_queue *queue,
632 struct gdma_dev *gd = queue->gdma_dev;
649 queue->eq.msix_index = msi_index;
673 gic->arg = queue;
684 mana_gd_deregiser_irq(struct gdma_queue *queue)
686 struct gdma_dev *gd = queue->gdma_dev;
696 msix_index = queue->eq.msix_index;
708 queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
749 device_printf(dev, "test_eq timed out on queue %d\n",
756 "test_eq got an event on wrong queue %d (%d)\n",
769 struct gdma_queue *queue)
774 err = mana_gd_test_eq(gc, queue);
780 mana_gd_deregiser_irq(queue);
782 if (queue->eq.disable_needed)
783 mana_gd_disable_queue(queue);
788 bool create_hwq, struct gdma_queue *queue)
795 queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
797 log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
806 err = mana_gd_register_irq(queue, spec);
812 queue->eq.callback = spec->eq.callback;
813 queue->eq.context = spec->eq.context;
814 queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
815 queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
818 err = mana_gd_create_hw_eq(gc, queue);
822 err = mana_gd_test_eq(gc, queue);
830 mana_gd_destroy_eq(gc, false, queue);
836 struct gdma_queue *queue)
840 queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
841 queue->cq.parent = spec->cq.parent_eq;
842 queue->cq.context = spec->cq.context;
843 queue->cq.callback = spec->cq.callback;
848 struct gdma_queue *queue)
850 uint32_t id = queue->id;
867 struct gdma_queue *queue;
870 queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
871 gmi = &queue->mem_info;
876 queue->head = 0;
877 queue->tail = 0;
878 queue->queue_mem_ptr = gmi->virt_addr;
879 queue->queue_size = spec->queue_size;
880 queue->monitor_avl_buf = spec->monitor_avl_buf;
881 queue->type = spec->type;
882 queue->gdma_dev = gd;
885 err = mana_gd_create_eq(gd, spec, false, queue);
887 mana_gd_create_cq(spec, queue);
892 *queue_ptr = queue;
897 free(queue, M_DEVBUF);
998 struct gdma_queue *queue;
1004 queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
1005 gmi = &queue->mem_info;
1014 queue->head = 0;
1015 queue->tail = 0;
1016 queue->queue_mem_ptr = gmi->virt_addr;
1017 queue->queue_size = spec->queue_size;
1018 queue->monitor_avl_buf = spec->monitor_avl_buf;
1019 queue->type = spec->type;
1020 queue->gdma_dev = gd;
1022 err = mana_gd_create_eq(gd, spec, true, queue);
1026 *queue_ptr = queue;
1032 free(queue, M_DEVBUF);
1042 struct gdma_queue *queue;
1049 queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
1050 gmi = &queue->mem_info;
1059 queue->head = 0;
1060 queue->tail = 0;
1061 queue->queue_mem_ptr = gmi->virt_addr;
1062 queue->queue_size = spec->queue_size;
1063 queue->monitor_avl_buf = spec->monitor_avl_buf;
1064 queue->type = spec->type;
1065 queue->gdma_dev = gd;
1068 mana_gd_create_cq(spec, queue);
1070 *queue_ptr = queue;
1076 free(queue, M_DEVBUF);
1081 mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
1083 struct gdma_mem_info *gmi = &queue->mem_info;
1085 switch (queue->type) {
1087 mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
1091 mana_gd_destroy_cq(gc, queue);
1102 "Can't destroy unknown queue: type = %d\n",
1103 queue->type);
1109 free(queue, M_DEVBUF);
1222 mana_warn(NULL, "failed: used space %u > queue size %u\n",
1236 mana_warn(NULL, "failed: write end out of queue bound %u, "
1237 "queue size %u\n",
1276 * the queue memory buffer boundary.
1377 mana_gd_post_and_ring(struct gdma_queue *queue,
1381 struct gdma_context *gc = queue->gdma_dev->gdma_context;
1384 err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
1388 mana_gd_wq_ring_doorbell(gc, queue);
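
The matches above cover the event-queue path end to end: spec-driven allocation, IRQ registration, hardware EQ creation, the self-test, and teardown via mana_gd_destroy_queue(). As a rough caller-side orientation, here is a minimal C sketch. The gdma_queue_spec fields mirror the assignments shown at lines 812-815 and 879-882 of the listing; the wrapper name mana_gd_create_mana_eq(), the EQ_SIZE constant and the handler name are assumptions that do not appear in this listing.

    /*
     * Sketch only: how the EQ helpers indexed above are typically driven
     * by a caller.  Names marked "assumed" are not shown in the listing.
     */
    static void
    example_eq_handler(void *context, struct gdma_queue *eq,
        struct gdma_eqe *eqe)
    {
    	/* Hypothetical completion handler; a real one dispatches on the EQE type. */
    }

    static int
    example_setup_eq(struct gdma_dev *gd, struct gdma_queue **eq_out)
    {
    	struct gdma_queue_spec spec = {};

    	spec.type = GDMA_EQ;			/* non-EQ types are rejected (line 347) */
    	spec.monitor_avl_buf = false;
    	spec.queue_size = EQ_SIZE;		/* assumed size constant */
    	spec.eq.callback = example_eq_handler;
    	spec.eq.context = gd;
    	spec.eq.log2_throttle_limit = 1;	/* same default as line 815 */

    	/*
    	 * Assumed wrapper: allocates the queue memory, registers the
    	 * MSI-X vector, creates the hardware EQ and runs
    	 * mana_gd_test_eq() before returning (lines 998-1032).
    	 */
    	return (mana_gd_create_mana_eq(gd, &spec, eq_out));
    }

    static void
    example_teardown_eq(struct gdma_context *gc, struct gdma_queue *eq)
    {
    	/*
    	 * Disables the HW EQ when eq.disable_needed is set, deregisters
    	 * the IRQ and frees the queue memory (lines 1081-1109).
    	 */
    	mana_gd_destroy_queue(gc, eq);
    }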
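
The tail of the listing (lines 1377-1388) is the post-and-ring path: mana_gd_post_work_request() copies the WQE into the queue buffer, wrapping at the queue memory boundary (line 1276), and mana_gd_wq_ring_doorbell() then advertises the new head to the device. A hedged sketch of a single-SGE submission follows; the gdma_wqe_request, gdma_posted_wqe_info and gdma_sge layouts are assumed from the driver's gdma.h and are not part of this listing.

    /*
     * Sketch only: posting one scatter/gather entry through
     * mana_gd_post_and_ring().  Structure layouts are assumptions.
     */
    static int
    example_post_one_sge(struct gdma_queue *wq, uint64_t dma_addr,
        uint32_t len, uint32_t mem_key)
    {
    	struct gdma_posted_wqe_info wqe_info = {};
    	struct gdma_wqe_request wqe_req = {};
    	struct gdma_sge sge = {};

    	sge.address = dma_addr;		/* DMA address of the buffer */
    	sge.mem_key = mem_key;		/* memory key for the region */
    	sge.size = len;

    	wqe_req.sgl = &sge;
    	wqe_req.num_sge = 1;

    	/*
    	 * Posts the work request and rings the doorbell with
    	 * queue->head * GDMA_WQE_BU_SIZE (lines 473-474, 1384-1388).
    	 */
    	return (mana_gd_post_and_ring(wq, &wqe_req, &wqe_info));
    }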