Lines Matching defs:que
393 struct ena_que *que;
410 que = &adapter->que[i];
411 que->adapter = adapter;
412 que->id = i;
413 que->tx_ring = txr;
414 que->rx_ring = rxr;
416 txr->que = que;
417 rxr->que = que;
639 struct ena_que *que = &adapter->que[qid];
640 struct ena_ring *tx_ring = que->tx_ring;
725 cpu_mask = &que->cpu_mask;
727 device_get_nameunit(adapter->pdev), que->cpu);
730 device_get_nameunit(adapter->pdev), que->id);
872 struct ena_que *que = &adapter->que[qid];
873 struct ena_ring *rx_ring = que->rx_ring;
1572 queue = &adapter->que[i];
1603 ctx.numa_node = adapter->que[i].domain;
1638 ctx.numa_node = adapter->que[i].domain;
1666 queue = &adapter->que[i];
1838 adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1845 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1848 CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
1852 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1855 CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
1858 if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx]))
1861 adapter->que[i].domain = idx;
1863 adapter->que[i].domain = -1;
3254 cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending));
3357 taskqueue_enqueue(rx_ring->que->cleanup_tq,
3358 &rx_ring->que->cleanup_task);
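
Read together, the matches sketch the role of struct ena_que in the ena driver: one instance per I/O queue that ties the adapter, a TX/RX ring pair, the interrupt CPU/NUMA binding, and a deferred cleanup task to a single queue id. The declaration below is a minimal sketch of that layout, with field types inferred from how each member is used in the matches (CPU_SETOF() implies a cpuset_t, taskqueue_enqueue() implies a struct task plus a struct taskqueue pointer); the real definition in the driver header may name, type, or order members differently.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/taskqueue.h>

struct ena_adapter;                     /* defined elsewhere in the driver */
struct ena_ring;

struct ena_que {
        struct ena_adapter *adapter;    /* owning adapter (line 411) */
        struct ena_ring *tx_ring;       /* paired TX ring (line 413) */
        struct ena_ring *rx_ring;       /* paired RX ring (line 414) */

        uint32_t id;                    /* queue index (line 412) */
        int cpu;                        /* CPU the queue IRQ is bound to (line 1845) */
        cpuset_t cpu_mask;              /* single-CPU mask built with CPU_SETOF (line 1848) */
        int domain;                     /* NUMA domain, -1 when unknown (lines 1861/1863) */

        struct taskqueue *cleanup_tq;   /* per-queue cleanup taskqueue (line 3357) */
        struct task cleanup_task;       /* deferred TX/RX cleanup work (line 3358) */
};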
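
The last matches show how that task is driven: the transmit path checks ta_pending to see whether a cleanup pass is already scheduled (line 3254), and the interrupt path enqueues the task on the queue's taskqueue (lines 3357-3358). A hedged sketch of the enqueue side, using a hypothetical helper name (ena_que_schedule_cleanup does not appear in the matches):

/*
 * Hypothetical wrapper modelled on lines 3254 and 3357-3358: only
 * enqueue the per-queue cleanup task when it is not already pending.
 */
static void
ena_que_schedule_cleanup(struct ena_que *que)
{
        /* ta_pending is the pending counter inside struct task. */
        if (atomic_load_16(&que->cleanup_task.ta_pending) == 0)
                taskqueue_enqueue(que->cleanup_tq, &que->cleanup_task);
}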