Lines Matching defs:iq
59 struct lio_instr_queue *iq;
76 iq = oct->instr_queue[iq_no];
77 iq->oct_dev = oct;
92 &iq->txtag);
99 iq->base_addr = lio_dma_alloc(q_size, (vm_paddr_t *)&iq->base_addr_dma);
100 if (!iq->base_addr) {
106 iq->max_count = num_descs;
112 iq->request_list = malloc(sizeof(*iq->request_list) * num_descs,
114 if (iq->request_list == NULL) {
121 iq_no, iq->base_addr, LIO_CAST64(iq->base_addr_dma),
122 iq->max_count);
125 request_buf = iq->request_list;
127 error = bus_dmamap_create(iq->txtag, 0, &request_buf->map);
134 iq->txpciq.txpciq64 = txpciq.txpciq64;
135 iq->fill_cnt = 0;
136 iq->host_write_index = 0;
137 iq->octeon_read_index = 0;
138 iq->flush_index = 0;
139 iq->last_db_time = 0;
140 iq->db_timeout = (uint32_t)conf->db_timeout;
141 atomic_store_rel_int(&iq->instr_pending, 0);
144 mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF);
145 mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF);
146 mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF);
148 mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL,
151 oct->io_qmask.iq |= BIT_ULL(iq_no);
155 iq->iqcmd_64B = (conf->instr_type == 64);
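The matched lines above come from the instruction-queue setup path: the ring's DMA backing store is allocated, the per-descriptor request_list and DMA maps are created, the read/write/flush indices are zeroed, and the command width (64 bytes when conf->instr_type == 64) is recorded in iqcmd_64B. A minimal userspace sketch of how the ring size and a slot offset follow from those fields; the names and sizes here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t num_descs = 512;	/* example descriptor count */
	uint32_t cmdsize = 64;		/* iqcmd_64B path; 32 otherwise */
	size_t q_size = (size_t)cmdsize * num_descs;
	uint32_t write_index = 7;	/* stands in for host_write_index */

	printf("ring bytes: %zu, slot %u starts at offset %zu\n",
	    q_size, write_index, (size_t)cmdsize * write_index);
	return (0);
}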
184 struct lio_instr_queue *iq = oct->instr_queue[iq_no];
206 request_buf = iq->request_list;
207 for (i = 0; i < iq->max_count; i++, request_buf++) {
212 bus_dmamap_sync(iq->txtag, request_buf->map,
214 bus_dmamap_unload(iq->txtag,
219 bus_dmamap_destroy(iq->txtag,
224 bus_dmamap_unload(iq->txtag, request_buf->map);
225 bus_dmamap_destroy(iq->txtag, request_buf->map);
231 if (iq->br != NULL) {
232 buf_ring_free(iq->br, M_DEVBUF);
233 iq->br = NULL;
236 if (iq->request_list != NULL) {
237 free(iq->request_list, M_DEVBUF);
238 iq->request_list = NULL;
241 if (iq->txtag != NULL) {
242 bus_dma_tag_destroy(iq->txtag);
243 iq->txtag = NULL;
246 if (iq->base_addr) {
247 q_size = iq->max_count * desc_size;
248 lio_dma_free((uint32_t)q_size, iq->base_addr);
250 oct->io_qmask.iq &= ~(1ULL << iq_no);
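Setup ends by setting the queue's bit in oct->io_qmask.iq with BIT_ULL(iq_no), and teardown clears it again with ~(1ULL << iq_no); later scans skip queues whose bit is not set. A small, purely illustrative sketch of that bitmask bookkeeping:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint64_t qmask = 0;
	unsigned iq_no = 3;

	qmask |= 1ULL << iq_no;		/* queue brought up */
	printf("mask after setup:    %#" PRIx64 "\n", qmask);
	qmask &= ~(1ULL << iq_no);	/* queue torn down */
	printf("mask after teardown: %#" PRIx64 "\n", qmask);
	return (0);
}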
298 if (!(oct->io_qmask.iq & BIT_ULL(i)))
318 lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq)
322 lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt);
325 iq->fill_cnt = 0;
326 iq->last_db_time = ticks;
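lio_ring_doorbell writes the number of commands filled since the last ring (fill_cnt) to the queue's doorbell register, resets the counter, and timestamps the ring in last_db_time so the timeout path further down can ring again if commands sit too long. A toy model of that batched-doorbell bookkeeping, with a printf standing in for the CSR write:

#include <stdint.h>
#include <stdio.h>

static uint32_t fill_cnt;
static long last_db_time;

static void
ring_doorbell(long now)
{
	if (fill_cnt == 0)
		return;
	/* One register write covers every command queued since the last ring. */
	printf("doorbell: %u new commands at tick %ld\n", fill_cnt, now);
	fill_cnt = 0;
	last_db_time = now;
}

int
main(void)
{
	fill_cnt = 4;			/* four commands queued since the last ring */
	ring_doorbell(100);
	printf("last_db_time = %ld\n", last_db_time);
	return (0);
}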
332 __lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
336 cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
337 iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
343 __lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
353 if (atomic_load_acq_int(&iq->instr_pending) >=
354 (int32_t)(iq->max_count - 1)) {
360 if (atomic_load_acq_int(&iq->instr_pending) >=
361 (int32_t)(iq->max_count - 2))
364 __lio_copy_cmd_into_iq(iq, cmd);
367 st.index = iq->host_write_index;
368 iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
369 iq->max_count);
370 iq->fill_cnt++;
378 atomic_add_int(&iq->instr_pending, 1);
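__lio_post_command2 treats the ring as full one descriptor early (instr_pending >= max_count - 1), copies the command into the slot at host_write_index, then advances the index with a modular increment and bumps fill_cnt and instr_pending. A self-contained sketch of that post-side bookkeeping; lio_incr_index is assumed here to be a plain modulo step, and the sizes are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_COUNT	8
#define CMDSIZE		64

static uint8_t ring[MAX_COUNT][CMDSIZE];
static uint32_t host_write_index, fill_cnt;
static int32_t instr_pending;

static int
post_command(const uint8_t *cmd)
{
	if (instr_pending >= MAX_COUNT - 1)
		return (-1);			/* ring considered full */
	memcpy(ring[host_write_index], cmd, CMDSIZE);
	host_write_index = (host_write_index + 1) % MAX_COUNT;
	fill_cnt++;
	instr_pending++;
	return (0);
}

int
main(void)
{
	uint8_t cmd[CMDSIZE] = { 0 };
	int i, posted = 0;

	for (i = 0; i < MAX_COUNT; i++)
		if (post_command(cmd) == 0)
			posted++;
	printf("posted %d of %d attempts, fill_cnt %u\n",
	    posted, MAX_COUNT, fill_cnt);
	return (0);
}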
384 __lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf,
388 iq->request_list[idx].buf = buf;
389 iq->request_list[idx].reqtype = reqtype;
395 struct lio_instr_queue *iq, uint32_t budget)
401 uint32_t old = iq->flush_index;
404 while (old != iq->octeon_read_index) {
405 reqtype = iq->request_list[old].reqtype;
406 buf = iq->request_list[old].buf;
413 lio_free_mbuf(iq, buf);
416 lio_free_sgmbuf(iq, buf);
456 iq->request_list[old].buf = NULL;
457 iq->request_list[old].reqtype = 0;
461 old = lio_incr_index(old, 1, iq->max_count);
467 iq->flush_index = old;
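The completion sweep walks request_list from flush_index up to the device's read pointer (octeon_read_index), frees each buffer according to its reqtype, clears the slot, and finally advances flush_index. A simplified version of that loop, with a hypothetical free_entry() in place of the per-reqtype free routines:

#include <stdint.h>
#include <stdio.h>

#define MAX_COUNT	8

struct req { void *buf; int reqtype; };
static struct req request_list[MAX_COUNT];
static uint32_t flush_index, octeon_read_index;

static void
free_entry(struct req *r)
{
	printf("retiring reqtype %d\n", r->reqtype);
	r->buf = NULL;
	r->reqtype = 0;
}

static uint32_t
process_request_list(void)
{
	uint32_t old = flush_index, done = 0;

	while (old != octeon_read_index) {
		free_entry(&request_list[old]);
		old = (old + 1) % MAX_COUNT;
		done++;
	}
	flush_index = old;
	return (done);
}

int
main(void)
{
	request_list[0].reqtype = 1;	/* e.g. a no-response mbuf */
	request_list[1].reqtype = 2;	/* e.g. a scatter-gather mbuf */
	request_list[2].reqtype = 1;
	octeon_read_index = 3;		/* device has consumed slots 0..2 */
	printf("processed %u entries\n", process_request_list());
	return (0);
}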
474 lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq,
481 if (!mtx_trylock(&iq->iq_flush_running_lock))
484 mtx_lock(&iq->lock);
486 iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
490 if (iq->flush_index == iq->octeon_read_index)
495 lio_process_iq_request_list(oct, iq,
500 lio_process_iq_request_list(oct, iq, 0);
503 atomic_subtract_int(&iq->instr_pending, inst_processed);
504 iq->stats.instr_processed += inst_processed;
514 iq->last_db_time = ticks;
516 mtx_unlock(&iq->lock);
518 mtx_unlock(&iq->iq_flush_running_lock);
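lio_flush_iq takes iq_flush_running_lock with a trylock, so a caller arriving while a flush is already in progress simply backs off instead of blocking; only then does it take iq->lock, refresh octeon_read_index via fn_list.update_iq_read_idx, retire completed entries, and subtract them from instr_pending. The trylock-and-bail pattern in a pthread sketch (hypothetical names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t flush_running = PTHREAD_MUTEX_INITIALIZER;

static int
flush_queue(void)
{
	if (pthread_mutex_trylock(&flush_running) != 0)
		return (0);		/* another thread is flushing; skip */
	/* ... walk the ring and retire completed commands ... */
	pthread_mutex_unlock(&flush_running);
	return (1);
}

int
main(void)
{
	printf("flushed: %d\n", flush_queue());
	return (0);
}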
530 struct lio_instr_queue *iq;
536 iq = oct->instr_queue[iq_no];
537 if (iq == NULL)
540 if (atomic_load_acq_int(&iq->instr_pending)) {
542 next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout);
546 iq->last_db_time = ticks;
549 lio_flush_iq(oct, iq, 0);
551 lio_enable_irq(NULL, iq);
554 if (oct->props.ifp != NULL && iq->br != NULL) {
555 if (mtx_trylock(&iq->enq_lock)) {
556 if (!drbr_empty(oct->props.ifp, iq->br))
557 lio_mq_start_locked(oct->props.ifp, iq);
559 mtx_unlock(&iq->enq_lock);
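The timeout path only acts when commands are pending and the current tick count has moved past last_db_time plus the millisecond timeout converted to ticks; it then refreshes last_db_time, flushes the queue via lio_flush_iq, and restarts transmission if the buf_ring still holds packets. A sketch of just the deadline arithmetic; ms_to_ticks() is an illustrative stand-in for lio_ms_to_ticks():

#include <stdio.h>

static long
ms_to_ticks(long ms)
{
	const long hz = 1000;		/* assume 1000 scheduler ticks per second */
	return (ms * hz / 1000);
}

int
main(void)
{
	long last_db_time = 5000, db_timeout = 16, now = 5020;
	long next_time = last_db_time + ms_to_ticks(db_timeout);

	printf("doorbell timeout expired: %s\n", now > next_time ? "yes" : "no");
	return (0);
}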
587 struct lio_instr_queue *iq = oct->instr_queue[iq_no];
593 mtx_lock(&iq->post_lock);
595 st = __lio_post_command2(iq, cmd);
598 __lio_add_to_request_list(iq, st.index, buf, reqtype);
603 lio_ring_doorbell(oct, iq);
608 mtx_unlock(&iq->post_lock);