/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifndef _NITROX_QP_H_
#define _NITROX_QP_H_

#include <stdbool.h>

#include <rte_io.h>
#include "nitrox_hal.h"

struct nitrox_softreq;

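/*
 * Hardware queue types, named after the NITROX engine the ring feeds:
 * SE (symmetric crypto), AE (asymmetric crypto) and ZIP (compression).
 */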
enum nitrox_queue_type {
	NITROX_QUEUE_SE,
	NITROX_QUEUE_AE,
	NITROX_QUEUE_ZIP,
};

struct command_queue {
	const struct rte_memzone *mz;
	uint8_t *dbell_csr_addr;
	uint8_t *ring;
	uint8_t instr_size;
};

struct rid {
	struct nitrox_softreq *sr;
};

struct nitrox_qp_stats {
	uint64_t enqueued_count;
	uint64_t dequeued_count;
	uint64_t enqueue_err_count;
	uint64_t dequeue_err_count;
};

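/*
 * Queue pair state. cmdq.ring holds count fixed-size instructions; head is
 * the producer index and tail the consumer index, both free-running and
 * reduced modulo count on access. ridq maps each ring slot back to its
 * software request so completions can be matched to the requests that
 * produced them, and pending_count tracks the number of in-flight requests.
 */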
struct nitrox_qp {
	enum nitrox_queue_type type;
	uint8_t *bar_addr;
	struct command_queue cmdq;
	struct rid *ridq;
	uint32_t count;
	uint32_t head;
	uint32_t tail;
	struct rte_mempool *sr_mp;
	struct nitrox_qp_stats stats;
	uint16_t qno;
	RTE_ATOMIC(uint16_t) pending_count;
};

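/*
 * Ring occupancy helpers. All three read pending_count with relaxed
 * ordering; visibility of the ring contents themselves is ordered by the
 * fences in the enqueue paths and in nitrox_qp_get_softreq().
 */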
static inline uint16_t
nitrox_qp_free_count(struct nitrox_qp *qp)
{
	uint16_t pending_count;

	pending_count = rte_atomic_load_explicit(&qp->pending_count,
						 rte_memory_order_relaxed);
	RTE_ASSERT(qp->count >= pending_count);
	return (qp->count - pending_count);
}

static inline bool
nitrox_qp_is_empty(struct nitrox_qp *qp)
{
	return (rte_atomic_load_explicit(&qp->pending_count,
					 rte_memory_order_relaxed) == 0);
}

static inline uint16_t
nitrox_qp_used_count(struct nitrox_qp *qp)
{
	return rte_atomic_load_explicit(&qp->pending_count,
					rte_memory_order_relaxed);
}

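/*
 * Peek the software request at the consumer index. The acquire fence pairs
 * with the release fence on the enqueue side so the ridq entry written by
 * the producer is fully visible before it is dereferenced.
 */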
static inline struct nitrox_softreq *
nitrox_qp_get_softreq(struct nitrox_qp *qp)
{
	uint32_t tail = qp->tail % qp->count;

	rte_atomic_thread_fence(rte_memory_order_acquire);
	return qp->ridq[tail].sr;
}

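/*
 * Notify the device that cnt new instructions are available. The I/O write
 * barrier orders the instruction writes in the ring ahead of the doorbell
 * CSR write that triggers the fetch.
 */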
static inline void
nitrox_ring_dbell(struct nitrox_qp *qp, uint16_t cnt)
{
	struct command_queue *cmdq = &qp->cmdq;

	if (!cnt)
		return;

	rte_io_wmb();
	rte_write64(cnt, cmdq->dbell_csr_addr);
}

static inline void
nitrox_qp_enqueue(struct nitrox_qp *qp, void *instr, struct nitrox_softreq *sr)
{
	uint32_t head = qp->head % qp->count;

	qp->head++;
	memcpy(&qp->cmdq.ring[head * qp->cmdq.instr_size],
	       instr, qp->cmdq.instr_size);
	qp->ridq[head].sr = sr;
	rte_atomic_thread_fence(rte_memory_order_release);
	rte_atomic_fetch_add_explicit(&qp->pending_count, 1,
				      rte_memory_order_relaxed);
}
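
/*
 * Typical producer flow, as a minimal sketch: the doorbell is decoupled
 * from nitrox_qp_enqueue() so a whole burst can be posted with one CSR
 * write. nr_ops, instrs[] and srs[] are illustrative caller-side names,
 * not part of this API:
 *
 *	cnt = RTE_MIN(nr_ops, nitrox_qp_free_count(qp));
 *	for (i = 0; i < cnt; i++)
 *		nitrox_qp_enqueue(qp, instrs[i], srs[i]);
 *	nitrox_ring_dbell(qp, cnt);
 */
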
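/*
 * Enqueue variant for queues driven through the ZQMQ registers (e.g. the
 * ZIP queues): the hardware's next-command pointer is advanced first via
 * inc_zqmq_next_cmd(), and the software queue state is updated only if
 * that succeeds.
 */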
static inline int
nitrox_qp_enqueue_sr(struct nitrox_qp *qp, struct nitrox_softreq *sr)
{
	uint32_t head = qp->head % qp->count;
	int err;

	err = inc_zqmq_next_cmd(qp->bar_addr, qp->qno);
	if (unlikely(err))
		return err;

	qp->head++;
	qp->ridq[head].sr = sr;
	rte_atomic_thread_fence(rte_memory_order_release);
	rte_atomic_fetch_add_explicit(&qp->pending_count, 1,
				      rte_memory_order_relaxed);
	return 0;
}

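/*
 * Retire the request at the consumer index: advance tail and drop
 * pending_count. The completed request must be fetched with
 * nitrox_qp_get_softreq() before the slot is released here.
 */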
static inline void
nitrox_qp_dequeue(struct nitrox_qp *qp)
{
	qp->tail++;
	rte_atomic_fetch_sub_explicit(&qp->pending_count, 1,
				      rte_memory_order_relaxed);
}

__rte_internal
int nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr,
		    const char *dev_name, uint32_t nb_descriptors,
		    uint8_t inst_size, int socket_id);
__rte_internal
int nitrox_qp_release(struct nitrox_qp *qp, uint8_t *bar_addr);

#endif /* _NITROX_QP_H_ */