/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifndef _NITROX_QP_H_
#define _NITROX_QP_H_

#include <stdbool.h>

#include <rte_io.h>
#include "nitrox_hal.h"

struct nitrox_softreq;
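
/* Hardware ring types a queue pair can be bound to: the symmetric
 * crypto engine (SE), asymmetric crypto engine (AE) and compression
 * (ZIP) queues.
 */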
enum nitrox_queue_type {
	NITROX_QUEUE_SE,
	NITROX_QUEUE_AE,
	NITROX_QUEUE_ZIP,
};
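
/* Software view of one hardware command ring: the memzone backing the
 * ring memory, the doorbell CSR used to notify the device, the ring
 * base address and the fixed size in bytes of each instruction slot.
 */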
struct command_queue {
	const struct rte_memzone *mz;
	uint8_t *dbell_csr_addr;
	uint8_t *ring;
	uint8_t instr_size;
};
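
/* Per-slot bookkeeping entry mapping a ring index back to the software
 * request that occupies it.
 */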
struct rid {
	struct nitrox_softreq *sr;
};
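
/* Per queue pair enqueue/dequeue and error counters. */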
struct nitrox_qp_stats {
	uint64_t enqueued_count;
	uint64_t dequeued_count;
	uint64_t enqueue_err_count;
	uint64_t dequeue_err_count;
};
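
/* Queue pair state. head is advanced by the enqueue helpers and tail by
 * the dequeue helper, both taken modulo count when indexing the ring.
 * pending_count is the number of requests in flight and is the only
 * field accessed atomically from both sides of the queue.
 */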
struct nitrox_qp {
	enum nitrox_queue_type type;
	uint8_t *bar_addr;
	struct command_queue cmdq;
	struct rid *ridq;
	uint32_t count;
	uint32_t head;
	uint32_t tail;
	struct rte_mempool *sr_mp;
	struct nitrox_qp_stats stats;
	uint16_t qno;
	RTE_ATOMIC(uint16_t) pending_count;
};
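
/* Number of free instruction slots available for enqueue. */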
static inline uint16_t
nitrox_qp_free_count(struct nitrox_qp *qp)
{
	uint16_t pending_count;

	pending_count = rte_atomic_load_explicit(&qp->pending_count,
						 rte_memory_order_relaxed);
	RTE_ASSERT(qp->count >= pending_count);
	return (qp->count - pending_count);
}
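
/* True when no requests are pending completion. */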
static inline bool
nitrox_qp_is_empty(struct nitrox_qp *qp)
{
	return (rte_atomic_load_explicit(&qp->pending_count,
					 rte_memory_order_relaxed) == 0);
}
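
/* Number of requests currently in flight. */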
static inline uint16_t
nitrox_qp_used_count(struct nitrox_qp *qp)
{
	return rte_atomic_load_explicit(&qp->pending_count,
					rte_memory_order_relaxed);
}
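
/* Peek at the software request sitting in the current tail slot. The
 * acquire fence pairs with the release fence in the enqueue helpers so
 * that the slot contents written before enqueue are visible here.
 */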
static inline struct nitrox_softreq *
nitrox_qp_get_softreq(struct nitrox_qp *qp)
{
	uint32_t tail = qp->tail % qp->count;

	rte_atomic_thread_fence(rte_memory_order_acquire);
	return qp->ridq[tail].sr;
}
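
/* Ring the command-queue doorbell for cnt newly written instructions.
 * The I/O write barrier orders the instruction stores into the ring
 * before the doorbell CSR write.
 */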
static inline void
nitrox_ring_dbell(struct nitrox_qp *qp, uint16_t cnt)
{
	struct command_queue *cmdq = &qp->cmdq;

	if (!cnt)
		return;

	rte_io_wmb();
	rte_write64(cnt, cmdq->dbell_csr_addr);
}
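
/* Copy one instruction into the next free ring slot and remember its
 * software request. The release fence publishes the slot contents
 * before pending_count is bumped; the doorbell is rung separately via
 * nitrox_ring_dbell().
 */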
static inline void
nitrox_qp_enqueue(struct nitrox_qp *qp, void *instr, struct nitrox_softreq *sr)
{
	uint32_t head = qp->head % qp->count;

	qp->head++;
	memcpy(&qp->cmdq.ring[head * qp->cmdq.instr_size],
	       instr, qp->cmdq.instr_size);
	qp->ridq[head].sr = sr;
	rte_atomic_thread_fence(rte_memory_order_release);
	rte_atomic_fetch_add_explicit(&qp->pending_count, 1,
				      rte_memory_order_relaxed);
}
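
/* Enqueue variant for rings whose next-command pointer is advanced
 * through the ZQMQ CSRs (inc_zqmq_next_cmd()) instead of copying into
 * a software-managed ring; only the softreq bookkeeping and the
 * pending_count update are done here.
 */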
static inline int
nitrox_qp_enqueue_sr(struct nitrox_qp *qp, struct nitrox_softreq *sr)
{
	uint32_t head = qp->head % qp->count;
	int err;

	err = inc_zqmq_next_cmd(qp->bar_addr, qp->qno);
	if (unlikely(err))
		return err;

	qp->head++;
	qp->ridq[head].sr = sr;
	rte_atomic_thread_fence(rte_memory_order_release);
	rte_atomic_fetch_add_explicit(&qp->pending_count, 1,
				      rte_memory_order_relaxed);
	return 0;
}
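
/* Retire the request at the tail slot once its completion has been
 * consumed.
 */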
static inline void
nitrox_qp_dequeue(struct nitrox_qp *qp)
{
	qp->tail++;
	rte_atomic_fetch_sub_explicit(&qp->pending_count, 1,
				      rte_memory_order_relaxed);
}
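
/*
 * Illustrative only: a rough sketch of how a driver datapath might
 * combine these helpers into enqueue/dequeue burst routines. The
 * build_instr(), request_done() and request_to_op() helpers and the
 * rte_crypto_op usage below are hypothetical, not part of this header;
 * the real burst functions live in the driver datapath files.
 *
 *	static uint16_t
 *	example_enqueue_burst(struct nitrox_qp *qp, struct rte_crypto_op **ops,
 *			      uint16_t nb_ops)
 *	{
 *		uint16_t cnt;
 *
 *		nb_ops = RTE_MIN(nb_ops, nitrox_qp_free_count(qp));
 *		for (cnt = 0; cnt < nb_ops; cnt++) {
 *			struct nitrox_softreq *sr;
 *
 *			if (rte_mempool_get(qp->sr_mp, (void **)&sr))
 *				break;
 *			// build_instr() prepares a cmdq.instr_size byte
 *			// instruction for the op inside sr (hypothetical).
 *			nitrox_qp_enqueue(qp, build_instr(sr, ops[cnt]), sr);
 *		}
 *		nitrox_ring_dbell(qp, cnt);
 *		return cnt;
 *	}
 *
 *	static uint16_t
 *	example_dequeue_burst(struct nitrox_qp *qp, struct rte_crypto_op **ops,
 *			      uint16_t nb_ops)
 *	{
 *		uint16_t cnt;
 *
 *		nb_ops = RTE_MIN(nb_ops, nitrox_qp_used_count(qp));
 *		for (cnt = 0; cnt < nb_ops; cnt++) {
 *			struct nitrox_softreq *sr = nitrox_qp_get_softreq(qp);
 *
 *			if (!request_done(sr))	// hypothetical completion poll
 *				break;
 *			ops[cnt] = request_to_op(sr);	// hypothetical
 *			nitrox_qp_dequeue(qp);
 *			rte_mempool_put(qp->sr_mp, sr);
 *		}
 *		return cnt;
 *	}
 */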
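
/* Control-path setup and release of a queue pair's ring and request
 * tracking state.
 */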
__rte_internal
int nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr,
		    const char *dev_name, uint32_t nb_descriptors,
		    uint8_t inst_size, int socket_id);
__rte_internal
int nitrox_qp_release(struct nitrox_qp *qp, uint8_t *bar_addr);

#endif /* _NITROX_QP_H_ */