/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#ifndef _CPT_COMMON_H_
#define _CPT_COMMON_H_

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

/*
 * This file defines common macros and structs
 */

#define TIME_IN_RESET_COUNT	5

/* Default command timeout in seconds */
#define DEFAULT_COMMAND_TIMEOUT	4

/* Completion count threshold */
#define CPT_COUNT_THOLD		32

/* Completion timer threshold */
#define CPT_TIMER_THOLD		0x3F

/* Increment (i) modulo (l); note that (i) is evaluated more than once */
#define MOD_INC(i, l)	((i) == ((l) - 1) ? (i) = 0 : (i)++)
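
/*
 * Illustrative usage only (not part of this header): stepping a ring
 * index of size 8 with MOD_INC.
 *
 *	unsigned int idx = 6;
 *	MOD_INC(idx, 8);	// idx becomes 7
 *	MOD_INC(idx, 8);	// idx wraps to 0
 */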

struct cpt_qp_meta_info {
	/** Pool from which meta buffers are allocated */
	struct rte_mempool *pool;
	/** Meta buffer length for scatter-gather mode */
	int sg_mlen;
	/** Meta buffer length for linear-buffer (direct) mode */
	int lb_mlen;
};

/*
 * Pending queue structure
 *
 * Ring of in-flight request pointers. The index arithmetic below masks
 * with (qsize - 1), so qsize must be a power of two.
 */
struct pending_queue {
	/** Array of pending requests */
	void **rid_queue;
	/** Tail of queue to be used for enqueue */
	unsigned int tail;
	/** Head of queue to be used for dequeue */
	unsigned int head;
};
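
/*
 * Sizing sketch (illustrative; NB_DESC and the rte_zmalloc() call are
 * assumptions, not taken from this header):
 *
 *	#define NB_DESC 1024	// must be a power of two
 *
 *	struct pending_queue pq = {
 *		.rid_queue = rte_zmalloc(NULL, NB_DESC * sizeof(void *), 0),
 *	};
 */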

struct __rte_aligned(8) cpt_request_info {
	/** Data path fields */

	/** Bus/IOVA address of the completion word */
	uint64_t comp_baddr;
	/** Completion word polled for the done status */
	volatile uint64_t *completion_addr;
	/** Alternate completion address */
	volatile uint64_t *alternate_caddr;
	/** Back pointer to the original operation */
	void *op;
	/** Instruction input words */
	struct {
		uint64_t ei0;
		uint64_t ei1;
		uint64_t ei2;
	} ist;
	/** Result pointer */
	uint8_t *rptr;
	/** Queue pair that owns this request */
	const void *qp;

	/** Control path fields */

	/** Deadline for completion polling */
	uint64_t time_out;
	/** Set once extra polling time has been granted */
	uint8_t extra_time;
};

static __rte_always_inline void
pending_queue_push(struct pending_queue *q, void *rid, unsigned int off,
		   const int qsize)
{
	/* NOTE: no free space check; the caller is expected to have made one */
	q->rid_queue[(q->tail + off) & (qsize - 1)] = rid;
}

static __rte_always_inline void
pending_queue_commit(struct pending_queue *q, unsigned int cnt,
		     const unsigned int qsize)
{
	/*
	 * Ensure the entries are written before the tail update makes them
	 * visible to the dequeue side.
	 */
	rte_atomic_thread_fence(rte_memory_order_release);

	q->tail = (q->tail + cnt) & (qsize - 1);
}

static __rte_always_inline void
pending_queue_pop(struct pending_queue *q, const int qsize)
{
	/* NOTE: no empty check; the caller is expected to have made one */

	q->head = (q->head + 1) & (qsize - 1);
}

static __rte_always_inline void
pending_queue_peek(struct pending_queue *q, void **rid, const int qsize,
		   int prefetch_next)
{
	void *next_rid;

	/* NOTE: no empty check; the caller is expected to have made one */

	*rid = q->rid_queue[q->head];

	if (likely(prefetch_next)) {
		next_rid = q->rid_queue[(q->head + 1) & (qsize - 1)];
		rte_prefetch_non_temporal(next_rid);
	}
}
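
/*
 * Dequeue-side sketch (illustrative; process_rid() is a hypothetical
 * completion handler): check the level, peek the head entry, handle it,
 * then pop.
 *
 *	void *rid;
 *
 *	while (pending_queue_level(&pq, NB_DESC) > 0) {
 *		pending_queue_peek(&pq, &rid, NB_DESC, 1);
 *		process_rid(rid);
 *		pending_queue_pop(&pq, NB_DESC);
 *	}
 */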

/* Number of entries currently held in the queue */
static __rte_always_inline unsigned int
pending_queue_level(struct pending_queue *q, const int qsize)
{
	return (q->tail - q->head) & (qsize - 1);
}

static __rte_always_inline unsigned int
pending_queue_free_slots(struct pending_queue *q, const int qsize,
			 const int reserved_slots)
{
	int free_slots;

	free_slots = qsize - pending_queue_level(q, qsize);

	/*
	 * Only qsize - 1 slots are usable; one slot is kept free so that a
	 * full queue can be told apart from an empty one.
	 */
	free_slots -= 1 + reserved_slots;

	if (unlikely(free_slots < 0))
		return 0;

	return free_slots;
}
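
/*
 * Enqueue-side sketch (illustrative; rids[] and nb_ops are assumptions):
 * reserve space first, stage entries at increasing offsets, then publish
 * them all with a single commit.
 *
 *	unsigned int i, slots;
 *
 *	slots = pending_queue_free_slots(&pq, NB_DESC, 0);
 *	if (nb_ops > slots)
 *		nb_ops = slots;
 *	for (i = 0; i < nb_ops; i++)
 *		pending_queue_push(&pq, rids[i], i, NB_DESC);
 *	pending_queue_commit(&pq, nb_ops, NB_DESC);
 */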

#endif /* _CPT_COMMON_H_ */