/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
#include <rte_byteorder.h>

/*
 * Completion queue descriptor types
 */
enum cq_desc_types {
	CQ_DESC_TYPE_WQ_ENET = 0,
	CQ_DESC_TYPE_DESC_COPY = 1,
	CQ_DESC_TYPE_WQ_EXCH = 2,
	CQ_DESC_TYPE_RQ_ENET = 3,
	CQ_DESC_TYPE_RQ_FCP = 4,
	CQ_DESC_TYPE_IOMMU_MISS = 5,
	CQ_DESC_TYPE_SGL = 6,
	CQ_DESC_TYPE_CLASSIFIER = 7,
	CQ_DESC_TYPE_TEST = 127,
};

/* Completion queue descriptor: 16B
 *
 * All completion queues have this basic layout.  The
 * type_specific area is unique for each completion
 * queue type.
 */
struct cq_desc {
	uint16_t completed_index;
	uint16_t q_number;
	uint8_t type_specific[11];
	uint8_t type_color;
};
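
/*
 * Illustrative check only: the fields above add up to the 16 bytes the
 * comment describes (2 + 2 + 11 + 1).  A build-time assertion such as the
 * one below could make that explicit; the CQ_DESC_SIZE_CHECK_EXAMPLE guard
 * is hypothetical and keeps the check out of normal builds.
 */
#ifdef CQ_DESC_SIZE_CHECK_EXAMPLE
_Static_assert(sizeof(struct cq_desc) == 16,
	"struct cq_desc must match the 16-byte hardware descriptor");
#endif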

#define CQ_DESC_TYPE_BITS        4
#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK       1
#define CQ_DESC_COLOR_SHIFT      7
#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
#define CQ_DESC_Q_NUM_BITS       10
#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS    12
#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)

static inline void cq_color_enc(struct cq_desc *desc, const uint8_t color)
{
	if (color)
		desc->type_color |=  (1 << CQ_DESC_COLOR_SHIFT);
	else
		desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT);
}

static inline void cq_desc_enc(struct cq_desc *desc,
	const uint8_t type, const uint8_t color, const uint16_t q_number,
	const uint16_t completed_index)
{
	desc->type_color = (type & CQ_DESC_TYPE_MASK) |
		((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
	desc->q_number = rte_cpu_to_le_16(q_number & CQ_DESC_Q_NUM_MASK);
	desc->completed_index = rte_cpu_to_le_16(completed_index &
		CQ_DESC_COMP_NDX_MASK);
}

static inline void cq_desc_dec(const struct cq_desc *desc_arg,
	uint8_t *type, uint8_t *color, uint16_t *q_number,
	uint16_t *completed_index)
{
	const struct cq_desc *desc = desc_arg;
	const uint8_t type_color = desc->type_color;

	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;

	/*
	 * Make sure color bit is read from desc *before* other fields
	 * are read from desc.  Hardware guarantees color bit is last
	 * bit (byte) written.  Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */

	rte_rmb();

	*type = type_color & CQ_DESC_TYPE_MASK;
	*q_number = rte_le_to_cpu_16(desc->q_number) & CQ_DESC_Q_NUM_MASK;
	*completed_index = rte_le_to_cpu_16(desc->completed_index) &
		CQ_DESC_COMP_NDX_MASK;
}

static inline void cq_color_dec(const struct cq_desc *desc_arg, uint8_t *color)
{
	volatile const struct cq_desc *desc = desc_arg;

	*color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
}
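
/*
 * Usage sketch, not part of the original header: it shows how the color bit
 * decoded by cq_desc_dec() is typically used to detect new completions.
 * The cq_poll_example structure, its fields (descs, desc_count, to_clean,
 * last_color) and the CQ_DESC_POLL_EXAMPLE guard are assumptions made for
 * illustration; the real service loop lives elsewhere in the driver.
 */
#ifdef CQ_DESC_POLL_EXAMPLE
struct cq_poll_example {
	const struct cq_desc *descs;	/* base of the completion ring */
	unsigned int desc_count;	/* number of descriptors in the ring */
	unsigned int to_clean;		/* next descriptor index to inspect */
	uint8_t last_color;		/* color already consumed on this pass */
};

/* Decode the next completion if hardware has posted one; return 1 if so. */
static inline int cq_poll_example_next(struct cq_poll_example *cq,
	uint8_t *type, uint16_t *q_number, uint16_t *completed_index)
{
	const struct cq_desc *desc = &cq->descs[cq->to_clean];
	uint8_t color;

	cq_desc_dec(desc, type, &color, q_number, completed_index);

	/* Same color as the previous pass: nothing new has been written. */
	if (color == cq->last_color)
		return 0;

	/* Advance; the color hardware writes flips each time the ring wraps. */
	if (++cq->to_clean == cq->desc_count) {
		cq->to_clean = 0;
		cq->last_color = !cq->last_color;
	}
	return 1;
}
#endif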

#endif /* _CQ_DESC_H_ */