/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Broadcom
 * All rights reserved.
 */

#ifndef _BCMFS_QP_H_
#define _BCMFS_QP_H_

#include <rte_memzone.h>

/* Maximum number of h/w queues supported by device */
#define BCMFS_MAX_HW_QUEUES		32

/* H/W queue IO address space len */
#define BCMFS_HW_QUEUE_IO_ADDR_LEN	(64 * 1024)

/* Maximum size of device ops name */
#define BCMFS_HW_OPS_NAMESIZE		32

enum bcmfs_queue_type {
	/* TX or submission queue */
	BCMFS_RM_TXQ,
	/* Completion or receive queue */
	BCMFS_RM_CPLQ
};

#define BCMFS_QP_IOBASE_XLATE(base, idx)	\
		((base) + ((idx) * BCMFS_HW_QUEUE_IO_ADDR_LEN))
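
/*
 * Illustrative example (not part of the driver sources): each h/w queue
 * owns a 64 KB window of the mapped device IO space, so the macro above
 * simply offsets the mapped base by idx such windows. For instance,
 * assuming a mapped base pointer 'iobase', queue index 2 is addressed at
 *
 *   BCMFS_QP_IOBASE_XLATE(iobase, 2) == iobase + 2 * 64 * 1024
 */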

/* Max pkts for preprocessing before submitting to h/w qp */
#define BCMFS_MAX_REQS_BUFF	64

/* qp stats */
struct bcmfs_qp_stats {
	/* Count of all operations enqueued */
	uint64_t enqueued_count;
	/* Count of all operations dequeued */
	uint64_t dequeued_count;
	/* Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/* Total error count on operations dequeued */
	uint64_t dequeue_err_count;
};

struct bcmfs_qp_config {
	/* Socket to allocate memory on */
	int socket_id;
	/* Mapped iobase for qp */
	void *iobase;
	/* Number of descriptors or requests a h/w queue can accommodate */
	uint16_t nb_descriptors;
	/* Maximum number of h/w descriptors needed by a request */
	uint16_t max_descs_req;
	/* h/w ops associated with qp */
	struct bcmfs_hw_queue_pair_ops *ops;
};

struct bcmfs_queue {
	/* Base virt address */
	void *base_addr;
	/* Base iova */
	rte_iova_t base_phys_addr;
	/* Queue type */
	enum bcmfs_queue_type q_type;
	/* Queue size based on nb_descriptors and max_descs_req */
	uint32_t queue_size;
	union {
		/* s/w pointer for tx h/w queue */
		uint32_t tx_write_ptr;
		/* s/w pointer for completion h/w queue */
		uint32_t cmpl_read_ptr;
	};
	/* Number of inflight descriptors accumulated before next doorbell ring */
	uint16_t descs_inflight;
	/* Memzone name */
	char memz_name[RTE_MEMZONE_NAMESIZE];
};

struct __rte_cache_aligned bcmfs_qp {
	/* Queue-pair ID */
	uint16_t qpair_id;
	/* Mapped IO address */
	void *ioreg;
	/* A TX queue */
	struct bcmfs_queue tx_q;
	/* A Completion queue */
	struct bcmfs_queue cmpl_q;
	/* Number of requests queue can accommodate */
	uint32_t nb_descriptors;
	/* Number of pending requests enqueued to the h/w queue */
	uint16_t nb_pending_requests;
	/* A pool which acts as a hash for <request-ID, virt address> pairs */
	unsigned long *ctx_pool;
	/* Virt address of memory allocated for the bitmap */
	void *ctx_bmp_mem;
	/* Bitmap */
	struct rte_bitmap *ctx_bmp;
	/* Associated stats */
	struct bcmfs_qp_stats stats;
	/* h/w ops associated with qp */
	struct bcmfs_hw_queue_pair_ops *ops;
	/* bcmfs request pool */
	struct rte_mempool *sr_mp;
	/* A temporary buffer to keep message pointers */
	struct bcmfs_qp_message *infl_msgs[BCMFS_MAX_REQS_BUFF];
};

/* Structure defining h/w queue pair operations */
struct bcmfs_hw_queue_pair_ops {
	/* ops name */
	char name[BCMFS_HW_OPS_NAMESIZE];
	/* Enqueue an object */
	int (*enq_one_req)(struct bcmfs_qp *qp, void *obj);
	/* Ring doorbell */
	void (*ring_db)(struct bcmfs_qp *qp);
	/* Dequeue objects */
	uint16_t (*dequeue)(struct bcmfs_qp *qp, void **obj,
			    uint16_t nb_ops);
	/* Start the h/w queue */
	int (*startq)(struct bcmfs_qp *qp);
	/* Stop the h/w queue */
	void (*stopq)(struct bcmfs_qp *qp);
};

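/*
 * Illustrative sketch only; the backend and function names below are
 * hypothetical, not part of the driver. A ring-manager backend is expected
 * to expose its queue handling through such an ops table and hand it to
 * the queue pair via bcmfs_qp_config::ops at setup time:
 *
 *   static struct bcmfs_hw_queue_pair_ops my_rm_ops = {
 *           .name        = "my_rm",
 *           .enq_one_req = my_rm_enq_one_req,
 *           .ring_db     = my_rm_ring_db,
 *           .dequeue     = my_rm_dequeue,
 *           .startq      = my_rm_startq,
 *           .stopq       = my_rm_stopq,
 *   };
 */
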
uint16_t
bcmfs_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
uint16_t
bcmfs_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
int
bcmfs_qp_release(struct bcmfs_qp **qp_addr);
int
bcmfs_qp_setup(struct bcmfs_qp **qp_addr,
	       uint16_t queue_pair_id,
	       struct bcmfs_qp_config *bcmfs_conf);

/* Stats functions */
void bcmfs_qp_stats_get(struct bcmfs_qp **qp, int num_qp,
			struct bcmfs_qp_stats *stats);
void bcmfs_qp_stats_reset(struct bcmfs_qp **qp, int num_qp);

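/*
 * Illustrative usage sketch, not driver code. The descriptor counts, the
 * mapped 'dev_iobase', the backend 'dev_ops' table and the 'ops'/'nb_ops'
 * request array are placeholders supplied by the caller:
 *
 *   struct bcmfs_qp *qp = NULL;
 *   struct bcmfs_qp_config conf = {
 *           .socket_id = rte_socket_id(),
 *           .iobase = BCMFS_QP_IOBASE_XLATE(dev_iobase, 0),
 *           .nb_descriptors = 1024,
 *           .max_descs_req = 8,
 *           .ops = dev_ops,
 *   };
 *
 *   if (bcmfs_qp_setup(&qp, 0, &conf) == 0) {
 *           uint16_t sent = bcmfs_enqueue_op_burst(qp, ops, nb_ops);
 *           uint16_t done = bcmfs_dequeue_op_burst(qp, ops, sent);
 *
 *           struct bcmfs_qp_stats st;
 *           bcmfs_qp_stats_get(&qp, 1, &st);
 *
 *           bcmfs_qp_release(&qp);
 *   }
 */
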
#endif /* _BCMFS_QP_H_ */