/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include "otx_zip.h"

uint64_t
zip_reg_read64(uint8_t *hw_addr, uint64_t offset)
{
	uint8_t *base = hw_addr;
	return *(volatile uint64_t *)(base + offset);
}

void
zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val)
{
	uint8_t *base = hw_addr;
	/* Use a volatile access so the MMIO store is not optimized away */
	*(volatile uint64_t *)(base + offset) = val;
}
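
/*
 * Illustrative sketch only (not part of the driver): the two helpers above
 * are the building blocks for every BAR0 access in this file. A hypothetical
 * read-modify-write of the queue enable register could look like this:
 *
 *	uint64_t reg = zip_reg_read64(vf->vbar0, ZIP_VQ_ENA);
 *
 *	reg |= 0x1;
 *	zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, reg);
 *	rte_wmb();
 *
 * The rte_wmb() barrier mirrors the ordering used by zip_q_enable() below.
 */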

static void
zip_q_enable(struct zipvf_qp *qp)
{
	zip_vqx_ena_t que_ena;

	/* Enable the ZIP VFx command queue */
	que_ena.u = 0ull;
	que_ena.s.ena = 1;

	zip_reg_write64(qp->vf->vbar0, ZIP_VQ_ENA, que_ena.u);
	rte_wmb();
}

/* Initialize the given queue pair on the ZIP device */
int
zipvf_q_init(struct zipvf_qp *qp)
{
	zip_vqx_sbuf_addr_t que_sbuf_addr;

	uint64_t size;
	void *cmdq_addr;
	uint64_t iova;
	struct zipvf_cmdq *cmdq = &qp->cmdq;
	struct zip_vf *vf = qp->vf;

	/* Allocate and set up the instruction (command) queue */
	size = ZIP_MAX_CMDQ_SIZE;
	size = ZIP_ALIGN_ROUNDUP(size, ZIP_CMDQ_ALIGN);

	cmdq_addr = rte_zmalloc(qp->name, size, ZIP_CMDQ_ALIGN);
	if (cmdq_addr == NULL)
		return -1;

	cmdq->sw_head = (uint64_t *)cmdq_addr;
	cmdq->va = (uint8_t *)cmdq_addr;
	iova = rte_mem_virt2iova(cmdq_addr);

	cmdq->iova = iova;

	/* Program the chunk buffer address; the register takes the address
	 * shifted right by 7 bits (the buffer is ZIP_CMDQ_ALIGN aligned).
	 * OCTEON TX2 uses a different field layout for the same register.
	 */
	que_sbuf_addr.u = 0ull;
	if (vf->pdev->id.device_id == PCI_DEVICE_ID_OCTEONTX2_ZIPVF)
		que_sbuf_addr.s9x.ptr = (cmdq->iova >> 7);
	else
		que_sbuf_addr.s.ptr = (cmdq->iova >> 7);

	zip_reg_write64(vf->vbar0, ZIP_VQ_SBUF_ADDR, que_sbuf_addr.u);

	zip_q_enable(qp);

	memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
	rte_spinlock_init(&cmdq->qlock);

	return 0;
}
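
/*
 * Hypothetical usage sketch (not part of the driver): a queue-pair setup op
 * would allocate the zipvf_qp, point qp->vf at the device private data and
 * hand it to zipvf_q_init(); teardown reverses it with zipvf_q_term(). The
 * exact field assignments are assumptions for illustration.
 *
 *	struct zipvf_qp *qp;
 *
 *	qp = rte_zmalloc(NULL, sizeof(*qp), RTE_CACHE_LINE_SIZE);
 *	if (qp == NULL)
 *		return -ENOMEM;
 *	qp->vf = compressdev->data->dev_private;
 *	if (zipvf_q_init(qp) != 0) {
 *		rte_free(qp);
 *		return -1;
 *	}
 *	...
 *	zipvf_q_term(qp);
 *	rte_free(qp);
 */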

int
zipvf_q_term(struct zipvf_qp *qp)
{
	struct zipvf_cmdq *cmdq = &qp->cmdq;
	zip_vqx_ena_t que_ena;
	struct zip_vf *vf = qp->vf;

	/* Disable the ZIP queue before releasing its command buffer */
	que_ena.u = 0ull;
	zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, que_ena.u);

	if (cmdq->va != NULL) {
		memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
		rte_free(cmdq->va);
		cmdq->va = NULL;
	}

	return 0;
}

void
zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd)
{
	zip_quex_doorbell_t dbell;
	union zip_nptr_s ncp;
	uint64_t *ncb_ptr;
	struct zipvf_cmdq *cmdq = &qp->cmdq;
	void *reg_base = qp->vf->vbar0;

	/* Acquire the queue lock */
	rte_spinlock_lock(&cmdq->qlock);

	/* Check whether this command fills the last slot before the
	 * next-chunk buffer pointer at the end of the queue chunk.
	 */
	if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t)) +
		ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - ZIP_MAX_NCBP_SIZE)) {
		/* Last command slot of the queue chunk */
		memcpy((uint8_t *)cmdq->sw_head,
			(uint8_t *)cmd,
			sizeof(union zip_inst_s));
		/* Move the head to the next location, in 64-bit words */
		cmdq->sw_head += ZIP_CMD_SIZE_WORDS;

		/* Now sw_head is the "Next-Chunk Buffer Ptr" slot */
		ncb_ptr = cmdq->sw_head;
		/* Wrap the head back to the command queue base */
		cmdq->sw_head = (uint64_t *)cmdq->va;

		ncp.u = 0ull;
		ncp.s.addr = cmdq->iova;
		*ncb_ptr = ncp.u;
	} else {
		/* Enough space is left in the current queue chunk */
		memcpy((uint8_t *)cmdq->sw_head,
			(uint8_t *)cmd,
			sizeof(union zip_inst_s));
		cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
	}

	/* Make the instruction visible before ringing the doorbell */
	rte_wmb();

	/* Ring the ZIP VF doorbell for one new instruction */
	dbell.u = 0ull;
	dbell.s.dbell_cnt = 1;
	zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);

	rte_spinlock_unlock(&cmdq->qlock);
}
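
/*
 * Hypothetical usage sketch (not part of the driver): an enqueue path would
 * fill a zip_inst_s describing one (de)compression operation and submit it
 * with zipvf_push_command(), which also rings the doorbell. The instruction
 * fields referenced below are assumptions for illustration.
 *
 *	union zip_inst_s inst;
 *
 *	memset(&inst, 0, sizeof(inst));
 *	... fill opcode, input/output pointers and lengths in inst.s ...
 *	zipvf_push_command(qp, &inst);
 *	... later, poll the result/completion buffer for this instruction ...
 */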

int
zipvf_create(struct rte_compressdev *compressdev)
{
	struct   rte_pci_device *pdev = RTE_DEV_TO_PCI(compressdev->device);
	struct   zip_vf *zipvf = NULL;
	char     *dev_name = compressdev->data->name;
	void     *vbar0;
	uint64_t reg;

	if (pdev->mem_resource[0].phys_addr == 0ULL)
		return -EIO;

	vbar0 = pdev->mem_resource[0].addr;
	if (!vbar0) {
		ZIP_PMD_ERR("Failed to map BAR0 of %s", dev_name);
		return -ENODEV;
	}

	zipvf = (struct zip_vf *)(compressdev->data->dev_private);

	if (!zipvf)
		return -ENOMEM;

	zipvf->vbar0 = vbar0;
	reg = zip_reg_read64(zipvf->vbar0, ZIP_VF_PF_MBOXX(0));
	/* Store the PF-assigned domain:subdomain locally in the ZIP VF */
	zipvf->dom_sdom = reg;
	zipvf->pdev = pdev;
	zipvf->max_nb_queue_pairs = ZIP_MAX_VF_QUEUE;
	return 0;
}
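
/*
 * Hypothetical usage sketch (not part of the driver): the PCI probe callback
 * would allocate the compressdev with dev_private sized for struct zip_vf
 * and then call zipvf_create(); the remove path calls zipvf_destroy(). The
 * init parameters below are assumptions for illustration.
 *
 *	compressdev = rte_compressdev_pmd_create(name, &pci_dev->device,
 *			sizeof(struct zip_vf), &init_params);
 *	if (compressdev == NULL)
 *		return -ENODEV;
 *	ret = zipvf_create(compressdev);
 *	if (ret != 0)
 *		return ret;
 */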

int
zipvf_destroy(struct rte_compressdev *compressdev)
{
	struct zip_vf *vf = (struct zip_vf *)(compressdev->data->dev_private);

	/* Restore the domain id in the VF-PF mailbox so the application
	 * can be rerun.
	 */
	zip_reg_write64(vf->vbar0, ZIP_VF_PF_MBOXX(0), vf->dom_sdom);

	return 0;
}