15438e4ecSFan Zhang /* SPDX-License-Identifier: BSD-3-Clause
25438e4ecSFan Zhang * Copyright(c) 2021 Intel Corporation
35438e4ecSFan Zhang */
45438e4ecSFan Zhang
55438e4ecSFan Zhang #include "qat_device.h"
64c778f1aSFan Zhang #include "qat_qp.h"
75438e4ecSFan Zhang #include "adf_transport_access_macros.h"
85438e4ecSFan Zhang #include "qat_dev_gens.h"
95438e4ecSFan Zhang
105438e4ecSFan Zhang #include <stdint.h>
115438e4ecSFan Zhang
125438e4ecSFan Zhang #define ADF_ARB_REG_SLOT 0x1000
135438e4ecSFan Zhang
/*
 * Write the service-arbiter enable CSR for the bundle selected by @index.
 * Fix: parenthesize @index and @value in the expansion so that expression
 * arguments (e.g. "i + 1") are not mis-grouped by the multiplication.
 */
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * (index)), (value))
174c778f1aSFan Zhang
__extension__
/*
 * Static GEN1 mapping of service type -> queue-pair hardware layout.
 * Each service gets two queue pairs on bundle 0; a pair is a TX ring
 * (request) plus an RX ring (response).  An entry with tx_msg_size == 0
 * would mean "slot unused" (see qat_qp_rings_per_service_gen1).
 * Ring numbers and message sizes are fixed by the GEN1 firmware
 * configuration -- NOTE(review): taken as given here, not derivable from
 * this file alone.
 */
const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 0,
			.rx_ring_num = 8,
			.tx_msg_size = 64,
			.rx_msg_size = 32,

		}, {
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 1,
			.rx_ring_num = 9,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 2,
			.rx_ring_num = 10,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		},
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 3,
			.rx_ring_num = 11,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		{
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 6,
			.rx_ring_num = 14,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}, {
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 7,
			.rx_ring_num = 15,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	}
};
784c778f1aSFan Zhang
794c778f1aSFan Zhang const struct qat_qp_hw_data *
qat_qp_get_hw_data_gen1(struct qat_pci_device * dev __rte_unused,enum qat_service_type service_type,uint16_t qp_id)804c778f1aSFan Zhang qat_qp_get_hw_data_gen1(struct qat_pci_device *dev __rte_unused,
814c778f1aSFan Zhang enum qat_service_type service_type, uint16_t qp_id)
824c778f1aSFan Zhang {
834c778f1aSFan Zhang return qat_gen1_qps[service_type] + qp_id;
844c778f1aSFan Zhang }
854c778f1aSFan Zhang
864c778f1aSFan Zhang int
qat_qp_rings_per_service_gen1(struct qat_pci_device * qat_dev,enum qat_service_type service)874c778f1aSFan Zhang qat_qp_rings_per_service_gen1(struct qat_pci_device *qat_dev,
884c778f1aSFan Zhang enum qat_service_type service)
894c778f1aSFan Zhang {
904c778f1aSFan Zhang int i = 0, count = 0;
914c778f1aSFan Zhang
924c778f1aSFan Zhang for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
934c778f1aSFan Zhang const struct qat_qp_hw_data *hw_qps =
944c778f1aSFan Zhang qat_qp_get_hw_data(qat_dev, service, i);
954c778f1aSFan Zhang
964c778f1aSFan Zhang if (hw_qps == NULL)
974c778f1aSFan Zhang continue;
98156eee21SArek Kusztal if (hw_qps->service_type == service && hw_qps->tx_msg_size)
994c778f1aSFan Zhang count++;
1004c778f1aSFan Zhang }
1014c778f1aSFan Zhang
1024c778f1aSFan Zhang return count;
1034c778f1aSFan Zhang }
1044c778f1aSFan Zhang
1054c778f1aSFan Zhang void
qat_qp_csr_build_ring_base_gen1(void * io_addr,struct qat_queue * queue)1064c778f1aSFan Zhang qat_qp_csr_build_ring_base_gen1(void *io_addr,
1074c778f1aSFan Zhang struct qat_queue *queue)
1084c778f1aSFan Zhang {
1094c778f1aSFan Zhang uint64_t queue_base;
1104c778f1aSFan Zhang
1114c778f1aSFan Zhang queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
1124c778f1aSFan Zhang queue->queue_size);
1134c778f1aSFan Zhang WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
1144c778f1aSFan Zhang queue->hw_queue_number, queue_base);
1154c778f1aSFan Zhang }
1164c778f1aSFan Zhang
1174c778f1aSFan Zhang void
qat_qp_adf_arb_enable_gen1(const struct qat_queue * txq,void * base_addr,rte_spinlock_t * lock)1184c778f1aSFan Zhang qat_qp_adf_arb_enable_gen1(const struct qat_queue *txq,
1194c778f1aSFan Zhang void *base_addr, rte_spinlock_t *lock)
1204c778f1aSFan Zhang {
1214c778f1aSFan Zhang uint32_t arb_csr_offset = 0, value;
1224c778f1aSFan Zhang
1234c778f1aSFan Zhang rte_spinlock_lock(lock);
1244c778f1aSFan Zhang arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
1254c778f1aSFan Zhang (ADF_ARB_REG_SLOT *
1264c778f1aSFan Zhang txq->hw_bundle_number);
1274c778f1aSFan Zhang value = ADF_CSR_RD(base_addr,
1284c778f1aSFan Zhang arb_csr_offset);
1294c778f1aSFan Zhang value |= (0x01 << txq->hw_queue_number);
1304c778f1aSFan Zhang ADF_CSR_WR(base_addr, arb_csr_offset, value);
1314c778f1aSFan Zhang rte_spinlock_unlock(lock);
1324c778f1aSFan Zhang }
1334c778f1aSFan Zhang
1344c778f1aSFan Zhang void
qat_qp_adf_arb_disable_gen1(const struct qat_queue * txq,void * base_addr,rte_spinlock_t * lock)1354c778f1aSFan Zhang qat_qp_adf_arb_disable_gen1(const struct qat_queue *txq,
1364c778f1aSFan Zhang void *base_addr, rte_spinlock_t *lock)
1374c778f1aSFan Zhang {
1384c778f1aSFan Zhang uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
1394c778f1aSFan Zhang (ADF_ARB_REG_SLOT * txq->hw_bundle_number);
1404c778f1aSFan Zhang uint32_t value;
1414c778f1aSFan Zhang
1424c778f1aSFan Zhang rte_spinlock_lock(lock);
1434c778f1aSFan Zhang value = ADF_CSR_RD(base_addr, arb_csr_offset);
1444c778f1aSFan Zhang value &= ~(0x01 << txq->hw_queue_number);
1454c778f1aSFan Zhang ADF_CSR_WR(base_addr, arb_csr_offset, value);
1464c778f1aSFan Zhang rte_spinlock_unlock(lock);
1474c778f1aSFan Zhang }
1484c778f1aSFan Zhang
1494c778f1aSFan Zhang void
qat_qp_adf_configure_queues_gen1(struct qat_qp * qp)1504c778f1aSFan Zhang qat_qp_adf_configure_queues_gen1(struct qat_qp *qp)
1514c778f1aSFan Zhang {
1524c778f1aSFan Zhang uint32_t q_tx_config, q_resp_config;
1534c778f1aSFan Zhang struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
1544c778f1aSFan Zhang
1554c778f1aSFan Zhang q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
1564c778f1aSFan Zhang q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
1574c778f1aSFan Zhang ADF_RING_NEAR_WATERMARK_512,
1584c778f1aSFan Zhang ADF_RING_NEAR_WATERMARK_0);
1594c778f1aSFan Zhang WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
1604c778f1aSFan Zhang q_tx->hw_bundle_number, q_tx->hw_queue_number,
1614c778f1aSFan Zhang q_tx_config);
1624c778f1aSFan Zhang WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
1634c778f1aSFan Zhang q_rx->hw_bundle_number, q_rx->hw_queue_number,
1644c778f1aSFan Zhang q_resp_config);
1654c778f1aSFan Zhang }
1664c778f1aSFan Zhang
/* Publish the software tail pointer of ring @q to hardware so newly
 * enqueued requests become visible to the device.
 */
void
qat_qp_csr_write_tail_gen1(struct qat_qp *qp, struct qat_queue *q)
{
	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, q->tail);
}
1734c778f1aSFan Zhang
/* Acknowledge consumed responses by writing @new_head to the head CSR of
 * ring @q, freeing those descriptor slots for reuse by hardware.
 */
void
qat_qp_csr_write_head_gen1(struct qat_qp *qp, struct qat_queue *q,
		uint32_t new_head)
{
	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, new_head);
}
1814c778f1aSFan Zhang
1824c778f1aSFan Zhang void
qat_qp_csr_setup_gen1(struct qat_pci_device * qat_dev,void * io_addr,struct qat_qp * qp)1834c778f1aSFan Zhang qat_qp_csr_setup_gen1(struct qat_pci_device *qat_dev,
1844c778f1aSFan Zhang void *io_addr, struct qat_qp *qp)
1854c778f1aSFan Zhang {
1864c778f1aSFan Zhang qat_qp_csr_build_ring_base_gen1(io_addr, &qp->tx_q);
1874c778f1aSFan Zhang qat_qp_csr_build_ring_base_gen1(io_addr, &qp->rx_q);
1884c778f1aSFan Zhang qat_qp_adf_configure_queues_gen1(qp);
1894c778f1aSFan Zhang qat_qp_adf_arb_enable_gen1(&qp->tx_q, qp->mmap_bar_addr,
1904c778f1aSFan Zhang &qat_dev->arb_csr_lock);
1914c778f1aSFan Zhang }
1924c778f1aSFan Zhang
/*
 * GEN1 queue-pair ops table, registered for QAT_GEN1 in the constructor
 * below.  Generic qat_qp code dispatches through these callbacks.
 */
static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen1 = {
	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen1,
	.qat_qp_build_ring_base = qat_qp_csr_build_ring_base_gen1,
	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen1,
	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen1,
	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen1,
	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen1,
	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen1,
	.qat_qp_csr_setup = qat_qp_csr_setup_gen1,
	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen1,
};
2044c778f1aSFan Zhang
2055438e4ecSFan Zhang int
qat_reset_ring_pairs_gen1(struct qat_pci_device * qat_pci_dev __rte_unused)2065438e4ecSFan Zhang qat_reset_ring_pairs_gen1(struct qat_pci_device *qat_pci_dev __rte_unused)
2075438e4ecSFan Zhang {
2085438e4ecSFan Zhang /*
2095438e4ecSFan Zhang * Ring pairs reset not supported on base, continue
2105438e4ecSFan Zhang */
2115438e4ecSFan Zhang return 0;
2125438e4ecSFan Zhang }
2135438e4ecSFan Zhang
2145438e4ecSFan Zhang const struct rte_mem_resource *
qat_dev_get_transport_bar_gen1(struct rte_pci_device * pci_dev)2155438e4ecSFan Zhang qat_dev_get_transport_bar_gen1(struct rte_pci_device *pci_dev)
2165438e4ecSFan Zhang {
2175438e4ecSFan Zhang return &pci_dev->mem_resource[0];
2185438e4ecSFan Zhang }
2195438e4ecSFan Zhang
2205438e4ecSFan Zhang int
qat_dev_get_misc_bar_gen1(struct rte_mem_resource ** mem_resource __rte_unused,struct rte_pci_device * pci_dev __rte_unused)2215438e4ecSFan Zhang qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource __rte_unused,
2225438e4ecSFan Zhang struct rte_pci_device *pci_dev __rte_unused)
2235438e4ecSFan Zhang {
2245438e4ecSFan Zhang return -1;
2255438e4ecSFan Zhang }
2265438e4ecSFan Zhang
2275438e4ecSFan Zhang int
qat_dev_read_config_gen1(struct qat_pci_device * qat_dev __rte_unused)2285438e4ecSFan Zhang qat_dev_read_config_gen1(struct qat_pci_device *qat_dev __rte_unused)
2295438e4ecSFan Zhang {
2305438e4ecSFan Zhang /*
2315438e4ecSFan Zhang * Base generations do not have configuration,
2325438e4ecSFan Zhang * but set this pointer anyway that we can
2335438e4ecSFan Zhang * distinguish higher generations faulty set to NULL
2345438e4ecSFan Zhang */
2355438e4ecSFan Zhang return 0;
2365438e4ecSFan Zhang }
2375438e4ecSFan Zhang
/* GEN1 devices need no extra per-device private data; report zero bytes. */
int
qat_dev_get_extra_size_gen1(void)
{
	return 0;
}
2435438e4ecSFan Zhang
244b3cbbcdfSArek Kusztal static int
qat_get_dev_slice_map_gen1(uint32_t * map __rte_unused,const struct rte_pci_device * pci_dev __rte_unused)245*d848fcb8SVikash Poddar qat_get_dev_slice_map_gen1(uint32_t *map __rte_unused,
246b3cbbcdfSArek Kusztal const struct rte_pci_device *pci_dev __rte_unused)
247b3cbbcdfSArek Kusztal {
248b3cbbcdfSArek Kusztal return 0;
249b3cbbcdfSArek Kusztal }
250b3cbbcdfSArek Kusztal
/*
 * GEN1 device-level ops table, registered for QAT_GEN1 in the constructor
 * below.  Generic qat_device code dispatches through these callbacks.
 */
static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen1 = {
	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen1,
	.qat_dev_read_config = qat_dev_read_config_gen1,
	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen1,
	.qat_dev_get_slice_map = qat_get_dev_slice_map_gen1,
};
2595438e4ecSFan Zhang
RTE_INIT(qat_dev_gen_gen1_init)2605438e4ecSFan Zhang RTE_INIT(qat_dev_gen_gen1_init)
2615438e4ecSFan Zhang {
2624c778f1aSFan Zhang qat_qp_hw_spec[QAT_GEN1] = &qat_qp_hw_spec_gen1;
2635438e4ecSFan Zhang qat_dev_hw_spec[QAT_GEN1] = &qat_dev_hw_spec_gen1;
2645438e4ecSFan Zhang qat_gen_config[QAT_GEN1].dev_gen = QAT_GEN1;
2655438e4ecSFan Zhang }
266