/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUX
#include <dirent.h>
#include <fcntl.h>
#endif

#include <rte_io.h>

#include "virtio_pci.h"
#include "virtqueue.h"

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)

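/*
 * Per-device internal state; indexed by the crypto device id and reached
 * through the VTPCI_OPS() macro to pick the right virtio_pci_ops.
 */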
struct virtio_hw_internal crypto_virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
	 * and only accepts 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

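/*
 * The modern common config exposes 64-bit queue addresses as pairs of
 * 32-bit registers; write the low word first, then the high word.
 */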
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

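/*
 * Read the device-specific config space. Per the virtio 1.0 spec, a read
 * is only consistent if config_generation is unchanged across it, so the
 * copy is retried until two consecutive generation reads match.
 */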
static void
modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
		void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
		const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

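/*
 * Device/driver features are exposed through a 32-bit window:
 * feature_select 0 selects bits 0-31, feature_select 1 selects bits 32-63.
 */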
static uint64_t
modern_get_features(struct virtio_crypto_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		&hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		&hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_crypto_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_crypto_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_crypto_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
		uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

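/*
 * Program one split ring: the descriptor table is followed by the avail
 * ring, and the used ring starts at the next VIRTIO_PCI_VRING_ALIGN
 * boundary. Also record the queue's notify address (notify_base +
 * queue_notify_off * notify_off_multiplier) before enabling the queue.
 */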
static int
modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
						ring[vq->vq_nentries]),
				VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			&hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			&hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			&hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
			notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t avail_addr: %" PRIx64, avail_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
			vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			&hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			&hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			&hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

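/*
 * Kick the device: writing the queue index to the queue's notify address
 * tells the device new descriptors are available on that queue.
 */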
static void
modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
		struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

const struct virtio_pci_ops virtio_crypto_modern_ops = {
	.read_dev_cfg	= modern_read_dev_config,
	.write_dev_cfg	= modern_write_dev_config,
	.reset		= modern_reset,
	.get_status	= modern_get_status,
	.set_status	= modern_set_status,
	.get_features	= modern_get_features,
	.set_features	= modern_set_features,
	.get_isr	= modern_get_isr,
	.set_config_irq	= modern_set_config_irq,
	.set_queue_irq	= modern_set_queue_irq,
	.get_queue_num	= modern_get_queue_num,
	.setup_queue	= modern_setup_queue,
	.del_queue	= modern_del_queue,
	.notify_queue	= modern_notify_queue,
};

void
vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
		void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
		const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
		uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
{
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

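/*
 * Status bits are cumulative: except for a reset (status 0), OR the new
 * bits into whatever the device currently reports before writing back.
 */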
void
vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

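/*
 * Translate a virtio PCI capability (BAR index, offset, length) into a
 * mapped address, rejecting capabilities that fall outside the BAR.
 */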
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		VIRTIO_CRYPTO_INIT_LOG_ERR(
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

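/*
 * Map the device and walk its PCI capability list: the MSI-X capability
 * tells us whether MSI-X is enabled, and the vendor-specific capabilities
 * locate the common, notify, ISR and device config structures required
 * by a modern (virtio 1.0) device.
 */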
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	struct virtio_pci_cap cap;
	uint16_t flags;
	off_t pos;
	int ret;

	if (rte_pci_map_device(dev)) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
		return -1;
	}

	/*
	 * Transitional devices would also have this capability,
	 * that's why we also check if msix is enabled.
	 */
	pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_MSIX);
	if (pos > 0 && rte_pci_read_config(dev, &flags, sizeof(flags),
			pos + RTE_PCI_MSIX_FLAGS) == sizeof(flags)) {
		if (flags & RTE_PCI_MSIX_FLAGS_ENABLE)
			hw->use_msix = VIRTIO_MSIX_ENABLED;
		else
			hw->use_msix = VIRTIO_MSIX_DISABLED;
	} else {
		hw->use_msix = VIRTIO_MSIX_NONE;
	}

	pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_VNDR);
	while (pos > 0) {
		if (rte_pci_read_config(dev, &cap, sizeof(cap), pos) != sizeof(cap))
			break;
		VIRTIO_CRYPTO_INIT_LOG_DBG(
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			(unsigned int)pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				VIRTIO_CRYPTO_INIT_LOG_ERR(
					"failed to read notify_off_multiplier: ret %d", ret);
			else
				hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

		pos = rte_pci_find_next_capability(dev, RTE_PCI_CAP_ID_VNDR, pos);
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
			hw->dev_cfg == NULL || hw->isr == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
		return -1;
	}

	VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");

	VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
			hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is error mapping with VFIO/UIO.
 *   if port map error when driver type is KDRV_NONE.
 *   if marked as allowed but driver type is KDRV_UNKNOWN.
 * Return 1 if kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	/*
	 * Try to read the virtio PCI caps, which exist only on modern
	 * PCI devices. If that fails, fall back to legacy virtio handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
		crypto_virtio_hw_internal[hw->dev_id].vtpci_ops =
					&virtio_crypto_modern_ops;
		hw->modern = 1;
		return 0;
	}

	/*
	 * virtio crypto conforms to virtio 1.0 and doesn't support
	 * legacy mode.
	 */
	return -1;
}