/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#ifndef _SFC_VDPA_H
#define _SFC_VDPA_H

#include <stdint.h>
#include <sys/queue.h>

#include <bus_pci_driver.h>

#include "sfc_efx.h"
#include "sfc_efx_mcdi.h"
#include "sfc_vdpa_debug.h"
#include "sfc_vdpa_log.h"
#include "sfc_vdpa_ops.h"

#define SFC_VDPA_MAC_ADDR "mac"
#define SFC_VDPA_DEFAULT_MCDI_IOVA 0x200000000000

/* Broadcast, unicast and multicast destination MAC filters are supported */
#define SFC_MAX_SUPPORTED_FILTERS 3

/*
 * Get the function-local index of the associated VI from the
 * virtqueue number. Queue 0 is reserved for MCDI.
 */
#define SFC_VDPA_GET_VI_INDEX(vq_num) (((vq_num) / 2) + 1)
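/*
 * Worked example of the mapping above (a sketch only): each RX/TX
 * virtqueue pair shares one VI, so virtqueues 0 and 1 map to VI 1,
 * virtqueues 2 and 3 map to VI 2, and so on, while VI 0 (queue 0)
 * remains reserved for MCDI.
 */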

enum sfc_vdpa_filter_type {
	SFC_VDPA_BCAST_MAC_FILTER = 0,
	SFC_VDPA_UCAST_MAC_FILTER = 1,
	SFC_VDPA_MCAST_DST_FILTER = 2,
	SFC_VDPA_FILTER_NTYPE
};

typedef struct sfc_vdpa_filter_s {
	int filter_cnt;
	efx_filter_spec_t spec[SFC_MAX_SUPPORTED_FILTERS];
} sfc_vdpa_filter_t;

/* Adapter private data */
struct sfc_vdpa_adapter {
	TAILQ_ENTRY(sfc_vdpa_adapter) next;
	/*
	 * PMD setup and configuration are not thread safe. Since they are
	 * not performance sensitive, it is better to guarantee thread
	 * safety with a device-level lock. vDPA control operations which
	 * change the device state should acquire the lock.
	 */
	rte_spinlock_t lock;
	struct rte_pci_device *pdev;

	struct rte_kvargs *kvargs;

	efx_family_t family;
	efx_nic_t *nic;
	rte_spinlock_t nic_lock;

	efsys_bar_t mem_bar;

	struct sfc_efx_mcdi mcdi;
	size_t mcdi_buff_size;

	uint32_t max_queue_count;

	char log_prefix[SFC_VDPA_LOG_PREFIX_MAX];
	uint32_t logtype_main;

	sfc_vdpa_filter_t filters;

	int vfio_group_fd;
	int vfio_dev_fd;
	int vfio_container_fd;
	int iommu_group_num;
	struct sfc_vdpa_ops_data *ops_data;
};

uint32_t
sfc_vdpa_register_logtype(const struct rte_pci_addr *pci_addr,
			  const char *lt_prefix_str,
			  uint32_t ll_default);

struct sfc_vdpa_adapter *
sfc_vdpa_get_adapter_by_dev(struct rte_pci_device *pdev);
struct sfc_vdpa_ops_data *
sfc_vdpa_get_data_by_dev(struct rte_vdpa_device *vdpa_dev);
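/*
 * Illustrative lookup sketch for a vDPA ops callback (the callback
 * context and error handling below are assumptions, not driver code):
 *
 *	struct sfc_vdpa_ops_data *ops_data;
 *
 *	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
 *	if (ops_data == NULL)
 *		return -1;
 */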

int
sfc_vdpa_hw_init(struct sfc_vdpa_adapter *sva);
void
sfc_vdpa_hw_fini(struct sfc_vdpa_adapter *sva);

int
sfc_vdpa_mcdi_init(struct sfc_vdpa_adapter *sva);
void
sfc_vdpa_mcdi_fini(struct sfc_vdpa_adapter *sva);

int
sfc_vdpa_dma_alloc(struct sfc_vdpa_adapter *sva, const char *name,
		   size_t len, efsys_mem_t *esmp);

void
sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp);
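/*
 * Illustrative usage of the DMA helpers above (a sketch only; the
 * buffer name and length are made-up example values):
 *
 *	efsys_mem_t esmp;
 *
 *	if (sfc_vdpa_dma_alloc(sva, "example", 4096, &esmp) != 0)
 *		return -1;
 *	... program the device with the buffer address ...
 *	sfc_vdpa_dma_free(sva, &esmp);
 */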

int
sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *vdpa_data, bool do_map);

int
sfc_vdpa_filter_remove(struct sfc_vdpa_ops_data *ops_data);
int
sfc_vdpa_filter_config(struct sfc_vdpa_ops_data *ops_data);

static inline struct sfc_vdpa_adapter *
sfc_vdpa_adapter_by_dev_handle(void *dev_handle)
{
	return (struct sfc_vdpa_adapter *)dev_handle;
}

/*
 * Wrapper macros to acquire/release the adapter lock so that the
 * locking scheme can be removed or changed in one place.
 */
#define sfc_vdpa_adapter_lock_init(sva) rte_spinlock_init(&(sva)->lock)
#define sfc_vdpa_adapter_is_locked(sva) rte_spinlock_is_locked(&(sva)->lock)
#define sfc_vdpa_adapter_lock(sva) rte_spinlock_lock(&(sva)->lock)
#define sfc_vdpa_adapter_trylock(sva) rte_spinlock_trylock(&(sva)->lock)
#define sfc_vdpa_adapter_unlock(sva) rte_spinlock_unlock(&(sva)->lock)
#define sfc_vdpa_adapter_lock_fini(sva) RTE_SET_USED(sva)
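/*
 * Typical usage sketch for the lock wrappers (illustrative only;
 * sfc_vdpa_configure_example() is a hypothetical control operation,
 * not part of this driver):
 *
 *	sfc_vdpa_adapter_lock(sva);
 *	sfc_vdpa_configure_example(sva);
 *	sfc_vdpa_adapter_unlock(sva);
 */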

#endif /* _SFC_VDPA_H */