/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <stdint.h>
#include <inttypes.h>

#ifdef RTE_EXEC_ENV_LINUX
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>

#include "virtio_pci.h"
#include "virtqueue.h"

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)

struct virtio_hw_internal crypto_virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The legacy virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts a 32 bit page frame number. With
	 * VIRTIO_PCI_QUEUE_ADDR_SHIFT being 12, the ring must end below
	 * 2^(12 + 32) bytes: check that the allocated physical memory
	 * does not exceed 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

/* Split a 64-bit MMIO write into two 32-bit writes, low word first. */
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

/*
 * Read the device-specific config space. The device bumps
 * config_generation whenever it changes the config, so retry until a
 * whole read completes within a single generation.
 */
static void
modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
		void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
		const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}
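/*
 * Illustrative usage sketch (not part of the upstream driver): reading
 * a single device-specific field through the generation-protected
 * helper above. The virtio_crypto_config layout and its max_dataqueues
 * member come from the virtio-crypto spec header; the "hw" pointer is
 * assumed to be fully initialized.
 *
 *	uint32_t max_dataqueues;
 *
 *	modern_read_dev_config(hw,
 *			offsetof(struct virtio_crypto_config, max_dataqueues),
 *			&max_dataqueues, sizeof(max_dataqueues));
 */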
static uint64_t
modern_get_features(struct virtio_crypto_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		&hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		&hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_crypto_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_crypto_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_crypto_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
		uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

static int
modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
						ring[vq->vq_nentries]),
				VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
		&hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
		&hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
		&hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
			notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t avail_addr: %" PRIx64, avail_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
		vq->notify_addr, notify_off);

	return 0;
}
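/*
 * Worked example of the split-ring layout computed in
 * modern_setup_queue(), assuming a hypothetical 256-entry queue and
 * the usual 4096-byte VIRTIO_PCI_VRING_ALIGN:
 *
 *	desc:  256 * sizeof(struct vring_desc) = 256 * 16 = 4096 bytes,
 *	       at ring offset 0
 *	avail: 4 + 256 * 2 = 516 bytes, at offset 4096
 *	used:  RTE_ALIGN_CEIL(4096 + 516, 4096) = offset 8192
 */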
static void
modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
		&hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
		&hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
		&hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
		struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

const struct virtio_pci_ops virtio_crypto_modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.reset = modern_reset,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.get_isr = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.set_queue_irq = modern_set_queue_irq,
	.get_queue_num = modern_get_queue_num,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
};

void
vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
		void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
		const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
		uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
{
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}
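/*
 * Illustrative sketch (not part of the upstream driver): since
 * vtpci_cryptodev_set_status() ORs each new bit into the current
 * device status, the virtio 1.0 handshake can be driven as successive
 * calls. "host_features" is a hypothetical feature mask obtained from
 * VTPCI_OPS(hw)->get_features().
 *
 *	vtpci_cryptodev_reset(hw);
 *	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
 *	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
 *	vtpci_cryptodev_negotiate_features(hw, host_features);
 *	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
 *	... queue setup via VTPCI_OPS(hw)->setup_queue() ...
 *	vtpci_cryptodev_reinit_complete(hw);	-- sets DRIVER_OK
 */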
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		VIRTIO_CRYPTO_INIT_LOG_ERR(
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	struct virtio_pci_cap cap;
	uint16_t flags;
	off_t pos;
	int ret;

	if (rte_pci_map_device(dev)) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
		return -1;
	}

	/*
	 * Transitional devices also expose an MSI-X capability, so do
	 * not stop at its presence: check whether MSI-X is actually
	 * enabled as well.
	 */
	pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_MSIX);
	if (pos > 0 && rte_pci_read_config(dev, &flags, sizeof(flags),
			pos + RTE_PCI_MSIX_FLAGS) == sizeof(flags)) {
		if (flags & RTE_PCI_MSIX_FLAGS_ENABLE)
			hw->use_msix = VIRTIO_MSIX_ENABLED;
		else
			hw->use_msix = VIRTIO_MSIX_DISABLED;
	} else {
		hw->use_msix = VIRTIO_MSIX_NONE;
	}

	pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_VNDR);
	while (pos > 0) {
		if (rte_pci_read_config(dev, &cap, sizeof(cap), pos) != sizeof(cap))
			break;

		VIRTIO_CRYPTO_INIT_LOG_DBG(
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			(unsigned int)pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				VIRTIO_CRYPTO_INIT_LOG_ERR(
					"failed to read notify_off_multiplier: ret %d", ret);
			else
				hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

		pos = rte_pci_find_next_capability(dev, RTE_PCI_CAP_ID_VNDR, pos);
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
			hw->dev_cfg == NULL || hw->isr == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
		return -1;
	}

	VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");

	VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping the device with VFIO/UIO;
 *   if the port map fails when the driver type is KDRV_NONE;
 *   if the device is marked as allowed but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	/*
	 * Try to read the virtio PCI capabilities, which exist only on
	 * modern PCI devices.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
		crypto_virtio_hw_internal[hw->dev_id].vtpci_ops =
					&virtio_crypto_modern_ops;
		hw->modern = 1;
		return 0;
	}

	/*
	 * virtio crypto conforms to virtio 1.0 and doesn't support
	 * legacy mode, so there is no legacy fallback.
	 */
	return -1;
}
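/*
 * Illustrative probe-time sketch (not part of the upstream driver):
 * vtpci_cryptodev_init() installs the modern ops into
 * crypto_virtio_hw_internal[hw->dev_id], so hw->dev_id must be set
 * before the call. "pci_dev" and "cryptodev_id" are hypothetical
 * values taken from the PCI probe callback.
 *
 *	hw->dev_id = cryptodev_id;
 *	if (vtpci_cryptodev_init(pci_dev, hw) != 0) {
 *		VIRTIO_CRYPTO_INIT_LOG_ERR("failed to detect a modern device");
 *		return -EINVAL;
 *	}
 */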