/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUX
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>

#include "virtio_pci.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h; however, we
 * cannot simply include that header here, as it does not exist on
 * non-Linux platforms.
 */
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
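/*
 * The legacy virtio header occupies the first 20 bytes of the device's
 * I/O space; enabling MSI-X inserts two 16-bit vector registers, pushing
 * the per-driver config out to offset 24.
 */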

struct virtio_hw_internal crypto_virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The virtio PCI device's VIRTIO_PCI_QUEUE_PFN register is 32 bits
	 * wide and accepts only a 32-bit page frame number. With a page
	 * shift of VIRTIO_PCI_QUEUE_ADDR_SHIFT (12, i.e. 4 KiB pages), the
	 * highest addressable byte is 2^(12 + 32) - 1, so reject rings
	 * whose allocated physical memory extends beyond 16 TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

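/*
 * The modern common config exposes 64-bit queue addresses as pairs of
 * 32-bit registers; write the low half first, then the high half.
 */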
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32,		     hi);
}

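/*
 * Device config fields wider than one byte cannot be read atomically
 * over MMIO. Per the virtio 1.0 spec, the driver re-reads the range
 * until config_generation is unchanged, guaranteeing a consistent
 * snapshot even if the device updates the config mid-read.
 */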
static void
modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

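/*
 * Feature bits are exposed through a 32-bit window: write the word index
 * (0 for bits 0-31, 1 for bits 32-63) to device_feature_select, then
 * read device_feature. guest_feature_select/guest_feature work the same
 * way for the bits the driver accepts.
 */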
static uint64_t
modern_get_features(struct virtio_crypto_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_crypto_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_crypto_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_crypto_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
		uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

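/*
 * Lay out the split virtqueue inside the driver-allocated ring memory
 * and program the addresses into the common config. As a worked example,
 * with vq_nentries == 128 and a 4 KiB-aligned vq_ring_mem:
 *   desc:  128 * 16 B = 2048 B, at offset 0;
 *   avail: 4 B header + 128 * 2 B ring = 260 B, at offset 2048;
 *   used:  rounded up to the next VIRTIO_PCI_VRING_ALIGN (4096)
 *          boundary, i.e. offset 4096.
 */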
static int
modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
				      &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
				       &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
				      &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t avail_addr: %" PRIx64, avail_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
		vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
				  &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
				  &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
				  &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

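/*
 * Kick the device: a modern device is notified by writing the queue
 * index to the queue's notify address, which modern_setup_queue()
 * derived from queue_notify_off and notify_off_multiplier.
 */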
static void
modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
		struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

const struct virtio_pci_ops virtio_crypto_modern_ops = {
	.read_dev_cfg	= modern_read_dev_config,
	.write_dev_cfg	= modern_write_dev_config,
	.reset		= modern_reset,
	.get_status	= modern_get_status,
	.set_status	= modern_set_status,
	.get_features	= modern_get_features,
	.set_features	= modern_set_features,
	.get_isr	= modern_get_isr,
	.set_config_irq	= modern_set_config_irq,
	.set_queue_irq  = modern_set_queue_irq,
	.get_queue_num	= modern_get_queue_num,
	.setup_queue	= modern_setup_queue,
	.del_queue	= modern_del_queue,
	.notify_queue	= modern_notify_queue,
};

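/*
 * The vtpci_cryptodev_*() wrappers below dispatch through the per-device
 * ops table (VTPCI_OPS), which vtpci_cryptodev_init() points at
 * virtio_crypto_modern_ops via crypto_virtio_hw_internal[hw->dev_id].
 */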
void
vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
		void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
		const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}
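/*
 * A minimal usage sketch (the field name assumes the virtio_crypto_config
 * layout from the virtio-crypto spec; adjust to the struct actually in
 * use):
 *
 *	uint32_t max_dataqueues;
 *
 *	vtpci_read_cryptodev_config(hw,
 *		offsetof(struct virtio_crypto_config, max_dataqueues),
 *		&max_dataqueues, sizeof(max_dataqueues));
 */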

uint64_t
vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
		uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
{
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

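/*
 * Device status bits accumulate: except for a full reset (status 0), OR
 * the new bits into whatever the device already reports, following the
 * spec's ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK sequence.
 */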
void
vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

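/*
 * Translate a vendor capability's (bar, offset, length) triple into a
 * mapped virtual address, rejecting out-of-range BARs, 32-bit
 * offset + length wrap-around, ranges that overflow the BAR, and
 * unmapped BARs.
 */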
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t  bar    = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		VIRTIO_CRYPTO_INIT_LOG_ERR(
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

#define PCI_MSIX_ENABLE 0x8000

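/*
 * Scan the PCI capability list: first probe the MSI-X capability to
 * learn whether vectors are enabled, then walk the vendor-specific
 * (virtio) capabilities and record where the common, notify, device,
 * and ISR config structures live within the BARs.
 */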
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	struct virtio_pci_cap cap;
	uint16_t flags;
	off_t pos;
	int ret;

	if (rte_pci_map_device(dev)) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
		return -1;
	}

	/*
	 * Transitional devices also expose the MSI-X capability, so
	 * besides looking for the capability we check whether MSI-X is
	 * actually enabled in its message control word.
	 */
	pos = rte_pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && rte_pci_read_config(dev, &flags, sizeof(flags),
			pos + 2) == sizeof(flags)) {
		if (flags & PCI_MSIX_ENABLE)
			hw->use_msix = VIRTIO_MSIX_ENABLED;
		else
			hw->use_msix = VIRTIO_MSIX_DISABLED;
	} else {
		hw->use_msix = VIRTIO_MSIX_NONE;
	}

	pos = rte_pci_find_capability(dev, PCI_CAP_ID_VNDR);
	while (pos > 0) {
		if (rte_pci_read_config(dev, &cap, sizeof(cap), pos) != sizeof(cap))
			break;
		VIRTIO_CRYPTO_INIT_LOG_DBG(
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			(unsigned int)pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				VIRTIO_CRYPTO_INIT_LOG_ERR(
					"failed to read notify_off_multiplier: ret %d", ret);
			else
				hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

		pos = rte_pci_find_next_capability(dev, PCI_CAP_ID_VNDR, pos);
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL    || hw->isr == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
		return -1;
	}

	VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");

	VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return 0 on success (modern virtio PCI capabilities were found and the
 * ops table was installed).
 * Return -1 if the device cannot be mapped or is not a modern
 * (virtio 1.0) device; virtio crypto has no legacy mode to fall back to.
 */
int
vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	/*
	 * Try to read the virtio PCI capabilities, which exist only on
	 * modern PCI devices. Failure here means the device is not a
	 * modern device, and there is no legacy path to fall back to.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
		crypto_virtio_hw_internal[hw->dev_id].vtpci_ops =
					&virtio_crypto_modern_ops;
		hw->modern = 1;
		return 0;
	}

	/*
	 * virtio crypto conforms to virtio 1.0 and doesn't support
	 * legacy mode
	 */
	return -1;
}