/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUX
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>

#include "virtio_pci.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h. We cannot
 * simply include that header here, as it does not exist on non-Linux
 * platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)

struct virtio_hw_internal crypto_virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32-bit,
	 * and only accepts a 32-bit page frame number.
	 * Check whether the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

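/* Split a 64-bit value into two 32-bit MMIO writes; the modern virtio
 * PCI common config exposes 64-bit fields as lo/hi register pairs.
 */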
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32,		     hi);
}

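/* Read the device-specific config space one byte at a time, retrying
 * until config_generation is unchanged across the read, so a concurrent
 * device-side update cannot produce a torn result.
 */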
static void
modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

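/* Write the device-specific config space one byte at a time; unlike
 * reads, writes are not guarded by the generation counter.
 */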
static void
modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

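/* The 64 feature bits are exposed through a 32-bit window selected via
 * device_feature_select: 0 selects bits 0-31, 1 selects bits 32-63.
 */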
static uint64_t
modern_get_features(struct virtio_crypto_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

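/* Same selector scheme as modern_get_features(), but writing the
 * driver-acknowledged feature bits through guest_feature_select.
 */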
static void
modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_crypto_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

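/* Writing status 0 resets the device; the follow-up status read flushes
 * the posted write so the reset is known to have reached the device.
 */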
static void
modern_reset(struct virtio_crypto_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_crypto_hw *hw)
{
	return rte_read8(hw->isr);
}

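/* Route config-change interrupts to an MSI-X vector. Reading the value
 * back lets the caller detect rejection: the device reports
 * VIRTIO_MSI_NO_VECTOR if it could not bind the vector.
 */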
static uint16_t
modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

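/* All queue_* fields in the common config are multiplexed through
 * queue_select: write the queue index first, then access that queue's
 * registers.
 */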
static uint16_t
modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
		uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

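/* Program one virtqueue: the descriptor table, available ring, and used
 * ring live in a single contiguous allocation, with the used ring
 * aligned up to VIRTIO_PCI_VRING_ALIGN. The notify address is the
 * per-queue notify_off scaled by notify_off_multiplier from notify_base.
 */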
static int
modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
				      &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
				       &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
				      &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t avail_addr: %" PRIx64, avail_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
		vq->notify_addr, notify_off);

	return 0;
}

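/* Disable the queue and clear its ring addresses in the device so it no
 * longer references the vring memory.
 */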
static void
modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			      &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			      &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			      &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

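/* Kick the device: for modern devices, notification is done by writing
 * the queue index to the queue's own notify address.
 */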
static void
modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
		struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

const struct virtio_pci_ops virtio_crypto_modern_ops = {
	.read_dev_cfg	= modern_read_dev_config,
	.write_dev_cfg	= modern_write_dev_config,
	.reset		= modern_reset,
	.get_status	= modern_get_status,
	.set_status	= modern_set_status,
	.get_features	= modern_get_features,
	.set_features	= modern_set_features,
	.get_isr	= modern_get_isr,
	.set_config_irq	= modern_set_config_irq,
	.set_queue_irq	= modern_set_queue_irq,
	.get_queue_num	= modern_get_queue_num,
	.setup_queue	= modern_setup_queue,
	.del_queue	= modern_del_queue,
	.notify_queue	= modern_notify_queue,
};

void
vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
		void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
		const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
		uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
{
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

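/* Status bits accumulate: OR the new bit into the current status,
 * except for a reset, where the zero must be written verbatim.
 */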
void
vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

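/* Translate a virtio PCI capability (bar/offset/length) into a mapped
 * virtual address, validating the bar index, arithmetic overflow, and
 * the bar's bounds.
 */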
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t  bar    = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		VIRTIO_CRYPTO_INIT_LOG_ERR(
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

#define PCI_MSIX_ENABLE 0x8000

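/* Walk the PCI capability list: record whether MSI-X is enabled, and
 * map the four modern virtio capability regions (common, notify,
 * device, ISR). All four must be present for a modern device.
 */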
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(dev)) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
		return -1;
	}

	while (pos) {
		ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			VIRTIO_CRYPTO_INIT_LOG_ERR(
				"failed to read pci cap at pos: %x", pos);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			/* Transitional devices also expose this capability,
			 * which is why we also check whether MSI-X is
			 * enabled. The 1st byte is the cap ID; the 2nd byte
			 * is the position of the next cap; the next two
			 * bytes are the message control flags.
			 */
			uint16_t flags = ((uint16_t *)&cap)[1];

			if (flags & PCI_MSIX_ENABLE)
				hw->use_msix = VIRTIO_MSIX_ENABLED;
			else
				hw->use_msix = VIRTIO_MSIX_DISABLED;
		}
		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			VIRTIO_CRYPTO_INIT_LOG_DBG(
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		VIRTIO_CRYPTO_INIT_LOG_DBG(
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				VIRTIO_CRYPTO_INIT_LOG_ERR(
					"failed to read notify_off_multiplier: ret %d", ret);
			else
				hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL    || hw->isr == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
		return -1;
	}

	VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");

	VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return 0 on success, i.e. the device exposes the modern (virtio 1.0)
 * PCI capabilities and the modern ops have been installed.
 * Return -1 if the device cannot be mapped with VFIO/UIO or its virtio
 * capabilities cannot be read; virtio crypto has no legacy fallback.
 */
int
vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	/*
	 * Try to read the virtio PCI capabilities, which exist only on
	 * modern PCI devices. If that fails, the device must be a legacy
	 * (pre-1.0) device.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
		crypto_virtio_hw_internal[hw->dev_id].vtpci_ops =
					&virtio_crypto_modern_ops;
		hw->modern = 1;
		return 0;
	}

	/*
	 * virtio crypto conforms to virtio 1.0 and does not support
	 * legacy mode.
	 */
	return -1;
}
466