Searched refs:dev (Results 1 – 25 of 551) sorted by relevance

/dpdk/drivers/raw/ifpga/base/
opae_i2c.c
9 static int i2c_transfer(struct altera_i2c_dev *dev, in i2c_transfer() argument
15 ret = dev->xfer(dev, msg, num); in i2c_transfer()
26 int i2c_read(struct altera_i2c_dev *dev, int flags, unsigned int slave_addr, in i2c_read() argument
33 pthread_mutex_lock(dev->mutex); in i2c_read()
55 if (!dev->xfer) { in i2c_read()
60 ret = i2c_transfer(dev, msg, 2); in i2c_read()
63 pthread_mutex_unlock(dev->mutex); in i2c_read()
67 int i2c_write(struct altera_i2c_dev *dev, int flags, unsigned int slave_addr, in i2c_write() argument
75 pthread_mutex_lock(dev->mutex); in i2c_write()
77 if (!dev->xfer) { in i2c_write()
[all …]
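The i2c_read()/i2c_write() matches above share one pattern: take the device mutex, check that a bus backend is attached via dev->xfer, then dispatch the message array through i2c_transfer(). A minimal sketch of that pattern, using hypothetical type and function names (i2c_dev, i2c_reg_read) rather than the actual opae code:

    #include <errno.h>
    #include <pthread.h>

    /* Hypothetical message/device types standing in for the opae ones. */
    struct i2c_msg {
        unsigned int addr;
        int flags;          /* 0 = write, 1 = read */
        unsigned int len;
        unsigned char *buf;
    };

    struct i2c_dev {
        pthread_mutex_t mutex;
        int (*xfer)(struct i2c_dev *dev, struct i2c_msg *msg, int num);
    };

    /* Register read expressed as a write-address + read-data message pair. */
    static int i2c_reg_read(struct i2c_dev *dev, unsigned int slave,
                            unsigned char reg, unsigned char *buf, unsigned int len)
    {
        struct i2c_msg msg[2] = {
            { .addr = slave, .flags = 0, .len = 1,   .buf = &reg },
            { .addr = slave, .flags = 1, .len = len, .buf = buf  },
        };
        int ret;

        pthread_mutex_lock(&dev->mutex);
        if (!dev->xfer) {               /* no bus backend attached */
            pthread_mutex_unlock(&dev->mutex);
            return -ENODEV;
        }
        ret = dev->xfer(dev, msg, 2);   /* dispatch to the backend */
        pthread_mutex_unlock(&dev->mutex);
        return ret;
    }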
opae_eth_group.c
25 static int eth_group_get_select(struct eth_group_device *dev, in eth_group_get_select() argument
37 if (type == ETH_GROUP_PHY && index < dev->phy_num) in eth_group_get_select()
39 else if (type == ETH_GROUP_MAC && index < dev->mac_num) in eth_group_get_select()
49 int eth_group_write_reg(struct eth_group_device *dev, in eth_group_write_reg() argument
56 dev_debug(dev, "%s type %s index %u addr 0x%x\n", in eth_group_write_reg()
60 ret = eth_group_get_select(dev, type, index, &dev_select); in eth_group_write_reg()
73 opae_writeq(v, dev->base + ETH_GROUP_CTRL); in eth_group_write_reg()
78 int eth_group_read_reg(struct eth_group_device *dev, in eth_group_read_reg() argument
85 dev_debug(dev, "%s type %s index %u addr 0x%x\n", in eth_group_read_reg()
90 ret = eth_group_get_select(dev, type, index, &dev_select); in eth_group_read_reg()
[all …]
opae_spi.c
8 static int nios_spi_indirect_read(struct altera_spi_device *dev, u32 reg, in nios_spi_indirect_read() argument
16 opae_writeq(ctrl, dev->regs + NIOS_SPI_CTRL); in nios_spi_indirect_read()
18 stat = opae_readq(dev->regs + NIOS_SPI_STAT); in nios_spi_indirect_read()
20 stat = opae_readq(dev->regs + NIOS_SPI_STAT); in nios_spi_indirect_read()
27 static int nios_spi_indirect_write(struct altera_spi_device *dev, u32 reg, in nios_spi_indirect_write() argument
38 opae_writeq(ctrl, dev->regs + NIOS_SPI_CTRL); in nios_spi_indirect_write()
40 stat = opae_readq(dev->regs + NIOS_SPI_STAT); in nios_spi_indirect_write()
42 stat = opae_readq(dev->regs + NIOS_SPI_STAT); in nios_spi_indirect_write()
47 static int spi_indirect_write(struct altera_spi_device *dev, u32 reg, in spi_indirect_write() argument
52 opae_writeq(value & WRITE_DATA_MASK, dev->regs + SPI_WRITE); in spi_indirect_write()
[all …]
ifpga_sec_mgr.c
56 static int poll_timeout(struct intel_max10_device *dev, uint32_t offset, in poll_timeout() argument
63 ret = max10_sys_read(dev, offset, &val); in poll_timeout()
65 dev_err(dev, in poll_timeout()
72 dev_debug(dev, in poll_timeout()
83 dev_debug(dev, in poll_timeout()
95 static int n3000_secure_update_start(struct intel_max10_device *dev) in n3000_secure_update_start() argument
102 ret = max10_sys_read(dev, MAX10_DOORBELL, &doorbell); in n3000_secure_update_start()
104 dev_err(dev, in n3000_secure_update_start()
112 dev_debug(dev, "Current RSU progress is %s\n", in n3000_secure_update_start()
117 ret = max10_sys_update_bits(dev, MAX10_DOORBELL, in n3000_secure_update_start()
[all …]
/dpdk/lib/vhost/
iotlb.c
29 vhost_user_iotlb_remove_notify(struct virtio_net *dev, struct vhost_iotlb_entry *entry) in vhost_user_iotlb_remove_notify() argument
31 if (dev->backend_ops->iotlb_remove_notify == NULL) in vhost_user_iotlb_remove_notify()
34 dev->backend_ops->iotlb_remove_notify(entry->uaddr, entry->uoffset, entry->size); in vhost_user_iotlb_remove_notify()
57 vhost_user_iotlb_set_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node) in vhost_user_iotlb_set_dump() argument
62 mem_set_dump(dev, (void *)(uintptr_t)start, node->size, true, RTE_BIT64(node->page_shift)); in vhost_user_iotlb_set_dump()
66 vhost_user_iotlb_clear_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node, in vhost_user_iotlb_clear_dump() argument
83 mem_set_dump(dev, (void *)(uintptr_t)start, end - start, false, in vhost_user_iotlb_clear_dump()
88 vhost_user_iotlb_pool_get(struct virtio_net *dev) in vhost_user_iotlb_pool_get() argument
92 rte_spinlock_lock(&dev->iotlb_free_lock); in vhost_user_iotlb_pool_get()
93 node = SLIST_FIRST(&dev->iotlb_free_list); in vhost_user_iotlb_pool_get()
[all …]
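vhost_user_iotlb_pool_get() above pops a free entry from a per-device SLIST under a spinlock. The same free-list pool pattern in isolation, with illustrative names rather than the vhost internals (rte_spinlock and the BSD SLIST macros are real APIs):

    #include <sys/queue.h>
    #include <rte_spinlock.h>

    struct pool_entry {
        SLIST_ENTRY(pool_entry) next;
        /* payload fields would live here */
    };

    struct entry_pool {
        rte_spinlock_t lock;                   /* init with rte_spinlock_init() */
        SLIST_HEAD(, pool_entry) free_list;
    };

    static struct pool_entry *
    pool_get(struct entry_pool *p)
    {
        struct pool_entry *e;

        rte_spinlock_lock(&p->lock);
        e = SLIST_FIRST(&p->free_list);
        if (e != NULL)
            SLIST_REMOVE_HEAD(&p->free_list, next);
        rte_spinlock_unlock(&p->lock);
        return e;                              /* NULL when the pool is empty */
    }

    static void
    pool_put(struct entry_pool *p, struct pool_entry *e)
    {
        rte_spinlock_lock(&p->lock);
        SLIST_INSERT_HEAD(&p->free_list, e, next);
        rte_spinlock_unlock(&p->lock);
    }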
vhost_user.c
121 #define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \ argument
124 vq_assert_lock(dev, vq); \
127 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
128 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
151 validate_msg_fds(struct virtio_net *dev, struct vhu_msg_context *ctx, int expected_fds) in validate_msg_fds() argument
156 VHOST_CONFIG_LOG(dev->ifname, ERR, in validate_msg_fds()
177 async_dma_map(struct virtio_net *dev, bool do_map) in async_dma_map() argument
184 for (i = 0; i < dev->nr_guest_pages; i++) { in async_dma_map()
185 page = &dev->guest_pages[i]; in async_dma_map()
207 VHOST_CONFIG_LOG(dev in async_dma_map()
230 free_mem_region(struct virtio_net * dev) free_mem_region() argument
251 vhost_backend_cleanup(struct virtio_net * dev) vhost_backend_cleanup() argument
305 vhost_user_notify_queue_state(struct virtio_net * dev,struct vhost_virtqueue * vq,int enable) vhost_user_notify_queue_state() argument
338 struct virtio_net *dev = *pdev; vhost_user_reset_owner() local
355 struct virtio_net *dev = *pdev; vhost_user_get_features() local
375 struct virtio_net *dev = *pdev; vhost_user_get_queue_num() local
395 struct virtio_net *dev = *pdev; vhost_user_set_features() local
485 struct virtio_net *dev = *pdev; vhost_user_set_vring_num() local
563 struct virtio_net *dev; numa_realloc() local
742 qva_to_vva(struct virtio_net * dev,uint64_t qva,uint64_t * len) qva_to_vva() argument
777 ring_addr_to_vva(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t ra,uint64_t * size) ring_addr_to_vva() argument
795 log_addr_to_gpa(struct virtio_net * dev,struct vhost_virtqueue * vq) log_addr_to_gpa() argument
826 mem_set_dump(struct virtio_net * dev,void * ptr,size_t size,bool enable,uint64_t pagesz) mem_set_dump() argument
844 struct virtio_net *dev; translate_ring_addresses() local
978 struct virtio_net *dev = *pdev; vhost_user_set_vring_addr() local
1024 struct virtio_net *dev = *pdev; vhost_user_set_vring_base() local
1055 add_one_guest_page(struct virtio_net * dev,uint64_t guest_phys_addr,uint64_t host_iova,uint64_t host_user_addr,uint64_t size) add_one_guest_page() argument
1095 add_guest_pages(struct virtio_net * dev,struct rte_vhost_mem_region * reg,uint64_t page_size) add_guest_pages() argument
1141 dump_guest_pages(struct virtio_net * dev) dump_guest_pages() argument
1159 dump_guest_pages(dev) global() argument
1188 vhost_user_postcopy_region_register(struct virtio_net * dev,struct rte_vhost_mem_region * reg) vhost_user_postcopy_region_register() argument
1232 vhost_user_postcopy_register(struct virtio_net * dev,int main_fd,struct vhu_msg_context * ctx) vhost_user_postcopy_register() argument
1288 vhost_user_mmap_region(struct virtio_net * dev,struct rte_vhost_mem_region * region,uint64_t mmap_offset) vhost_user_mmap_region() argument
1389 struct virtio_net *dev = *pdev; vhost_user_set_mem_table() local
1546 vq_is_ready(struct virtio_net * dev,struct vhost_virtqueue * vq) vq_is_ready() argument
1569 virtio_is_ready(struct virtio_net * dev) virtio_is_ready() argument
1618 inflight_mem_alloc(struct virtio_net * dev,const char * name,size_t size,int * fd) inflight_mem_alloc() argument
1685 struct virtio_net *dev = *pdev; vhost_user_get_inflight_fd() local
1785 struct virtio_net *dev = *pdev; vhost_user_set_inflight_fd() local
1893 struct virtio_net *dev = *pdev; vhost_user_set_vring_call() local
1930 struct virtio_net *dev = *pdev; vhost_user_set_vring_err() local
1957 vhost_check_queue_inflights_split(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_check_queue_inflights_split() argument
2044 vhost_check_queue_inflights_packed(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_check_queue_inflights_packed() argument
2144 struct virtio_net *dev = *pdev; vhost_user_set_vring_kick() local
2212 struct virtio_net *dev = *pdev; vhost_user_get_vring_base() local
2290 struct virtio_net *dev = *pdev; vhost_user_set_vring_enable() local
2320 struct virtio_net *dev = *pdev; vhost_user_get_protocol_features() local
2337 struct virtio_net *dev = *pdev; vhost_user_set_protocol_features() local
2361 struct virtio_net *dev = *pdev; vhost_user_set_log_base() local
2458 struct virtio_net *dev = *pdev; vhost_user_set_log_fd() local
2482 struct virtio_net *dev = *pdev; vhost_user_send_rarp() local
2511 struct virtio_net *dev = *pdev; vhost_user_net_set_mtu() local
2532 struct virtio_net *dev = *pdev; vhost_user_set_req_fd() local
2617 is_vring_iotlb(struct virtio_net * dev,struct vhost_virtqueue * vq,struct vhost_iotlb_msg * imsg) is_vring_iotlb() argument
2632 struct virtio_net *dev = *pdev; vhost_user_get_config() local
2664 struct virtio_net *dev = *pdev; vhost_user_set_config() local
2706 struct virtio_net *dev = *pdev; vhost_user_iotlb_msg() local
2766 struct virtio_net *dev = *pdev; vhost_user_set_postcopy_advise() local
2805 struct virtio_net *dev = *pdev; vhost_user_set_postcopy_listen() local
2822 struct virtio_net *dev = *pdev; vhost_user_postcopy_end() local
2842 struct virtio_net *dev = *pdev; vhost_user_get_status() local
2856 struct virtio_net *dev = *pdev; vhost_user_set_status() local
2914 read_vhost_message(struct virtio_net * dev,int sockfd,struct vhu_msg_context * ctx) read_vhost_message() argument
2954 send_vhost_message(struct virtio_net * dev,int sockfd,struct vhu_msg_context * ctx) send_vhost_message() argument
2964 send_vhost_reply(struct virtio_net * dev,int sockfd,struct vhu_msg_context * ctx) send_vhost_reply() argument
2978 send_vhost_backend_message(struct virtio_net * dev,struct vhu_msg_context * ctx) send_vhost_backend_message() argument
2984 send_vhost_backend_message_process_reply(struct virtio_net * dev,struct vhu_msg_context * ctx) send_vhost_backend_message_process_reply() argument
3025 vhost_user_check_and_alloc_queue_pair(struct virtio_net * dev,struct vhu_msg_context * ctx) vhost_user_check_and_alloc_queue_pair() argument
3064 vhost_user_lock_all_queue_pairs(struct virtio_net * dev) vhost_user_lock_all_queue_pairs() argument
3082 vhost_user_unlock_all_queue_pairs(struct virtio_net * dev) vhost_user_unlock_all_queue_pairs() argument
3102 struct virtio_net *dev; vhost_user_msg_handler() local
3329 vhost_user_iotlb_miss(struct virtio_net * dev,uint64_t iova,uint8_t perm) vhost_user_iotlb_miss() argument
3366 struct virtio_net *dev; rte_vhost_backend_config_change() local
3385 vhost_user_backend_set_vring_host_notifier(struct virtio_net * dev,int index,int fd,uint64_t offset,uint64_t size) vhost_user_backend_set_vring_host_notifier() argument
3420 struct virtio_net *dev; rte_vhost_host_notifier_ctrl() local
[all...]
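vhost_user.c implements the backend side of the vhost-user protocol: read_vhost_message()/send_vhost_reply() move messages over the socket and vhost_user_msg_handler() dispatches to the per-request handlers listed above. An application never calls these directly; it registers a socket and callbacks instead. A minimal sketch, assuming a recent DPDK where the callback struct is named rte_vhost_device_ops (older releases name it vhost_device_ops):

    #include <rte_vhost.h>

    static int
    new_device(int vid)
    {
        /* A real backend would set up its datapath state for this vid here. */
        (void)vid;
        return 0;
    }

    static void
    destroy_device(int vid)
    {
        /* Tear down whatever new_device() created. */
        (void)vid;
    }

    static const struct rte_vhost_device_ops vhost_ops = {
        .new_device = new_device,
        .destroy_device = destroy_device,
    };

    static int
    start_vhost_backend(const char *sock_path)
    {
        if (rte_vhost_driver_register(sock_path, 0) != 0)
            return -1;
        if (rte_vhost_driver_callback_register(sock_path, &vhost_ops) != 0)
            return -1;
        /* Starts the session thread that ends up in vhost_user_msg_handler(). */
        return rte_vhost_driver_start(sock_path);
    }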
vdpa.c
37 struct rte_vdpa_device *dev, *ret = NULL; in __vdpa_find_device_by_name() local
42 TAILQ_FOREACH(dev, vdpa_device_list, next) { in __vdpa_find_device_by_name()
43 if (!strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN)) { in __vdpa_find_device_by_name()
44 ret = dev; in __vdpa_find_device_by_name()
55 struct rte_vdpa_device *dev; in rte_vdpa_find_device_by_name() local
58 dev = __vdpa_find_device_by_name(name); in rte_vdpa_find_device_by_name()
61 return dev; in rte_vdpa_find_device_by_name()
77 struct rte_vdpa_device *dev; in rte_vdpa_register_device() local
95 dev = __vdpa_find_device_by_name(rte_dev->name); in rte_vdpa_register_device()
96 if (dev) { in rte_vdpa_register_device()
129 rte_vdpa_unregister_device(struct rte_vdpa_device * dev) rte_vdpa_unregister_device() argument
152 struct virtio_net *dev = get_device(vid); rte_vdpa_relay_vring_used() local
260 rte_vdpa_get_queue_num(struct rte_vdpa_device * dev,uint32_t * queue_num) rte_vdpa_get_queue_num() argument
269 rte_vdpa_get_features(struct rte_vdpa_device * dev,uint64_t * features) rte_vdpa_get_features() argument
278 rte_vdpa_get_protocol_features(struct rte_vdpa_device * dev,uint64_t * features) rte_vdpa_get_protocol_features() argument
288 rte_vdpa_get_stats_names(struct rte_vdpa_device * dev,struct rte_vdpa_stat_name * stats_names,unsigned int size) rte_vdpa_get_stats_names() argument
302 rte_vdpa_get_stats(struct rte_vdpa_device * dev,uint16_t qid,struct rte_vdpa_stat * stats,unsigned int n) rte_vdpa_get_stats() argument
315 rte_vdpa_reset_stats(struct rte_vdpa_device * dev,uint16_t qid) rte_vdpa_reset_stats() argument
327 vdpa_dev_match(struct rte_vdpa_device * dev,const struct rte_device * rte_dev) vdpa_dev_match() argument
344 struct rte_vdpa_device *dev; vdpa_find_device() local
[all...]
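The vdpa.c matches are mostly the public lookup and query helpers. Assuming a recent DPDK release, a caller might use them roughly as below; "name" is whatever the vDPA driver registered, and error handling is trimmed:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_vdpa.h>

    static void
    show_vdpa_device(const char *name)
    {
        struct rte_vdpa_device *vdev;
        uint32_t queue_num = 0;
        uint64_t features = 0;

        vdev = rte_vdpa_find_device_by_name(name);
        if (vdev == NULL) {
            printf("vDPA device %s not found\n", name);
            return;
        }
        if (rte_vdpa_get_queue_num(vdev, &queue_num) == 0)
            printf("%s: %u queues\n", name, queue_num);
        if (rte_vdpa_get_features(vdev, &features) == 0)
            printf("%s: features 0x%" PRIx64 "\n", name, features);
    }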
vduse.c
28 #define VDUSE_CTRL_PATH "/dev/vduse/control"
47 vduse_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq) in vduse_inject_irq() argument
49 return ioctl(dev->vduse_dev_fd, VDUSE_VQ_INJECT_IRQ, &vq->index); in vduse_inject_irq()
59 vduse_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm __rte_unused) in vduse_iotlb_miss() argument
70 ret = ioctl(dev->vduse_dev_fd, VDUSE_IOTLB_GET_FD, &entry); in vduse_iotlb_miss()
72 VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get IOTLB entry for 0x%" PRIx64, in vduse_iotlb_miss()
79 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "New IOTLB entry:"); in vduse_iotlb_miss()
80 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tIOVA: %" PRIx64 " - %" PRIx64, in vduse_iotlb_miss()
82 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\toffset: %" PRIx64, (uint64_t)entry.offset); in vduse_iotlb_miss()
83 VHOST_CONFIG_LOG(dev in vduse_iotlb_miss()
122 struct virtio_net *dev = arg; vduse_control_queue_event() local
139 vduse_vring_setup(struct virtio_net * dev,unsigned int index) vduse_vring_setup() argument
232 vduse_vring_cleanup(struct virtio_net * dev,unsigned int index) vduse_vring_cleanup() argument
270 vduse_device_start(struct virtio_net * dev) vduse_device_start() argument
322 vduse_device_stop(struct virtio_net * dev) vduse_device_stop() argument
341 struct virtio_net *dev = arg; vduse_events_handler() local
415 struct virtio_net *dev; vduse_device_create() local
574 struct virtio_net *dev; vduse_device_destroy() local
[all...]
vhost.c
64 vhost_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm) in vhost_iotlb_miss() argument
66 return dev->backend_ops->iotlb_miss(dev, iova, perm); in vhost_iotlb_miss()
70 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, in __vhost_iova_to_vva() argument
80 vva = vhost_user_iotlb_cache_find(dev, iova, &tmp_size, perm); in __vhost_iova_to_vva()
82 if (dev->flags & VIRTIO_DEV_STATS_ENABLED) in __vhost_iova_to_vva()
87 if (dev->flags & VIRTIO_DEV_STATS_ENABLED) in __vhost_iova_to_vva()
92 if (!vhost_user_iotlb_pending_miss(dev, iova, perm)) { in __vhost_iova_to_vva()
102 vhost_user_iotlb_pending_insert(dev, iova, perm); in __vhost_iova_to_vva()
103 if (vhost_iotlb_miss(dev, iov in __vhost_iova_to_vva()
149 __vhost_log_write(struct virtio_net * dev,uint64_t addr,uint64_t len) __vhost_log_write() argument
170 __vhost_log_write_iova(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t iova,uint64_t len) __vhost_log_write_iova() argument
190 __vhost_log_cache_sync(struct virtio_net * dev,struct vhost_virtqueue * vq) __vhost_log_cache_sync() argument
228 vhost_log_cache_page(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t page) vhost_log_cache_page() argument
269 __vhost_log_cache_write(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t addr,uint64_t len) __vhost_log_cache_write() argument
288 __vhost_log_cache_write_iova(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t iova,uint64_t len) __vhost_log_cache_write_iova() argument
308 vhost_alloc_copy_ind_table(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t desc_addr,uint64_t desc_len) vhost_alloc_copy_ind_table() argument
350 cleanup_vq_inflight(struct virtio_net * dev,struct vhost_virtqueue * vq) cleanup_vq_inflight() argument
379 cleanup_device(struct virtio_net * dev,int destroy) cleanup_device() argument
411 free_vq(struct virtio_net * dev,struct vhost_virtqueue * vq) free_vq() argument
430 free_device(struct virtio_net * dev) free_device() argument
441 log_translate(struct virtio_net * dev,struct vhost_virtqueue * vq) log_translate() argument
461 translate_log_addr(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t log_addr) translate_log_addr() argument
490 vring_translate_split(struct virtio_net * dev,struct vhost_virtqueue * vq) vring_translate_split() argument
529 vring_translate_packed(struct virtio_net * dev,struct vhost_virtqueue * vq) vring_translate_packed() argument
562 vring_translate(struct virtio_net * dev,struct vhost_virtqueue * vq) vring_translate() argument
622 reset_vring_queue(struct virtio_net * dev,struct vhost_virtqueue * vq) reset_vring_queue() argument
632 alloc_vring_queue(struct virtio_net * dev,uint32_t vring_idx) alloc_vring_queue() argument
670 reset_device(struct virtio_net * dev) reset_device() argument
697 struct virtio_net *dev; vhost_new_device() local
748 vhost_destroy_device_notify(struct virtio_net * dev) vhost_destroy_device_notify() argument
768 struct virtio_net *dev = get_device(vid); vhost_destroy_device() local
784 struct virtio_net *dev = get_device(vid); vhost_attach_vdpa_device() local
795 struct virtio_net *dev; vhost_set_ifname() local
813 struct virtio_net *dev = get_device(vid); vhost_setup_virtio_net() local
843 struct virtio_net *dev = get_device(vid); vhost_enable_extbuf() local
854 struct virtio_net *dev = get_device(vid); vhost_enable_linearbuf() local
865 struct virtio_net *dev = get_device(vid); rte_vhost_get_mtu() local
885 struct virtio_net *dev = get_device(vid); rte_vhost_get_numa_node() local
910 struct virtio_net *dev = get_device(vid); rte_vhost_get_vring_num() local
921 struct virtio_net *dev = get_device(vid); rte_vhost_get_ifname() local
937 struct virtio_net *dev; rte_vhost_get_negotiated_features() local
951 struct virtio_net *dev; rte_vhost_get_negotiated_protocol_features() local
964 struct virtio_net *dev; rte_vhost_get_mem_table() local
988 struct virtio_net *dev; rte_vhost_get_vhost_vring() local
1024 struct virtio_net *dev; rte_vhost_get_vhost_ring_inflight() local
1060 struct virtio_net *dev; rte_vhost_set_inflight_desc_split() local
1097 struct virtio_net *dev; rte_vhost_set_inflight_desc_packed() local
1163 struct virtio_net *dev; rte_vhost_clr_inflight_desc_split() local
1205 struct virtio_net *dev; rte_vhost_clr_inflight_desc_packed() local
1250 struct virtio_net *dev; rte_vhost_set_last_inflight_io_split() local
1286 struct virtio_net *dev; rte_vhost_set_last_inflight_io_packed() local
1334 struct virtio_net *dev; rte_vhost_vring_call() local
1370 struct virtio_net *dev; rte_vhost_vring_call_nonblock() local
1407 struct virtio_net *dev; rte_vhost_avail_entries() local
1438 vhost_enable_notify_split(struct virtio_net * dev,struct vhost_virtqueue * vq,int enable) vhost_enable_notify_split() argument
1457 vhost_enable_notify_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,int enable) vhost_enable_notify_packed() argument
1484 vhost_enable_guest_notification(struct virtio_net * dev,struct vhost_virtqueue * vq,int enable) vhost_enable_guest_notification() argument
1503 struct virtio_net *dev = get_device(vid); rte_vhost_enable_guest_notification() local
1536 struct virtio_net *dev = get_device(vid); rte_vhost_notify_guest() local
1572 struct virtio_net *dev = get_device(vid); rte_vhost_log_write() local
1584 struct virtio_net *dev; rte_vhost_log_used_vring() local
1603 struct virtio_net *dev; rte_vhost_rx_queue_count() local
1640 struct virtio_net *dev = get_device(vid); rte_vhost_get_vdpa_device() local
1652 struct virtio_net *dev = get_device(vid); rte_vhost_get_log_base() local
1668 struct virtio_net *dev = get_device(vid); rte_vhost_get_vring_base() local
1698 struct virtio_net *dev = get_device(vid); rte_vhost_set_vring_base() local
1731 struct virtio_net *dev = get_device(vid); rte_vhost_get_vring_base_from_inflight() local
1761 struct virtio_net *dev = get_device(vid); rte_vhost_extern_callback_register() local
1772 async_channel_register(struct virtio_net * dev,struct vhost_virtqueue * vq) async_channel_register() argument
1848 struct virtio_net *dev = get_device(vid); rte_vhost_async_channel_register() local
1881 struct virtio_net *dev = get_device(vid); rte_vhost_async_channel_register_thread_unsafe() local
1903 struct virtio_net *dev = get_device(vid); rte_vhost_async_channel_unregister() local
1949 struct virtio_net *dev = get_device(vid); rte_vhost_async_channel_unregister_thread_unsafe() local
2059 struct virtio_net *dev = get_device(vid); rte_vhost_async_get_inflight() local
2097 struct virtio_net *dev = get_device(vid); rte_vhost_async_get_inflight_thread_unsafe() local
2125 struct virtio_net *dev = get_device(vid); rte_vhost_get_monitor_addr() local
2175 struct virtio_net *dev = get_device(vid); rte_vhost_vring_stats_get_names() local
2202 struct virtio_net *dev = get_device(vid); rte_vhost_vring_stats_get() local
2246 struct virtio_net *dev = get_device(vid); rte_vhost_vring_stats_reset() local
[all...]
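Most of the vhost.c hits are the rte_vhost_* accessors that take a vid and resolve it to the internal virtio_net through get_device(). From the application side that looks roughly like the sketch below, called with a valid vid (for example inside the new_device callback from the earlier sketch):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_vhost.h>

    static void
    dump_vhost_device(int vid)
    {
        char ifname[64];
        uint16_t mtu = 0;
        uint64_t features = 0;

        if (rte_vhost_get_ifname(vid, ifname, sizeof(ifname)) != 0)
            return;
        if (rte_vhost_get_negotiated_features(vid, &features) == 0)
            printf("%s: negotiated features 0x%" PRIx64 "\n", ifname, features);
        /* Only succeeds once VIRTIO_NET_F_MTU was negotiated. */
        if (rte_vhost_get_mtu(vid, &mtu) == 0)
            printf("%s: MTU %u\n", ifname, mtu);
        printf("%s: %u vrings\n", ifname, rte_vhost_get_vring_num(vid));
    }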
/dpdk/lib/mldev/
rte_mldev.c
35 struct rte_ml_dev *dev; in rte_ml_dev_pmd_get_named_dev() local
42 dev = rte_ml_dev_pmd_get_dev(dev_id); in rte_ml_dev_pmd_get_named_dev()
43 if ((dev->attached == ML_DEV_ATTACHED) && (strcmp(dev->data->name, name) == 0)) in rte_ml_dev_pmd_get_named_dev()
44 return dev; in rte_ml_dev_pmd_get_named_dev()
55 struct rte_ml_dev *dev; in rte_ml_dev_pmd_allocate() local
71 dev = rte_ml_dev_pmd_get_dev(dev_id); in rte_ml_dev_pmd_allocate()
72 if (dev->attached == ML_DEV_DETACHED) in rte_ml_dev_pmd_allocate()
81 if (dev->data == NULL) { in rte_ml_dev_pmd_allocate()
101 dev in rte_ml_dev_pmd_allocate()
124 rte_ml_dev_pmd_release(struct rte_ml_dev * dev) rte_ml_dev_pmd_release() argument
202 struct rte_ml_dev *dev = NULL; rte_ml_dev_is_valid_dev() local
217 struct rte_ml_dev *dev; rte_ml_dev_socket_id() local
232 struct rte_ml_dev *dev; rte_ml_dev_info_get() local
256 struct rte_ml_dev *dev; rte_ml_dev_configure() local
294 struct rte_ml_dev *dev; rte_ml_dev_close() local
317 struct rte_ml_dev *dev; rte_ml_dev_start() local
344 struct rte_ml_dev *dev; rte_ml_dev_stop() local
372 struct rte_ml_dev *dev; rte_ml_dev_queue_pair_setup() local
404 struct rte_ml_dev *dev; rte_ml_dev_stats_get() local
427 struct rte_ml_dev *dev; rte_ml_dev_stats_reset() local
445 struct rte_ml_dev *dev; rte_ml_dev_xstats_names_get() local
462 struct rte_ml_dev *dev; rte_ml_dev_xstats_by_name_get() local
490 struct rte_ml_dev *dev; rte_ml_dev_xstats_get() local
518 struct rte_ml_dev *dev; rte_ml_dev_xstats_reset() local
535 struct rte_ml_dev *dev; rte_ml_dev_dump() local
557 struct rte_ml_dev *dev; rte_ml_dev_selftest() local
574 struct rte_ml_dev *dev; rte_ml_model_load() local
601 struct rte_ml_dev *dev; rte_ml_model_unload() local
618 struct rte_ml_dev *dev; rte_ml_model_start() local
635 struct rte_ml_dev *dev; rte_ml_model_stop() local
652 struct rte_ml_dev *dev; rte_ml_model_info_get() local
675 struct rte_ml_dev *dev; rte_ml_model_params_update() local
698 struct rte_ml_dev *dev; rte_ml_io_quantize() local
726 struct rte_ml_dev *dev; rte_ml_io_dequantize() local
810 struct rte_ml_dev *dev; rte_ml_enqueue_burst() local
846 struct rte_ml_dev *dev; rte_ml_dequeue_burst() local
882 struct rte_ml_dev *dev; rte_ml_op_error_get() local
[all...]
/dpdk/lib/compressdev/
rte_compressdev.c
86 struct rte_compressdev *dev; in rte_compressdev_pmd_get_named_dev() local
93 dev = &compressdev_globals.devs[i]; in rte_compressdev_pmd_get_named_dev()
95 if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) && in rte_compressdev_pmd_get_named_dev()
96 (strcmp(dev->data->name, name) == 0)) in rte_compressdev_pmd_get_named_dev()
97 return dev; in rte_compressdev_pmd_get_named_dev()
106 struct rte_compressdev *dev = NULL; in rte_compressdev_is_valid_dev() local
111 dev = rte_compressdev_get_dev(dev_id); in rte_compressdev_is_valid_dev()
112 if (dev->attached != RTE_COMPRESSDEV_ATTACHED) in rte_compressdev_is_valid_dev()
171 struct rte_compressdev *dev; in rte_compressdev_socket_id() local
176 dev = rte_compressdev_get_dev(dev_id); in rte_compressdev_socket_id()
[all …]
/dpdk/lib/regexdev/
rte_regexdev.c
97 struct rte_regexdev *dev; in rte_regexdev_register() local
102 dev = regexdev_allocated(name); in rte_regexdev_register()
103 if (dev != NULL) { in rte_regexdev_register()
118 dev = &rte_regex_devices[dev_id]; in rte_regexdev_register()
119 dev->state = RTE_REGEXDEV_REGISTERED; in rte_regexdev_register()
120 if (dev->data == NULL) in rte_regexdev_register()
121 dev->data = &rte_regexdev_shared_data->data[dev_id]; in rte_regexdev_register()
123 memset(dev->data, 1, sizeof(*dev->data)); in rte_regexdev_register()
124 dev->data->dev_id = dev_id; in rte_regexdev_register()
125 strlcpy(dev->data->dev_name, name, sizeof(dev->data->dev_name)); in rte_regexdev_register()
[all …]
/dpdk/lib/rawdev/
rte_rawdev.c
58 struct rte_rawdev *dev; in rte_rawdev_socket_id() local
61 dev = &rte_rawdevs[dev_id]; in rte_rawdev_socket_id()
63 return dev->socket_id; in rte_rawdev_socket_id()
98 struct rte_rawdev *dev; in rte_rawdev_configure() local
105 dev = &rte_rawdevs[dev_id]; in rte_rawdev_configure()
107 if (*dev->dev_ops->dev_configure == NULL) in rte_rawdev_configure()
110 if (dev->started) { in rte_rawdev_configure()
117 diag = (*dev->dev_ops->dev_configure)(dev, dev_conf->dev_private, in rte_rawdev_configure()
122 dev->attached = 1; in rte_rawdev_configure()
133 struct rte_rawdev *dev; in rte_rawdev_queue_conf_get() local
[all …]
/dpdk/drivers/raw/ifpga/
afu_pmd_core.c
26 #define afu_rawdev_trylock(dev) rte_spinlock_trylock(&dev->sd->lock) argument
27 #define afu_rawdev_unlock(dev) rte_spinlock_unlock(&dev->sd->lock) argument
32 struct afu_rawdev *dev = NULL; in afu_rawdev_configure() local
37 dev = afu_rawdev_get_priv(rawdev); in afu_rawdev_configure()
38 if (!dev) in afu_rawdev_configure()
41 if (dev->ops && dev->ops->config) in afu_rawdev_configure()
42 ret = (*dev->ops->config)(dev, config, config_size); in afu_rawdev_configure()
49 struct afu_rawdev *dev = NULL; in afu_rawdev_start() local
54 dev = afu_rawdev_get_priv(rawdev); in afu_rawdev_start()
55 if (!dev) in afu_rawdev_start()
[all …]
/dpdk/drivers/bus/pci/
pci_common.c
95 pci_common_set(struct rte_pci_device *dev) in pci_common_set() argument
100 rte_pci_device_name(&dev->addr, in pci_common_set()
101 dev->name, sizeof(dev->name)); in pci_common_set()
102 devargs = pci_devargs_lookup(&dev->addr); in pci_common_set()
103 dev->device.devargs = devargs; in pci_common_set()
112 dev->device.name = dev->device.devargs->name; in pci_common_set()
115 dev->device.name = dev->name; in pci_common_set()
117 if (dev->bus_info != NULL || in pci_common_set()
118 asprintf(&dev->bus_info, "vendor_id=%"PRIx16", device_id=%"PRIx16, in pci_common_set()
119 dev->id.vendor_id, dev->id.device_id) != -1) in pci_common_set()
[all …]
/dpdk/lib/bbdev/
rte_bbdev.c
42 #define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \ argument
43 if (dev == NULL) { \
50 #define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \ argument
51 if (dev->dev_ops == NULL) { \
68 #define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \ argument
69 if (queue_id >= dev->data->num_queues) { \
71 queue_id, dev->data->dev_id); \
279 struct rte_bbdev *dev = get_dev(i); in rte_bbdev_get_named_dev() local
280 if (dev && (strncmp(dev in rte_bbdev_get_named_dev()
319 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_setup_queues() local
407 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_intr_enable() local
441 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_queue_configure() local
555 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_start() local
586 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_stop() local
609 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_close() local
652 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_queue_start() local
682 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_queue_stop() local
711 get_stats_from_queues(struct rte_bbdev * dev,struct rte_bbdev_stats * stats) get_stats_from_queues() argument
729 reset_stats_in_queues(struct rte_bbdev * dev) reset_stats_in_queues() argument
744 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_stats_get() local
767 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_stats_reset() local
784 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_info_get() local
813 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_queue_info_get() local
947 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_callback_register() local
993 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_callback_unregister() local
1033 rte_bbdev_pmd_callback_process(struct rte_bbdev * dev,enum rte_bbdev_event_type event,void * ret_param) rte_bbdev_pmd_callback_process() argument
1077 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_queue_intr_enable() local
1088 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_queue_intr_disable() local
1101 struct rte_bbdev *dev = get_dev(dev_id); rte_bbdev_queue_intr_ctl() local
[all...]
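The rte_bbdev.c hits are mostly the dev_id validation macros and the get_dev() based wrappers. A small query helper built on the public API might look like the sketch below; the rte_bbdev_info field names are quoted from memory and may differ between releases:

    #include <stdio.h>
    #include <rte_bbdev.h>

    /* Print basic properties of one bbdev, if the dev_id is valid. */
    static void
    print_bbdev_info(uint16_t dev_id)
    {
        struct rte_bbdev_info info;

        if (rte_bbdev_info_get(dev_id, &info) != 0) {
            printf("bbdev %u: no such device\n", dev_id);
            return;
        }
        printf("bbdev %u: %s, socket %d, %u queues\n",
               dev_id, info.dev_name, info.socket_id, info.num_queues);
    }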
/dpdk/drivers/crypto/ionic/
ionic_crypto_main.c
132 iocpt_get_abs_stats(const struct iocpt_dev *dev, in iocpt_get_abs_stats() argument
140 for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) { in iocpt_get_abs_stats()
141 struct rte_cryptodev_stats *q_stats = &dev->cryptoqs[i]->stats; in iocpt_get_abs_stats()
151 iocpt_get_stats(const struct iocpt_dev *dev, struct rte_cryptodev_stats *stats) in iocpt_get_stats() argument
154 iocpt_get_abs_stats(dev, stats); in iocpt_get_stats()
157 stats->enqueued_count -= dev->stats_base.enqueued_count; in iocpt_get_stats()
158 stats->dequeued_count -= dev->stats_base.dequeued_count; in iocpt_get_stats()
159 stats->enqueue_err_count -= dev->stats_base.enqueue_err_count; in iocpt_get_stats()
160 stats->dequeue_err_count -= dev->stats_base.dequeue_err_count; in iocpt_get_stats()
164 iocpt_reset_stats(struct iocpt_dev *dev) in iocpt_reset_stats() argument
[all …]
/dpdk/lib/eventdev/
rte_eventdev.c
65 (rte_event_devices[i].dev ? (strncmp( in rte_event_dev_get_dev_id()
66 rte_event_devices[i].dev->driver->name, name, in rte_event_dev_get_dev_id()
80 struct rte_eventdev *dev; in rte_event_dev_socket_id() local
83 dev = &rte_eventdevs[dev_id]; in rte_event_dev_socket_id()
85 rte_eventdev_trace_socket_id(dev_id, dev, dev->data->socket_id); in rte_event_dev_socket_id()
87 return dev->data->socket_id; in rte_event_dev_socket_id()
93 struct rte_eventdev *dev; in rte_event_dev_info_get() local
96 dev = &rte_eventdevs[dev_id]; in rte_event_dev_info_get()
103 if (*dev->dev_ops->dev_infos_get == NULL) in rte_event_dev_info_get()
105 (*dev->dev_ops->dev_infos_get)(dev, dev_info); in rte_event_dev_info_get()
[all …]
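The rte_eventdev.c hits are the dev_id based wrappers that resolve &rte_eventdevs[dev_id] and call through dev_ops. From an application the same information is reached through the public API, roughly as in this sketch (assuming the usual uint8_t eventdev IDs):

    #include <stdio.h>
    #include <rte_eventdev.h>

    /* Dump basic capabilities of every event device present. */
    static void
    dump_eventdevs(void)
    {
        uint8_t nb_devs = rte_event_dev_count();
        uint8_t dev_id;

        for (dev_id = 0; dev_id < nb_devs; dev_id++) {
            struct rte_event_dev_info info;

            if (rte_event_dev_info_get(dev_id, &info) != 0)
                continue;
            printf("eventdev %u: %s, socket %d, max queues %u, max ports %u\n",
                   dev_id, info.driver_name,
                   rte_event_dev_socket_id(dev_id),
                   (unsigned int)info.max_event_queues,
                   (unsigned int)info.max_event_ports);
        }
    }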
/dpdk/drivers/net/mlx5/windows/
mlx5_ethdev_os.c
31 mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]) in mlx5_get_mac() argument
36 if (!dev) { in mlx5_get_mac()
40 priv = dev->data->dev_private; in mlx5_get_mac()
59 mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE]) in mlx5_get_ifname() argument
64 if (!dev) { in mlx5_get_ifname()
68 priv = dev->data->dev_private; in mlx5_get_ifname()
86 mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) in mlx5_get_mtu() argument
93 if (!dev) { in mlx5_get_mtu()
97 priv = dev->data->dev_private; in mlx5_get_mtu()
122 mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) in mlx5_set_mtu() argument
[all …]
/dpdk/lib/ethdev/
ethdev_driver.h
212 typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
215 typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
218 typedef int (*eth_dev_stop_t)(struct rte_eth_dev *dev);
221 typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
224 typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
227 typedef int (*eth_dev_close_t)(struct rte_eth_dev *dev);
230 typedef int (*eth_dev_reset_t)(struct rte_eth_dev *dev);
233 typedef int (*eth_is_removed_t)(struct rte_eth_dev *dev);
239 * @param dev
258 typedef int (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
1674 rte_eth_linkstatus_set(struct rte_eth_dev * dev,const struct rte_eth_link * new_link) rte_eth_linkstatus_set() argument
1696 rte_eth_linkstatus_get(const struct rte_eth_dev * dev,struct rte_eth_link * link) rte_eth_linkstatus_get() argument
1822 rte_eth_dev_is_repr(const struct rte_eth_dev * dev) rte_eth_dev_is_repr() argument
[all...]
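ethdev_driver.h defines the per-driver callback types (eth_dev_configure_t, eth_dev_start_t, ...) that a PMD plugs into struct eth_dev_ops. A minimal, hypothetical skeleton of that wiring, not a working PMD, might look like:

    #include <rte_common.h>
    #include <ethdev_driver.h>

    /* Hypothetical no-op callbacks matching the typedefs above. */
    static int
    dummy_dev_configure(struct rte_eth_dev *dev)
    {
        RTE_SET_USED(dev);
        return 0;
    }

    static int
    dummy_dev_start(struct rte_eth_dev *dev)
    {
        dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
        return 0;
    }

    static int
    dummy_dev_stop(struct rte_eth_dev *dev)
    {
        dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
        return 0;
    }

    static const struct eth_dev_ops dummy_dev_ops = {
        .dev_configure = dummy_dev_configure,
        .dev_start     = dummy_dev_start,
        .dev_stop      = dummy_dev_stop,
    };

    /* At probe time the PMD would assign: eth_dev->dev_ops = &dummy_dev_ops; */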
rte_ethdev.c
763 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) in eth_dev_validate_rx_queue() argument
767 if (rx_queue_id >= dev->data->nb_rx_queues) { in eth_dev_validate_rx_queue()
768 port_id = dev->data->port_id; in eth_dev_validate_rx_queue()
775 if (dev->data->rx_queues[rx_queue_id] == NULL) { in eth_dev_validate_rx_queue()
776 port_id = dev->data->port_id; in eth_dev_validate_rx_queue()
787 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) in eth_dev_validate_tx_queue() argument
791 if (tx_queue_id >= dev->data->nb_tx_queues) { in eth_dev_validate_tx_queue()
792 port_id = dev->data->port_id; in eth_dev_validate_tx_queue()
799 if (dev->data->tx_queues[tx_queue_id] == NULL) { in eth_dev_validate_tx_queue()
800 port_id = dev in eth_dev_validate_tx_queue()
813 struct rte_eth_dev *dev; rte_eth_rx_queue_is_valid() local
824 struct rte_eth_dev *dev; rte_eth_tx_queue_is_valid() local
835 struct rte_eth_dev *dev; rte_eth_dev_rx_queue_start() local
879 struct rte_eth_dev *dev; rte_eth_dev_rx_queue_stop() local
916 struct rte_eth_dev *dev; rte_eth_dev_tx_queue_start() local
960 struct rte_eth_dev *dev; rte_eth_dev_tx_queue_stop() local
1284 struct rte_eth_dev *dev; rte_eth_dev_configure() local
1613 eth_dev_mac_restore(struct rte_eth_dev * dev,struct rte_eth_dev_info * dev_info) eth_dev_mac_restore() argument
1651 eth_dev_config_restore(struct rte_eth_dev * dev,struct rte_eth_dev_info * dev_info,uint16_t port_id) eth_dev_config_restore() argument
1719 struct rte_eth_dev *dev; rte_eth_dev_start() local
1789 struct rte_eth_dev *dev; rte_eth_dev_stop() local
1819 struct rte_eth_dev *dev; rte_eth_dev_set_link_up() local
1837 struct rte_eth_dev *dev; rte_eth_dev_set_link_down() local
1855 struct rte_eth_dev *dev; rte_eth_dev_close() local
1889 struct rte_eth_dev *dev; rte_eth_dev_reset() local
1914 struct rte_eth_dev *dev; rte_eth_dev_is_removed() local
2147 struct rte_eth_dev *dev; rte_eth_rx_queue_setup() local
2361 struct rte_eth_dev *dev; rte_eth_rx_hairpin_queue_setup() local
2466 struct rte_eth_dev *dev; rte_eth_tx_queue_setup() local
2569 struct rte_eth_dev *dev; rte_eth_tx_hairpin_queue_setup() local
2667 struct rte_eth_dev *dev; rte_eth_hairpin_bind() local
2694 struct rte_eth_dev *dev; rte_eth_hairpin_unbind() local
2722 struct rte_eth_dev *dev; rte_eth_hairpin_get_peer_ports() local
2820 struct rte_eth_dev *dev; rte_eth_tx_done_cleanup() local
2842 struct rte_eth_dev *dev; rte_eth_promiscuous_enable() local
2868 struct rte_eth_dev *dev; rte_eth_promiscuous_disable() local
2896 struct rte_eth_dev *dev; rte_eth_promiscuous_get() local
2909 struct rte_eth_dev *dev; rte_eth_allmulticast_enable() local
2934 struct rte_eth_dev *dev; rte_eth_allmulticast_disable() local
2961 struct rte_eth_dev *dev; rte_eth_allmulticast_get() local
2974 struct rte_eth_dev *dev; rte_eth_link_get() local
3002 struct rte_eth_dev *dev; rte_eth_link_get_nowait() local
3129 struct rte_eth_dev *dev; rte_eth_stats_get() local
3156 struct rte_eth_dev *dev; rte_eth_stats_reset() local
3176 eth_dev_get_xstats_basic_count(struct rte_eth_dev * dev) eth_dev_get_xstats_basic_count() argument
3196 struct rte_eth_dev *dev; eth_dev_get_xstats_count() local
3268 eth_basic_stats_get_names(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names) eth_basic_stats_get_names() argument
3320 struct rte_eth_dev *dev; rte_eth_xstats_get_names_by_id() local
3426 struct rte_eth_dev *dev; rte_eth_xstats_get_names() local
3466 struct rte_eth_dev *dev; eth_basic_stats_get() local
3527 struct rte_eth_dev *dev; rte_eth_xstats_get_by_id() local
3618 struct rte_eth_dev *dev; rte_eth_xstats_get() local
3668 struct rte_eth_dev *dev; rte_eth_xstats_reset() local
3690 struct rte_eth_dev *dev; eth_dev_set_queue_stats_mapping() local
3744 struct rte_eth_dev *dev; rte_eth_dev_fw_version_get() local
3770 struct rte_eth_dev *dev; rte_eth_dev_info_get() local
3834 struct rte_eth_dev *dev; rte_eth_dev_conf_get() local
3859 struct rte_eth_dev *dev; rte_eth_dev_get_supported_ptypes() local
3909 struct rte_eth_dev *dev; rte_eth_dev_set_ptypes() local
3997 struct rte_eth_dev *dev; rte_eth_macaddrs_get() local
4022 struct rte_eth_dev *dev; rte_eth_macaddr_get() local
4044 struct rte_eth_dev *dev; rte_eth_dev_get_mtu() local
4067 struct rte_eth_dev *dev; rte_eth_dev_set_mtu() local
4111 struct rte_eth_dev *dev; rte_eth_dev_vlan_filter() local
4159 struct rte_eth_dev *dev; rte_eth_dev_set_vlan_strip_on_queue() local
4183 struct rte_eth_dev *dev; rte_eth_dev_set_vlan_ether_type() local
4203 struct rte_eth_dev *dev; rte_eth_dev_set_vlan_offload() local
4298 struct rte_eth_dev *dev; rte_eth_dev_get_vlan_offload() local
4326 struct rte_eth_dev *dev; rte_eth_dev_set_vlan_pvid() local
4344 struct rte_eth_dev *dev; rte_eth_dev_flow_ctrl_get() local
4370 struct rte_eth_dev *dev; rte_eth_dev_flow_ctrl_set() local
4401 struct rte_eth_dev *dev; rte_eth_dev_priority_flow_ctrl_set() local
4484 struct rte_eth_dev *dev; rte_eth_dev_priority_flow_ctrl_queue_info_get() local
4513 struct rte_eth_dev *dev; rte_eth_dev_priority_flow_ctrl_queue_configure() local
4632 struct rte_eth_dev *dev; rte_eth_dev_rss_reta_update() local
4684 struct rte_eth_dev *dev; rte_eth_dev_rss_reta_query() local
4716 struct rte_eth_dev *dev; rte_eth_dev_rss_hash_update() local
4784 struct rte_eth_dev *dev; rte_eth_dev_rss_hash_conf_get() local
4854 struct rte_eth_dev *dev; rte_eth_dev_udp_tunnel_port_add() local
4886 struct rte_eth_dev *dev; rte_eth_dev_udp_tunnel_port_delete() local
4917 struct rte_eth_dev *dev; rte_eth_led_on() local
4935 struct rte_eth_dev *dev; rte_eth_led_off() local
4955 struct rte_eth_dev *dev; rte_eth_fec_get_capability() local
4980 struct rte_eth_dev *dev; rte_eth_fec_get() local
5005 struct rte_eth_dev *dev; rte_eth_fec_set() local
5033 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; eth_dev_get_mac_addr_index() local
5055 struct rte_eth_dev *dev; rte_eth_dev_mac_addr_add() local
5120 struct rte_eth_dev *dev; rte_eth_dev_mac_addr_remove() local
5162 struct rte_eth_dev *dev; rte_eth_dev_default_mac_addr_set() local
5213 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; eth_dev_get_hash_mac_addr_index() local
5238 struct rte_eth_dev *dev; rte_eth_dev_uc_hash_table_set() local
5300 struct rte_eth_dev *dev; rte_eth_dev_uc_all_hash_table_set() local
5318 struct rte_eth_dev *dev; rte_eth_set_queue_rate_limit() local
5359 struct rte_eth_dev *dev; rte_eth_rx_avail_thresh_set() local
5391 struct rte_eth_dev *dev; rte_eth_rx_avail_thresh_query() local
5433 struct rte_eth_dev *dev; rte_eth_dev_callback_register() local
5503 struct rte_eth_dev *dev; rte_eth_dev_callback_unregister() local
5566 struct rte_eth_dev *dev; rte_eth_dev_rx_intr_ctl() local
5605 struct rte_eth_dev *dev; rte_eth_dev_rx_intr_ctl_q_get_fd() local
5644 struct rte_eth_dev *dev; rte_eth_dev_rx_intr_ctl_q() local
5686 struct rte_eth_dev *dev; rte_eth_dev_rx_intr_enable() local
5709 struct rte_eth_dev *dev; rte_eth_dev_rx_intr_disable() local
5737 struct rte_eth_dev *dev; rte_eth_add_rx_callback() local
5838 struct rte_eth_dev *dev; rte_eth_add_tx_callback() local
5904 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; rte_eth_remove_rx_callback() local
5940 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; rte_eth_remove_tx_callback() local
5967 struct rte_eth_dev *dev; rte_eth_rx_queue_info_get() local
6015 struct rte_eth_dev *dev; rte_eth_tx_queue_info_get() local
6063 struct rte_eth_dev *dev; rte_eth_recycle_rx_queue_info_get() local
6085 struct rte_eth_dev *dev; rte_eth_rx_burst_mode_get() local
6118 struct rte_eth_dev *dev; rte_eth_tx_burst_mode_get() local
6151 struct rte_eth_dev *dev; rte_eth_get_monitor_addr() local
6184 struct rte_eth_dev *dev; rte_eth_dev_set_mc_addr_list() local
6204 struct rte_eth_dev *dev; rte_eth_timesync_enable() local
6222 struct rte_eth_dev *dev; rte_eth_timesync_disable() local
6241 struct rte_eth_dev *dev; rte_eth_timesync_read_rx_timestamp() local
6270 struct rte_eth_dev *dev; rte_eth_timesync_read_tx_timestamp() local
6298 struct rte_eth_dev *dev; rte_eth_timesync_adjust_time() local
6316 struct rte_eth_dev *dev; rte_eth_timesync_read_time() local
6342 struct rte_eth_dev *dev; rte_eth_timesync_write_time() local
6368 struct rte_eth_dev *dev; rte_eth_read_clock() local
6392 struct rte_eth_dev *dev; rte_eth_dev_get_reg_info() local
6417 struct rte_eth_dev *dev; rte_eth_dev_get_eeprom_length() local
6435 struct rte_eth_dev *dev; rte_eth_dev_get_eeprom() local
6460 struct rte_eth_dev *dev; rte_eth_dev_set_eeprom() local
6486 struct rte_eth_dev *dev; rte_eth_dev_get_module_info() local
6512 struct rte_eth_dev *dev; rte_eth_dev_get_module_eeprom() local
6552 struct rte_eth_dev *dev; rte_eth_dev_get_dcb_info() local
6618 struct rte_eth_dev *dev; rte_eth_dev_hairpin_capability_get() local
6644 struct rte_eth_dev *dev; rte_eth_dev_pool_ops_supported() local
6671 struct rte_eth_dev *dev; rte_eth_representor_info_get() local
6689 struct rte_eth_dev *dev; rte_eth_rx_metadata_negotiate() local
6725 struct rte_eth_dev *dev; rte_eth_ip_reassembly_capability_get() local
6760 struct rte_eth_dev *dev; rte_eth_ip_reassembly_conf_get() local
6793 struct rte_eth_dev *dev; rte_eth_ip_reassembly_conf_set() local
6832 struct rte_eth_dev *dev; rte_eth_dev_priv_dump() local
6851 struct rte_eth_dev *dev; rte_eth_rx_descriptor_dump() local
6877 struct rte_eth_dev *dev; rte_eth_tx_descriptor_dump() local
6904 struct rte_eth_dev *dev; rte_eth_buffer_split_get_supported_hdr_ptypes() local
6941 struct rte_eth_dev *dev; rte_eth_dev_count_aggr_ports() local
6959 struct rte_eth_dev *dev; rte_eth_dev_map_aggr_tx_affinity() local
[all...]
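The rte_ethdev.c matches are the public, port_id based entry points that look up &rte_eth_devices[port_id], validate the queue or port, and call into the driver. A typical application-side bring-up built on those entry points looks like the sketch below; mb_pool is assumed to be an existing mbuf mempool and error handling is minimal:

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    #define NB_RXD 1024
    #define NB_TXD 1024

    /* Configure one Rx and one Tx queue on a port and start it. */
    static int
    port_init(uint16_t port_id, struct rte_mempool *mb_pool)
    {
        struct rte_eth_conf port_conf;
        int ret;

        memset(&port_conf, 0, sizeof(port_conf));

        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret != 0)
            return ret;

        ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD,
                                     rte_eth_dev_socket_id(port_id), NULL, mb_pool);
        if (ret < 0)
            return ret;

        ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD,
                                     rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
            return ret;

        ret = rte_eth_dev_start(port_id);
        if (ret < 0)
            return ret;

        /* Mirrors the promiscuous-mode paths that appear in the listing. */
        return rte_eth_promiscuous_enable(port_id);
    }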
/dpdk/drivers/net/cnxk/
cnxk_ptp.c
10 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); in cnxk_nix_read_clock() local
19 *clock = (rte_get_tsc_cycles() + dev->clk_delta) * dev->clk_freq_mult; in cnxk_nix_read_clock()
29 cnxk_nix_tsc_convert(struct cnxk_eth_dev *dev) in cnxk_nix_tsc_convert() argument
32 struct roc_nix *nix = &dev->nix; in cnxk_nix_tsc_convert()
55 dev->clk_freq_mult = in cnxk_nix_tsc_convert()
69 dev->clk_delta = ((uint64_t)(ticks / dev->clk_freq_mult) - tsc); in cnxk_nix_tsc_convert()
78 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); in cnxk_nix_timesync_read_time() local
79 struct roc_nix *nix = &dev->nix; in cnxk_nix_timesync_read_time()
87 ns = rte_timecounter_update(&dev->systime_tc, clock); in cnxk_nix_timesync_read_time()
96 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); in cnxk_nix_timesync_write_time() local
[all …]
/dpdk/drivers/net/mlx5/
mlx5_rxmode.c
26 mlx5_promiscuous_enable(struct rte_eth_dev *dev) in mlx5_promiscuous_enable() argument
28 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_promiscuous_enable()
31 dev->data->promiscuous = 1; in mlx5_promiscuous_enable()
36 dev->data->port_id); in mlx5_promiscuous_enable()
40 ret = mlx5_os_set_promisc(dev, 1); in mlx5_promiscuous_enable()
44 ret = mlx5_traffic_restart(dev); in mlx5_promiscuous_enable()
47 dev->data->port_id, strerror(rte_errno)); in mlx5_promiscuous_enable()
66 mlx5_promiscuous_disable(struct rte_eth_dev *dev) in mlx5_promiscuous_disable() argument
68 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_promiscuous_disable()
71 dev->data->promiscuous = 0; in mlx5_promiscuous_disable()
[all …]
/dpdk/drivers/net/atlantic/
rte_pmd_atlantic.c
15 struct rte_eth_dev *dev; in rte_pmd_atl_macsec_enable() local
19 dev = &rte_eth_devices[port]; in rte_pmd_atl_macsec_enable()
21 if (!is_atlantic_supported(dev)) in rte_pmd_atl_macsec_enable()
24 return atl_macsec_enable(dev, encr, repl_prot); in rte_pmd_atl_macsec_enable()
30 struct rte_eth_dev *dev; in rte_pmd_atl_macsec_disable() local
34 dev = &rte_eth_devices[port]; in rte_pmd_atl_macsec_disable()
36 if (!is_atlantic_supported(dev)) in rte_pmd_atl_macsec_disable()
39 return atl_macsec_disable(dev); in rte_pmd_atl_macsec_disable()
45 struct rte_eth_dev *dev; in rte_pmd_atl_macsec_config_txsc() local
49 dev = &rte_eth_devices[port]; in rte_pmd_atl_macsec_config_txsc()
[all …]
/dpdk/drivers/net/virtio/
virtio_pci.c
22 #define VIRTIO_PCI_CONFIG(dev) \ argument
23 (((dev)->msix_status == VIRTIO_MSIX_ENABLED) ? 24 : 20)
28 vtpci_msix_detect(struct rte_pci_device *dev) in vtpci_msix_detect() argument
33 pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_MSIX); in vtpci_msix_detect()
34 if (pos > 0 && rte_pci_read_config(dev, &flags, sizeof(flags), in vtpci_msix_detect()
60 struct virtio_pci_dev *dev = virtio_pci_get_dev(hw); in legacy_read_dev_config() local
68 VIRTIO_PCI_CONFIG(dev) + offset); in legacy_read_dev_config()
73 VIRTIO_PCI_CONFIG(dev) + offset); in legacy_read_dev_config()
78 VIRTIO_PCI_CONFIG(dev) + offset); in legacy_read_dev_config()
87 VIRTIO_PCI_CONFIG(dev) + offset); in legacy_read_dev_config()
[all …]
