/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2021 NXP
 *
 */

#ifndef _DPAA2_ETHDEV_H
#define _DPAA2_ETHDEV_H

#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>

#include <dpaa2_hw_pvt.h>
#include "dpaa2_tm.h"

#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>

#define DPAA2_MIN_RX_BUF_SIZE 512
/* Maximum Rx frame length supported by the WRIOP block */
#define DPAA2_MAX_RX_PKT_LEN  10240 /* WRIOP support */
#define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2

#define MAX_TCS			DPNI_MAX_TC
#define MAX_RX_QUEUES		128
#define MAX_TX_QUEUES		16
#define MAX_DPNI		8

/* Default number of Rx descriptors when none is requested */
#define DPAA2_RX_DEFAULT_NBDESC 512

/* Maximum frame length for a standard Ethernet MTU (header + CRC + VLAN tag) */
#define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
			   RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
			   VLAN_TAG_SIZE)

/* Default TC to be used for congestion, distribution etc. configuration. */
#define DPAA2_DEF_TC		0

/* Threshold for a Tx queue to *Enter* Congestion state.
 */
#define CONG_ENTER_TX_THRESHOLD   512

/* Threshold for a queue to *Exit* Congestion state.
 */
#define CONG_EXIT_TX_THRESHOLD    480

#define CONG_RETRY_COUNT 18000

/* RX queue tail drop threshold
 * currently considering 64 KB packets
 */
#define CONG_THRESHOLD_RX_BYTES_Q  (64 * 1024)
#define CONG_RX_OAL	128

/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256

/* Enable TX Congestion control support
 * default is disable
 */
#define DPAA2_TX_CGR_OFF	0x01

/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF	0x04
/* Tx confirmation enabled */
#define DPAA2_TX_CONF_ENABLE	0x08

/* All RSS hash types this PMD can offload to the hardware distribution */
#define DPAA2_RSS_OFFLOAD_ALL ( \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IP | \
	ETH_RSS_UDP | \
	ETH_RSS_TCP | \
	ETH_RSS_SCTP | \
	ETH_RSS_MPLS)

/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER		0x0060
#define DPAA2_PKT_TYPE_IPV4		0x0000
#define DPAA2_PKT_TYPE_IPV6		0x0020
#define DPAA2_PKT_TYPE_IPV4_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP \
			(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP \
			(0x000f | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
#define DPAA2_PKT_TYPE_VLAN_1		0x0160
#define DPAA2_PKT_TYPE_VLAN_2		0x0260

/* Enable timestamp in mbuf (one flag per DPNI device) */
extern bool dpaa2_enable_ts[];
extern uint64_t dpaa2_timestamp_rx_dynflag;
extern int dpaa2_timestamp_dynfield_offset;

/* Flags noting that a QoS/FS table needs to be (re)programmed in hardware */
#define DPAA2_QOS_TABLE_RECONFIGURE	1
#define DPAA2_FS_TABLE_RECONFIGURE	2
/* Flags noting that a QoS/FS table key contains an IP-address extract */
#define DPAA2_QOS_TABLE_IPADDR_EXTRACT	4
#define DPAA2_FS_TABLE_IPADDR_EXTRACT	8

#define DPAA2_FLOW_MAX_KEY_SIZE		16

/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;

extern const struct rte_tm_ops dpaa2_tm_ops;

extern bool dpaa2_enable_err_queue;

#define IP_ADDRESS_OFFSET_INVALID (-1)

/* Per-extract key layout (offset/size per DPKG extract) used when building
 * distribution and flow-steering keys.
 */
struct dpaa2_key_info {
	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];
	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];
	/* Special for IP address. */
	int ipv4_src_offset;
	int ipv4_dst_offset;
	int ipv6_src_offset;
	int ipv6_dst_offset;
	uint8_t key_total_size;
};

/* DPKG profile plus the derived key layout it produces */
struct dpaa2_key_extract {
	struct dpkg_profile_cfg dpkg;
	struct dpaa2_key_info key_info;
};

/* QoS extract plus one flow-steering extract per traffic class */
struct extract_s {
	struct dpaa2_key_extract qos_key_extract;
	struct dpaa2_key_extract tc_key_extract[MAX_TCS];
	uint64_t qos_extract_param;
	uint64_t tc_extract_param[MAX_TCS];
};

/* Per-device private data for a DPAA2 ethdev (one per DPNI object) */
struct dpaa2_dev_priv {
	void *hw;
	int32_t hw_id;
	int32_t qdid;
	uint16_t token;
	uint8_t nb_tx_queues;
	uint8_t nb_rx_queues;
	uint32_t options;
	void *rx_vq[MAX_RX_QUEUES];
	void *tx_vq[MAX_TX_QUEUES];
	struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
	void *tx_conf_vq[MAX_TX_QUEUES];
	void *rx_err_vq;
	uint8_t flags; /*dpaa2 config flags */
	uint8_t max_mac_filters;
	uint8_t max_vlan_filters;
	uint8_t num_rx_tc;
	uint16_t qos_entries;
	uint16_t fs_entries;
	uint8_t dist_queues;
	uint8_t en_ordered;
	uint8_t en_loose_ordered;
	uint8_t max_cgs;
	uint8_t cgid_in_use[MAX_RX_QUEUES];

	struct extract_s extract;

	uint16_t ss_offset;
	uint64_t ss_iova;
	uint64_t ss_param_iova;
	/* Stores timestamp of last received packet on dev */
	uint64_t rx_timestamp;
	/* Stores timestamp of last received tx confirmation packet on dev */
	uint64_t tx_timestamp;
	/* Stores pointer to the next tx_conf queue that should be processed;
	 * it corresponds to the last packet transmitted.
	 */
	struct dpaa2_queue *next_tx_conf_queue;

	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */

	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
	LIST_HEAD(nodes, dpaa2_tm_node) nodes;
	LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
};

int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
				      struct dpkg_profile_cfg *kg_cfg);

int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
			  uint64_t req_dist_set, int tc_index);

int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
			   uint8_t tc_index);

int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);

__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

__rte_internal
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
			    int eth_rx_queue_id);

uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);

uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);

uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      const struct qbman_result *dq,
				      struct dpaa2_queue *rxq,
				      struct rte_event *ev);
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev);
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				     const struct qbman_fd *fd,
				     const struct qbman_result *dq,
				     struct dpaa2_queue *rxq,
				     struct rte_event *ev);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
			      uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
uint16_t dpaa2_dev_tx_conf(void *queue)  __rte_unused;

int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
			     struct timespec *timestamp);
int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
			      const struct timespec *timestamp);
int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				     struct timespec *timestamp,
				     uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				     struct timespec *timestamp);
#endif /* _DPAA2_ETHDEV_H */