/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 *
 */
#ifndef __DPAA_ETHDEV_H__
#define __DPAA_ETHDEV_H__

/* System headers */
#include <stdbool.h>
#include <ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <dpaa_of.h>
#include <netcfg.h>

#define MAX_DPAA_CORES			4
#define DPAA_MBUF_HW_ANNOTATION		64
#define DPAA_FD_PTA_SIZE		64

/* we will re-use the HEADROOM for annotation in RX */
#define DPAA_HW_BUF_RESERVE		0
#define DPAA_PACKET_LAYOUT_ALIGN	64

/* Alignment to use for cpu-local structs to avoid coherency problems. */
#define MAX_CACHELINE			64

#define DPAA_MAX_RX_PKT_LEN		10240

#define DPAA_SGT_MAX_ENTRIES		16 /* maximum number of entries in SG Table */

/* Maximum SG segments supported on all cores */
#define DPAA_MAX_SGS			128
/* SG pool size */
#define DPAA_POOL_SIZE			2048
/* SG pool cache size */
#define DPAA_POOL_CACHE_SIZE		256

/* RX queue tail drop threshold (CGR based), in frame count */
#define CGR_RX_PERFQ_THRESH		256
#define CGR_TX_CGR_THRESH		512

/* Maximum MAC filters for memac (8), including the primary MAC address */
#define DPAA_MAX_MAC_FILTER		(MEMAC_NUM_OF_PADDRS + 1)

/* Maximum number of slots available in the TX ring */
#define DPAA_TX_BURST_SIZE		7

/* Default (optimal) burst sizes for RX and TX */
#define DPAA_DEF_RX_BURST_SIZE		7
#define DPAA_DEF_TX_BURST_SIZE		DPAA_TX_BURST_SIZE

#ifndef VLAN_TAG_SIZE
#define VLAN_TAG_SIZE			4 /**< VLAN header length */
#endif

#define DPAA_ETH_MAX_LEN (RTE_ETHER_MTU + \
			  RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
			  VLAN_TAG_SIZE)

/* PCD frame queues */
#define DPAA_DEFAULT_NUM_PCD_QUEUES	1
#define DPAA_VSP_PROFILE_MAX_NUM	8
/* Same as the number of VSP profiles */
#define DPAA_MAX_NUM_PCD_QUEUES		DPAA_VSP_PROFILE_MAX_NUM

#define DPAA_IF_TX_PRIORITY		3
#define DPAA_IF_RX_PRIORITY		0
#define DPAA_IF_DEBUG_PRIORITY		7

#define DPAA_IF_RX_ANNOTATION_STASH	1
#define DPAA_IF_RX_DATA_STASH		1
#define DPAA_IF_RX_CONTEXT_STASH	0

/* Each "debug" FQ is represented by one of these */
#define DPAA_DEBUG_FQ_RX_ERROR		0
#define DPAA_DEBUG_FQ_TX_ERROR		1

#define DPAA_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_L2_PAYLOAD | \
	RTE_ETH_RSS_IP | \
	RTE_ETH_RSS_UDP | \
	RTE_ETH_RSS_TCP | \
	RTE_ETH_RSS_SCTP)

#define DPAA_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
				    RTE_MBUF_F_TX_TCP_CKSUM | \
				    RTE_MBUF_F_TX_UDP_CKSUM)

/* DPAA Frame descriptor macros */

#define DPAA_FD_CMD_FCO		0x80000000
/**< Frame queue Context Override */
#define DPAA_FD_CMD_RPD		0x40000000
/**< Read Prepended Data */
#define DPAA_FD_CMD_UPD		0x20000000
/**< Update Prepended Data */
#define DPAA_FD_CMD_DTC		0x10000000
/**< Do IP/TCP/UDP Checksum */
#define DPAA_FD_CMD_DCL4C	0x10000000
/**< Didn't calculate L4 Checksum */
#define DPAA_FD_CMD_CFQ		0x00ffffff
/**< Confirmation Frame Queue */
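/*
 * Illustrative sketch only (not part of the driver API): compose a frame
 * descriptor command word from the DPAA_FD_CMD_* bits above. The real TX
 * path writes an equivalent value into the frame descriptor defined in
 * fsl_qman.h; the helper name and the confirmation FQ id are placeholders.
 */
static inline uint32_t
dpaa_example_fd_cmd(uint32_t confirm_fqid, bool do_cksum)
{
	/* Keep only the 24-bit confirmation frame queue field */
	uint32_t cmd = confirm_fqid & DPAA_FD_CMD_CFQ;

	/* Ask the hardware to read the prepended data area */
	cmd |= DPAA_FD_CMD_RPD;

	/* Request IP/TCP/UDP checksum generation on transmit */
	if (do_cksum)
		cmd |= DPAA_FD_CMD_DTC;

	return cmd;
}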
#define DPAA_DEFAULT_RXQ_VSP_ID		1

#define FMC_FILE "/tmp/fmc.bin"

extern struct rte_mempool *dpaa_tx_sg_pool;

/* Structure used to free external and indirect buffers. */
struct dpaa_sw_buf_free {
	/* To which packet this segment belongs */
	uint16_t pkt_id;
	/* The actual segment */
	struct rte_mbuf *seg;
};

/* Each network interface is represented by one of these */
struct dpaa_if {
	int valid;
	char *name;
	const struct fm_eth_port_cfg *cfg;
	struct qman_fq *rx_queues;
	struct qman_cgr *cgr_rx;
	struct qman_fq *tx_queues;
	struct qman_cgr *cgr_tx;
	struct qman_fq debug_queues[2];
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;
	uint32_t ifid;
	struct dpaa_bp_info *bp_info;
	struct rte_eth_fc_conf *fc_conf;
	void *port_handle;
	void *netenv_handle;
	void *scheme_handle[2];
	uint32_t scheme_count;

	void *vsp_handle[DPAA_VSP_PROFILE_MAX_NUM];
	uint32_t vsp_bpid[DPAA_VSP_PROFILE_MAX_NUM];
};

struct dpaa_if_stats {
	/* Rx Statistics Counters */
	uint64_t reoct;		/**< Rx Eth Octets Counter */
	uint64_t roct;		/**< Rx Octets Counter */
	uint64_t raln;		/**< Rx Alignment Error Counter */
	uint64_t rxpf;		/**< Rx valid Pause Frames */
	uint64_t rfrm;		/**< Rx Frame Counter */
	uint64_t rfcs;		/**< Rx frame check sequence errors */
	uint64_t rvlan;		/**< Rx VLAN Frame Counter */
	uint64_t rerr;		/**< Rx Frame errors */
	uint64_t ruca;		/**< Rx Unicast */
	uint64_t rmca;		/**< Rx Multicast */
	uint64_t rbca;		/**< Rx Broadcast */
	uint64_t rdrp;		/**< Rx Dropped Packets */
	uint64_t rpkt;		/**< Rx Packets */
	uint64_t rund;		/**< Rx undersized packets */
	uint32_t res_x[14];
	uint64_t rovr;		/**< Rx oversized but good */
	uint64_t rjbr;		/**< Rx oversized with bad csum */
	uint64_t rfrg;		/**< Rx fragmented Packets */
	uint64_t rcnp;		/**< Rx control packets (0x8808) */
	uint64_t rdrntp;	/**< Rx dropped due to FIFO overflow */
	uint32_t res01d0[12];
	/* Tx Statistics Counters */
	uint64_t teoct;		/**< Tx Eth Octets */
	uint64_t toct;		/**< Tx Octets */
	uint32_t res0210[2];
	uint64_t txpf;		/**< Tx valid Pause Frames */
	uint64_t tfrm;		/**< Tx Frame Counter */
	uint64_t tfcs;		/**< Tx FCS errors */
	uint64_t tvlan;		/**< Tx VLAN Frames */
	uint64_t terr;		/**< Tx Frame errors */
	uint64_t tuca;		/**< Tx Unicast */
	uint64_t tmca;		/**< Tx Multicast */
	uint64_t tbca;		/**< Tx Broadcast */
	uint32_t res0258[2];
	uint64_t tpkt;		/**< Tx Packets */
	uint64_t tund;		/**< Tx Undersized */
};

__rte_internal
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

__rte_internal
int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
		    struct qman_portal *qm __always_unused,
		    struct qman_fq *fq,
		    const struct qm_dqrr_entry *dqrr,
		    void **bufs);
enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
		  struct qman_portal *qm __always_unused,
		  struct qman_fq *fq,
		  const struct qm_dqrr_entry *dqrr,
		  void **bufs);
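/*
 * Illustrative sketch only (not part of the driver API): how an Rx queue
 * might be handed over to an event device through dpaa_eth_eventq_attach().
 * The helper name and the event queue id are placeholders; the channel id
 * is assumed to come from the DPAA event device. Atomic scheduling pairs
 * with the dpaa_rx_cb_atomic DQRR callback declared above, parallel
 * scheduling with dpaa_rx_cb_parallel.
 */
static inline int
dpaa_example_eventq_attach(const struct rte_eth_dev *dev,
			   int eth_rx_queue_id, u16 ch_id)
{
	struct rte_event_eth_rx_adapter_queue_conf conf = {0};

	conf.ev.queue_id = 0;	/* placeholder event queue */
	conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;

	return dpaa_eth_eventq_attach(dev, eth_rx_queue_id, ch_id, &conf);
}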
/* PMD related logs */
extern int dpaa_logtype_pmd;

#define DPAA_PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, dpaa_logtype_pmd, "%s(): " fmt "\n", \
		__func__, ##args)

#define PMD_INIT_FUNC_TRACE() DPAA_PMD_LOG(DEBUG, " >>")

#define DPAA_PMD_DEBUG(fmt, args...) \
	DPAA_PMD_LOG(DEBUG, fmt, ## args)
#define DPAA_PMD_ERR(fmt, args...) \
	DPAA_PMD_LOG(ERR, fmt, ## args)
#define DPAA_PMD_INFO(fmt, args...) \
	DPAA_PMD_LOG(INFO, fmt, ## args)
#define DPAA_PMD_WARN(fmt, args...) \
	DPAA_PMD_LOG(WARNING, fmt, ## args)

/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA_DP_LOG(level, fmt, args...) \
	RTE_LOG_DP(level, PMD, fmt, ## args)

#endif