/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <errno.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_byteorder.h>

#include "nitrox_hal.h"
#include "nitrox_csr.h"
#include "nitrox_logs.h"

#define MAX_VF_QUEUES 8
#define MAX_PF_QUEUES 64
#define NITROX_TIMER_THOLD 0x3FFFFF
#define NITROX_COUNT_THOLD 0xFFFFFFFF

void
nps_pkt_input_ring_disable(uint8_t *bar_addr, uint16_t ring)
{
	union nps_pkt_in_instr_ctl pkt_in_instr_ctl;
	uint64_t reg_addr;
	int max_retries = 5;

	/* clear enable bit */
	reg_addr = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	pkt_in_instr_ctl.s.enb = 0;
	nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_ctl.u64);
	rte_delay_us_block(100);

	/* wait for enable bit to be cleared */
	pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	while (pkt_in_instr_ctl.s.enb && max_retries--) {
		rte_delay_ms(10);
		pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	}
}

void
nps_pkt_solicited_port_disable(uint8_t *bar_addr, uint16_t port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	uint64_t reg_addr;
	int max_retries = 5;

	/* clear enable bit */
	reg_addr = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	pkt_slc_ctl.s.enb = 0;
	nitrox_write_csr(bar_addr, reg_addr, pkt_slc_ctl.u64);
	rte_delay_us_block(100);

	/* wait for enable bit to be cleared */
	pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	while (pkt_slc_ctl.s.enb && max_retries--) {
		rte_delay_ms(10);
		pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	}
}

void
setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
			 phys_addr_t raddr)
{
	union nps_pkt_in_instr_ctl pkt_in_instr_ctl;
	union nps_pkt_in_instr_rsize pkt_in_instr_rsize;
	union nps_pkt_in_instr_baoff_dbell pkt_in_instr_baoff_dbell;
	union nps_pkt_in_done_cnts pkt_in_done_cnts;
	uint64_t base_addr, reg_addr;
	int max_retries = 5;

	nps_pkt_input_ring_disable(bar_addr, ring);

	/* write ring base address */
	reg_addr = NPS_PKT_IN_INSTR_BADDRX(ring);
	base_addr = raddr;
	nitrox_write_csr(bar_addr, reg_addr, base_addr);
	rte_delay_us_block(CSR_DELAY);

	/* write ring size */
	reg_addr = NPS_PKT_IN_INSTR_RSIZEX(ring);
	pkt_in_instr_rsize.u64 = 0;
	pkt_in_instr_rsize.s.rsize = rsize;
	nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_rsize.u64);
	rte_delay_us_block(CSR_DELAY);

	/* clear doorbell; writing all ones resets the doorbell count */
	reg_addr = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
	pkt_in_instr_baoff_dbell.u64 = 0;
	pkt_in_instr_baoff_dbell.s.dbell = 0xFFFFFFFF;
	nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_baoff_dbell.u64);
	rte_delay_us_block(CSR_DELAY);

	/* clear done count by writing back the current value */
	reg_addr = NPS_PKT_IN_DONE_CNTSX(ring);
	pkt_in_done_cnts.u64 = nitrox_read_csr(bar_addr, reg_addr);
	nitrox_write_csr(bar_addr, reg_addr, pkt_in_done_cnts.u64);
	rte_delay_us_block(CSR_DELAY);

	/* setup PKT IN RING interrupt threshold */
	reg_addr = NPS_PKT_IN_INT_LEVELSX(ring);
	nitrox_write_csr(bar_addr, reg_addr, 0xFFFFFFFF);
	rte_delay_us_block(CSR_DELAY);

	/* enable ring; is64b selects the 64-byte instruction format */
	reg_addr = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	pkt_in_instr_ctl.s.is64b = 1;
	pkt_in_instr_ctl.s.enb = 1;
	nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_ctl.u64);
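	/* give the enable write time to post before polling the enable bit */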
	rte_delay_us_block(100);

	/* wait for ring to be enabled */
	pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	while (!pkt_in_instr_ctl.s.enb && max_retries--) {
		rte_delay_ms(10);
		pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	}
}

void
setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	union nps_pkt_slc_int_levels pkt_slc_int_levels;
	uint64_t reg_addr;
	int max_retries = 5;

	nps_pkt_solicited_port_disable(bar_addr, port);

	/* clear pkt counts by writing back the current value */
	reg_addr = NPS_PKT_SLC_CNTSX(port);
	pkt_slc_cnts.u64 = nitrox_read_csr(bar_addr, reg_addr);
	nitrox_write_csr(bar_addr, reg_addr, pkt_slc_cnts.u64);
	rte_delay_us_block(CSR_DELAY);

	/* set SLC interrupt levels */
	reg_addr = NPS_PKT_SLC_INT_LEVELSX(port);
	pkt_slc_int_levels.u64 = 0;
	pkt_slc_int_levels.s.bmode = 0;
	pkt_slc_int_levels.s.timet = NITROX_TIMER_THOLD;

	if (NITROX_COUNT_THOLD > 0)
		pkt_slc_int_levels.s.cnt = NITROX_COUNT_THOLD - 1;

	nitrox_write_csr(bar_addr, reg_addr, pkt_slc_int_levels.u64);
	rte_delay_us_block(CSR_DELAY);

	/* enable port */
	reg_addr = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	pkt_slc_ctl.s.rh = 1;
	pkt_slc_ctl.s.z = 1;
	pkt_slc_ctl.s.enb = 1;
	nitrox_write_csr(bar_addr, reg_addr, pkt_slc_ctl.u64);
	rte_delay_us_block(100);

	/* wait for port to be enabled */
	pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	while (!pkt_slc_ctl.s.enb && max_retries--) {
		rte_delay_ms(10);
		pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	}
}

int
zqmq_input_ring_disable(uint8_t *bar_addr, uint16_t ring)
{
	union zqmq_activity_stat zqmq_activity_stat;
	union zqmq_en zqmq_en;
	union zqmq_cmp_cnt zqmq_cmp_cnt;
	uint64_t reg_addr;
	int max_retries = 5;

	/* clear queue enable */
	reg_addr = ZQMQ_ENX(ring);
	zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
	zqmq_en.s.queue_enable = 0;
	nitrox_write_csr(bar_addr, reg_addr, zqmq_en.u64);
	rte_delay_us_block(100);

	/* wait for queue active bit to clear */
	reg_addr = ZQMQ_ACTIVITY_STATX(ring);
	zqmq_activity_stat.u64 = nitrox_read_csr(bar_addr, reg_addr);
	while (zqmq_activity_stat.s.queue_active && max_retries--) {
		rte_delay_ms(10);
		zqmq_activity_stat.u64 = nitrox_read_csr(bar_addr, reg_addr);
	}

	if (zqmq_activity_stat.s.queue_active) {
		NITROX_LOG_LINE(ERR, "Failed to disable zqmq ring %d", ring);
		return -EBUSY;
	}

	/* clear commands completed count */
	reg_addr = ZQMQ_CMP_CNTX(ring);
	zqmq_cmp_cnt.u64 = nitrox_read_csr(bar_addr, reg_addr);
	nitrox_write_csr(bar_addr, reg_addr, zqmq_cmp_cnt.u64);
	rte_delay_us_block(CSR_DELAY);
	return 0;
}

int
setup_zqmq_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
		      phys_addr_t raddr)
{
	union zqmq_drbl zqmq_drbl;
	union zqmq_qsz zqmq_qsz;
	union zqmq_en zqmq_en;
	union zqmq_cmp_thr zqmq_cmp_thr;
	union zqmq_timer_ld zqmq_timer_ld;
	uint64_t reg_addr = 0;
	int max_retries = 5;
	int err = 0;

	err = zqmq_input_ring_disable(bar_addr, ring);
	if (err)
		return err;

	/* clear doorbell count; writing all ones resets it */
	reg_addr = ZQMQ_DRBLX(ring);
	zqmq_drbl.u64 = 0;
	zqmq_drbl.s.dbell_count = 0xFFFFFFFF;
	nitrox_write_csr(bar_addr, reg_addr, zqmq_drbl.u64);
	rte_delay_us_block(CSR_DELAY);
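
	/* reset the next command index to zero */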
	reg_addr = ZQMQ_NXT_CMDX(ring);
	nitrox_write_csr(bar_addr, reg_addr, 0);
	rte_delay_us_block(CSR_DELAY);

	/* write queue length */
	reg_addr = ZQMQ_QSZX(ring);
	zqmq_qsz.u64 = 0;
	zqmq_qsz.s.host_queue_size = rsize;
	nitrox_write_csr(bar_addr, reg_addr, zqmq_qsz.u64);
	rte_delay_us_block(CSR_DELAY);

	/* write queue base address */
	reg_addr = ZQMQ_BADRX(ring);
	nitrox_write_csr(bar_addr, reg_addr, raddr);
	rte_delay_us_block(CSR_DELAY);

	/* write commands completed threshold */
	reg_addr = ZQMQ_CMP_THRX(ring);
	zqmq_cmp_thr.u64 = 0;
	zqmq_cmp_thr.s.commands_completed_threshold = 0;
	nitrox_write_csr(bar_addr, reg_addr, zqmq_cmp_thr.u64);
	rte_delay_us_block(CSR_DELAY);

	/* write timer load value */
	reg_addr = ZQMQ_TIMER_LDX(ring);
	zqmq_timer_ld.u64 = 0;
	zqmq_timer_ld.s.timer_load_value = 0;
	nitrox_write_csr(bar_addr, reg_addr, zqmq_timer_ld.u64);
	rte_delay_us_block(CSR_DELAY);

	/* enable queue */
	reg_addr = ZQMQ_ENX(ring);
	zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
	zqmq_en.s.queue_enable = 1;
	nitrox_write_csr(bar_addr, reg_addr, zqmq_en.u64);
	rte_delay_us_block(100);

	/* wait for queue enable to be set */
	zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
	while (!zqmq_en.s.queue_enable && max_retries--) {
		rte_delay_ms(10);
		zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
	}

	if (!zqmq_en.s.queue_enable) {
		NITROX_LOG_LINE(ERR, "Failed to enable zqmq ring %d", ring);
		err = -EFAULT;
	} else {
		err = 0;
	}

	return err;
}

int
vf_get_vf_config_mode(uint8_t *bar_addr)
{
	union aqmq_qsz aqmq_qsz;
	uint64_t reg_addr;
	int q, vf_mode;

	/* Write a magic value to queue 0's size register and find the first
	 * queue index at which it reads back; that index is the number of
	 * queues per VF, which identifies the VF configuration mode.
	 */
	aqmq_qsz.u64 = 0;
	aqmq_qsz.s.host_queue_size = 0xDEADBEEF;
	reg_addr = AQMQ_QSZX(0);
	nitrox_write_csr(bar_addr, reg_addr, aqmq_qsz.u64);
	rte_delay_us_block(CSR_DELAY);

	aqmq_qsz.u64 = 0;
	for (q = 1; q < MAX_VF_QUEUES; q++) {
		reg_addr = AQMQ_QSZX(q);
		aqmq_qsz.u64 = nitrox_read_csr(bar_addr, reg_addr);
		if (aqmq_qsz.s.host_queue_size == 0xDEADBEEF)
			break;
	}

	switch (q) {
	case 1:
		vf_mode = NITROX_MODE_VF128;
		break;
	case 2:
		vf_mode = NITROX_MODE_VF64;
		break;
	case 4:
		vf_mode = NITROX_MODE_VF32;
		break;
	case 8:
		vf_mode = NITROX_MODE_VF16;
		break;
	default:
		vf_mode = 0;
		break;
	}

	return vf_mode;
}

int
vf_config_mode_to_nr_queues(enum nitrox_vf_mode vf_mode)
{
	int nr_queues;

	switch (vf_mode) {
	case NITROX_MODE_PF:
		nr_queues = MAX_PF_QUEUES;
		break;
	case NITROX_MODE_VF16:
		nr_queues = 8;
		break;
	case NITROX_MODE_VF32:
		nr_queues = 4;
		break;
	case NITROX_MODE_VF64:
		nr_queues = 2;
		break;
	case NITROX_MODE_VF128:
		nr_queues = 1;
		break;
	default:
		nr_queues = 0;
		break;
	}

	return nr_queues;
}
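
/*
 * Usage sketch (illustrative only, not part of the driver): one way a
 * caller might combine the helpers above to bring up a queue pair on a VF.
 * The function name, the NITROX_HAL_USAGE_EXAMPLE guard and the parameter
 * values are hypothetical; the pairing of input ring and solicited output
 * port under the same index follows the setup functions in this file.
 */
#ifdef NITROX_HAL_USAGE_EXAMPLE
static int
nitrox_hal_example_queue_init(uint8_t *bar_addr, uint16_t qno,
			      uint32_t rsize, phys_addr_t raddr)
{
	int nr_queues;

	/* detect how many queues this VF owns before touching ring qno */
	nr_queues = vf_config_mode_to_nr_queues(vf_get_vf_config_mode(bar_addr));
	if (qno >= nr_queues)
		return -ENODEV;

	/* program the instruction input ring and its solicited output port */
	setup_nps_pkt_input_ring(bar_addr, qno, rsize, raddr);
	setup_nps_pkt_solicit_output_port(bar_addr, qno);
	return 0;
}
#endif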