/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef ECORE_INIT_OPS_H
#define ECORE_INIT_OPS_H

static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len);
static void ecore_write_dmae_phys_len(struct bnx2x_softc *sc,
				      ecore_dma_addr_t phys_addr,
				      uint32_t addr, uint32_t len);

static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr,
			      const uint32_t *data, uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len; i++)
		REG_WR(sc, addr + i*4, data[i]);
}

static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr,
				uint32_t len, uint8_t wb __rte_unused)
{
	if (DMAE_READY(sc))
		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}

static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill,
			    uint32_t len, uint8_t wb)
{
	uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
	uint32_t buf_len32 = buf_len/4;
	uint32_t i;

	ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len);

	for (i = 0; i < len; i += buf_len32) {
		uint32_t cur_len = min(buf_len32, len - i);

		ecore_write_big_buf(sc, addr + i*4, cur_len, wb);
	}
}

static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr,
				   uint32_t len)
{
	if (DMAE_READY(sc))
		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}

static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr,
			     const uint32_t *data, uint32_t len64)
{
	uint32_t buf_len32 = FW_BUF_SIZE/4;
	uint32_t len = len64*2;
	uint64_t data64 = 0;
	uint32_t i;

	/* 64 bit value is in a blob: first low DWORD, then high DWORD */
	data64 = HILO_U64((*(data + 1)), (*data));

	len64 = min((uint32_t)(FW_BUF_SIZE/8), len64);
	for (i = 0; i < len64; i++) {
		uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i;

		*pdata = data64;
	}

	for (i = 0; i < len; i += buf_len32) {
		uint32_t cur_len = min(buf_len32, len - i);

		ecore_write_big_buf_wb(sc, addr + i*4, cur_len);
	}
}

/*********************************************************
 * There are different blobs for each PRAM section. In addition, each
 * blob write operation is divided into a few operations in order to
 * decrease the amount of physically contiguous buffer needed. Thus,
 * when we select a blob, the address may have some offset from the
 * beginning of the PRAM section. The same holds for the INT_TABLE
 * sections.
 *********************************************************/
#define IF_IS_INT_TABLE_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))

#define IF_IS_PRAM_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
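/*
 * Illustrative note (not part of the original driver): IF_IS_PRAM_ADDR()
 * expands to a bare 'if' statement, which is why ecore_sel_blob() below
 * reads as an if/else-if chain. For example,
 *     IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
 * expands to
 *     if ((TSEM_REG_PRAM <= addr) && (TSEM_REG_PRAM + 0x40000 >= addr))
 * i.e. a test for addr falling within the 256KB window that starts at
 * the TSEM PRAM base.
 */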
static const uint8_t *ecore_sel_blob(struct bnx2x_softc *sc, uint32_t addr,
				     const uint8_t *data)
{
	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
		data = INIT_TSEM_INT_TABLE_DATA(sc);
	else
		IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
			data = INIT_CSEM_INT_TABLE_DATA(sc);
	else
		IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
			data = INIT_USEM_INT_TABLE_DATA(sc);
	else
		IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
			data = INIT_XSEM_INT_TABLE_DATA(sc);
	else
		IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
			data = INIT_TSEM_PRAM_DATA(sc);
	else
		IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
			data = INIT_CSEM_PRAM_DATA(sc);
	else
		IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
			data = INIT_USEM_PRAM_DATA(sc);
	else
		IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
			data = INIT_XSEM_PRAM_DATA(sc);

	return data;
}

static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr,
			     const uint32_t *data, uint32_t len)
{
	if (DMAE_READY(sc))
		VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		ecore_init_str_wr(sc, addr, data, len);
}

static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo,
			uint32_t val_hi)
{
	uint32_t wb_write[2];

	wb_write[0] = val_lo;
	wb_write[1] = val_hi;
	REG_WR_DMAE_LEN(sc, reg, wb_write, 2);
}

static void ecore_init_wr_zp(struct bnx2x_softc *sc, uint32_t addr,
			     uint32_t len, uint32_t blob_off)
{
	const uint8_t *data = NULL;
	int rc;
	uint32_t i;

	data = ecore_sel_blob(sc, addr, data) + blob_off*4;

	rc = ecore_gunzip(sc, data, len);
	if (rc)
		return;

	/* gunzip_outlen is in dwords */
	len = GUNZIP_OUTLEN(sc);
	for (i = 0; i < len; i++)
		((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t)
			ECORE_CPU_TO_LE32(((uint32_t *)GUNZIP_BUF(sc))[i]);

	ecore_write_big_buf_wb(sc, addr, len);
}
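/*
 * Illustrative note (not part of the original driver): each entry of the
 * firmware init-op array decodes to one of the OP_* cases handled in
 * ecore_init_block() below. For instance, an OP_WR entry whose offset is
 * SOME_REG and whose val is 5 executes as REG_WR(sc, SOME_REG, 5), while
 * an OP_ZR entry zeroes zero.len dwords starting at its offset via
 * ecore_init_fill(). SOME_REG is a hypothetical register name used only
 * for this example.
 */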
static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block,
			     uint32_t stage)
{
	uint16_t op_start =
		INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
						   STAGE_START)];
	uint16_t op_end =
		INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
						   STAGE_END)];
	const union init_op *op;
	uint32_t op_idx, op_type, addr, len;
	const uint32_t *data, *data_base;

	/* If empty block */
	if (op_start == op_end)
		return;

	data_base = INIT_DATA(sc);

	for (op_idx = op_start; op_idx < op_end; op_idx++) {

		op = (const union init_op *)&(INIT_OPS(sc)[op_idx]);
		/* Get generic data */
		op_type = op->raw.op;
		addr = op->raw.offset;
		/* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
		 * OP_WR64 (we assume that op_arr_write and op_write have the
		 * same structure).
		 */
		len = op->arr_wr.data_len;
		data = data_base + op->arr_wr.data_off;

		switch (op_type) {
		case OP_RD:
			REG_RD(sc, addr);
			break;
		case OP_WR:
			REG_WR(sc, addr, op->write.val);
			break;
		case OP_SW:
			ecore_init_str_wr(sc, addr, data, len);
			break;
		case OP_WB:
			ecore_init_wr_wb(sc, addr, data, len);
			break;
		case OP_ZR:
			ecore_init_fill(sc, addr, 0, op->zero.len, 0);
			break;
		case OP_WB_ZR:
			ecore_init_fill(sc, addr, 0, op->zero.len, 1);
			break;
		case OP_ZP:
			ecore_init_wr_zp(sc, addr, len,
					 op->arr_wr.data_off);
			break;
		case OP_WR_64:
			ecore_init_wr_64(sc, addr, data, len);
			break;
		case OP_IF_MODE_AND:
			/* if any of the flags doesn't match, skip the
			 * conditional block.
			 */
			if ((INIT_MODE_FLAGS(sc) &
			     op->if_mode.mode_bit_map) !=
			    op->if_mode.mode_bit_map)
				op_idx += op->if_mode.cmd_offset;
			break;
		case OP_IF_MODE_OR:
			/* if all the flags don't match, skip the conditional
			 * block.
			 */
			if ((INIT_MODE_FLAGS(sc) &
			     op->if_mode.mode_bit_map) == 0)
				op_idx += op->if_mode.cmd_offset;
			break;
		default:
			/* Should never get here! */
			break;
		}
	}
}

/****************************************************************************
 * PXP Arbiter
 ****************************************************************************/
/*
 * This code configures the PCI read/write arbiter
 * which implements a weighted round robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * Since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */

#define NUM_WR_Q	13
#define NUM_RD_Q	29
#define MAX_RD_ORD	3
#define MAX_WR_ORD	2

/* configuration for one arbiter queue */
struct arb_line {
	int l;
	int add;
	int ubound;
};

/* derived configuration for each read queue for each max request size */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
	{ {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
	{ {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};
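/*
 * Illustrative note (not part of the original driver): in the table above,
 * each row corresponds to one read queue and each column to one request
 * size order (0 .. MAX_RD_ORD). ecore_init_pxp_arb() further below picks
 * the column matching the negotiated read order and programs each queue's
 * l/add/ubound triple into the registers listed in read_arb_addr[].
 */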
/* derived configuration for each write queue for each max request size */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */	{ {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
	{ {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
	{ {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};

/* register addresses for read queues */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
		PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
		PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
		PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
		PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
		PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
		PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
		PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
		PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
		PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
		PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
		PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
		PXP2_REG_RQ_BW_RD_UBOUND20},
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
		PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
		PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
		PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
		PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
		PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
		PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28}
};
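/*
 * Illustrative note (not part of the original driver): read_arb_addr[] and
 * write_arb_addr[] hold NUM_RD_Q-1 and NUM_WR_Q-1 entries respectively;
 * the last queue of each type has no dedicated per-parameter registers.
 * Its l/add/ubound triple is instead packed into the single registers
 * PXP2_REG_PSWRQ_BW_RD and PXP2_REG_PSWRQ_BW_WR at the end of
 * ecore_init_pxp_arb() below.
 */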
/* register addresses for write queues */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};

static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
			       int w_order)
{
	uint32_t val, i;

	if (r_order > MAX_RD_ORD) {
		ECORE_MSG(sc, "read order of %d adjusted to %d",
			  r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	}
	if (w_order > MAX_WR_ORD) {
		ECORE_MSG(sc, "write order of %d adjusted to %d",
			  w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	}
	if (CHIP_REV_IS_FPGA(sc)) {
		ECORE_MSG(sc, "write order adjusted to 0 for FPGA");
		w_order = 0;
	}
	ECORE_MSG(sc, "read order %d write order %d", r_order, w_order);

	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(sc, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(sc, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);
	}

	for (i = 0; i < NUM_WR_Q-1; i++) {
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {

			REG_WR(sc, write_arb_addr[i].l,
			       write_arb_data[i][w_order].l);

			REG_WR(sc, write_arb_addr[i].add,
			       write_arb_data[i][w_order].add);

			REG_WR(sc, write_arb_addr[i].ubound,
			       write_arb_data[i][w_order].ubound);
		} else {

			val = REG_RD(sc, write_arb_addr[i].l);
			REG_WR(sc, write_arb_addr[i].l,
			       val | (write_arb_data[i][w_order].l << 10));

			val = REG_RD(sc, write_arb_addr[i].add);
			REG_WR(sc, write_arb_addr[i].add,
			       val | (write_arb_data[i][w_order].add << 10));

			val = REG_RD(sc, write_arb_addr[i].ubound);
			REG_WR(sc, write_arb_addr[i].ubound,
			       val | (write_arb_data[i][w_order].ubound << 7));
		}
	}

	val  = write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val);

	val  = read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val);

	REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);

	if ((CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) && (r_order == MAX_RD_ORD))
		REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	if (CHIP_IS_E3(sc))
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
	else if (CHIP_IS_E2(sc))
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
	else
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	if (!CHIP_IS_E1(sc)) {
		/*    MPS      w_order     optimal TH      presently TH
		 *    128         0             0               2
		 *    256         1             1               3
		 *    >=512       2             2               3
		 */
		/* DMAE is special */
		if (!CHIP_IS_E1H(sc)) {
			/* E2 can use optimal TH */
			val = w_order;
			REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val);
		} else {
			val = ((w_order == 0) ? 2 : 3);
			REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2);
		}

		REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
		REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
		REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
		REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
	}

	/* Validate the number of tags supported by the device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST	0x2980
	val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
	val &= 0xFF;
	if (val <= 0x20)
		REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}
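/*
 * Worked example (illustrative only): the packing of the last write
 * queue's parameters into PXP2_REG_PSWRQ_BW_RD above, for w_order == 2:
 *     write_arb_data[NUM_WR_Q-1][2] = {32, 64, 81}  (l, add, ubound)
 *     val = 64 + (81 << 10) + (32 << 17)
 *         = 0x40 + 0x14400 + 0x400000 = 0x414440
 */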
/****************************************************************************
 * ILT management
 ****************************************************************************/
/*
 * This code hides the low level HW interaction for ILT management and
 * configuration. The API consists of a shadow ILT table which is set by the
 * driver and a set of routines to use it to configure the HW.
 */

/* ILT HW init operations */

/* ILT memory management operations */
#define ILT_MEMOP_ALLOC		0
#define ILT_MEMOP_FREE		1

/* the phys address is shifted right 12 bits and has a '1' (valid bit)
 * added at the 53rd bit;
 * then, since this is a wide register(TM),
 * we split it into two 32 bit writes
 */
#define ILT_ADDR1(x)		((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x)		((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
#define ILT_RANGE(f, l)		(((l) << 10) | f)

static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc __rte_unused,
				 struct ilt_line *line, uint32_t size,
				 uint8_t memop)
{
	if (memop == ILT_MEMOP_FREE) {
		ECORE_ILT_FREE(line->page, line->page_mapping, line->size);
		return 0;
	}
	ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size);
	if (!line->page)
		return -1;
	line->size = size;
	return 0;
}

static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num,
				   uint8_t memop)
{
	int i, rc;
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (!ilt || !ilt->lines)
		return -1;

	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
		return 0;

	for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
		rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i],
					   ilt_cli->page_size, memop);
	}
	return rc;
}

static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop)
{
	int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop);

	if (!rc)
		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop);
	if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);

	return rc;
}
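/*
 * Worked examples (illustrative only): for a page mapping of
 * 0x1234567000, ILT_ADDR1() yields (0x1234567000 >> 12) & 0xFFFFFFFF
 * = 0x01234567, and ILT_ADDR2() yields (1 << 20) | (0x1234567000 >> 44)
 * = 0x00100000, i.e. for addresses below 2^44 only the valid bit is set
 * in the high dword. Similarly, ILT_RANGE(0x10, 0x1F) packs a first/last
 * line pair as (0x1F << 10) | 0x10 = 0x7C10. The two address halves are
 * written with ecore_wr_64() in ecore_ilt_line_wr() below.
 */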
static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx,
			      ecore_dma_addr_t page_mapping)
{
	uint32_t reg;

	if (CHIP_IS_E1(sc))
		reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;

	ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
}

static void ecore_ilt_line_init_op(struct bnx2x_softc *sc,
				   struct ecore_ilt *ilt, int idx,
				   uint8_t initop)
{
	ecore_dma_addr_t null_mapping;
	int abs_idx = ilt->start_line + idx;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping);
		break;
	case INITOP_CLEAR:
		null_mapping = 0;
		ecore_ilt_line_wr(sc, abs_idx, null_mapping);
		break;
	}
}

static void ecore_ilt_boundary_init_op(struct bnx2x_softc *sc,
				       struct ilt_client_info *ilt_cli,
				       uint32_t ilt_start,
				       uint8_t initop __rte_unused)
{
	uint32_t start_reg = 0;
	uint32_t end_reg = 0;

	/* The boundary is either SET or INIT,
	 * CLEAR => SET and for now SET ~~ INIT
	 */

	/* find the appropriate regs */
	if (CHIP_IS_E1(sc)) {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
			break;
		}
		REG_WR(sc, start_reg + SC_FUNC(sc)*4,
		       ILT_RANGE((ilt_start + ilt_cli->start),
				 (ilt_start + ilt_cli->end)));
	} else {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_QM_LAST_ILT;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
			end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
			break;
		}
		REG_WR(sc, start_reg, (ilt_start + ilt_cli->start));
		REG_WR(sc, end_reg, (ilt_start + ilt_cli->end));
	}
}

static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
					 struct ecore_ilt *ilt,
					 struct ilt_client_info *ilt_cli,
					 uint8_t initop)
{
	int i;

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	for (i = ilt_cli->start; i <= ilt_cli->end; i++)
		ecore_ilt_line_init_op(sc, ilt, i, initop);

	/* init/clear the ILT boundaries */
	ecore_ilt_boundary_init_op(sc, ilt_cli, ilt->start_line, initop);
}

static void ecore_ilt_client_init_op(struct bnx2x_softc *sc,
				     struct ilt_client_info *ilt_cli,
				     uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);

	ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop);
}

static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc,
					int cli_num, uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	ecore_ilt_client_init_op(sc, ilt_cli, initop);
}

static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop)
{
	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop);
	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop);
	if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
		ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
}
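/*
 * Illustrative note (not part of the original driver): the page size
 * registers programmed below hold log2(page_size / 4KB), as implied by
 * the ILOG2(ilt_cli->page_size >> 12) expression; e.g. for a 64KB ILT
 * page, ILOG2(0x10000 >> 12) == ILOG2(16) == 4.
 */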
static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num,
				      uint32_t psz_reg, uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12));
		break;
	case INITOP_CLEAR:
		break;
	}
}

/*
 * called during init common stage; ILT clients should be initialized
 * prior to calling this function
 */
static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
{
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU,
				  PXP2_REG_RQ_CDU_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM,
				  PXP2_REG_RQ_QM_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC,
				  PXP2_REG_RQ_SRC_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM,
				  PXP2_REG_RQ_TM_P_SIZE, initop);
}

/****************************************************************************
 * QM initializations
 ****************************************************************************/
#define QM_QUEUES_PER_FUNC	16 /* E1 has 32, but only 16 are used */
#define QM_INIT_MIN_CID_COUNT	31
#define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)

/* called during init port stage */
static void ecore_qm_init_cid_count(struct bnx2x_softc *sc, int qm_cid_count,
				    uint8_t initop)
{
	int port = SC_PORT(sc);

	if (QM_INIT(qm_cid_count)) {
		switch (initop) {
		case INITOP_INIT:
			/* set in the init-value array */
		case INITOP_SET:
			REG_WR(sc, QM_REG_CONNNUM_0 + port*4,
			       qm_cid_count/16 - 1);
			break;
		case INITOP_CLEAR:
			break;
		}
	}
}

static void ecore_qm_set_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
				   uint32_t base_reg, uint32_t reg)
{
	int i;
	uint32_t wb_data[2] = {0, 0};

	for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
		REG_WR(sc, base_reg + i*4,
		       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
		ecore_init_wr_wb(sc, reg + i*8,
				 wb_data, 2);
	}
}

/* called during init common stage */
static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
				    uint8_t initop)
{
	if (!QM_INIT(qm_cid_count))
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		ecore_qm_set_ptr_table(sc, qm_cid_count,
				       QM_REG_BASEADDR, QM_REG_PTRTBL);
		if (CHIP_IS_E1H(sc))
			ecore_qm_set_ptr_table(sc, qm_cid_count,
					       QM_REG_BASEADDR_EXT_A,
					       QM_REG_PTRTBL_EXT_A);
		break;
	case INITOP_CLEAR:
		break;
	}
}

#endif /* ECORE_INIT_OPS_H */