Lines Matching defs:blk

113 * @blk: block type
119 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
121 return ice_sect_lkup[blk][sect];
696 * @blk: hardware block
703 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
708 if (prof >= hw->blk[blk].es.count)
711 if (fv_idx >= hw->blk[blk].es.fvw)
714 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
727 * @blk: HW block
732 int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
739 bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
749 sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
761 * @blk: HW block
770 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
775 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
782 * @blk: HW block
788 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
790 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
796 * @blk: HW block
802 void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
806 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
807 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
815 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
821 * @blk: HW block
829 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
837 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
841 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
845 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
846 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
848 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
857 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
858 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
866 * @blk: HW block
876 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
884 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
887 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
897 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
904 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
905 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
906 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
907 &hw->blk[blk].xlt1.ptypes[ptype];
909 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
910 hw->blk[blk].xlt1.t[ptype] = ptg;
1012 * @blk: HW block
1020 ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
1027 bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
1047 * @blk: HW block
1052 int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
1058 if (hw->blk[blk].xlt2.vsis[vsi].changed) {
1062 vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
1063 status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
1067 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1077 * @blk: HW block
1085 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
1094 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
1102 * @blk: HW block
1107 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
1111 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
1112 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
1113 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
1122 * @blk: HW block
1127 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
1132 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
1133 return ice_vsig_alloc_val(hw, blk, i);
1141 * @blk: HW block
1154 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
1157 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
1173 * @blk: HW block
1180 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
1190 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1193 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
1195 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1211 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
1216 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
1225 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
1233 * @blk: HW block
1241 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1251 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1258 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1262 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
1289 * @blk: HW block
1299 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1313 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
1317 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
1327 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
1336 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
1337 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
1340 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1341 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
1342 &hw->blk[blk].xlt2.vsis[vsi];
1343 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
1344 hw->blk[blk].xlt2.t[vsi] = vsig;
1352 * @blk: HW block
1357 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
1360 struct ice_es *es = &hw->blk[blk].es;
1379 * @blk: the block type
1382 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1384 switch (blk) {
1399 * @blk: the block type
1402 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1404 switch (blk) {
1420 * @blk: the block to allocate the TCAM for
1428 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
1433 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1442 * @blk: the block from which to free the TCAM entry
1448 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
1452 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1461 * @blk: the block to allocate the profile ID for
1468 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
1474 if (!ice_prof_id_rsrc_type(blk, &res_type))
1487 * @blk: the block from which to free the profile ID
1493 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1498 if (!ice_prof_id_rsrc_type(blk, &res_type))
1507 * @blk: the block in which to increment the profile's reference count
1511 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1513 if (prof_id >= hw->blk[blk].es.count)
1516 hw->blk[blk].es.ref_count[prof_id]++;
1524 * @blk: the block in which to write the extraction sequence
1529 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
1534 off = prof_id * hw->blk[blk].es.fvw;
1536 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
1538 hw->blk[blk].es.written[prof_id] = false;
1540 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
1548 * @blk: the block in which to decrement the profile's reference count
1552 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1554 if (prof_id >= hw->blk[blk].es.count)
1557 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
1558 if (!--hw->blk[blk].es.ref_count[prof_id]) {
1559 ice_write_es(hw, blk, prof_id, NULL);
1560 return ice_free_prof_id(hw, blk, prof_id);
1613 * @blk: the HW block to initialize
1615 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
1619 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
1622 ptg = hw->blk[blk].xlt1.t[pt];
1624 ice_ptg_alloc_val(hw, blk, ptg);
1625 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
1633 * @blk: the HW block to initialize
1635 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
1639 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
1642 vsig = hw->blk[blk].xlt2.t[vsi];
1644 ice_vsig_alloc_val(hw, blk, vsig);
1645 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
1649 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1715 sizeof(*hw->blk[block_id].xlt1.t);
1716 dst = hw->blk[block_id].xlt1.t;
1717 dst_len = hw->blk[block_id].xlt1.count *
1718 sizeof(*hw->blk[block_id].xlt1.t);
1728 sizeof(*hw->blk[block_id].xlt2.t);
1729 dst = (u8 *)hw->blk[block_id].xlt2.t;
1730 dst_len = hw->blk[block_id].xlt2.count *
1731 sizeof(*hw->blk[block_id].xlt2.t);
1741 sizeof(*hw->blk[block_id].prof.t);
1742 dst = (u8 *)hw->blk[block_id].prof.t;
1743 dst_len = hw->blk[block_id].prof.count *
1744 sizeof(*hw->blk[block_id].prof.t);
1754 sizeof(*hw->blk[block_id].prof_redir.t);
1755 dst = hw->blk[block_id].prof_redir.t;
1756 dst_len = hw->blk[block_id].prof_redir.count *
1757 sizeof(*hw->blk[block_id].prof_redir.t);
1767 hw->blk[block_id].es.fvw) *
1768 sizeof(*hw->blk[block_id].es.t);
1769 dst = (u8 *)hw->blk[block_id].es.t;
1770 dst_len = (u32)(hw->blk[block_id].es.count *
1771 hw->blk[block_id].es.fvw) *
1772 sizeof(*hw->blk[block_id].es.t);
1821 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
1822 struct ice_prof_tcam *prof = &hw->blk[i].prof;
1823 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
1824 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
1825 struct ice_es *es = &hw->blk[i].es;
1828 if (hw->blk[i].is_list_init)
1834 hw->blk[i].is_list_init = true;
1836 hw->blk[i].overwrite = blk_sizes[i].overwrite;
1941 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
1942 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
1943 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
1944 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
1945 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
1958 struct ice_es *es = &hw->blk[blk_idx].es;
1998 * @blk: the HW block on which to free the VSIG table entries
2000 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
2004 if (!hw->blk[blk].xlt2.vsig_tbl)
2008 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2009 ice_vsig_free(hw, blk, i);
2022 if (hw->blk[i].is_list_init) {
2023 struct ice_es *es = &hw->blk[i].es;
2031 hw->blk[i].is_list_init = false;
2034 ice_free(hw, hw->blk[i].xlt1.ptypes);
2035 ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
2036 ice_free(hw, hw->blk[i].xlt1.t);
2037 ice_free(hw, hw->blk[i].xlt2.t);
2038 ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
2039 ice_free(hw, hw->blk[i].xlt2.vsis);
2040 ice_free(hw, hw->blk[i].prof.t);
2041 ice_free(hw, hw->blk[i].prof_redir.t);
2042 ice_free(hw, hw->blk[i].es.t);
2043 ice_free(hw, hw->blk[i].es.ref_count);
2044 ice_free(hw, hw->blk[i].es.written);
2053 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
2065 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2066 struct ice_prof_tcam *prof = &hw->blk[i].prof;
2067 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2068 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2069 struct ice_es *es = &hw->blk[i].es;
2071 if (hw->blk[i].is_list_init) {
2136 * @blk: the block in which to write the profile ID
2147 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
2158 switch (hw->blk[blk].prof.cdid_bits) {
2191 * @blk: the block in which to write the profile ID
2203 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
2212 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
2213 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
2215 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
2216 hw->blk[blk].prof.t[idx].prof_id = prof_id;
2225 * @blk: HW block
2230 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
2237 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2240 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2252 * @blk: HW block
2257 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
2262 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2275 * @blk: hardware block
2280 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
2283 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
2288 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
2292 id = ice_sect_id(blk, ICE_VEC_TBL);
2306 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
2316 * @blk: hardware block
2321 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
2331 id = ice_sect_id(blk, ICE_PROF_TCAM);
2346 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
2347 sizeof(hw->blk[blk].prof.t->key),
2356 * @blk: hardware block
2361 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
2371 id = ice_sect_id(blk, ICE_XLT1);
2391 * @blk: hardware block
2396 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
2409 id = ice_sect_id(blk, ICE_XLT2);
2434 * @blk: hardware block
2438 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
2488 status = ice_prof_bld_es(hw, blk, b, chgs);
2494 status = ice_prof_bld_tcam(hw, blk, b, chgs);
2500 status = ice_prof_bld_xlt1(blk, b, chgs);
2506 status = ice_prof_bld_xlt2(blk, b, chgs);
2534 * @blk: hardware block
2545 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
2556 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2559 status = ice_find_prof_id(hw, blk, es, &prof_id);
2562 status = ice_alloc_prof_id(hw, blk, &prof_id);
2567 ice_write_es(hw, blk, prof_id, es);
2570 ice_prof_inc_ref(hw, blk, prof_id);
2590 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
2604 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
2608 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2615 * @blk: hardware block
2622 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
2627 LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
2639 * @blk: hardware block
2644 ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
2649 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2650 entry = ice_search_prof_id(hw, blk, id);
2655 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2662 * @blk: hardware block
2667 ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
2672 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2673 entry = ice_search_prof_id(hw, blk, id);
2678 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2685 * @blk: hardware block
2689 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2694 LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2704 * @blk: hardware block
2708 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
2717 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
2723 status = ice_free_tcam_ent(hw, blk, idx);
2731 * @blk: hardware block
2735 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
2744 status = ice_rel_tcam_idx(hw, blk,
2756 * @blk: hardware block
2761 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
2770 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2774 status = ice_rem_prof_id(hw, blk, d);
2783 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2799 p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis);
2806 return ice_vsig_free(hw, blk, vsig);
2812 * @blk: hardware block
2818 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
2825 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2830 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
2832 return ice_rem_vsig(hw, blk, vsig, chg);
2834 status = ice_rem_prof_id(hw, blk, p);
2848 * @blk: hardware block
2852 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
2862 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
2863 if (ice_has_prof_vsig(hw, blk, i, id)) {
2864 status = ice_rem_prof_id_vsig(hw, blk, i, id,
2871 status = ice_upd_prof_hw(hw, blk, &chg);
2885 * @blk: hardware block
2892 int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
2897 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2899 pmap = ice_search_prof_id(hw, blk, id);
2906 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
2911 ice_prof_dec_ref(hw, blk, pmap->prof_id);
2917 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2924 * @blk: hardware block
2929 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
2937 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2939 map = ice_search_prof_id(hw, blk, hdl);
2946 if (!hw->blk[blk].es.written[map->prof_id]) {
2962 hw->blk[blk].es.written[map->prof_id] = true;
2968 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2976 * @blk: hardware block
2983 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
2989 LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3016 * @blk: hardware block
3021 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
3029 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
3030 map = ice_search_prof_id(hw, blk, hdl);
3055 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
3062 * @blk: hardware block
3068 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
3079 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3081 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3119 * @blk: hardware block
3128 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
3141 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
3154 status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx);
3163 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
3191 * @blk: hardware block
3196 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3216 LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3233 status = ice_prof_tcam_ena_dis(hw, blk, false,
3243 status = ice_prof_tcam_ena_dis(hw, blk, true,
3262 * @blk: hardware block
3269 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3283 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
3291 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
3293 map = ice_search_prof_id(hw, blk, hdl);
3315 status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx);
3334 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
3351 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3354 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3356 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
3360 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
3369 * @blk: hardware block
3375 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
3386 new_vsig = ice_vsig_alloc(hw, blk);
3392 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
3396 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
3418 * @blk: hardware block
3425 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
3433 vsig = ice_vsig_alloc(hw, blk);
3437 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
3443 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
3457 * @blk: hardware block
3462 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
3477 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
3488 * @blk: hardware block
3500 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3513 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
3516 status = ice_upd_prof_hw(hw, blk, &chg);
3529 * @blk: hardware block
3538 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
3551 status = ice_get_prof(hw, blk, hdl, &chg);
3556 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
3569 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
3575 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
3583 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
3587 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
3592 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
3595 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
3603 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
3613 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
3619 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
3624 status = ice_create_vsig_from_lst(hw, blk, vsi,
3631 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
3638 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
3641 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
3647 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
3656 status = ice_upd_prof_hw(hw, blk, &chg);
3675 * @blk: hardware block
3685 ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
3693 status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
3725 * @blk: hardware block
3734 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
3746 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
3753 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
3754 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
3770 status = ice_rem_vsig(hw, blk, vsig, &chg);
3774 status = ice_rem_prof_id_vsig(hw, blk, vsig,
3780 status = ice_adj_prof_priorities(hw, blk, vsig,
3788 status = ice_get_profs_vsig(hw, blk, vsig, &copy);
3798 status = ice_move_vsi(hw, blk, vsi,
3803 } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
3812 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
3820 status = ice_create_vsig_from_lst(hw, blk, vsi,
3827 status = ice_adj_prof_priorities(hw, blk, vsig,
3839 status = ice_upd_prof_hw(hw, blk, &chg);
3858 * @blk: hardware block
3868 ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
3876 status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
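
Every match above follows one indexing convention: blk is an enum ice_block that selects a per-block state entry in hw->blk[], and each helper then operates only on that entry's xlt1, xlt2, prof, prof_redir, or es table. Below is a minimal sketch of that convention, using simplified stand-in types; the my_* names, field layout, and block enum are illustrative assumptions, not the driver's actual definitions.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins only -- not the driver's real types. In the
 * driver, enum ice_block indexes hw->blk[], and each element carries
 * that block's xlt1, xlt2, prof, prof_redir, and es tables.
 */
enum my_block { MY_BLK_SW, MY_BLK_RSS, MY_BLK_FD, MY_BLK_COUNT };

struct my_xlt1 { uint16_t count; uint8_t *t; };   /* ptype -> PTG table */
struct my_xlt2 { uint16_t count; uint16_t *t; };  /* VSI -> VSIG table */

struct my_blk_info {
	struct my_xlt1 xlt1;
	struct my_xlt2 xlt2;
};

struct my_hw { struct my_blk_info blk[MY_BLK_COUNT]; };

/* Every helper in the listing has this shape: take (hw, blk), bounds-check
 * the index against the per-block count, then touch only hw->blk[blk].
 */
static uint8_t my_ptg_lookup(const struct my_hw *hw, enum my_block blk,
			     uint16_t ptype)
{
	if (ptype >= hw->blk[blk].xlt1.count)
		return 0; /* out-of-range ptype falls back to the default PTG */
	return hw->blk[blk].xlt1.t[ptype];
}

The real helpers add per-block bookkeeping on top of this (in_use flags, first_ptype/first_vsi chains, profile refcounts), but all of them share this (hw, blk) dispatch shape, which is why the blk parameter appears on nearly every function in the file.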