1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2019 Intel Corporation
3 */
4
5 #include "opae_osdep.h"
6 #include "opae_eth_group.h"
7
8 #define DATA_VAL_INVL 1 /* us */
9 #define DATA_VAL_POLL_TIMEOUT 10 /* us */
10
/* Return a human-readable name for an eth group component type. */
static const char *eth_type_to_string(u8 type)
{
	if (type == ETH_GROUP_PHY)
		return "phy";
	if (type == ETH_GROUP_MAC)
		return "mac";
	if (type == ETH_GROUP_ETHER)
		return "ethernet wrapper";

	return "unknown";
}
24
/*
 * Translate a (type, index) pair into the hardware device-select value,
 * after range-checking the index against the device's configuration.
 *
 * In different speed configurations, the index of PHY and MAC differ:
 *
 *   1 ethernet wrapper -> Device Select 0x0 - fixed value
 *   n PHYs             -> Device Select 0x2,4,6,8,A,C,E,10,...
 *   n MACs             -> Device Select 0x3,5,7,9,B,D,F,11,...
 *
 * Returns 0 on success, -EINVAL for an unknown type or out-of-range index.
 */
static int eth_group_get_select(struct eth_group_device *dev,
		u8 type, u8 index, u8 *select)
{
	switch (type) {
	case ETH_GROUP_PHY:
		if (index >= dev->phy_num)
			return -EINVAL;
		*select = index * 2 + 2;
		break;
	case ETH_GROUP_MAC:
		if (index >= dev->mac_num)
			return -EINVAL;
		*select = index * 2 + 3;
		break;
	case ETH_GROUP_ETHER:
		if (index != 0)
			return -EINVAL;
		*select = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
48
/*
 * Write one 32-bit register of a PHY/MAC/wrapper inside the eth group
 * by composing a write command in the group's CTRL register.
 *
 * Returns 0 on success, -EINVAL if (type, index) is invalid.
 */
int eth_group_write_reg(struct eth_group_device *dev,
		u8 type, u8 index, u16 addr, u32 data)
{
	u8 dev_select = 0;
	u64 cmd;
	int ret;

	dev_debug(dev, "%s type %s index %u addr 0x%x\n",
			__func__, eth_type_to_string(type), index, addr);

	/* resolve the device-select field for this component */
	ret = eth_group_get_select(dev, type, index, &dev_select);
	if (ret)
		return ret;

	/* assemble the write command word field by field */
	cmd = CMD_WR << CTRL_CMD_SHIT;
	cmd |= (u64)dev_select << CTRL_DS_SHIFT;
	cmd |= (u64)addr << CTRL_ADDR_SHIFT;
	cmd |= data & CTRL_WR_DATA;

	/* only PHY has additional feature bit */
	if (type == ETH_GROUP_PHY)
		cmd |= CTRL_FEAT_SELECT;

	opae_writeq(cmd, dev->base + ETH_GROUP_CTRL);

	return 0;
}
77
/*
 * Read one 32-bit register of a PHY/MAC/wrapper inside the eth group.
 *
 * A read command is written to the group's CTRL register, then the STAT
 * register is polled until the hardware raises STAT_DATA_VAL (the
 * command write must precede the poll - do not reorder).
 *
 * Returns 0 on success with *data filled in, -EINVAL for a bad
 * (type, index), or -ETIMEDOUT if the data-valid bit never appears
 * within DATA_VAL_POLL_TIMEOUT.
 */
int eth_group_read_reg(struct eth_group_device *dev,
		u8 type, u8 index, u16 addr, u32 *data)
{
	u8 dev_select = 0;
	u64 v = 0;
	int ret;

	dev_debug(dev, "%s type %s index %u addr 0x%x\n",
			__func__, eth_type_to_string(type), index,
			addr);

	/* find device select */
	ret = eth_group_get_select(dev, type, index, &dev_select);
	if (ret)
		return ret;

	/* compose the read command word for the CTRL register */
	v = CMD_RD << CTRL_CMD_SHIT |
		(u64)dev_select << CTRL_DS_SHIFT |
		(u64)addr << CTRL_ADDR_SHIFT;

	/* only PHY has additional feature bit */
	if (type == ETH_GROUP_PHY)
		v |= CTRL_FEAT_SELECT;

	opae_writeq(v, dev->base + ETH_GROUP_CTRL);

	/* wait for the hardware to latch valid read data into STAT */
	if (opae_readq_poll_timeout(dev->base + ETH_GROUP_STAT,
			v, v & STAT_DATA_VAL, DATA_VAL_INVL,
			DATA_VAL_POLL_TIMEOUT))
		return -ETIMEDOUT;

	*data = (v & STAT_RD_DATA);

	dev_debug(dev, "%s data 0x%x\n", __func__, *data);

	return 0;
}
115
/*
 * Assert (enable=true) or de-assert (enable=false) the reset bits of
 * one MAC via its MAC_CONFIG register.
 *
 * Returns 0 on success or if no action is needed, negative errno on
 * register access failure.
 */
static int eth_group_reset_mac(struct eth_group_device *dev, u8 index,
		bool enable)
{
	u32 cfg, reset_bits;
	int ret;

	/*
	 * only support 25G & 40G mac reset for now. It uses internal reset.
	 * as PHY and MAC are integrated together, below action will trigger
	 * PHY reset too.
	 */
	if (dev->speed != 25 && dev->speed != 40)
		return 0;

	ret = eth_group_read_reg(dev, ETH_GROUP_MAC, index, MAC_CONFIG, &cfg);
	if (ret) {
		dev_err(dev, "fail to read PHY_CONFIG: %d\n", ret);
		return ret;
	}

	/*
	 * skip if mac is in expected state already: all reset bits set for
	 * enable, all clear for disable (a partial state still gets written)
	 */
	reset_bits = cfg & MAC_RESET_MASK;
	if ((enable && reset_bits == MAC_RESET_MASK) ||
			(!enable && reset_bits == 0))
		return 0;

	cfg = enable ? (cfg | MAC_RESET_MASK) : (cfg & ~MAC_RESET_MASK);

	ret = eth_group_write_reg(dev, ETH_GROUP_MAC, index, MAC_CONFIG, cfg);
	if (ret)
		dev_err(dev, "fail to write PHY_CONFIG: %d\n", ret);

	return ret;
}
154
eth_group_mac_uinit(struct eth_group_device * dev)155 static void eth_group_mac_uinit(struct eth_group_device *dev)
156 {
157 u8 i;
158
159 for (i = 0; i < dev->mac_num; i++) {
160 if (eth_group_reset_mac(dev, i, true))
161 dev_err(dev, "fail to disable mac %d\n", i);
162 }
163 }
164
eth_group_mac_init(struct eth_group_device * dev)165 static int eth_group_mac_init(struct eth_group_device *dev)
166 {
167 int ret;
168 u8 i;
169
170 for (i = 0; i < dev->mac_num; i++) {
171 ret = eth_group_reset_mac(dev, i, false);
172 if (ret) {
173 dev_err(dev, "fail to enable mac %d\n", i);
174 goto exit;
175 }
176 }
177
178 return 0;
179
180 exit:
181 while (i--)
182 eth_group_reset_mac(dev, i, true);
183
184 return ret;
185 }
186
/*
 * Assert (enable=true) or de-assert (enable=false) the reset bit of
 * one PHY via its ADD_PHY_CTRL register.
 *
 * Returns 0 on success or if no action is needed, negative errno on
 * register access failure.
 */
static int eth_group_reset_phy(struct eth_group_device *dev, u8 index,
		bool enable)
{
	bool in_reset;
	u32 ctrl;
	int ret;

	/* only support 10G PHY reset for now. It uses external reset. */
	if (dev->speed != 10)
		return 0;

	ret = eth_group_read_reg(dev, ETH_GROUP_PHY, index,
			ADD_PHY_CTRL, &ctrl);
	if (ret) {
		dev_err(dev, "fail to read ADD_PHY_CTRL reg: %d\n", ret);
		return ret;
	}

	/* return if PHY is already in expected state */
	in_reset = (ctrl & PHY_RESET) != 0;
	if (in_reset == enable)
		return 0;

	ctrl = enable ? (ctrl | PHY_RESET) : (ctrl & ~PHY_RESET);

	ret = eth_group_write_reg(dev, ETH_GROUP_PHY, index,
			ADD_PHY_CTRL, ctrl);
	if (ret)
		dev_err(dev, "fail to write ADD_PHY_CTRL reg: %d\n", ret);

	return ret;
}
220
eth_group_phy_init(struct eth_group_device * dev)221 static int eth_group_phy_init(struct eth_group_device *dev)
222 {
223 int ret;
224 int i;
225
226 for (i = 0; i < dev->phy_num; i++) {
227 ret = eth_group_reset_phy(dev, i, false);
228 if (ret) {
229 dev_err(dev, "fail to enable phy %d\n", i);
230 goto exit;
231 }
232 }
233
234 return 0;
235 exit:
236 while (i--)
237 eth_group_reset_phy(dev, i, true);
238
239 return ret;
240 }
241
eth_group_phy_uinit(struct eth_group_device * dev)242 static void eth_group_phy_uinit(struct eth_group_device *dev)
243 {
244 int i;
245
246 for (i = 0; i < dev->phy_num; i++) {
247 if (eth_group_reset_phy(dev, i, true))
248 dev_err(dev, "fail to disable phy %d\n", i);
249 }
250 }
251
/*
 * Initialize the eth group hardware: enable all PHYs, then all MACs.
 * If MAC init fails, the already-enabled PHYs are put back into reset.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int eth_group_hw_init(struct eth_group_device *dev)
{
	int ret;

	ret = eth_group_phy_init(dev);
	if (ret) {
		dev_err(dev, "fail to init eth group phys\n");
		return ret;
	}

	ret = eth_group_mac_init(dev);
	if (ret) {
		/* fix: was dev_err(priv->dev, ...) - 'priv' is undeclared here */
		dev_err(dev, "fail to init eth group macs\n");
		goto phy_exit;
	}

	return 0;

phy_exit:
	eth_group_phy_uinit(dev);
	return ret;
}
274
/*
 * Tear down the eth group hardware: put MACs back into reset first,
 * then the PHYs (reverse of eth_group_hw_init order).
 */
static void eth_group_hw_uinit(struct eth_group_device *dev)
{
	eth_group_mac_uinit(dev);
	eth_group_phy_uinit(dev);
}
280
eth_group_probe(void * base)281 struct eth_group_device *eth_group_probe(void *base)
282 {
283 struct eth_group_device *dev;
284
285 dev = opae_malloc(sizeof(*dev));
286 if (!dev)
287 return NULL;
288
289 dev->base = (u8 *)base;
290
291 dev->info.info = opae_readq(dev->base + ETH_GROUP_INFO);
292 dev->group_id = dev->info.group_id;
293 dev->phy_num = dev->mac_num = dev->info.num_phys;
294 dev->speed = dev->info.speed;
295
296 dev->status = ETH_GROUP_DEV_ATTACHED;
297
298 if (eth_group_hw_init(dev)) {
299 dev_err(dev, "eth group hw init fail\n");
300 return NULL;
301 }
302
303 dev_info(dev, "eth group device %d probe done: phy_num=mac_num:%d, speed=%d\n",
304 dev->group_id, dev->phy_num, dev->speed);
305
306 return dev;
307 }
308
eth_group_release(struct eth_group_device * dev)309 void eth_group_release(struct eth_group_device *dev)
310 {
311 if (dev) {
312 eth_group_hw_uinit(dev);
313 dev->status = ETH_GROUP_DEV_NOUSED;
314 opae_free(dev);
315 }
316 }
317