xref: /dpdk/drivers/net/cxgbe/base/t4_hw.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 
6 #include <netinet/in.h>
7 
8 #include <rte_interrupts.h>
9 #include <rte_log.h>
10 #include <rte_debug.h>
11 #include <rte_pci.h>
12 #include <rte_atomic.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_memory.h>
15 #include <rte_tailq.h>
16 #include <rte_eal.h>
17 #include <rte_alarm.h>
18 #include <rte_ether.h>
19 #include <ethdev_driver.h>
20 #include <rte_malloc.h>
21 #include <rte_random.h>
22 #include <rte_dev.h>
23 #include <rte_byteorder.h>
24 
25 #include "common.h"
26 #include "t4_regs.h"
27 #include "t4_regs_values.h"
28 #include "t4fw_interface.h"
29 
30 /**
31  * t4_read_mtu_tbl - returns the values in the HW path MTU table
32  * @adap: the adapter
33  * @mtus: where to store the MTU values
34  * @mtu_log: where to store the MTU base-2 log (may be %NULL)
35  *
36  * Reads the HW path MTU table.
37  */
38 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
39 {
40 	u32 v;
41 	int i;
42 
43 	for (i = 0; i < NMTUS; ++i) {
44 		t4_write_reg(adap, A_TP_MTU_TABLE,
45 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
46 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
47 		mtus[i] = G_MTUVALUE(v);
48 		if (mtu_log)
49 			mtu_log[i] = G_MTUWIDTH(v);
50 	}
51 }
52 
53 /**
54  * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
55  * @adap: the adapter
56  * @addr: the indirect TP register address
57  * @mask: specifies the field within the register to modify
58  * @val: new value for the field
59  *
60  * Sets a field of an indirect TP register to the given value.
61  */
62 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
63 			    unsigned int mask, unsigned int val)
64 {
65 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
66 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
67 	t4_write_reg(adap, A_TP_PIO_DATA, val);
68 }
69 
70 /* The minimum additive increment value for the congestion control table */
71 #define CC_MIN_INCR 2U
72 
73 /**
74  * t4_load_mtus - write the MTU and congestion control HW tables
75  * @adap: the adapter
76  * @mtus: the values for the MTU table
77  * @alpha: the values for the congestion control alpha parameter
78  * @beta: the values for the congestion control beta parameter
79  *
80  * Write the HW MTU table with the supplied MTUs and the high-speed
81  * congestion control table with the supplied alpha, beta, and MTUs.
82  * We write the two tables together because the additive increments
83  * depend on the MTUs.
84  */
85 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
86 		  const unsigned short *alpha, const unsigned short *beta)
87 {
88 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
89 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
90 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
91 		28672, 40960, 57344, 81920, 114688, 163840, 229376
92 	};
93 
94 	unsigned int i, w;
95 
96 	for (i = 0; i < NMTUS; ++i) {
97 		unsigned int mtu = mtus[i];
98 		unsigned int log2 = cxgbe_fls(mtu);
99 
100 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
101 			log2--;
102 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
103 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
104 
105 		for (w = 0; w < NCCTRL_WIN; ++w) {
106 			unsigned int inc;
107 
108 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
109 				  CC_MIN_INCR);
110 
111 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
112 				     (w << 16) | (beta[w] << 13) | inc);
113 		}
114 	}
115 }
116 
117 /**
118  * t4_wait_op_done_val - wait until an operation is completed
119  * @adapter: the adapter performing the operation
120  * @reg: the register to check for completion
121  * @mask: a single-bit field within @reg that indicates completion
122  * @polarity: the value of the field when the operation is completed
123  * @attempts: number of check iterations
124  * @delay: delay in usecs between iterations
125  * @valp: where to store the value of the register at completion time
126  *
127  * Wait until an operation is completed by checking a bit in a register
128  * up to @attempts times.  If @valp is not NULL the value of the register
129  * at the time it indicated completion is stored there.  Returns 0 if the
130  * operation completes and -EAGAIN otherwise.
131  */
132 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
133 			int polarity, int attempts, int delay, u32 *valp)
134 {
135 	while (1) {
136 		u32 val = t4_read_reg(adapter, reg);
137 
138 		if (!!(val & mask) == polarity) {
139 			if (valp)
140 				*valp = val;
141 			return 0;
142 		}
143 		if (--attempts == 0)
144 			return -EAGAIN;
145 		if (delay)
146 			udelay(delay);
147 	}
148 }
149 
150 /**
151  * t4_set_reg_field - set a register field to a value
152  * @adapter: the adapter to program
153  * @addr: the register address
154  * @mask: specifies the portion of the register to modify
155  * @val: the new value for the register field
156  *
157  * Sets a register field specified by the supplied mask to the
158  * given value.
159  */
160 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
161 		      u32 val)
162 {
163 	u32 v = t4_read_reg(adapter, addr) & ~mask;
164 
165 	t4_write_reg(adapter, addr, v | val);
166 	(void)t4_read_reg(adapter, addr);      /* flush */
167 }
168 
169 /**
170  * t4_read_indirect - read indirectly addressed registers
171  * @adap: the adapter
172  * @addr_reg: register holding the indirect address
173  * @data_reg: register holding the value of the indirect register
174  * @vals: where the read register values are stored
175  * @nregs: how many indirect registers to read
176  * @start_idx: index of first indirect register to read
177  *
178  * Reads registers that are accessed indirectly through an address/data
179  * register pair.
180  */
181 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
182 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
183 		      unsigned int start_idx)
184 {
185 	while (nregs--) {
186 		t4_write_reg(adap, addr_reg, start_idx);
187 		*vals++ = t4_read_reg(adap, data_reg);
188 		start_idx++;
189 	}
190 }
191 
192 /**
193  * t4_write_indirect - write indirectly addressed registers
194  * @adap: the adapter
195  * @addr_reg: register holding the indirect addresses
196  * @data_reg: register holding the value for the indirect registers
197  * @vals: values to write
198  * @nregs: how many indirect registers to write
199  * @start_idx: address of first indirect register to write
200  *
201  * Writes a sequential block of registers that are accessed indirectly
202  * through an address/data register pair.
203  */
204 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
205 		       unsigned int data_reg, const u32 *vals,
206 		       unsigned int nregs, unsigned int start_idx)
207 {
208 	while (nregs--) {
209 		t4_write_reg(adap, addr_reg, start_idx++);
210 		t4_write_reg(adap, data_reg, *vals++);
211 	}
212 }
213 
214 /**
215  * t4_report_fw_error - report firmware error
216  * @adap: the adapter
217  *
218  * The adapter firmware can indicate error conditions to the host.
219  * If the firmware has indicated an error, print out the reason for
220  * the firmware error.
221  */
222 static void t4_report_fw_error(struct adapter *adap)
223 {
224 	static const char * const reason[] = {
225 		"Crash",			/* PCIE_FW_EVAL_CRASH */
226 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
227 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
228 		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
229 		"Unexpected Event",	/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
230 		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
231 		"Device Shutdown",	/* PCIE_FW_EVAL_DEVICESHUTDOWN */
232 		"Reserved",			/* reserved */
233 	};
234 	u32 pcie_fw;
235 
236 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
237 	if (pcie_fw & F_PCIE_FW_ERR)
238 		pr_err("%s: Firmware reports adapter error: %s\n",
239 		       __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
240 }
241 
242 /*
243  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
244  */
245 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
246 			 u32 mbox_addr)
247 {
248 	for ( ; nflit; nflit--, mbox_addr += 8)
249 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
250 }
251 
252 /*
253  * Handle a FW assertion reported in a mailbox.
254  */
255 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
256 {
257 	struct fw_debug_cmd asrt;
258 
259 	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
260 	pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
261 		asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
262 		be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
263 }
264 
265 #define X_CIM_PF_NOACCESS 0xeeeeeeee
266 
267 /*
268  * If the Host OS Driver needs locking around accesses to the mailbox, this
269  * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
270  */
271 /* makes single-statement usage a bit cleaner ... */
272 #ifdef T4_OS_NEEDS_MBOX_LOCKING
273 #define T4_OS_MBOX_LOCKING(x) x
274 #else
275 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
276 #endif
277 
/**
 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes (multiple of 16, at most MBOX_LEN)
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *	     (negative implies @sleep_ok=false)
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 * Note that passing in a negative @timeout is an alternate mechanism
 * for specifying @sleep_ok=false.  This is useful when a higher level
 * interface allows for specification of @timeout but not @sleep_ok ...
 *
 * Returns 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
			    const void __attribute__((__may_alias__)) *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms;
	unsigned int delay_idx;
	/* Heap scratch copy of @cmd so it can be walked as 64-bit flits. */
	__be64 *temp = (__be64 *)malloc(size * sizeof(char));
	__be64 *p = temp;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	struct mbox_entry entry;	/* used only with T4_OS_NEEDS_MBOX_LOCKING */
	u32 pcie_fw = 0;

	if (!temp)
		return -ENOMEM;

	/* Commands must be whole 16-byte units and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN) {
		free(temp);
		return -EINVAL;
	}

	memset(p, 0, size);
	memcpy(p, (const __be64 *)cmd, size);

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_list,
					      &adap->mbox_lock);
			t4_report_fw_error(adap);
			free(temp);
			return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			rte_delay_ms(ms);
		}
	}
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_list,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		free(temp);
		/* FW still owns it -> busy; nobody took it -> timed out */
		return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		dev_err(adap, "found VALID command in mbox %u: "
			"%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
			"%016llx %016llx %016llx %016llx\n", __func__,  (mbox),
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));

	/* Hand the mailbox to the firmware to kick off execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			/* NOTE(review): the non-sleep path also calls
			 * msleep() here with a constant ms — presumably
			 * msleep maps to a busy delay in this environment;
			 * confirm against the OS-dependent layer.
			 */
			msleep(ms);
		}

		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		v = t4_read_reg(adap, ctl_reg);
		/* X_CIM_PF_NOACCESS means the read itself didn't go through */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* We own the mailbox again but there's no reply yet:
			 * release ownership and keep polling.
			 */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			CXGBE_DEBUG_MBOX(adap,
			"%s: mbox %u: %016llx %016llx %016llx %016llx "
			"%016llx %016llx %016llx %016llx\n", __func__,  (mbox),
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));

			CXGBE_DEBUG_MBOX(adap,
				"command %#x completed in %d ms (%ssleeping)\n",
				*(const u8 *)cmd,
				i + ms, sleep_ok ? "" : "non-");

			/* First flit of the reply carries the FW opcode;
			 * a FW_DEBUG_CMD reply is an assertion report, not
			 * a normal completion.
			 */
			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(
				t4_os_atomic_list_del(&entry, &adap->mbox_list,
						      &adap->mbox_lock));
			free(temp);
			/* Extract FW's retval field and negate it (0 -> 0). */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	dev_err(adap, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
						 &adap->mbox_list,
						 &adap->mbox_lock));
	t4_report_fw_error(adap);
	free(temp);
	return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
}
538 
539 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
540 		    void *rpl, bool sleep_ok)
541 {
542 	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
543 				       FW_CMD_MAX_TIMEOUT);
544 }
545 
546 /**
547  * t4_get_regs_len - return the size of the chips register set
548  * @adapter: the adapter
549  *
550  * Returns the size of the chip's BAR0 register space.
551  */
552 unsigned int t4_get_regs_len(struct adapter *adapter)
553 {
554 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
555 
556 	switch (chip_version) {
557 	case CHELSIO_T5:
558 	case CHELSIO_T6:
559 		return T5_REGMAP_SIZE;
560 	}
561 
562 	dev_err(adapter,
563 		"Unsupported chip version %d\n", chip_version);
564 	return 0;
565 }
566 
567 /**
568  * t4_get_regs - read chip registers into provided buffer
569  * @adap: the adapter
570  * @buf: register buffer
571  * @buf_size: size (in bytes) of register buffer
572  *
573  * If the provided register buffer isn't large enough for the chip's
574  * full register range, the register dump will be truncated to the
575  * register buffer's size.
576  */
577 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
578 {
579 	static const unsigned int t5_reg_ranges[] = {
580 		0x1008, 0x10c0,
581 		0x10cc, 0x10f8,
582 		0x1100, 0x1100,
583 		0x110c, 0x1148,
584 		0x1180, 0x1184,
585 		0x1190, 0x1194,
586 		0x11a0, 0x11a4,
587 		0x11b0, 0x11b4,
588 		0x11fc, 0x123c,
589 		0x1280, 0x173c,
590 		0x1800, 0x18fc,
591 		0x3000, 0x3028,
592 		0x3060, 0x30b0,
593 		0x30b8, 0x30d8,
594 		0x30e0, 0x30fc,
595 		0x3140, 0x357c,
596 		0x35a8, 0x35cc,
597 		0x35ec, 0x35ec,
598 		0x3600, 0x5624,
599 		0x56cc, 0x56ec,
600 		0x56f4, 0x5720,
601 		0x5728, 0x575c,
602 		0x580c, 0x5814,
603 		0x5890, 0x589c,
604 		0x58a4, 0x58ac,
605 		0x58b8, 0x58bc,
606 		0x5940, 0x59c8,
607 		0x59d0, 0x59dc,
608 		0x59fc, 0x5a18,
609 		0x5a60, 0x5a70,
610 		0x5a80, 0x5a9c,
611 		0x5b94, 0x5bfc,
612 		0x6000, 0x6020,
613 		0x6028, 0x6040,
614 		0x6058, 0x609c,
615 		0x60a8, 0x614c,
616 		0x7700, 0x7798,
617 		0x77c0, 0x78fc,
618 		0x7b00, 0x7b58,
619 		0x7b60, 0x7b84,
620 		0x7b8c, 0x7c54,
621 		0x7d00, 0x7d38,
622 		0x7d40, 0x7d80,
623 		0x7d8c, 0x7ddc,
624 		0x7de4, 0x7e04,
625 		0x7e10, 0x7e1c,
626 		0x7e24, 0x7e38,
627 		0x7e40, 0x7e44,
628 		0x7e4c, 0x7e78,
629 		0x7e80, 0x7edc,
630 		0x7ee8, 0x7efc,
631 		0x8dc0, 0x8de0,
632 		0x8df8, 0x8e04,
633 		0x8e10, 0x8e84,
634 		0x8ea0, 0x8f84,
635 		0x8fc0, 0x9058,
636 		0x9060, 0x9060,
637 		0x9068, 0x90f8,
638 		0x9400, 0x9408,
639 		0x9410, 0x9470,
640 		0x9600, 0x9600,
641 		0x9608, 0x9638,
642 		0x9640, 0x96f4,
643 		0x9800, 0x9808,
644 		0x9820, 0x983c,
645 		0x9850, 0x9864,
646 		0x9c00, 0x9c6c,
647 		0x9c80, 0x9cec,
648 		0x9d00, 0x9d6c,
649 		0x9d80, 0x9dec,
650 		0x9e00, 0x9e6c,
651 		0x9e80, 0x9eec,
652 		0x9f00, 0x9f6c,
653 		0x9f80, 0xa020,
654 		0xd004, 0xd004,
655 		0xd010, 0xd03c,
656 		0xdfc0, 0xdfe0,
657 		0xe000, 0x1106c,
658 		0x11074, 0x11088,
659 		0x1109c, 0x1117c,
660 		0x11190, 0x11204,
661 		0x19040, 0x1906c,
662 		0x19078, 0x19080,
663 		0x1908c, 0x190e8,
664 		0x190f0, 0x190f8,
665 		0x19100, 0x19110,
666 		0x19120, 0x19124,
667 		0x19150, 0x19194,
668 		0x1919c, 0x191b0,
669 		0x191d0, 0x191e8,
670 		0x19238, 0x19290,
671 		0x193f8, 0x19428,
672 		0x19430, 0x19444,
673 		0x1944c, 0x1946c,
674 		0x19474, 0x19474,
675 		0x19490, 0x194cc,
676 		0x194f0, 0x194f8,
677 		0x19c00, 0x19c08,
678 		0x19c10, 0x19c60,
679 		0x19c94, 0x19ce4,
680 		0x19cf0, 0x19d40,
681 		0x19d50, 0x19d94,
682 		0x19da0, 0x19de8,
683 		0x19df0, 0x19e10,
684 		0x19e50, 0x19e90,
685 		0x19ea0, 0x19f24,
686 		0x19f34, 0x19f34,
687 		0x19f40, 0x19f50,
688 		0x19f90, 0x19fb4,
689 		0x19fc4, 0x19fe4,
690 		0x1a000, 0x1a004,
691 		0x1a010, 0x1a06c,
692 		0x1a0b0, 0x1a0e4,
693 		0x1a0ec, 0x1a0f8,
694 		0x1a100, 0x1a108,
695 		0x1a114, 0x1a120,
696 		0x1a128, 0x1a130,
697 		0x1a138, 0x1a138,
698 		0x1a190, 0x1a1c4,
699 		0x1a1fc, 0x1a1fc,
700 		0x1e008, 0x1e00c,
701 		0x1e040, 0x1e044,
702 		0x1e04c, 0x1e04c,
703 		0x1e284, 0x1e290,
704 		0x1e2c0, 0x1e2c0,
705 		0x1e2e0, 0x1e2e0,
706 		0x1e300, 0x1e384,
707 		0x1e3c0, 0x1e3c8,
708 		0x1e408, 0x1e40c,
709 		0x1e440, 0x1e444,
710 		0x1e44c, 0x1e44c,
711 		0x1e684, 0x1e690,
712 		0x1e6c0, 0x1e6c0,
713 		0x1e6e0, 0x1e6e0,
714 		0x1e700, 0x1e784,
715 		0x1e7c0, 0x1e7c8,
716 		0x1e808, 0x1e80c,
717 		0x1e840, 0x1e844,
718 		0x1e84c, 0x1e84c,
719 		0x1ea84, 0x1ea90,
720 		0x1eac0, 0x1eac0,
721 		0x1eae0, 0x1eae0,
722 		0x1eb00, 0x1eb84,
723 		0x1ebc0, 0x1ebc8,
724 		0x1ec08, 0x1ec0c,
725 		0x1ec40, 0x1ec44,
726 		0x1ec4c, 0x1ec4c,
727 		0x1ee84, 0x1ee90,
728 		0x1eec0, 0x1eec0,
729 		0x1eee0, 0x1eee0,
730 		0x1ef00, 0x1ef84,
731 		0x1efc0, 0x1efc8,
732 		0x1f008, 0x1f00c,
733 		0x1f040, 0x1f044,
734 		0x1f04c, 0x1f04c,
735 		0x1f284, 0x1f290,
736 		0x1f2c0, 0x1f2c0,
737 		0x1f2e0, 0x1f2e0,
738 		0x1f300, 0x1f384,
739 		0x1f3c0, 0x1f3c8,
740 		0x1f408, 0x1f40c,
741 		0x1f440, 0x1f444,
742 		0x1f44c, 0x1f44c,
743 		0x1f684, 0x1f690,
744 		0x1f6c0, 0x1f6c0,
745 		0x1f6e0, 0x1f6e0,
746 		0x1f700, 0x1f784,
747 		0x1f7c0, 0x1f7c8,
748 		0x1f808, 0x1f80c,
749 		0x1f840, 0x1f844,
750 		0x1f84c, 0x1f84c,
751 		0x1fa84, 0x1fa90,
752 		0x1fac0, 0x1fac0,
753 		0x1fae0, 0x1fae0,
754 		0x1fb00, 0x1fb84,
755 		0x1fbc0, 0x1fbc8,
756 		0x1fc08, 0x1fc0c,
757 		0x1fc40, 0x1fc44,
758 		0x1fc4c, 0x1fc4c,
759 		0x1fe84, 0x1fe90,
760 		0x1fec0, 0x1fec0,
761 		0x1fee0, 0x1fee0,
762 		0x1ff00, 0x1ff84,
763 		0x1ffc0, 0x1ffc8,
764 		0x30000, 0x30030,
765 		0x30038, 0x30038,
766 		0x30040, 0x30040,
767 		0x30100, 0x30144,
768 		0x30190, 0x301a0,
769 		0x301a8, 0x301b8,
770 		0x301c4, 0x301c8,
771 		0x301d0, 0x301d0,
772 		0x30200, 0x30318,
773 		0x30400, 0x304b4,
774 		0x304c0, 0x3052c,
775 		0x30540, 0x3061c,
776 		0x30800, 0x30828,
777 		0x30834, 0x30834,
778 		0x308c0, 0x30908,
779 		0x30910, 0x309ac,
780 		0x30a00, 0x30a14,
781 		0x30a1c, 0x30a2c,
782 		0x30a44, 0x30a50,
783 		0x30a74, 0x30a74,
784 		0x30a7c, 0x30afc,
785 		0x30b08, 0x30c24,
786 		0x30d00, 0x30d00,
787 		0x30d08, 0x30d14,
788 		0x30d1c, 0x30d20,
789 		0x30d3c, 0x30d3c,
790 		0x30d48, 0x30d50,
791 		0x31200, 0x3120c,
792 		0x31220, 0x31220,
793 		0x31240, 0x31240,
794 		0x31600, 0x3160c,
795 		0x31a00, 0x31a1c,
796 		0x31e00, 0x31e20,
797 		0x31e38, 0x31e3c,
798 		0x31e80, 0x31e80,
799 		0x31e88, 0x31ea8,
800 		0x31eb0, 0x31eb4,
801 		0x31ec8, 0x31ed4,
802 		0x31fb8, 0x32004,
803 		0x32200, 0x32200,
804 		0x32208, 0x32240,
805 		0x32248, 0x32280,
806 		0x32288, 0x322c0,
807 		0x322c8, 0x322fc,
808 		0x32600, 0x32630,
809 		0x32a00, 0x32abc,
810 		0x32b00, 0x32b10,
811 		0x32b20, 0x32b30,
812 		0x32b40, 0x32b50,
813 		0x32b60, 0x32b70,
814 		0x33000, 0x33028,
815 		0x33030, 0x33048,
816 		0x33060, 0x33068,
817 		0x33070, 0x3309c,
818 		0x330f0, 0x33128,
819 		0x33130, 0x33148,
820 		0x33160, 0x33168,
821 		0x33170, 0x3319c,
822 		0x331f0, 0x33238,
823 		0x33240, 0x33240,
824 		0x33248, 0x33250,
825 		0x3325c, 0x33264,
826 		0x33270, 0x332b8,
827 		0x332c0, 0x332e4,
828 		0x332f8, 0x33338,
829 		0x33340, 0x33340,
830 		0x33348, 0x33350,
831 		0x3335c, 0x33364,
832 		0x33370, 0x333b8,
833 		0x333c0, 0x333e4,
834 		0x333f8, 0x33428,
835 		0x33430, 0x33448,
836 		0x33460, 0x33468,
837 		0x33470, 0x3349c,
838 		0x334f0, 0x33528,
839 		0x33530, 0x33548,
840 		0x33560, 0x33568,
841 		0x33570, 0x3359c,
842 		0x335f0, 0x33638,
843 		0x33640, 0x33640,
844 		0x33648, 0x33650,
845 		0x3365c, 0x33664,
846 		0x33670, 0x336b8,
847 		0x336c0, 0x336e4,
848 		0x336f8, 0x33738,
849 		0x33740, 0x33740,
850 		0x33748, 0x33750,
851 		0x3375c, 0x33764,
852 		0x33770, 0x337b8,
853 		0x337c0, 0x337e4,
854 		0x337f8, 0x337fc,
855 		0x33814, 0x33814,
856 		0x3382c, 0x3382c,
857 		0x33880, 0x3388c,
858 		0x338e8, 0x338ec,
859 		0x33900, 0x33928,
860 		0x33930, 0x33948,
861 		0x33960, 0x33968,
862 		0x33970, 0x3399c,
863 		0x339f0, 0x33a38,
864 		0x33a40, 0x33a40,
865 		0x33a48, 0x33a50,
866 		0x33a5c, 0x33a64,
867 		0x33a70, 0x33ab8,
868 		0x33ac0, 0x33ae4,
869 		0x33af8, 0x33b10,
870 		0x33b28, 0x33b28,
871 		0x33b3c, 0x33b50,
872 		0x33bf0, 0x33c10,
873 		0x33c28, 0x33c28,
874 		0x33c3c, 0x33c50,
875 		0x33cf0, 0x33cfc,
876 		0x34000, 0x34030,
877 		0x34038, 0x34038,
878 		0x34040, 0x34040,
879 		0x34100, 0x34144,
880 		0x34190, 0x341a0,
881 		0x341a8, 0x341b8,
882 		0x341c4, 0x341c8,
883 		0x341d0, 0x341d0,
884 		0x34200, 0x34318,
885 		0x34400, 0x344b4,
886 		0x344c0, 0x3452c,
887 		0x34540, 0x3461c,
888 		0x34800, 0x34828,
889 		0x34834, 0x34834,
890 		0x348c0, 0x34908,
891 		0x34910, 0x349ac,
892 		0x34a00, 0x34a14,
893 		0x34a1c, 0x34a2c,
894 		0x34a44, 0x34a50,
895 		0x34a74, 0x34a74,
896 		0x34a7c, 0x34afc,
897 		0x34b08, 0x34c24,
898 		0x34d00, 0x34d00,
899 		0x34d08, 0x34d14,
900 		0x34d1c, 0x34d20,
901 		0x34d3c, 0x34d3c,
902 		0x34d48, 0x34d50,
903 		0x35200, 0x3520c,
904 		0x35220, 0x35220,
905 		0x35240, 0x35240,
906 		0x35600, 0x3560c,
907 		0x35a00, 0x35a1c,
908 		0x35e00, 0x35e20,
909 		0x35e38, 0x35e3c,
910 		0x35e80, 0x35e80,
911 		0x35e88, 0x35ea8,
912 		0x35eb0, 0x35eb4,
913 		0x35ec8, 0x35ed4,
914 		0x35fb8, 0x36004,
915 		0x36200, 0x36200,
916 		0x36208, 0x36240,
917 		0x36248, 0x36280,
918 		0x36288, 0x362c0,
919 		0x362c8, 0x362fc,
920 		0x36600, 0x36630,
921 		0x36a00, 0x36abc,
922 		0x36b00, 0x36b10,
923 		0x36b20, 0x36b30,
924 		0x36b40, 0x36b50,
925 		0x36b60, 0x36b70,
926 		0x37000, 0x37028,
927 		0x37030, 0x37048,
928 		0x37060, 0x37068,
929 		0x37070, 0x3709c,
930 		0x370f0, 0x37128,
931 		0x37130, 0x37148,
932 		0x37160, 0x37168,
933 		0x37170, 0x3719c,
934 		0x371f0, 0x37238,
935 		0x37240, 0x37240,
936 		0x37248, 0x37250,
937 		0x3725c, 0x37264,
938 		0x37270, 0x372b8,
939 		0x372c0, 0x372e4,
940 		0x372f8, 0x37338,
941 		0x37340, 0x37340,
942 		0x37348, 0x37350,
943 		0x3735c, 0x37364,
944 		0x37370, 0x373b8,
945 		0x373c0, 0x373e4,
946 		0x373f8, 0x37428,
947 		0x37430, 0x37448,
948 		0x37460, 0x37468,
949 		0x37470, 0x3749c,
950 		0x374f0, 0x37528,
951 		0x37530, 0x37548,
952 		0x37560, 0x37568,
953 		0x37570, 0x3759c,
954 		0x375f0, 0x37638,
955 		0x37640, 0x37640,
956 		0x37648, 0x37650,
957 		0x3765c, 0x37664,
958 		0x37670, 0x376b8,
959 		0x376c0, 0x376e4,
960 		0x376f8, 0x37738,
961 		0x37740, 0x37740,
962 		0x37748, 0x37750,
963 		0x3775c, 0x37764,
964 		0x37770, 0x377b8,
965 		0x377c0, 0x377e4,
966 		0x377f8, 0x377fc,
967 		0x37814, 0x37814,
968 		0x3782c, 0x3782c,
969 		0x37880, 0x3788c,
970 		0x378e8, 0x378ec,
971 		0x37900, 0x37928,
972 		0x37930, 0x37948,
973 		0x37960, 0x37968,
974 		0x37970, 0x3799c,
975 		0x379f0, 0x37a38,
976 		0x37a40, 0x37a40,
977 		0x37a48, 0x37a50,
978 		0x37a5c, 0x37a64,
979 		0x37a70, 0x37ab8,
980 		0x37ac0, 0x37ae4,
981 		0x37af8, 0x37b10,
982 		0x37b28, 0x37b28,
983 		0x37b3c, 0x37b50,
984 		0x37bf0, 0x37c10,
985 		0x37c28, 0x37c28,
986 		0x37c3c, 0x37c50,
987 		0x37cf0, 0x37cfc,
988 		0x38000, 0x38030,
989 		0x38038, 0x38038,
990 		0x38040, 0x38040,
991 		0x38100, 0x38144,
992 		0x38190, 0x381a0,
993 		0x381a8, 0x381b8,
994 		0x381c4, 0x381c8,
995 		0x381d0, 0x381d0,
996 		0x38200, 0x38318,
997 		0x38400, 0x384b4,
998 		0x384c0, 0x3852c,
999 		0x38540, 0x3861c,
1000 		0x38800, 0x38828,
1001 		0x38834, 0x38834,
1002 		0x388c0, 0x38908,
1003 		0x38910, 0x389ac,
1004 		0x38a00, 0x38a14,
1005 		0x38a1c, 0x38a2c,
1006 		0x38a44, 0x38a50,
1007 		0x38a74, 0x38a74,
1008 		0x38a7c, 0x38afc,
1009 		0x38b08, 0x38c24,
1010 		0x38d00, 0x38d00,
1011 		0x38d08, 0x38d14,
1012 		0x38d1c, 0x38d20,
1013 		0x38d3c, 0x38d3c,
1014 		0x38d48, 0x38d50,
1015 		0x39200, 0x3920c,
1016 		0x39220, 0x39220,
1017 		0x39240, 0x39240,
1018 		0x39600, 0x3960c,
1019 		0x39a00, 0x39a1c,
1020 		0x39e00, 0x39e20,
1021 		0x39e38, 0x39e3c,
1022 		0x39e80, 0x39e80,
1023 		0x39e88, 0x39ea8,
1024 		0x39eb0, 0x39eb4,
1025 		0x39ec8, 0x39ed4,
1026 		0x39fb8, 0x3a004,
1027 		0x3a200, 0x3a200,
1028 		0x3a208, 0x3a240,
1029 		0x3a248, 0x3a280,
1030 		0x3a288, 0x3a2c0,
1031 		0x3a2c8, 0x3a2fc,
1032 		0x3a600, 0x3a630,
1033 		0x3aa00, 0x3aabc,
1034 		0x3ab00, 0x3ab10,
1035 		0x3ab20, 0x3ab30,
1036 		0x3ab40, 0x3ab50,
1037 		0x3ab60, 0x3ab70,
1038 		0x3b000, 0x3b028,
1039 		0x3b030, 0x3b048,
1040 		0x3b060, 0x3b068,
1041 		0x3b070, 0x3b09c,
1042 		0x3b0f0, 0x3b128,
1043 		0x3b130, 0x3b148,
1044 		0x3b160, 0x3b168,
1045 		0x3b170, 0x3b19c,
1046 		0x3b1f0, 0x3b238,
1047 		0x3b240, 0x3b240,
1048 		0x3b248, 0x3b250,
1049 		0x3b25c, 0x3b264,
1050 		0x3b270, 0x3b2b8,
1051 		0x3b2c0, 0x3b2e4,
1052 		0x3b2f8, 0x3b338,
1053 		0x3b340, 0x3b340,
1054 		0x3b348, 0x3b350,
1055 		0x3b35c, 0x3b364,
1056 		0x3b370, 0x3b3b8,
1057 		0x3b3c0, 0x3b3e4,
1058 		0x3b3f8, 0x3b428,
1059 		0x3b430, 0x3b448,
1060 		0x3b460, 0x3b468,
1061 		0x3b470, 0x3b49c,
1062 		0x3b4f0, 0x3b528,
1063 		0x3b530, 0x3b548,
1064 		0x3b560, 0x3b568,
1065 		0x3b570, 0x3b59c,
1066 		0x3b5f0, 0x3b638,
1067 		0x3b640, 0x3b640,
1068 		0x3b648, 0x3b650,
1069 		0x3b65c, 0x3b664,
1070 		0x3b670, 0x3b6b8,
1071 		0x3b6c0, 0x3b6e4,
1072 		0x3b6f8, 0x3b738,
1073 		0x3b740, 0x3b740,
1074 		0x3b748, 0x3b750,
1075 		0x3b75c, 0x3b764,
1076 		0x3b770, 0x3b7b8,
1077 		0x3b7c0, 0x3b7e4,
1078 		0x3b7f8, 0x3b7fc,
1079 		0x3b814, 0x3b814,
1080 		0x3b82c, 0x3b82c,
1081 		0x3b880, 0x3b88c,
1082 		0x3b8e8, 0x3b8ec,
1083 		0x3b900, 0x3b928,
1084 		0x3b930, 0x3b948,
1085 		0x3b960, 0x3b968,
1086 		0x3b970, 0x3b99c,
1087 		0x3b9f0, 0x3ba38,
1088 		0x3ba40, 0x3ba40,
1089 		0x3ba48, 0x3ba50,
1090 		0x3ba5c, 0x3ba64,
1091 		0x3ba70, 0x3bab8,
1092 		0x3bac0, 0x3bae4,
1093 		0x3baf8, 0x3bb10,
1094 		0x3bb28, 0x3bb28,
1095 		0x3bb3c, 0x3bb50,
1096 		0x3bbf0, 0x3bc10,
1097 		0x3bc28, 0x3bc28,
1098 		0x3bc3c, 0x3bc50,
1099 		0x3bcf0, 0x3bcfc,
1100 		0x3c000, 0x3c030,
1101 		0x3c038, 0x3c038,
1102 		0x3c040, 0x3c040,
1103 		0x3c100, 0x3c144,
1104 		0x3c190, 0x3c1a0,
1105 		0x3c1a8, 0x3c1b8,
1106 		0x3c1c4, 0x3c1c8,
1107 		0x3c1d0, 0x3c1d0,
1108 		0x3c200, 0x3c318,
1109 		0x3c400, 0x3c4b4,
1110 		0x3c4c0, 0x3c52c,
1111 		0x3c540, 0x3c61c,
1112 		0x3c800, 0x3c828,
1113 		0x3c834, 0x3c834,
1114 		0x3c8c0, 0x3c908,
1115 		0x3c910, 0x3c9ac,
1116 		0x3ca00, 0x3ca14,
1117 		0x3ca1c, 0x3ca2c,
1118 		0x3ca44, 0x3ca50,
1119 		0x3ca74, 0x3ca74,
1120 		0x3ca7c, 0x3cafc,
1121 		0x3cb08, 0x3cc24,
1122 		0x3cd00, 0x3cd00,
1123 		0x3cd08, 0x3cd14,
1124 		0x3cd1c, 0x3cd20,
1125 		0x3cd3c, 0x3cd3c,
1126 		0x3cd48, 0x3cd50,
1127 		0x3d200, 0x3d20c,
1128 		0x3d220, 0x3d220,
1129 		0x3d240, 0x3d240,
1130 		0x3d600, 0x3d60c,
1131 		0x3da00, 0x3da1c,
1132 		0x3de00, 0x3de20,
1133 		0x3de38, 0x3de3c,
1134 		0x3de80, 0x3de80,
1135 		0x3de88, 0x3dea8,
1136 		0x3deb0, 0x3deb4,
1137 		0x3dec8, 0x3ded4,
1138 		0x3dfb8, 0x3e004,
1139 		0x3e200, 0x3e200,
1140 		0x3e208, 0x3e240,
1141 		0x3e248, 0x3e280,
1142 		0x3e288, 0x3e2c0,
1143 		0x3e2c8, 0x3e2fc,
1144 		0x3e600, 0x3e630,
1145 		0x3ea00, 0x3eabc,
1146 		0x3eb00, 0x3eb10,
1147 		0x3eb20, 0x3eb30,
1148 		0x3eb40, 0x3eb50,
1149 		0x3eb60, 0x3eb70,
1150 		0x3f000, 0x3f028,
1151 		0x3f030, 0x3f048,
1152 		0x3f060, 0x3f068,
1153 		0x3f070, 0x3f09c,
1154 		0x3f0f0, 0x3f128,
1155 		0x3f130, 0x3f148,
1156 		0x3f160, 0x3f168,
1157 		0x3f170, 0x3f19c,
1158 		0x3f1f0, 0x3f238,
1159 		0x3f240, 0x3f240,
1160 		0x3f248, 0x3f250,
1161 		0x3f25c, 0x3f264,
1162 		0x3f270, 0x3f2b8,
1163 		0x3f2c0, 0x3f2e4,
1164 		0x3f2f8, 0x3f338,
1165 		0x3f340, 0x3f340,
1166 		0x3f348, 0x3f350,
1167 		0x3f35c, 0x3f364,
1168 		0x3f370, 0x3f3b8,
1169 		0x3f3c0, 0x3f3e4,
1170 		0x3f3f8, 0x3f428,
1171 		0x3f430, 0x3f448,
1172 		0x3f460, 0x3f468,
1173 		0x3f470, 0x3f49c,
1174 		0x3f4f0, 0x3f528,
1175 		0x3f530, 0x3f548,
1176 		0x3f560, 0x3f568,
1177 		0x3f570, 0x3f59c,
1178 		0x3f5f0, 0x3f638,
1179 		0x3f640, 0x3f640,
1180 		0x3f648, 0x3f650,
1181 		0x3f65c, 0x3f664,
1182 		0x3f670, 0x3f6b8,
1183 		0x3f6c0, 0x3f6e4,
1184 		0x3f6f8, 0x3f738,
1185 		0x3f740, 0x3f740,
1186 		0x3f748, 0x3f750,
1187 		0x3f75c, 0x3f764,
1188 		0x3f770, 0x3f7b8,
1189 		0x3f7c0, 0x3f7e4,
1190 		0x3f7f8, 0x3f7fc,
1191 		0x3f814, 0x3f814,
1192 		0x3f82c, 0x3f82c,
1193 		0x3f880, 0x3f88c,
1194 		0x3f8e8, 0x3f8ec,
1195 		0x3f900, 0x3f928,
1196 		0x3f930, 0x3f948,
1197 		0x3f960, 0x3f968,
1198 		0x3f970, 0x3f99c,
1199 		0x3f9f0, 0x3fa38,
1200 		0x3fa40, 0x3fa40,
1201 		0x3fa48, 0x3fa50,
1202 		0x3fa5c, 0x3fa64,
1203 		0x3fa70, 0x3fab8,
1204 		0x3fac0, 0x3fae4,
1205 		0x3faf8, 0x3fb10,
1206 		0x3fb28, 0x3fb28,
1207 		0x3fb3c, 0x3fb50,
1208 		0x3fbf0, 0x3fc10,
1209 		0x3fc28, 0x3fc28,
1210 		0x3fc3c, 0x3fc50,
1211 		0x3fcf0, 0x3fcfc,
1212 		0x40000, 0x4000c,
1213 		0x40040, 0x40050,
1214 		0x40060, 0x40068,
1215 		0x4007c, 0x4008c,
1216 		0x40094, 0x400b0,
1217 		0x400c0, 0x40144,
1218 		0x40180, 0x4018c,
1219 		0x40200, 0x40254,
1220 		0x40260, 0x40264,
1221 		0x40270, 0x40288,
1222 		0x40290, 0x40298,
1223 		0x402ac, 0x402c8,
1224 		0x402d0, 0x402e0,
1225 		0x402f0, 0x402f0,
1226 		0x40300, 0x4033c,
1227 		0x403f8, 0x403fc,
1228 		0x41304, 0x413c4,
1229 		0x41400, 0x4140c,
1230 		0x41414, 0x4141c,
1231 		0x41480, 0x414d0,
1232 		0x44000, 0x44054,
1233 		0x4405c, 0x44078,
1234 		0x440c0, 0x44174,
1235 		0x44180, 0x441ac,
1236 		0x441b4, 0x441b8,
1237 		0x441c0, 0x44254,
1238 		0x4425c, 0x44278,
1239 		0x442c0, 0x44374,
1240 		0x44380, 0x443ac,
1241 		0x443b4, 0x443b8,
1242 		0x443c0, 0x44454,
1243 		0x4445c, 0x44478,
1244 		0x444c0, 0x44574,
1245 		0x44580, 0x445ac,
1246 		0x445b4, 0x445b8,
1247 		0x445c0, 0x44654,
1248 		0x4465c, 0x44678,
1249 		0x446c0, 0x44774,
1250 		0x44780, 0x447ac,
1251 		0x447b4, 0x447b8,
1252 		0x447c0, 0x44854,
1253 		0x4485c, 0x44878,
1254 		0x448c0, 0x44974,
1255 		0x44980, 0x449ac,
1256 		0x449b4, 0x449b8,
1257 		0x449c0, 0x449fc,
1258 		0x45000, 0x45004,
1259 		0x45010, 0x45030,
1260 		0x45040, 0x45060,
1261 		0x45068, 0x45068,
1262 		0x45080, 0x45084,
1263 		0x450a0, 0x450b0,
1264 		0x45200, 0x45204,
1265 		0x45210, 0x45230,
1266 		0x45240, 0x45260,
1267 		0x45268, 0x45268,
1268 		0x45280, 0x45284,
1269 		0x452a0, 0x452b0,
1270 		0x460c0, 0x460e4,
1271 		0x47000, 0x4703c,
1272 		0x47044, 0x4708c,
1273 		0x47200, 0x47250,
1274 		0x47400, 0x47408,
1275 		0x47414, 0x47420,
1276 		0x47600, 0x47618,
1277 		0x47800, 0x47814,
1278 		0x48000, 0x4800c,
1279 		0x48040, 0x48050,
1280 		0x48060, 0x48068,
1281 		0x4807c, 0x4808c,
1282 		0x48094, 0x480b0,
1283 		0x480c0, 0x48144,
1284 		0x48180, 0x4818c,
1285 		0x48200, 0x48254,
1286 		0x48260, 0x48264,
1287 		0x48270, 0x48288,
1288 		0x48290, 0x48298,
1289 		0x482ac, 0x482c8,
1290 		0x482d0, 0x482e0,
1291 		0x482f0, 0x482f0,
1292 		0x48300, 0x4833c,
1293 		0x483f8, 0x483fc,
1294 		0x49304, 0x493c4,
1295 		0x49400, 0x4940c,
1296 		0x49414, 0x4941c,
1297 		0x49480, 0x494d0,
1298 		0x4c000, 0x4c054,
1299 		0x4c05c, 0x4c078,
1300 		0x4c0c0, 0x4c174,
1301 		0x4c180, 0x4c1ac,
1302 		0x4c1b4, 0x4c1b8,
1303 		0x4c1c0, 0x4c254,
1304 		0x4c25c, 0x4c278,
1305 		0x4c2c0, 0x4c374,
1306 		0x4c380, 0x4c3ac,
1307 		0x4c3b4, 0x4c3b8,
1308 		0x4c3c0, 0x4c454,
1309 		0x4c45c, 0x4c478,
1310 		0x4c4c0, 0x4c574,
1311 		0x4c580, 0x4c5ac,
1312 		0x4c5b4, 0x4c5b8,
1313 		0x4c5c0, 0x4c654,
1314 		0x4c65c, 0x4c678,
1315 		0x4c6c0, 0x4c774,
1316 		0x4c780, 0x4c7ac,
1317 		0x4c7b4, 0x4c7b8,
1318 		0x4c7c0, 0x4c854,
1319 		0x4c85c, 0x4c878,
1320 		0x4c8c0, 0x4c974,
1321 		0x4c980, 0x4c9ac,
1322 		0x4c9b4, 0x4c9b8,
1323 		0x4c9c0, 0x4c9fc,
1324 		0x4d000, 0x4d004,
1325 		0x4d010, 0x4d030,
1326 		0x4d040, 0x4d060,
1327 		0x4d068, 0x4d068,
1328 		0x4d080, 0x4d084,
1329 		0x4d0a0, 0x4d0b0,
1330 		0x4d200, 0x4d204,
1331 		0x4d210, 0x4d230,
1332 		0x4d240, 0x4d260,
1333 		0x4d268, 0x4d268,
1334 		0x4d280, 0x4d284,
1335 		0x4d2a0, 0x4d2b0,
1336 		0x4e0c0, 0x4e0e4,
1337 		0x4f000, 0x4f03c,
1338 		0x4f044, 0x4f08c,
1339 		0x4f200, 0x4f250,
1340 		0x4f400, 0x4f408,
1341 		0x4f414, 0x4f420,
1342 		0x4f600, 0x4f618,
1343 		0x4f800, 0x4f814,
1344 		0x50000, 0x50084,
1345 		0x50090, 0x500cc,
1346 		0x50400, 0x50400,
1347 		0x50800, 0x50884,
1348 		0x50890, 0x508cc,
1349 		0x50c00, 0x50c00,
1350 		0x51000, 0x5101c,
1351 		0x51300, 0x51308,
1352 	};
1353 
1354 	static const unsigned int t6_reg_ranges[] = {
1355 		0x1008, 0x101c,
1356 		0x1024, 0x10a8,
1357 		0x10b4, 0x10f8,
1358 		0x1100, 0x1114,
1359 		0x111c, 0x112c,
1360 		0x1138, 0x113c,
1361 		0x1144, 0x114c,
1362 		0x1180, 0x1184,
1363 		0x1190, 0x1194,
1364 		0x11a0, 0x11a4,
1365 		0x11b0, 0x11b4,
1366 		0x11fc, 0x1274,
1367 		0x1280, 0x133c,
1368 		0x1800, 0x18fc,
1369 		0x3000, 0x302c,
1370 		0x3060, 0x30b0,
1371 		0x30b8, 0x30d8,
1372 		0x30e0, 0x30fc,
1373 		0x3140, 0x357c,
1374 		0x35a8, 0x35cc,
1375 		0x35ec, 0x35ec,
1376 		0x3600, 0x5624,
1377 		0x56cc, 0x56ec,
1378 		0x56f4, 0x5720,
1379 		0x5728, 0x575c,
1380 		0x580c, 0x5814,
1381 		0x5890, 0x589c,
1382 		0x58a4, 0x58ac,
1383 		0x58b8, 0x58bc,
1384 		0x5940, 0x595c,
1385 		0x5980, 0x598c,
1386 		0x59b0, 0x59c8,
1387 		0x59d0, 0x59dc,
1388 		0x59fc, 0x5a18,
1389 		0x5a60, 0x5a6c,
1390 		0x5a80, 0x5a8c,
1391 		0x5a94, 0x5a9c,
1392 		0x5b94, 0x5bfc,
1393 		0x5c10, 0x5e48,
1394 		0x5e50, 0x5e94,
1395 		0x5ea0, 0x5eb0,
1396 		0x5ec0, 0x5ec0,
1397 		0x5ec8, 0x5ed0,
1398 		0x5ee0, 0x5ee0,
1399 		0x5ef0, 0x5ef0,
1400 		0x5f00, 0x5f00,
1401 		0x6000, 0x6020,
1402 		0x6028, 0x6040,
1403 		0x6058, 0x609c,
1404 		0x60a8, 0x619c,
1405 		0x7700, 0x7798,
1406 		0x77c0, 0x7880,
1407 		0x78cc, 0x78fc,
1408 		0x7b00, 0x7b58,
1409 		0x7b60, 0x7b84,
1410 		0x7b8c, 0x7c54,
1411 		0x7d00, 0x7d38,
1412 		0x7d40, 0x7d84,
1413 		0x7d8c, 0x7ddc,
1414 		0x7de4, 0x7e04,
1415 		0x7e10, 0x7e1c,
1416 		0x7e24, 0x7e38,
1417 		0x7e40, 0x7e44,
1418 		0x7e4c, 0x7e78,
1419 		0x7e80, 0x7edc,
1420 		0x7ee8, 0x7efc,
1421 		0x8dc0, 0x8de4,
1422 		0x8df8, 0x8e04,
1423 		0x8e10, 0x8e84,
1424 		0x8ea0, 0x8f88,
1425 		0x8fb8, 0x9058,
1426 		0x9060, 0x9060,
1427 		0x9068, 0x90f8,
1428 		0x9100, 0x9124,
1429 		0x9400, 0x9470,
1430 		0x9600, 0x9600,
1431 		0x9608, 0x9638,
1432 		0x9640, 0x9704,
1433 		0x9710, 0x971c,
1434 		0x9800, 0x9808,
1435 		0x9820, 0x983c,
1436 		0x9850, 0x9864,
1437 		0x9c00, 0x9c6c,
1438 		0x9c80, 0x9cec,
1439 		0x9d00, 0x9d6c,
1440 		0x9d80, 0x9dec,
1441 		0x9e00, 0x9e6c,
1442 		0x9e80, 0x9eec,
1443 		0x9f00, 0x9f6c,
1444 		0x9f80, 0xa020,
1445 		0xd004, 0xd03c,
1446 		0xd100, 0xd118,
1447 		0xd200, 0xd214,
1448 		0xd220, 0xd234,
1449 		0xd240, 0xd254,
1450 		0xd260, 0xd274,
1451 		0xd280, 0xd294,
1452 		0xd2a0, 0xd2b4,
1453 		0xd2c0, 0xd2d4,
1454 		0xd2e0, 0xd2f4,
1455 		0xd300, 0xd31c,
1456 		0xdfc0, 0xdfe0,
1457 		0xe000, 0xf008,
1458 		0xf010, 0xf018,
1459 		0xf020, 0xf028,
1460 		0x11000, 0x11014,
1461 		0x11048, 0x1106c,
1462 		0x11074, 0x11088,
1463 		0x11098, 0x11120,
1464 		0x1112c, 0x1117c,
1465 		0x11190, 0x112e0,
1466 		0x11300, 0x1130c,
1467 		0x12000, 0x1206c,
1468 		0x19040, 0x1906c,
1469 		0x19078, 0x19080,
1470 		0x1908c, 0x190e8,
1471 		0x190f0, 0x190f8,
1472 		0x19100, 0x19110,
1473 		0x19120, 0x19124,
1474 		0x19150, 0x19194,
1475 		0x1919c, 0x191b0,
1476 		0x191d0, 0x191e8,
1477 		0x19238, 0x19290,
1478 		0x192a4, 0x192b0,
1479 		0x192bc, 0x192bc,
1480 		0x19348, 0x1934c,
1481 		0x193f8, 0x19418,
1482 		0x19420, 0x19428,
1483 		0x19430, 0x19444,
1484 		0x1944c, 0x1946c,
1485 		0x19474, 0x19474,
1486 		0x19490, 0x194cc,
1487 		0x194f0, 0x194f8,
1488 		0x19c00, 0x19c48,
1489 		0x19c50, 0x19c80,
1490 		0x19c94, 0x19c98,
1491 		0x19ca0, 0x19cbc,
1492 		0x19ce4, 0x19ce4,
1493 		0x19cf0, 0x19cf8,
1494 		0x19d00, 0x19d28,
1495 		0x19d50, 0x19d78,
1496 		0x19d94, 0x19d98,
1497 		0x19da0, 0x19dc8,
1498 		0x19df0, 0x19e10,
1499 		0x19e50, 0x19e6c,
1500 		0x19ea0, 0x19ebc,
1501 		0x19ec4, 0x19ef4,
1502 		0x19f04, 0x19f2c,
1503 		0x19f34, 0x19f34,
1504 		0x19f40, 0x19f50,
1505 		0x19f90, 0x19fac,
1506 		0x19fc4, 0x19fc8,
1507 		0x19fd0, 0x19fe4,
1508 		0x1a000, 0x1a004,
1509 		0x1a010, 0x1a06c,
1510 		0x1a0b0, 0x1a0e4,
1511 		0x1a0ec, 0x1a0f8,
1512 		0x1a100, 0x1a108,
1513 		0x1a114, 0x1a120,
1514 		0x1a128, 0x1a130,
1515 		0x1a138, 0x1a138,
1516 		0x1a190, 0x1a1c4,
1517 		0x1a1fc, 0x1a1fc,
1518 		0x1e008, 0x1e00c,
1519 		0x1e040, 0x1e044,
1520 		0x1e04c, 0x1e04c,
1521 		0x1e284, 0x1e290,
1522 		0x1e2c0, 0x1e2c0,
1523 		0x1e2e0, 0x1e2e0,
1524 		0x1e300, 0x1e384,
1525 		0x1e3c0, 0x1e3c8,
1526 		0x1e408, 0x1e40c,
1527 		0x1e440, 0x1e444,
1528 		0x1e44c, 0x1e44c,
1529 		0x1e684, 0x1e690,
1530 		0x1e6c0, 0x1e6c0,
1531 		0x1e6e0, 0x1e6e0,
1532 		0x1e700, 0x1e784,
1533 		0x1e7c0, 0x1e7c8,
1534 		0x1e808, 0x1e80c,
1535 		0x1e840, 0x1e844,
1536 		0x1e84c, 0x1e84c,
1537 		0x1ea84, 0x1ea90,
1538 		0x1eac0, 0x1eac0,
1539 		0x1eae0, 0x1eae0,
1540 		0x1eb00, 0x1eb84,
1541 		0x1ebc0, 0x1ebc8,
1542 		0x1ec08, 0x1ec0c,
1543 		0x1ec40, 0x1ec44,
1544 		0x1ec4c, 0x1ec4c,
1545 		0x1ee84, 0x1ee90,
1546 		0x1eec0, 0x1eec0,
1547 		0x1eee0, 0x1eee0,
1548 		0x1ef00, 0x1ef84,
1549 		0x1efc0, 0x1efc8,
1550 		0x1f008, 0x1f00c,
1551 		0x1f040, 0x1f044,
1552 		0x1f04c, 0x1f04c,
1553 		0x1f284, 0x1f290,
1554 		0x1f2c0, 0x1f2c0,
1555 		0x1f2e0, 0x1f2e0,
1556 		0x1f300, 0x1f384,
1557 		0x1f3c0, 0x1f3c8,
1558 		0x1f408, 0x1f40c,
1559 		0x1f440, 0x1f444,
1560 		0x1f44c, 0x1f44c,
1561 		0x1f684, 0x1f690,
1562 		0x1f6c0, 0x1f6c0,
1563 		0x1f6e0, 0x1f6e0,
1564 		0x1f700, 0x1f784,
1565 		0x1f7c0, 0x1f7c8,
1566 		0x1f808, 0x1f80c,
1567 		0x1f840, 0x1f844,
1568 		0x1f84c, 0x1f84c,
1569 		0x1fa84, 0x1fa90,
1570 		0x1fac0, 0x1fac0,
1571 		0x1fae0, 0x1fae0,
1572 		0x1fb00, 0x1fb84,
1573 		0x1fbc0, 0x1fbc8,
1574 		0x1fc08, 0x1fc0c,
1575 		0x1fc40, 0x1fc44,
1576 		0x1fc4c, 0x1fc4c,
1577 		0x1fe84, 0x1fe90,
1578 		0x1fec0, 0x1fec0,
1579 		0x1fee0, 0x1fee0,
1580 		0x1ff00, 0x1ff84,
1581 		0x1ffc0, 0x1ffc8,
1582 		0x30000, 0x30030,
1583 		0x30100, 0x30168,
1584 		0x30190, 0x301a0,
1585 		0x301a8, 0x301b8,
1586 		0x301c4, 0x301c8,
1587 		0x301d0, 0x301d0,
1588 		0x30200, 0x30320,
1589 		0x30400, 0x304b4,
1590 		0x304c0, 0x3052c,
1591 		0x30540, 0x3061c,
1592 		0x30800, 0x308a0,
1593 		0x308c0, 0x30908,
1594 		0x30910, 0x309b8,
1595 		0x30a00, 0x30a04,
1596 		0x30a0c, 0x30a14,
1597 		0x30a1c, 0x30a2c,
1598 		0x30a44, 0x30a50,
1599 		0x30a74, 0x30a74,
1600 		0x30a7c, 0x30afc,
1601 		0x30b08, 0x30c24,
1602 		0x30d00, 0x30d14,
1603 		0x30d1c, 0x30d3c,
1604 		0x30d44, 0x30d4c,
1605 		0x30d54, 0x30d74,
1606 		0x30d7c, 0x30d7c,
1607 		0x30de0, 0x30de0,
1608 		0x30e00, 0x30ed4,
1609 		0x30f00, 0x30fa4,
1610 		0x30fc0, 0x30fc4,
1611 		0x31000, 0x31004,
1612 		0x31080, 0x310fc,
1613 		0x31208, 0x31220,
1614 		0x3123c, 0x31254,
1615 		0x31300, 0x31300,
1616 		0x31308, 0x3131c,
1617 		0x31338, 0x3133c,
1618 		0x31380, 0x31380,
1619 		0x31388, 0x313a8,
1620 		0x313b4, 0x313b4,
1621 		0x31400, 0x31420,
1622 		0x31438, 0x3143c,
1623 		0x31480, 0x31480,
1624 		0x314a8, 0x314a8,
1625 		0x314b0, 0x314b4,
1626 		0x314c8, 0x314d4,
1627 		0x31a40, 0x31a4c,
1628 		0x31af0, 0x31b20,
1629 		0x31b38, 0x31b3c,
1630 		0x31b80, 0x31b80,
1631 		0x31ba8, 0x31ba8,
1632 		0x31bb0, 0x31bb4,
1633 		0x31bc8, 0x31bd4,
1634 		0x32140, 0x3218c,
1635 		0x321f0, 0x321f4,
1636 		0x32200, 0x32200,
1637 		0x32218, 0x32218,
1638 		0x32400, 0x32400,
1639 		0x32408, 0x3241c,
1640 		0x32618, 0x32620,
1641 		0x32664, 0x32664,
1642 		0x326a8, 0x326a8,
1643 		0x326ec, 0x326ec,
1644 		0x32a00, 0x32abc,
1645 		0x32b00, 0x32b38,
1646 		0x32b20, 0x32b38,
1647 		0x32b40, 0x32b58,
1648 		0x32b60, 0x32b78,
1649 		0x32c00, 0x32c00,
1650 		0x32c08, 0x32c3c,
1651 		0x33000, 0x3302c,
1652 		0x33034, 0x33050,
1653 		0x33058, 0x33058,
1654 		0x33060, 0x3308c,
1655 		0x3309c, 0x330ac,
1656 		0x330c0, 0x330c0,
1657 		0x330c8, 0x330d0,
1658 		0x330d8, 0x330e0,
1659 		0x330ec, 0x3312c,
1660 		0x33134, 0x33150,
1661 		0x33158, 0x33158,
1662 		0x33160, 0x3318c,
1663 		0x3319c, 0x331ac,
1664 		0x331c0, 0x331c0,
1665 		0x331c8, 0x331d0,
1666 		0x331d8, 0x331e0,
1667 		0x331ec, 0x33290,
1668 		0x33298, 0x332c4,
1669 		0x332e4, 0x33390,
1670 		0x33398, 0x333c4,
1671 		0x333e4, 0x3342c,
1672 		0x33434, 0x33450,
1673 		0x33458, 0x33458,
1674 		0x33460, 0x3348c,
1675 		0x3349c, 0x334ac,
1676 		0x334c0, 0x334c0,
1677 		0x334c8, 0x334d0,
1678 		0x334d8, 0x334e0,
1679 		0x334ec, 0x3352c,
1680 		0x33534, 0x33550,
1681 		0x33558, 0x33558,
1682 		0x33560, 0x3358c,
1683 		0x3359c, 0x335ac,
1684 		0x335c0, 0x335c0,
1685 		0x335c8, 0x335d0,
1686 		0x335d8, 0x335e0,
1687 		0x335ec, 0x33690,
1688 		0x33698, 0x336c4,
1689 		0x336e4, 0x33790,
1690 		0x33798, 0x337c4,
1691 		0x337e4, 0x337fc,
1692 		0x33814, 0x33814,
1693 		0x33854, 0x33868,
1694 		0x33880, 0x3388c,
1695 		0x338c0, 0x338d0,
1696 		0x338e8, 0x338ec,
1697 		0x33900, 0x3392c,
1698 		0x33934, 0x33950,
1699 		0x33958, 0x33958,
1700 		0x33960, 0x3398c,
1701 		0x3399c, 0x339ac,
1702 		0x339c0, 0x339c0,
1703 		0x339c8, 0x339d0,
1704 		0x339d8, 0x339e0,
1705 		0x339ec, 0x33a90,
1706 		0x33a98, 0x33ac4,
1707 		0x33ae4, 0x33b10,
1708 		0x33b24, 0x33b28,
1709 		0x33b38, 0x33b50,
1710 		0x33bf0, 0x33c10,
1711 		0x33c24, 0x33c28,
1712 		0x33c38, 0x33c50,
1713 		0x33cf0, 0x33cfc,
1714 		0x34000, 0x34030,
1715 		0x34100, 0x34168,
1716 		0x34190, 0x341a0,
1717 		0x341a8, 0x341b8,
1718 		0x341c4, 0x341c8,
1719 		0x341d0, 0x341d0,
1720 		0x34200, 0x34320,
1721 		0x34400, 0x344b4,
1722 		0x344c0, 0x3452c,
1723 		0x34540, 0x3461c,
1724 		0x34800, 0x348a0,
1725 		0x348c0, 0x34908,
1726 		0x34910, 0x349b8,
1727 		0x34a00, 0x34a04,
1728 		0x34a0c, 0x34a14,
1729 		0x34a1c, 0x34a2c,
1730 		0x34a44, 0x34a50,
1731 		0x34a74, 0x34a74,
1732 		0x34a7c, 0x34afc,
1733 		0x34b08, 0x34c24,
1734 		0x34d00, 0x34d14,
1735 		0x34d1c, 0x34d3c,
1736 		0x34d44, 0x34d4c,
1737 		0x34d54, 0x34d74,
1738 		0x34d7c, 0x34d7c,
1739 		0x34de0, 0x34de0,
1740 		0x34e00, 0x34ed4,
1741 		0x34f00, 0x34fa4,
1742 		0x34fc0, 0x34fc4,
1743 		0x35000, 0x35004,
1744 		0x35080, 0x350fc,
1745 		0x35208, 0x35220,
1746 		0x3523c, 0x35254,
1747 		0x35300, 0x35300,
1748 		0x35308, 0x3531c,
1749 		0x35338, 0x3533c,
1750 		0x35380, 0x35380,
1751 		0x35388, 0x353a8,
1752 		0x353b4, 0x353b4,
1753 		0x35400, 0x35420,
1754 		0x35438, 0x3543c,
1755 		0x35480, 0x35480,
1756 		0x354a8, 0x354a8,
1757 		0x354b0, 0x354b4,
1758 		0x354c8, 0x354d4,
1759 		0x35a40, 0x35a4c,
1760 		0x35af0, 0x35b20,
1761 		0x35b38, 0x35b3c,
1762 		0x35b80, 0x35b80,
1763 		0x35ba8, 0x35ba8,
1764 		0x35bb0, 0x35bb4,
1765 		0x35bc8, 0x35bd4,
1766 		0x36140, 0x3618c,
1767 		0x361f0, 0x361f4,
1768 		0x36200, 0x36200,
1769 		0x36218, 0x36218,
1770 		0x36400, 0x36400,
1771 		0x36408, 0x3641c,
1772 		0x36618, 0x36620,
1773 		0x36664, 0x36664,
1774 		0x366a8, 0x366a8,
1775 		0x366ec, 0x366ec,
1776 		0x36a00, 0x36abc,
1777 		0x36b00, 0x36b38,
1778 		0x36b20, 0x36b38,
1779 		0x36b40, 0x36b58,
1780 		0x36b60, 0x36b78,
1781 		0x36c00, 0x36c00,
1782 		0x36c08, 0x36c3c,
1783 		0x37000, 0x3702c,
1784 		0x37034, 0x37050,
1785 		0x37058, 0x37058,
1786 		0x37060, 0x3708c,
1787 		0x3709c, 0x370ac,
1788 		0x370c0, 0x370c0,
1789 		0x370c8, 0x370d0,
1790 		0x370d8, 0x370e0,
1791 		0x370ec, 0x3712c,
1792 		0x37134, 0x37150,
1793 		0x37158, 0x37158,
1794 		0x37160, 0x3718c,
1795 		0x3719c, 0x371ac,
1796 		0x371c0, 0x371c0,
1797 		0x371c8, 0x371d0,
1798 		0x371d8, 0x371e0,
1799 		0x371ec, 0x37290,
1800 		0x37298, 0x372c4,
1801 		0x372e4, 0x37390,
1802 		0x37398, 0x373c4,
1803 		0x373e4, 0x3742c,
1804 		0x37434, 0x37450,
1805 		0x37458, 0x37458,
1806 		0x37460, 0x3748c,
1807 		0x3749c, 0x374ac,
1808 		0x374c0, 0x374c0,
1809 		0x374c8, 0x374d0,
1810 		0x374d8, 0x374e0,
1811 		0x374ec, 0x3752c,
1812 		0x37534, 0x37550,
1813 		0x37558, 0x37558,
1814 		0x37560, 0x3758c,
1815 		0x3759c, 0x375ac,
1816 		0x375c0, 0x375c0,
1817 		0x375c8, 0x375d0,
1818 		0x375d8, 0x375e0,
1819 		0x375ec, 0x37690,
1820 		0x37698, 0x376c4,
1821 		0x376e4, 0x37790,
1822 		0x37798, 0x377c4,
1823 		0x377e4, 0x377fc,
1824 		0x37814, 0x37814,
1825 		0x37854, 0x37868,
1826 		0x37880, 0x3788c,
1827 		0x378c0, 0x378d0,
1828 		0x378e8, 0x378ec,
1829 		0x37900, 0x3792c,
1830 		0x37934, 0x37950,
1831 		0x37958, 0x37958,
1832 		0x37960, 0x3798c,
1833 		0x3799c, 0x379ac,
1834 		0x379c0, 0x379c0,
1835 		0x379c8, 0x379d0,
1836 		0x379d8, 0x379e0,
1837 		0x379ec, 0x37a90,
1838 		0x37a98, 0x37ac4,
1839 		0x37ae4, 0x37b10,
1840 		0x37b24, 0x37b28,
1841 		0x37b38, 0x37b50,
1842 		0x37bf0, 0x37c10,
1843 		0x37c24, 0x37c28,
1844 		0x37c38, 0x37c50,
1845 		0x37cf0, 0x37cfc,
1846 		0x40040, 0x40040,
1847 		0x40080, 0x40084,
1848 		0x40100, 0x40100,
1849 		0x40140, 0x401bc,
1850 		0x40200, 0x40214,
1851 		0x40228, 0x40228,
1852 		0x40240, 0x40258,
1853 		0x40280, 0x40280,
1854 		0x40304, 0x40304,
1855 		0x40330, 0x4033c,
1856 		0x41304, 0x413c8,
1857 		0x413d0, 0x413dc,
1858 		0x413f0, 0x413f0,
1859 		0x41400, 0x4140c,
1860 		0x41414, 0x4141c,
1861 		0x41480, 0x414d0,
1862 		0x44000, 0x4407c,
1863 		0x440c0, 0x441ac,
1864 		0x441b4, 0x4427c,
1865 		0x442c0, 0x443ac,
1866 		0x443b4, 0x4447c,
1867 		0x444c0, 0x445ac,
1868 		0x445b4, 0x4467c,
1869 		0x446c0, 0x447ac,
1870 		0x447b4, 0x4487c,
1871 		0x448c0, 0x449ac,
1872 		0x449b4, 0x44a7c,
1873 		0x44ac0, 0x44bac,
1874 		0x44bb4, 0x44c7c,
1875 		0x44cc0, 0x44dac,
1876 		0x44db4, 0x44e7c,
1877 		0x44ec0, 0x44fac,
1878 		0x44fb4, 0x4507c,
1879 		0x450c0, 0x451ac,
1880 		0x451b4, 0x451fc,
1881 		0x45800, 0x45804,
1882 		0x45810, 0x45830,
1883 		0x45840, 0x45860,
1884 		0x45868, 0x45868,
1885 		0x45880, 0x45884,
1886 		0x458a0, 0x458b0,
1887 		0x45a00, 0x45a04,
1888 		0x45a10, 0x45a30,
1889 		0x45a40, 0x45a60,
1890 		0x45a68, 0x45a68,
1891 		0x45a80, 0x45a84,
1892 		0x45aa0, 0x45ab0,
1893 		0x460c0, 0x460e4,
1894 		0x47000, 0x4703c,
1895 		0x47044, 0x4708c,
1896 		0x47200, 0x47250,
1897 		0x47400, 0x47408,
1898 		0x47414, 0x47420,
1899 		0x47600, 0x47618,
1900 		0x47800, 0x47814,
1901 		0x47820, 0x4782c,
1902 		0x50000, 0x50084,
1903 		0x50090, 0x500cc,
1904 		0x50300, 0x50384,
1905 		0x50400, 0x50400,
1906 		0x50800, 0x50884,
1907 		0x50890, 0x508cc,
1908 		0x50b00, 0x50b84,
1909 		0x50c00, 0x50c00,
1910 		0x51000, 0x51020,
1911 		0x51028, 0x510b0,
1912 		0x51300, 0x51324,
1913 	};
1914 
1915 	u32 *buf_end = (u32 *)((char *)buf + buf_size);
1916 	const unsigned int *reg_ranges;
1917 	int reg_ranges_size, range;
1918 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1919 
1920 	/* Select the right set of register ranges to dump depending on the
1921 	 * adapter chip type.
1922 	 */
1923 	switch (chip_version) {
1924 	case CHELSIO_T5:
1925 		reg_ranges = t5_reg_ranges;
1926 		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1927 		break;
1928 
1929 	case CHELSIO_T6:
1930 		reg_ranges = t6_reg_ranges;
1931 		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
1932 		break;
1933 
1934 	default:
1935 		dev_err(adap,
1936 			"Unsupported chip version %d\n", chip_version);
1937 		return;
1938 	}
1939 
1940 	/* Clear the register buffer and insert the appropriate register
1941 	 * values selected by the above register ranges.
1942 	 */
1943 	memset(buf, 0, buf_size);
1944 	for (range = 0; range < reg_ranges_size; range += 2) {
1945 		unsigned int reg = reg_ranges[range];
1946 		unsigned int last_reg = reg_ranges[range + 1];
1947 		u32 *bufp = (u32 *)((char *)buf + reg);
1948 
1949 		/* Iterate across the register range filling in the register
1950 		 * buffer but don't write past the end of the register buffer.
1951 		 */
1952 		while (reg <= last_reg && bufp < buf_end) {
1953 			*bufp++ = t4_read_reg(adap, reg);
1954 			reg += sizeof(u32);
1955 		}
1956 	}
1957 }
1958 
1959 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
1960 #define EEPROM_DELAY            10              /* 10us per poll spin */
1961 #define EEPROM_MAX_POLL         5000            /* x 5000 == 50ms */
1962 
1963 #define EEPROM_STAT_ADDR        0x7bfc
1964 
1965 /**
1966  * Small utility function to wait till any outstanding VPD Access is complete.
1967  * We have a per-adapter state variable "VPD Busy" to indicate when we have a
1968  * VPD Access in flight.  This allows us to handle the problem of having a
1969  * previous VPD Access time out and prevent an attempt to inject a new VPD
1970  * Request before any in-flight VPD request has completed.
1971  */
1972 static int t4_seeprom_wait(struct adapter *adapter)
1973 {
1974 	unsigned int base = adapter->params.pci.vpd_cap_addr;
1975 	int max_poll;
1976 
1977 	/* If no VPD Access is in flight, we can just return success right
1978 	 * away.
1979 	 */
1980 	if (!adapter->vpd_busy)
1981 		return 0;
1982 
1983 	/* Poll the VPD Capability Address/Flag register waiting for it
1984 	 * to indicate that the operation is complete.
1985 	 */
1986 	max_poll = EEPROM_MAX_POLL;
1987 	do {
1988 		u16 val;
1989 
1990 		udelay(EEPROM_DELAY);
1991 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
1992 
1993 		/* If the operation is complete, mark the VPD as no longer
1994 		 * busy and return success.
1995 		 */
1996 		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
1997 			adapter->vpd_busy = 0;
1998 			return 0;
1999 		}
2000 	} while (--max_poll);
2001 
2002 	/* Failure!  Note that we leave the VPD Busy status set in order to
2003 	 * avoid pushing a new VPD Access request into the VPD Capability till
2004 	 * the current operation eventually succeeds.  It's a bug to issue a
2005 	 * new request when an existing request is in flight and will result
2006 	 * in corrupt hardware state.
2007 	 */
2008 	return -ETIMEDOUT;
2009 }
2010 
2011 /**
2012  * t4_seeprom_read - read a serial EEPROM location
2013  * @adapter: adapter to read
2014  * @addr: EEPROM virtual address
2015  * @data: where to store the read data
2016  *
2017  * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2018  * VPD capability.  Note that this function must be called with a virtual
2019  * address.
2020  */
2021 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2022 {
2023 	unsigned int base = adapter->params.pci.vpd_cap_addr;
2024 	int ret;
2025 
2026 	/* VPD Accesses must alway be 4-byte aligned!
2027 	 */
2028 	if (addr >= EEPROMVSIZE || (addr & 3))
2029 		return -EINVAL;
2030 
2031 	/* Wait for any previous operation which may still be in flight to
2032 	 * complete.
2033 	 */
2034 	ret = t4_seeprom_wait(adapter);
2035 	if (ret) {
2036 		dev_err(adapter, "VPD still busy from previous operation\n");
2037 		return ret;
2038 	}
2039 
2040 	/* Issue our new VPD Read request, mark the VPD as being busy and wait
2041 	 * for our request to complete.  If it doesn't complete, note the
2042 	 * error and return it to our caller.  Note that we do not reset the
2043 	 * VPD Busy status!
2044 	 */
2045 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2046 	adapter->vpd_busy = 1;
2047 	adapter->vpd_flag = PCI_VPD_ADDR_F;
2048 	ret = t4_seeprom_wait(adapter);
2049 	if (ret) {
2050 		dev_err(adapter, "VPD read of address %#x failed\n", addr);
2051 		return ret;
2052 	}
2053 
2054 	/* Grab the returned data, swizzle it into our endianness and
2055 	 * return success.
2056 	 */
2057 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2058 	*data = le32_to_cpu(*data);
2059 	return 0;
2060 }
2061 
2062 /**
2063  * t4_seeprom_write - write a serial EEPROM location
2064  * @adapter: adapter to write
2065  * @addr: virtual EEPROM address
2066  * @data: value to write
2067  *
2068  * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2069  * VPD capability.  Note that this function must be called with a virtual
2070  * address.
2071  */
2072 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2073 {
2074 	unsigned int base = adapter->params.pci.vpd_cap_addr;
2075 	int ret;
2076 	u32 stats_reg = 0;
2077 	int max_poll;
2078 
2079 	/* VPD Accesses must alway be 4-byte aligned!
2080 	 */
2081 	if (addr >= EEPROMVSIZE || (addr & 3))
2082 		return -EINVAL;
2083 
2084 	/* Wait for any previous operation which may still be in flight to
2085 	 * complete.
2086 	 */
2087 	ret = t4_seeprom_wait(adapter);
2088 	if (ret) {
2089 		dev_err(adapter, "VPD still busy from previous operation\n");
2090 		return ret;
2091 	}
2092 
2093 	/* Issue our new VPD Read request, mark the VPD as being busy and wait
2094 	 * for our request to complete.  If it doesn't complete, note the
2095 	 * error and return it to our caller.  Note that we do not reset the
2096 	 * VPD Busy status!
2097 	 */
2098 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2099 			     cpu_to_le32(data));
2100 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2101 			     (u16)addr | PCI_VPD_ADDR_F);
2102 	adapter->vpd_busy = 1;
2103 	adapter->vpd_flag = 0;
2104 	ret = t4_seeprom_wait(adapter);
2105 	if (ret) {
2106 		dev_err(adapter, "VPD write of address %#x failed\n", addr);
2107 		return ret;
2108 	}
2109 
2110 	/* Reset PCI_VPD_DATA register after a transaction and wait for our
2111 	 * request to complete. If it doesn't complete, return error.
2112 	 */
2113 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2114 	max_poll = EEPROM_MAX_POLL;
2115 	do {
2116 		udelay(EEPROM_DELAY);
2117 		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2118 	} while ((stats_reg & 0x1) && --max_poll);
2119 	if (!max_poll)
2120 		return -ETIMEDOUT;
2121 
2122 	/* Return success! */
2123 	return 0;
2124 }
2125 
2126 /**
2127  * t4_seeprom_wp - enable/disable EEPROM write protection
2128  * @adapter: the adapter
2129  * @enable: whether to enable or disable write protection
2130  *
2131  * Enables or disables write protection on the serial EEPROM.
2132  */
2133 int t4_seeprom_wp(struct adapter *adapter, int enable)
2134 {
2135 	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2136 }
2137 
2138 /**
2139  * t4_fw_tp_pio_rw - Access TP PIO through LDST
2140  * @adap: the adapter
2141  * @vals: where the indirect register values are stored/written
2142  * @nregs: how many indirect registers to read/write
2143  * @start_idx: index of first indirect register to read/write
2144  * @rw: Read (1) or Write (0)
2145  *
2146  * Access TP PIO registers through LDST
2147  */
2148 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
2149 		     unsigned int start_index, unsigned int rw)
2150 {
2151 	int cmd = FW_LDST_ADDRSPC_TP_PIO;
2152 	struct fw_ldst_cmd c;
2153 	unsigned int i;
2154 	int ret;
2155 
2156 	for (i = 0 ; i < nregs; i++) {
2157 		memset(&c, 0, sizeof(c));
2158 		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
2159 						F_FW_CMD_REQUEST |
2160 						(rw ? F_FW_CMD_READ :
2161 						      F_FW_CMD_WRITE) |
2162 						V_FW_LDST_CMD_ADDRSPACE(cmd));
2163 		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
2164 
2165 		c.u.addrval.addr = cpu_to_be32(start_index + i);
2166 		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
2167 		ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2168 		if (ret == 0) {
2169 			if (rw)
2170 				vals[i] = be32_to_cpu(c.u.addrval.val);
2171 		}
2172 	}
2173 }
2174 
2175 /**
2176  * t4_read_rss_key - read the global RSS key
2177  * @adap: the adapter
2178  * @key: 10-entry array holding the 320-bit RSS key
2179  *
2180  * Reads the global 320-bit RSS key.
2181  */
2182 void t4_read_rss_key(struct adapter *adap, u32 *key)
2183 {
2184 	t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
2185 }
2186 
2187 /**
2188  * t4_write_rss_key - program one of the RSS keys
2189  * @adap: the adapter
2190  * @key: 10-entry array holding the 320-bit RSS key
2191  * @idx: which RSS key to write
2192  *
2193  * Writes one of the RSS keys with the given 320-bit value.  If @idx is
2194  * 0..15 the corresponding entry in the RSS key table is written,
2195  * otherwise the global RSS key is written.
2196  */
2197 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
2198 {
2199 	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
2200 	u8 rss_key_addr_cnt = 16;
2201 
2202 	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
2203 	 * allows access to key addresses 16-63 by using KeyWrAddrX
2204 	 * as index[5:4](upper 2) into key table
2205 	 */
2206 	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
2207 	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
2208 		rss_key_addr_cnt = 32;
2209 
2210 	t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
2211 
2212 	if (idx >= 0 && idx < rss_key_addr_cnt) {
2213 		if (rss_key_addr_cnt > 16)
2214 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2215 				     V_KEYWRADDRX(idx >> 4) |
2216 				     V_T6_VFWRADDR(idx) | F_KEYWREN);
2217 		else
2218 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2219 				     V_KEYWRADDR(idx) | F_KEYWREN);
2220 	}
2221 }
2222 
2223 /**
2224  * t4_config_rss_range - configure a portion of the RSS mapping table
2225  * @adapter: the adapter
2226  * @mbox: mbox to use for the FW command
2227  * @viid: virtual interface whose RSS subtable is to be written
2228  * @start: start entry in the table to write
2229  * @n: how many table entries to write
2230  * @rspq: values for the "response queue" (Ingress Queue) lookup table
2231  * @nrspq: number of values in @rspq
2232  *
2233  * Programs the selected part of the VI's RSS mapping table with the
2234  * provided values.  If @nrspq < @n the supplied values are used repeatedly
2235  * until the full table range is populated.
2236  *
2237  * The caller must ensure the values in @rspq are in the range allowed for
2238  * @viid.
2239  */
2240 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2241 			int start, int n, const u16 *rspq, unsigned int nrspq)
2242 {
2243 	int ret;
2244 	const u16 *rsp = rspq;
2245 	const u16 *rsp_end = rspq + nrspq;
2246 	struct fw_rss_ind_tbl_cmd cmd;
2247 
2248 	memset(&cmd, 0, sizeof(cmd));
2249 	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2250 				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2251 				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
2252 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2253 
2254 	/*
2255 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2256 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2257 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2258 	 * reserved.
2259 	 */
2260 	while (n > 0) {
2261 		int nq = min(n, 32);
2262 		int nq_packed = 0;
2263 		__be32 *qp = &cmd.iq0_to_iq2;
2264 
2265 		/*
2266 		 * Set up the firmware RSS command header to send the next
2267 		 * "nq" Ingress Queue IDs to the firmware.
2268 		 */
2269 		cmd.niqid = cpu_to_be16(nq);
2270 		cmd.startidx = cpu_to_be16(start);
2271 
2272 		/*
2273 		 * "nq" more done for the start of the next loop.
2274 		 */
2275 		start += nq;
2276 		n -= nq;
2277 
2278 		/*
2279 		 * While there are still Ingress Queue IDs to stuff into the
2280 		 * current firmware RSS command, retrieve them from the
2281 		 * Ingress Queue ID array and insert them into the command.
2282 		 */
2283 		while (nq > 0) {
2284 			/*
2285 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2286 			 * around the Ingress Queue ID array if necessary) and
2287 			 * insert them into the firmware RSS command at the
2288 			 * current 3-tuple position within the commad.
2289 			 */
2290 			u16 qbuf[3];
2291 			u16 *qbp = qbuf;
2292 			int nqbuf = min(3, nq);
2293 
2294 			nq -= nqbuf;
2295 			qbuf[0] = 0;
2296 			qbuf[1] = 0;
2297 			qbuf[2] = 0;
2298 			while (nqbuf && nq_packed < 32) {
2299 				nqbuf--;
2300 				nq_packed++;
2301 				*qbp++ = *rsp++;
2302 				if (rsp >= rsp_end)
2303 					rsp = rspq;
2304 			}
2305 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2306 					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2307 					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2308 		}
2309 
2310 		/*
2311 		 * Send this portion of the RRS table update to the firmware;
2312 		 * bail out on any errors.
2313 		 */
2314 		if (is_pf4(adapter))
2315 			ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
2316 					 NULL);
2317 		else
2318 			ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
2319 		if (ret)
2320 			return ret;
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 /**
2327  * t4_config_vi_rss - configure per VI RSS settings
2328  * @adapter: the adapter
2329  * @mbox: mbox to use for the FW command
2330  * @viid: the VI id
2331  * @flags: RSS flags
2332  * @defq: id of the default RSS queue for the VI.
2333  *
2334  * Configures VI-specific RSS properties.
2335  */
2336 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2337 		     unsigned int flags, unsigned int defq)
2338 {
2339 	struct fw_rss_vi_config_cmd c;
2340 
2341 	memset(&c, 0, sizeof(c));
2342 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2343 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2344 				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2345 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2346 	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
2347 			V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2348 	if (is_pf4(adapter))
2349 		return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2350 	else
2351 		return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
2352 }
2353 
2354 /**
2355  * t4_read_config_vi_rss - read the configured per VI RSS settings
2356  * @adapter: the adapter
2357  * @mbox: mbox to use for the FW command
2358  * @viid: the VI id
2359  * @flags: where to place the configured flags
2360  * @defq: where to place the id of the default RSS queue for the VI.
2361  *
2362  * Read configured VI-specific RSS properties.
2363  */
2364 int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2365 			  u64 *flags, unsigned int *defq)
2366 {
2367 	struct fw_rss_vi_config_cmd c;
2368 	unsigned int result;
2369 	int ret;
2370 
2371 	memset(&c, 0, sizeof(c));
2372 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2373 				   F_FW_CMD_REQUEST | F_FW_CMD_READ |
2374 				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2375 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2376 	ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
2377 	if (!ret) {
2378 		result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
2379 		if (defq)
2380 			*defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
2381 		if (flags)
2382 			*flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
2383 	}
2384 
2385 	return ret;
2386 }
2387 
2388 /**
2389  * init_cong_ctrl - initialize congestion control parameters
2390  * @a: the alpha values for congestion control
2391  * @b: the beta values for congestion control
2392  *
2393  * Initialize the congestion control parameters.
2394  */
2395 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2396 {
2397 	int i;
2398 
2399 	for (i = 0; i < 9; i++) {
2400 		a[i] = 1;
2401 		b[i] = 0;
2402 	}
2403 
2404 	a[9] = 2;
2405 	a[10] = 3;
2406 	a[11] = 4;
2407 	a[12] = 5;
2408 	a[13] = 6;
2409 	a[14] = 7;
2410 	a[15] = 8;
2411 	a[16] = 9;
2412 	a[17] = 10;
2413 	a[18] = 14;
2414 	a[19] = 17;
2415 	a[20] = 21;
2416 	a[21] = 25;
2417 	a[22] = 30;
2418 	a[23] = 35;
2419 	a[24] = 45;
2420 	a[25] = 60;
2421 	a[26] = 80;
2422 	a[27] = 100;
2423 	a[28] = 200;
2424 	a[29] = 300;
2425 	a[30] = 400;
2426 	a[31] = 500;
2427 
2428 	b[9] = 1;
2429 	b[10] = 1;
2430 	b[11] = 2;
2431 	b[12] = 2;
2432 	b[13] = 3;
2433 	b[14] = 3;
2434 	b[15] = 3;
2435 	b[16] = 3;
2436 	b[17] = 4;
2437 	b[18] = 4;
2438 	b[19] = 4;
2439 	b[20] = 4;
2440 	b[21] = 4;
2441 	b[22] = 5;
2442 	b[23] = 5;
2443 	b[24] = 5;
2444 	b[25] = 5;
2445 	b[26] = 5;
2446 	b[27] = 5;
2447 	b[28] = 6;
2448 	b[29] = 6;
2449 	b[30] = 7;
2450 	b[31] = 7;
2451 }
2452 
/* Initialize the common header of firmware command @var: sets the opcode
 * to FW_<cmd>_CMD with the REQUEST flag plus the given READ/WRITE/EXEC
 * flag, and fills in the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
			F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
2458 
2459 int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
2460 {
2461 	u32 cclk_param, cclk_val;
2462 	int ret;
2463 
2464 	/*
2465 	 * Ask firmware for the Core Clock since it knows how to translate the
2466 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
2467 	 */
2468 	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2469 		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
2470 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2471 			      1, &cclk_param, &cclk_val);
2472 	if (ret) {
2473 		dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
2474 			__func__, ret);
2475 		return ret;
2476 	}
2477 
2478 	p->cclk = cclk_val;
2479 	dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
2480 	return 0;
2481 }
2482 
2483 /**
2484  * t4_get_pfres - retrieve VF resource limits
2485  * @adapter: the adapter
2486  *
2487  * Retrieves configured resource limits and capabilities for a physical
2488  * function.  The results are stored in @adapter->pfres.
2489  */
2490 int t4_get_pfres(struct adapter *adapter)
2491 {
2492 	struct pf_resources *pfres = &adapter->params.pfres;
2493 	struct fw_pfvf_cmd cmd, rpl;
2494 	u32 word;
2495 	int v;
2496 
2497 	/*
2498 	 * Execute PFVF Read command to get VF resource limits; bail out early
2499 	 * with error on command failure.
2500 	 */
2501 	memset(&cmd, 0, sizeof(cmd));
2502 	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
2503 				    F_FW_CMD_REQUEST |
2504 				    F_FW_CMD_READ |
2505 				    V_FW_PFVF_CMD_PFN(adapter->pf) |
2506 				    V_FW_PFVF_CMD_VFN(0));
2507 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2508 	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2509 	if (v != FW_SUCCESS)
2510 		return v;
2511 
2512 	/*
2513 	 * Extract PF resource limits and return success.
2514 	 */
2515 	word = be32_to_cpu(rpl.niqflint_niq);
2516 	pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
2517 
2518 	word = be32_to_cpu(rpl.type_to_neq);
2519 	pfres->neq = G_FW_PFVF_CMD_NEQ(word);
2520 
2521 	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
2522 	pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
2523 
2524 	return 0;
2525 }
2526 
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes issued through the SF interface */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
2540 
2541 /**
2542  * sf1_read - read data from the serial flash
2543  * @adapter: the adapter
2544  * @byte_cnt: number of bytes to read
2545  * @cont: whether another operation will be chained
2546  * @lock: whether to lock SF for PL access only
2547  * @valp: where to store the read data
2548  *
2549  * Reads up to 4 bytes of data from the serial flash.  The location of
2550  * the read needs to be specified prior to calling this by issuing the
2551  * appropriate commands to the serial flash.
2552  */
2553 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2554 		    int lock, u32 *valp)
2555 {
2556 	int ret;
2557 
2558 	if (!byte_cnt || byte_cnt > 4)
2559 		return -EINVAL;
2560 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2561 		return -EBUSY;
2562 	t4_write_reg(adapter, A_SF_OP,
2563 		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
2564 	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2565 	if (!ret)
2566 		*valp = t4_read_reg(adapter, A_SF_DATA);
2567 	return ret;
2568 }
2569 
2570 /**
2571  * sf1_write - write data to the serial flash
2572  * @adapter: the adapter
2573  * @byte_cnt: number of bytes to write
2574  * @cont: whether another operation will be chained
2575  * @lock: whether to lock SF for PL access only
2576  * @val: value to write
2577  *
2578  * Writes up to 4 bytes of data to the serial flash.  The location of
2579  * the write needs to be specified prior to calling this by issuing the
2580  * appropriate commands to the serial flash.
2581  */
2582 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2583 		     int lock, u32 val)
2584 {
2585 	if (!byte_cnt || byte_cnt > 4)
2586 		return -EINVAL;
2587 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2588 		return -EBUSY;
2589 	t4_write_reg(adapter, A_SF_DATA, val);
2590 	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
2591 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
2592 	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2593 }
2594 
2595 /**
2596  * t4_read_flash - read words from serial flash
2597  * @adapter: the adapter
2598  * @addr: the start address for the read
2599  * @nwords: how many 32-bit words to read
2600  * @data: where to store the read data
2601  * @byte_oriented: whether to store data as bytes or as words
2602  *
2603  * Read the specified number of 32-bit words from the serial flash.
2604  * If @byte_oriented is set the read data is stored as a byte array
2605  * (i.e., big-endian), otherwise as 32-bit words in the platform's
2606  * natural endianness.
2607  */
2608 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2609 		  unsigned int nwords, u32 *data, int byte_oriented)
2610 {
2611 	int ret;
2612 
2613 	if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
2614 	    (addr & 3))
2615 		return -EINVAL;
2616 
2617 	addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
2618 
2619 	ret = sf1_write(adapter, 4, 1, 0, addr);
2620 	if (ret != 0)
2621 		return ret;
2622 
2623 	ret = sf1_read(adapter, 1, 1, 0, data);
2624 	if (ret != 0)
2625 		return ret;
2626 
2627 	for ( ; nwords; nwords--, data++) {
2628 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2629 		if (nwords == 1)
2630 			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
2631 		if (ret)
2632 			return ret;
2633 		if (byte_oriented)
2634 			*data = cpu_to_be32(*data);
2635 	}
2636 	return 0;
2637 }
2638 
2639 /**
2640  * t4_get_exprom_version - return the Expansion ROM version (if any)
2641  * @adapter: the adapter
2642  * @vers: where to place the version
2643  *
2644  * Reads the Expansion ROM header from FLASH and returns the version
2645  * number (if present) through the @vers return value pointer.  We return
2646  * this in the Firmware Version Format since it's convenient.  Return
2647  * 0 on success, -ENOENT if no Expansion ROM is present.
2648  */
2649 static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
2650 {
2651 	struct exprom_header {
2652 		unsigned char hdr_arr[16];      /* must start with 0x55aa */
2653 		unsigned char hdr_ver[4];       /* Expansion ROM version */
2654 	} *hdr;
2655 	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2656 					   sizeof(u32))];
2657 	int ret;
2658 
2659 	ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
2660 			    ARRAY_SIZE(exprom_header_buf),
2661 			    exprom_header_buf, 0);
2662 	if (ret)
2663 		return ret;
2664 
2665 	hdr = (struct exprom_header *)exprom_header_buf;
2666 	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
2667 		return -ENOENT;
2668 
2669 	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
2670 		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
2671 		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
2672 		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
2673 	return 0;
2674 }
2675 
2676 /**
2677  * t4_get_fw_version - read the firmware version
2678  * @adapter: the adapter
2679  * @vers: where to place the version
2680  *
2681  * Reads the FW version from flash.
2682  */
2683 static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
2684 {
2685 	return t4_read_flash(adapter, FLASH_FW_START +
2686 			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
2687 }
2688 
2689 /**
2690  *     t4_get_bs_version - read the firmware bootstrap version
2691  *     @adapter: the adapter
2692  *     @vers: where to place the version
2693  *
2694  *     Reads the FW Bootstrap version from flash.
2695  */
2696 static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
2697 {
2698 	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
2699 			     offsetof(struct fw_hdr, fw_ver), 1,
2700 			     vers, 0);
2701 }
2702 
2703 /**
2704  * t4_get_tp_version - read the TP microcode version
2705  * @adapter: the adapter
2706  * @vers: where to place the version
2707  *
2708  * Reads the TP microcode version from flash.
2709  */
2710 static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
2711 {
2712 	return t4_read_flash(adapter, FLASH_FW_START +
2713 			     offsetof(struct fw_hdr, tp_microcode_ver),
2714 			     1, vers, 0);
2715 }
2716 
2717 /**
2718  * t4_get_version_info - extract various chip/firmware version information
2719  * @adapter: the adapter
2720  *
2721  * Reads various chip/firmware version numbers and stores them into the
2722  * adapter Adapter Parameters structure.  If any of the efforts fails
2723  * the first failure will be returned, but all of the version numbers
2724  * will be read.
2725  */
2726 int t4_get_version_info(struct adapter *adapter)
2727 {
2728 	int ret = 0;
2729 
2730 #define FIRST_RET(__getvinfo) \
2731 	do { \
2732 		int __ret = __getvinfo; \
2733 		if (__ret && !ret) \
2734 			ret = __ret; \
2735 	} while (0)
2736 
2737 	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
2738 	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
2739 	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
2740 	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
2741 
2742 #undef FIRST_RET
2743 
2744 	return ret;
2745 }
2746 
2747 /**
2748  * t4_dump_version_info - dump all of the adapter configuration IDs
2749  * @adapter: the adapter
2750  *
2751  * Dumps all of the various bits of adapter configuration version/revision
2752  * IDs information.  This is typically called at some point after
2753  * t4_get_version_info() has been called.
2754  */
2755 void t4_dump_version_info(struct adapter *adapter)
2756 {
2757 	/**
2758 	 * Device information.
2759 	 */
2760 	dev_info(adapter, "Chelsio rev %d\n",
2761 		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
2762 
2763 	/**
2764 	 * Firmware Version.
2765 	 */
2766 	if (!adapter->params.fw_vers)
2767 		dev_warn(adapter, "No firmware loaded\n");
2768 	else
2769 		dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
2770 			 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
2771 			 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
2772 			 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
2773 			 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
2774 
2775 	/**
2776 	 * Bootstrap Firmware Version.
2777 	 */
2778 	if (!adapter->params.bs_vers)
2779 		dev_warn(adapter, "No bootstrap loaded\n");
2780 	else
2781 		dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
2782 			 G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
2783 			 G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
2784 			 G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
2785 			 G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
2786 
2787 	/**
2788 	 * TP Microcode Version.
2789 	 */
2790 	if (!adapter->params.tp_vers)
2791 		dev_warn(adapter, "No TP Microcode loaded\n");
2792 	else
2793 		dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
2794 			 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
2795 			 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
2796 			 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
2797 			 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
2798 
2799 	/**
2800 	 * Expansion ROM version.
2801 	 */
2802 	if (!adapter->params.er_vers)
2803 		dev_info(adapter, "No Expansion ROM loaded\n");
2804 	else
2805 		dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
2806 			 G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
2807 			 G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
2808 			 G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
2809 			 G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
2810 }
2811 
2812 /**
2813  * t4_link_l1cfg_core - apply link configuration to MAC/PHY
2814  * @pi: the port info
2815  * @caps: link capabilities to configure
2816  * @sleep_ok: if true we may sleep while awaiting command completion
2817  *
2818  * Set up a port's MAC and PHY according to a desired link configuration.
2819  * - If the PHY can auto-negotiate first decide what to advertise, then
2820  *   enable/disable auto-negotiation as desired, and reset.
2821  * - If the PHY does not auto-negotiate just reset it.
2822  * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2823  *   otherwise do it later based on the outcome of auto-negotiation.
2824  */
2825 int t4_link_l1cfg_core(struct port_info *pi, u32 caps, u8 sleep_ok)
2826 {
2827 	struct link_config *lc = &pi->link_cfg;
2828 	struct adapter *adap = pi->adapter;
2829 	struct fw_port_cmd cmd;
2830 	int ret;
2831 
2832 	memset(&cmd, 0, sizeof(cmd));
2833 	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
2834 				       F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
2835 				       V_FW_PORT_CMD_PORTID(pi->port_id));
2836 	cmd.action_to_len16 =
2837 		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
2838 			    FW_LEN16(cmd));
2839 
2840 	cmd.u.l1cfg32.rcap32 = cpu_to_be32(caps);
2841 
2842 	if (sleep_ok)
2843 		ret = t4_wr_mbox(adap, adap->mbox, &cmd, sizeof(cmd), NULL);
2844 	else
2845 		ret = t4_wr_mbox_ns(adap, adap->mbox, &cmd, sizeof(cmd), NULL);
2846 
2847 	if (ret == FW_SUCCESS)
2848 		lc->link_caps = caps;
2849 	else
2850 		dev_err(adap,
2851 			"Requested Port Capabilities %#x rejected, error %d\n",
2852 			caps, ret);
2853 
2854 	return ret;
2855 }
2856 
2857 /**
2858  * t4_flash_cfg_addr - return the address of the flash configuration file
2859  * @adapter: the adapter
2860  *
2861  * Return the address within the flash where the Firmware Configuration
2862  * File is stored, or an error if the device FLASH is too small to contain
2863  * a Firmware Configuration File.
2864  */
2865 int t4_flash_cfg_addr(struct adapter *adapter)
2866 {
2867 	/*
2868 	 * If the device FLASH isn't large enough to hold a Firmware
2869 	 * Configuration File, return an error.
2870 	 */
2871 	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
2872 		return -ENOSPC;
2873 
2874 	return FLASH_CFG_START;
2875 }
2876 
/* PF-local interrupt causes we unmask: software and CIM interrupts. */
#define PF_INTR_MASK (F_PFSW | F_PFCIM)

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts.  Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules.  Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The PF number is encoded differently in PL_WHOAMI on T6+. */
	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
		 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);

	/* Some SGE error causes only exist on T4/T5. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	/* Unmask PF-local causes and add this PF to the global map. */
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
2911 
2912 /**
2913  * t4_intr_disable - disable interrupts
2914  * @adapter: the adapter whose interrupts should be disabled
2915  *
2916  * Disable interrupts.  We only disable the top-level interrupt
2917  * concentrators.  The caller must be a PCI function managing global
2918  * interrupts.
2919  */
2920 void t4_intr_disable(struct adapter *adapter)
2921 {
2922 	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2923 	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
2924 		 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
2925 
2926 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2927 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2928 }
2929 
2930 /**
2931  * t4_get_port_type_description - return Port Type string description
2932  * @port_type: firmware Port Type enumeration
2933  */
2934 const char *t4_get_port_type_description(enum fw_port_type port_type)
2935 {
2936 	static const char * const port_type_description[] = {
2937 		"Fiber_XFI",
2938 		"Fiber_XAUI",
2939 		"BT_SGMII",
2940 		"BT_XFI",
2941 		"BT_XAUI",
2942 		"KX4",
2943 		"CX4",
2944 		"KX",
2945 		"KR",
2946 		"SFP",
2947 		"BP_AP",
2948 		"BP4_AP",
2949 		"QSFP_10G",
2950 		"QSA",
2951 		"QSFP",
2952 		"BP40_BA",
2953 		"KR4_100G",
2954 		"CR4_QSFP",
2955 		"CR_QSFP",
2956 		"CR2_QSFP",
2957 		"SFP28",
2958 		"KR_SFP28",
2959 	};
2960 
2961 	if (port_type < ARRAY_SIZE(port_type_description))
2962 		return port_type_description[port_type];
2963 	return "UNKNOWN";
2964 }
2965 
2966 /**
2967  * t4_get_mps_bg_map - return the buffer groups associated with a port
2968  * @adap: the adapter
2969  * @pidx: the port index
2970  *
2971  * Returns a bitmap indicating which MPS buffer groups are associated
2972  * with the given port.  Bit i is set if buffer group i is used by the
2973  * port.
2974  */
2975 unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
2976 {
2977 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2978 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
2979 							  A_MPS_CMN_CTL));
2980 
2981 	if (pidx >= nports) {
2982 		dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
2983 			 pidx, nports);
2984 		return 0;
2985 	}
2986 
2987 	switch (chip_version) {
2988 	case CHELSIO_T4:
2989 	case CHELSIO_T5:
2990 		switch (nports) {
2991 		case 1: return 0xf;
2992 		case 2: return 3 << (2 * pidx);
2993 		case 4: return 1 << pidx;
2994 		}
2995 		break;
2996 
2997 	case CHELSIO_T6:
2998 		switch (nports) {
2999 		case 2: return 1 << (2 * pidx);
3000 		}
3001 		break;
3002 	}
3003 
3004 	dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
3005 		chip_version, nports);
3006 	return 0;
3007 }
3008 
3009 /**
3010  * t4_get_tp_ch_map - return TP ingress channels associated with a port
3011  * @adapter: the adapter
3012  * @pidx: the port index
3013  *
3014  * Returns a bitmap indicating which TP Ingress Channels are associated with
3015  * a given Port.  Bit i is set if TP Ingress Channel i is used by the Port.
3016  */
3017 unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
3018 {
3019 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
3020 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
3021 							  A_MPS_CMN_CTL));
3022 
3023 	if (pidx >= nports) {
3024 		dev_warn(adap, "TP Port Index %d >= Nports %d\n",
3025 			 pidx, nports);
3026 		return 0;
3027 	}
3028 
3029 	switch (chip_version) {
3030 	case CHELSIO_T4:
3031 	case CHELSIO_T5:
3032 		/* Note that this happens to be the same values as the MPS
3033 		 * Buffer Group Map for these Chips.  But we replicate the code
3034 		 * here because they're really separate concepts.
3035 		 */
3036 		switch (nports) {
3037 		case 1: return 0xf;
3038 		case 2: return 3 << (2 * pidx);
3039 		case 4: return 1 << pidx;
3040 		}
3041 		break;
3042 
3043 	case CHELSIO_T6:
3044 		switch (nports) {
3045 		case 2: return 1 << pidx;
3046 		}
3047 		break;
3048 	}
3049 
3050 	dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
3051 		chip_version, nports);
3052 	return 0;
3053 }
3054 
3055 /**
3056  * t4_get_port_stats - collect port statistics
3057  * @adap: the adapter
3058  * @idx: the port index
3059  * @p: the stats structure to fill
3060  *
3061  * Collect statistics related to the given port from HW.
3062  */
3063 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3064 {
3065 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
3066 	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
3067 
3068 #define GET_STAT(name) \
3069 	t4_read_reg64(adap, \
3070 		      (is_t4(adap->params.chip) ? \
3071 		       PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
3072 		       T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3073 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3074 
3075 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3076 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3077 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3078 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3079 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3080 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3081 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3082 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3083 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3084 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3085 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3086 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3087 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3088 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3089 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3090 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3091 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3092 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3093 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3094 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3095 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3096 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3097 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3098 
3099 	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3100 		if (stat_ctl & F_COUNTPAUSESTATTX) {
3101 			p->tx_frames -= p->tx_pause;
3102 			p->tx_octets -= p->tx_pause * 64;
3103 		}
3104 		if (stat_ctl & F_COUNTPAUSEMCTX)
3105 			p->tx_mcast_frames -= p->tx_pause;
3106 	}
3107 
3108 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3109 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3110 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3111 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3112 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3113 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3114 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3115 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3116 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3117 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3118 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3119 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3120 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3121 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3122 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3123 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3124 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3125 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3126 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3127 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3128 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3129 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3130 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3131 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3132 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3133 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3134 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3135 
3136 	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3137 		if (stat_ctl & F_COUNTPAUSESTATRX) {
3138 			p->rx_frames -= p->rx_pause;
3139 			p->rx_octets -= p->rx_pause * 64;
3140 		}
3141 		if (stat_ctl & F_COUNTPAUSEMCRX)
3142 			p->rx_mcast_frames -= p->rx_pause;
3143 	}
3144 
3145 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3146 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3147 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3148 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3149 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3150 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3151 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3152 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3153 
3154 #undef GET_STAT
3155 #undef GET_STAT_COM
3156 }
3157 
3158 /**
3159  * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
3160  * @adap: The adapter
3161  * @idx: The port
3162  * @stats: Current stats to fill
3163  * @offset: Previous stats snapshot
3164  */
3165 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3166 			      struct port_stats *stats,
3167 			      struct port_stats *offset)
3168 {
3169 	u64 *s, *o;
3170 	unsigned int i;
3171 
3172 	t4_get_port_stats(adap, idx, stats);
3173 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3174 	     i < (sizeof(struct port_stats) / sizeof(u64));
3175 	     i++, s++, o++)
3176 		*s -= *o;
3177 }
3178 
3179 /**
3180  * t4_clr_port_stats - clear port statistics
3181  * @adap: the adapter
3182  * @idx: the port index
3183  *
3184  * Clear HW statistics for the given port.
3185  */
3186 void t4_clr_port_stats(struct adapter *adap, int idx)
3187 {
3188 	unsigned int i;
3189 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
3190 	u32 port_base_addr;
3191 
3192 	if (is_t4(adap->params.chip))
3193 		port_base_addr = PORT_BASE(idx);
3194 	else
3195 		port_base_addr = T5_PORT_BASE(idx);
3196 
3197 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3198 	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3199 		t4_write_reg(adap, port_base_addr + i, 0);
3200 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3201 	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3202 		t4_write_reg(adap, port_base_addr + i, 0);
3203 	for (i = 0; i < 4; i++)
3204 		if (bgmap & (1 << i)) {
3205 			t4_write_reg(adap,
3206 				     A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
3207 				     i * 8, 0);
3208 			t4_write_reg(adap,
3209 				     A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
3210 				     i * 8, 0);
3211 		}
3212 }
3213 
/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW.  Returns either
 * an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/*
	 * Advertise whether we can't / must be the Master PF, which mailbox
	 * async firmware events should be posted to, and request CLEARINIT.
	 */
	c.err_to_clearinit = cpu_to_be32(
			V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
			V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
			V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
						M_FW_HELLO_CMD_MBMASTER) |
			V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
			V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
			F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Decode the firmware's reply: the Master PF mailbox it selected
	 * and the current device state (error preferred over initialized).
	 */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll PCIE_FW every 50ms. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
3343 
3344 /**
3345  * t4_fw_bye - end communication with FW
3346  * @adap: the adapter
3347  * @mbox: mailbox to use for the FW command
3348  *
3349  * Issues a command to terminate communication with FW.
3350  */
3351 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3352 {
3353 	struct fw_bye_cmd c;
3354 
3355 	memset(&c, 0, sizeof(c));
3356 	INIT_CMD(c, BYE, WRITE);
3357 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3358 }
3359 
3360 /**
3361  * t4_fw_reset - issue a reset to FW
3362  * @adap: the adapter
3363  * @mbox: mailbox to use for the FW command
3364  * @reset: specifies the type of reset to perform
3365  *
3366  * Issues a reset command of the specified type to FW.
3367  */
3368 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3369 {
3370 	struct fw_reset_cmd c;
3371 
3372 	memset(&c, 0, sizeof(c));
3373 	INIT_CMD(c, RESET, WRITE);
3374 	c.val = cpu_to_be32(reset);
3375 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3376 }
3377 
3378 /**
3379  * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
3380  * @adap: the adapter
3381  * @mbox: mailbox to use for the FW RESET command (if desired)
3382  * @force: force uP into RESET even if FW RESET command fails
3383  *
3384  * Issues a RESET command to firmware (if desired) with a HALT indication
3385  * and then puts the microprocessor into RESET state.  The RESET command
3386  * will only be issued if a legitimate mailbox is provided (mbox <=
3387  * M_PCIE_FW_MASTER).
3388  *
3389  * This is generally used in order for the host to safely manipulate the
3390  * adapter without fear of conflicting with whatever the firmware might
3391  * be doing.  The only way out of this state is to RESTART the firmware
3392  * ...
3393  */
3394 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3395 {
3396 	int ret = 0;
3397 
3398 	/*
3399 	 * If a legitimate mailbox is provided, issue a RESET command
3400 	 * with a HALT indication.
3401 	 */
3402 	if (mbox <= M_PCIE_FW_MASTER) {
3403 		struct fw_reset_cmd c;
3404 
3405 		memset(&c, 0, sizeof(c));
3406 		INIT_CMD(c, RESET, WRITE);
3407 		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
3408 		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
3409 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3410 	}
3411 
3412 	/*
3413 	 * Normally we won't complete the operation if the firmware RESET
3414 	 * command fails but if our caller insists we'll go ahead and put the
3415 	 * uP into RESET.  This can be useful if the firmware is hung or even
3416 	 * missing ...  We'll have to take the risk of putting the uP into
3417 	 * RESET without the cooperation of firmware in that case.
3418 	 *
3419 	 * We also force the firmware's HALT flag to be on in case we bypassed
3420 	 * the firmware RESET command above or we're dealing with old firmware
3421 	 * which doesn't have the HALT capability.  This will serve as a flag
3422 	 * for the incoming firmware to know that it's coming out of a HALT
3423 	 * rather than a RESET ... if it's new enough to understand that ...
3424 	 */
3425 	if (ret == 0 || force) {
3426 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
3427 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
3428 				 F_PCIE_FW_HALT);
3429 	}
3430 
3431 	/*
3432 	 * And we always return the result of the firmware RESET command
3433 	 * even when we force the uP into RESET ...
3434 	 */
3435 	return ret;
3436 }
3437 
3438 /**
3439  * t4_fw_restart - restart the firmware by taking the uP out of RESET
3440  * @adap: the adapter
3441  * @mbox: mailbox to use for the FW RESET command (if desired)
3442  * @reset: if we want to do a RESET to restart things
3443  *
3444  * Restart firmware previously halted by t4_fw_halt().  On successful
3445  * return the previous PF Master remains as the new PF Master and there
3446  * is no need to issue a new HELLO command, etc.
3447  *
3448  * We do this in two ways:
3449  *
3450  * 1. If we're dealing with newer firmware we'll simply want to take
3451  *    the chip's microprocessor out of RESET.  This will cause the
3452  *    firmware to start up from its start vector.  And then we'll loop
3453  *    until the firmware indicates it's started again (PCIE_FW.HALT
3454  *    reset to 0) or we timeout.
3455  *
3456  * 2. If we're dealing with older firmware then we'll need to RESET
3457  *    the chip since older firmware won't recognize the PCIE_FW.HALT
3458  *    flag and automatically RESET itself on startup.
3459  */
3460 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3461 {
3462 	if (reset) {
3463 		/*
3464 		 * Since we're directing the RESET instead of the firmware
3465 		 * doing it automatically, we need to clear the PCIE_FW.HALT
3466 		 * bit.
3467 		 */
3468 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
3469 
3470 		/*
3471 		 * If we've been given a valid mailbox, first try to get the
3472 		 * firmware to do the RESET.  If that works, great and we can
3473 		 * return success.  Otherwise, if we haven't been given a
3474 		 * valid mailbox or the RESET command failed, fall back to
3475 		 * hitting the chip with a hammer.
3476 		 */
3477 		if (mbox <= M_PCIE_FW_MASTER) {
3478 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3479 			msleep(100);
3480 			if (t4_fw_reset(adap, mbox,
3481 					F_PIORST | F_PIORSTMODE) == 0)
3482 				return 0;
3483 		}
3484 
3485 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
3486 		msleep(2000);
3487 	} else {
3488 		int ms;
3489 
3490 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3491 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3492 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
3493 				return FW_SUCCESS;
3494 			msleep(100);
3495 			ms += 100;
3496 		}
3497 		return -ETIMEDOUT;
3498 	}
3499 	return 0;
3500 }
3501 
3502 /**
3503  * t4_fl_pkt_align - return the fl packet alignment
3504  * @adap: the adapter
3505  *
3506  * T4 has a single field to specify the packing and padding boundary.
3507  * T5 onwards has separate fields for this and hence the alignment for
3508  * next packet offset is maximum of these two.
3509  */
3510 int t4_fl_pkt_align(struct adapter *adap)
3511 {
3512 	u32 sge_control, sge_control2;
3513 	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
3514 
3515 	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
3516 
3517 	/* T4 uses a single control field to specify both the PCIe Padding and
3518 	 * Packing Boundary.  T5 introduced the ability to specify these
3519 	 * separately.  The actual Ingress Packet Data alignment boundary
3520 	 * within Packed Buffer Mode is the maximum of these two
3521 	 * specifications.
3522 	 */
3523 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
3524 		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
3525 	else
3526 		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
3527 
3528 	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
3529 
3530 	fl_align = ingpadboundary;
3531 	if (!is_t4(adap->params.chip)) {
3532 		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
3533 		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
3534 		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
3535 			ingpackboundary = 16;
3536 		else
3537 			ingpackboundary = 1 << (ingpackboundary +
3538 					X_INGPACKBOUNDARY_SHIFT);
3539 
3540 		fl_align = max(ingpadboundary, ingpackboundary);
3541 	}
3542 	return fl_align;
3543 }
3544 
/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: maintain compatibility with designated chip
 *
 * Various registers in the chip contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * @chip_compat is used to limit the set of changes that are made
 * to be compatible with the indicated chip release.  This is used by
 * drivers to maintain compatibility with chip register settings when
 * the drivers haven't [yet] been updated with new chip support.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	unsigned int page_shift = cxgbe_fls(page_size) - 1;
	/* HOSTPAGESIZEPF* fields encode the page size as log2(bytes) - 10 */
	unsigned int sge_hps = page_shift - 10;
	/* Egress status page is 128B when the cache line exceeds 64B */
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;

	/* Program the same Host Page Size for all eight PFs. */
	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat))
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				V_EGRSTATUSPAGESIZE(stat_len != 64));
	else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/*
		 * T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/* The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
					    &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/*
		 * N.B. T5 has a different interpretation of the "0" value for
		 * the Packing Boundary.  This corresponds to 16 bytes instead
		 * of the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = X_INGPACKBOUNDARY_16B;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = X_INGPACKBOUNDARY_64B;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;

			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = X_INGPADBOUNDARY_32B;
		else
			ingpad = X_T6_INGPADBOUNDARY_8B;
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(ingpad) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
				 V_INGPACKBOUNDARY(ingpack));
	}

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * The first four entries are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	/* Round the 1500- and 9000-byte MTU buffer sizes up to fl_align. */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
		     & ~(fl_align - 1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
		     & ~(fl_align - 1));

	/* HPZ0 encodes the host page size as log2(bytes) - 12 */
	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}
3704 
3705 /**
3706  * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
3707  * @adap: the adapter
3708  * @page_size: the host's Base Page Size
3709  * @cache_line_size: the host's Cache Line Size
3710  *
3711  * Various registers in T4 contain values which are dependent on the
3712  * host's Base Page and Cache Line Sizes.  This function will fix all of
3713  * those registers with the appropriate values as passed in ...
3714  *
3715  * This routine makes changes which are compatible with T4 chips.
3716  */
3717 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3718 			 unsigned int cache_line_size)
3719 {
3720 	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
3721 					   T4_LAST_REV);
3722 }
3723 
3724 /**
3725  * t4_fw_initialize - ask FW to initialize the device
3726  * @adap: the adapter
3727  * @mbox: mailbox to use for the FW command
3728  *
3729  * Issues a command to FW to partially initialize the device.  This
3730  * performs initialization that generally doesn't depend on user input.
3731  */
3732 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3733 {
3734 	struct fw_initialize_cmd c;
3735 
3736 	memset(&c, 0, sizeof(c));
3737 	INIT_CMD(c, INITIALIZE, WRITE);
3738 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3739 }
3740 
3741 /**
3742  * t4_query_params_rw - query FW or device parameters
3743  * @adap: the adapter
3744  * @mbox: mailbox to use for the FW command
3745  * @pf: the PF
3746  * @vf: the VF
3747  * @nparams: the number of parameters
3748  * @params: the parameter names
3749  * @val: the parameter values
3750  * @rw: Write and read flag
3751  *
3752  * Reads the value of FW or device parameters.  Up to 7 parameters can be
3753  * queried at once.
3754  */
3755 static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
3756 			      unsigned int pf, unsigned int vf,
3757 			      unsigned int nparams, const u32 *params,
3758 			      u32 *val, int rw)
3759 {
3760 	unsigned int i;
3761 	int ret;
3762 	struct fw_params_cmd c;
3763 	__be32 *p = &c.param[0].mnem;
3764 
3765 	if (nparams > 7)
3766 		return -EINVAL;
3767 
3768 	memset(&c, 0, sizeof(c));
3769 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3770 				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
3771 				  V_FW_PARAMS_CMD_PFN(pf) |
3772 				  V_FW_PARAMS_CMD_VFN(vf));
3773 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3774 
3775 	for (i = 0; i < nparams; i++) {
3776 		*p++ = cpu_to_be32(*params++);
3777 		if (rw)
3778 			*p = cpu_to_be32(*(val + i));
3779 		p++;
3780 	}
3781 
3782 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3783 	if (ret == 0)
3784 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3785 			*val++ = be32_to_cpu(*p);
3786 	return ret;
3787 }
3788 
3789 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3790 		    unsigned int vf, unsigned int nparams, const u32 *params,
3791 		    u32 *val)
3792 {
3793 	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
3794 }
3795 
3796 /**
3797  * t4_set_params_timeout - sets FW or device parameters
3798  * @adap: the adapter
3799  * @mbox: mailbox to use for the FW command
3800  * @pf: the PF
3801  * @vf: the VF
3802  * @nparams: the number of parameters
3803  * @params: the parameter names
3804  * @val: the parameter values
3805  * @timeout: the timeout time
3806  *
3807  * Sets the value of FW or device parameters.  Up to 7 parameters can be
3808  * specified at once.
3809  */
3810 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
3811 			  unsigned int pf, unsigned int vf,
3812 			  unsigned int nparams, const u32 *params,
3813 			  const u32 *val, int timeout)
3814 {
3815 	struct fw_params_cmd c;
3816 	__be32 *p = &c.param[0].mnem;
3817 
3818 	if (nparams > 7)
3819 		return -EINVAL;
3820 
3821 	memset(&c, 0, sizeof(c));
3822 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3823 				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3824 				  V_FW_PARAMS_CMD_PFN(pf) |
3825 				  V_FW_PARAMS_CMD_VFN(vf));
3826 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3827 
3828 	while (nparams--) {
3829 		*p++ = cpu_to_be32(*params++);
3830 		*p++ = cpu_to_be32(*val++);
3831 	}
3832 
3833 	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
3834 }
3835 
3836 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3837 		  unsigned int vf, unsigned int nparams, const u32 *params,
3838 		  const u32 *val)
3839 {
3840 	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
3841 				     FW_CMD_MAX_TIMEOUT);
3842 }
3843 
3844 /**
3845  * t4_alloc_vi_func - allocate a virtual interface
3846  * @adap: the adapter
3847  * @mbox: mailbox to use for the FW command
3848  * @port: physical port associated with the VI
3849  * @pf: the PF owning the VI
3850  * @vf: the VF owning the VI
3851  * @nmac: number of MAC addresses needed (1 to 5)
3852  * @mac: the MAC addresses of the VI
3853  * @rss_size: size of RSS table slice associated with this VI
3854  * @portfunc: which Port Application Function MAC Address is desired
3855  * @idstype: Intrusion Detection Type
3856  *
3857  * Allocates a virtual interface for the given physical port.  If @mac is
3858  * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3859  * @mac should be large enough to hold @nmac Ethernet addresses, they are
3860  * stored consecutively so the space needed is @nmac * 6 bytes.
3861  * Returns a negative error number or the non-negative VI id.
3862  */
3863 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
3864 		     unsigned int port, unsigned int pf, unsigned int vf,
3865 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
3866 		     unsigned int portfunc, unsigned int idstype,
3867 		     u8 *vivld, u8 *vin)
3868 {
3869 	int ret;
3870 	struct fw_vi_cmd c;
3871 
3872 	memset(&c, 0, sizeof(c));
3873 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
3874 				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
3875 				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
3876 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
3877 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
3878 				     V_FW_VI_CMD_FUNC(portfunc));
3879 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
3880 	c.nmac = nmac - 1;
3881 
3882 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3883 	if (ret)
3884 		return ret;
3885 
3886 	if (mac) {
3887 		memcpy(mac, c.mac, sizeof(c.mac));
3888 		switch (nmac) {
3889 		case 5:
3890 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3891 			/* FALLTHROUGH */
3892 		case 4:
3893 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3894 			/* FALLTHROUGH */
3895 		case 3:
3896 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3897 			/* FALLTHROUGH */
3898 		case 2:
3899 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
3900 			/* FALLTHROUGH */
3901 		}
3902 	}
3903 	if (rss_size)
3904 		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
3905 	if (vivld)
3906 		*vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));
3907 	if (vin)
3908 		*vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));
3909 	return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
3910 }
3911 
3912 /**
3913  * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
3914  * @adap: the adapter
3915  * @mbox: mailbox to use for the FW command
3916  * @port: physical port associated with the VI
3917  * @pf: the PF owning the VI
3918  * @vf: the VF owning the VI
3919  * @nmac: number of MAC addresses needed (1 to 5)
3920  * @mac: the MAC addresses of the VI
3921  * @rss_size: size of RSS table slice associated with this VI
3922  *
3923  * Backwards compatible and convieniance routine to allocate a Virtual
3924  * Interface with a Ethernet Port Application Function and Intrustion
3925  * Detection System disabled.
3926  */
3927 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3928 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3929 		unsigned int *rss_size, u8 *vivld, u8 *vin)
3930 {
3931 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
3932 				FW_VI_FUNC_ETH, 0, vivld, vin);
3933 }
3934 
3935 /**
3936  * t4_free_vi - free a virtual interface
3937  * @adap: the adapter
3938  * @mbox: mailbox to use for the FW command
3939  * @pf: the PF owning the VI
3940  * @vf: the VF owning the VI
3941  * @viid: virtual interface identifiler
3942  *
3943  * Free a previously allocated virtual interface.
3944  */
3945 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
3946 	       unsigned int vf, unsigned int viid)
3947 {
3948 	struct fw_vi_cmd c;
3949 
3950 	memset(&c, 0, sizeof(c));
3951 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
3952 				  F_FW_CMD_EXEC);
3953 	if (is_pf4(adap))
3954 		c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
3955 					   V_FW_VI_CMD_VFN(vf));
3956 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
3957 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
3958 
3959 	if (is_pf4(adap))
3960 		return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3961 	else
3962 		return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
3963 }
3964 
3965 /**
3966  * t4_set_rxmode - set Rx properties of a virtual interface
3967  * @adap: the adapter
3968  * @mbox: mailbox to use for the FW command
3969  * @viid: the VI id
3970  * @mtu: the new MTU or -1
3971  * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3972  * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3973  * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3974  * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
3975  *          -1 no change
3976  * @sleep_ok: if true we may sleep while awaiting command completion
3977  *
3978  * Sets Rx properties of a virtual interface.
3979  */
3980 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3981 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
3982 		  bool sleep_ok)
3983 {
3984 	struct fw_vi_rxmode_cmd c;
3985 
3986 	/* convert to FW values */
3987 	if (mtu < 0)
3988 		mtu = M_FW_VI_RXMODE_CMD_MTU;
3989 	if (promisc < 0)
3990 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
3991 	if (all_multi < 0)
3992 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
3993 	if (bcast < 0)
3994 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
3995 	if (vlanex < 0)
3996 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
3997 
3998 	memset(&c, 0, sizeof(c));
3999 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
4000 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4001 				   V_FW_VI_RXMODE_CMD_VIID(viid));
4002 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4003 	c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4004 			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4005 			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4006 			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4007 			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4008 	if (is_pf4(adap))
4009 		return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
4010 				       sleep_ok);
4011 	else
4012 		return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4013 }
4014 
4015 /**
4016  *	t4_alloc_raw_mac_filt - Adds a raw mac entry in mps tcam
4017  *	@adap: the adapter
4018  *	@viid: the VI id
4019  *	@mac: the MAC address
4020  *	@mask: the mask
4021  *	@idx: index at which to add this entry
4022  *	@port_id: the port index
4023  *	@lookup_type: MAC address for inner (1) or outer (0) header
4024  *	@sleep_ok: call is allowed to sleep
4025  *
4026  *	Adds the mac entry at the specified index using raw mac interface.
4027  *
4028  *	Returns a negative error number or the allocated index for this mac.
4029  */
4030 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
4031 			  const u8 *addr, const u8 *mask, unsigned int idx,
4032 			  u8 lookup_type, u8 port_id, bool sleep_ok)
4033 {
4034 	int ret = 0;
4035 	struct fw_vi_mac_cmd c;
4036 	struct fw_vi_mac_raw *p = &c.u.raw;
4037 	u32 val;
4038 
4039 	memset(&c, 0, sizeof(c));
4040 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4041 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4042 				   V_FW_VI_MAC_CMD_VIID(viid));
4043 	val = V_FW_CMD_LEN16(1) |
4044 	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
4045 	c.freemacs_to_len16 = cpu_to_be32(val);
4046 
4047 	/* Specify that this is an inner mac address */
4048 	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
4049 
4050 	/* Lookup Type. Outer header: 0, Inner header: 1 */
4051 	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
4052 				   V_DATAPORTNUM(port_id));
4053 	/* Lookup mask and port mask */
4054 	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
4055 				    V_DATAPORTNUM(M_DATAPORTNUM));
4056 
4057 	/* Copy the address and the mask */
4058 	memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
4059 	memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
4060 
4061 	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
4062 	if (ret == 0) {
4063 		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
4064 		if (ret != (int)idx)
4065 			ret = -ENOMEM;
4066 	}
4067 
4068 	return ret;
4069 }
4070 
4071 /**
4072  *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
4073  *	@adap: the adapter
4074  *	@viid: the VI id
4075  *	@addr: the MAC address
4076  *	@mask: the mask
4077  *	@idx: index of the entry in mps tcam
4078  *	@lookup_type: MAC address for inner (1) or outer (0) header
4079  *	@port_id: the port index
4080  *	@sleep_ok: call is allowed to sleep
4081  *
4082  *	Removes the mac entry at the specified index using raw mac interface.
4083  *
4084  *	Returns a negative error number on failure.
4085  */
4086 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
4087 			 const u8 *addr, const u8 *mask, unsigned int idx,
4088 			 u8 lookup_type, u8 port_id, bool sleep_ok)
4089 {
4090 	struct fw_vi_mac_cmd c;
4091 	struct fw_vi_mac_raw *p = &c.u.raw;
4092 	u32 raw;
4093 
4094 	memset(&c, 0, sizeof(c));
4095 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4096 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4097 				   V_FW_CMD_EXEC(0) |
4098 				   V_FW_VI_MAC_CMD_VIID(viid));
4099 	raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
4100 	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) |
4101 					  raw |
4102 					  V_FW_CMD_LEN16(1));
4103 
4104 	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
4105 				     FW_VI_MAC_ID_BASED_FREE);
4106 
4107 	/* Lookup Type. Outer header: 0, Inner header: 1 */
4108 	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
4109 				   V_DATAPORTNUM(port_id));
4110 	/* Lookup mask and port mask */
4111 	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
4112 				    V_DATAPORTNUM(M_DATAPORTNUM));
4113 
4114 	/* Copy the address and the mask */
4115 	memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
4116 	memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
4117 
4118 	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
4119 }
4120 
4121 /**
4122  * t4_change_mac - modifies the exact-match filter for a MAC address
4123  * @adap: the adapter
4124  * @mbox: mailbox to use for the FW command
4125  * @viid: the VI id
4126  * @idx: index of existing filter for old value of MAC address, or -1
4127  * @addr: the new MAC address value
4128  * @persist: whether a new MAC allocation should be persistent
4129  * @add_smt: if true also add the address to the HW SMT
4130  *
4131  * Modifies an exact-match filter and sets it to the new MAC address if
4132  * @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
4133  * latter case the address is added persistently if @persist is %true.
4134  *
4135  * Note that in general it is not possible to modify the value of a given
4136  * filter so the generic way to modify an address filter is to free the one
4137  * being used by the old address value and allocate a new filter for the
4138  * new address value.
4139  *
4140  * Returns a negative error number or the index of the filter with the new
4141  * MAC value.  Note that this index may differ from @idx.
4142  */
4143 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4144 		  int idx, const u8 *addr, bool persist, bool add_smt)
4145 {
4146 	int ret, mode;
4147 	struct fw_vi_mac_cmd c;
4148 	struct fw_vi_mac_exact *p = c.u.exact;
4149 	int max_mac_addr = adap->params.arch.mps_tcam_size;
4150 
4151 	if (idx < 0)                             /* new allocation */
4152 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4153 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4154 
4155 	memset(&c, 0, sizeof(c));
4156 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4157 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4158 				   V_FW_VI_MAC_CMD_VIID(viid));
4159 	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
4160 	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
4161 				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4162 				      V_FW_VI_MAC_CMD_IDX(idx));
4163 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
4164 
4165 	if (is_pf4(adap))
4166 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4167 	else
4168 		ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
4169 	if (ret == 0) {
4170 		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
4171 		if (ret >= max_mac_addr)
4172 			ret = -ENOMEM;
4173 	}
4174 	return ret;
4175 }
4176 
4177 /**
4178  * t4_enable_vi_params - enable/disable a virtual interface
4179  * @adap: the adapter
4180  * @mbox: mailbox to use for the FW command
4181  * @viid: the VI id
4182  * @rx_en: 1=enable Rx, 0=disable Rx
4183  * @tx_en: 1=enable Tx, 0=disable Tx
4184  * @dcb_en: 1=enable delivery of Data Center Bridging messages.
4185  *
4186  * Enables/disables a virtual interface.  Note that setting DCB Enable
4187  * only makes sense when enabling a Virtual Interface ...
4188  */
4189 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
4190 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
4191 {
4192 	struct fw_vi_enable_cmd c;
4193 
4194 	memset(&c, 0, sizeof(c));
4195 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
4196 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4197 				   V_FW_VI_ENABLE_CMD_VIID(viid));
4198 	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4199 				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
4200 				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
4201 				     FW_LEN16(c));
4202 	if (is_pf4(adap))
4203 		return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
4204 	else
4205 		return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
4206 }
4207 
4208 /**
4209  * t4_enable_vi - enable/disable a virtual interface
4210  * @adap: the adapter
4211  * @mbox: mailbox to use for the FW command
4212  * @viid: the VI id
4213  * @rx_en: 1=enable Rx, 0=disable Rx
4214  * @tx_en: 1=enable Tx, 0=disable Tx
4215  *
4216  * Enables/disables a virtual interface.  Note that setting DCB Enable
4217  * only makes sense when enabling a Virtual Interface ...
4218  */
4219 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4220 		 bool rx_en, bool tx_en)
4221 {
4222 	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
4223 }
4224 
4225 /**
4226  * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4227  * @adap: the adapter
4228  * @mbox: mailbox to use for the FW command
4229  * @start: %true to enable the queues, %false to disable them
4230  * @pf: the PF owning the queues
4231  * @vf: the VF owning the queues
4232  * @iqid: ingress queue id
4233  * @fl0id: FL0 queue id or 0xffff if no attached FL0
4234  * @fl1id: FL1 queue id or 0xffff if no attached FL1
4235  *
4236  * Starts or stops an ingress queue and its associated FLs, if any.
4237  */
4238 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4239 		     unsigned int pf, unsigned int vf, unsigned int iqid,
4240 		     unsigned int fl0id, unsigned int fl1id)
4241 {
4242 	struct fw_iq_cmd c;
4243 
4244 	memset(&c, 0, sizeof(c));
4245 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4246 				  F_FW_CMD_EXEC);
4247 	c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
4248 				       V_FW_IQ_CMD_IQSTOP(!start) |
4249 				       FW_LEN16(c));
4250 	c.iqid = cpu_to_be16(iqid);
4251 	c.fl0id = cpu_to_be16(fl0id);
4252 	c.fl1id = cpu_to_be16(fl1id);
4253 	if (is_pf4(adap)) {
4254 		c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4255 					   V_FW_IQ_CMD_VFN(vf));
4256 		return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4257 	} else {
4258 		return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4259 	}
4260 }
4261 
4262 /**
4263  * t4_iq_free - free an ingress queue and its FLs
4264  * @adap: the adapter
4265  * @mbox: mailbox to use for the FW command
4266  * @pf: the PF owning the queues
4267  * @vf: the VF owning the queues
4268  * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4269  * @iqid: ingress queue id
4270  * @fl0id: FL0 queue id or 0xffff if no attached FL0
4271  * @fl1id: FL1 queue id or 0xffff if no attached FL1
4272  *
4273  * Frees an ingress queue and its associated FLs, if any.
4274  */
4275 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4276 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
4277 	       unsigned int fl0id, unsigned int fl1id)
4278 {
4279 	struct fw_iq_cmd c;
4280 
4281 	memset(&c, 0, sizeof(c));
4282 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4283 				  F_FW_CMD_EXEC);
4284 	if (is_pf4(adap))
4285 		c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4286 					   V_FW_IQ_CMD_VFN(vf));
4287 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4288 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
4289 	c.iqid = cpu_to_be16(iqid);
4290 	c.fl0id = cpu_to_be16(fl0id);
4291 	c.fl1id = cpu_to_be16(fl1id);
4292 	if (is_pf4(adap))
4293 		return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4294 	else
4295 		return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4296 }
4297 
4298 /**
4299  * t4_eth_eq_free - free an Ethernet egress queue
4300  * @adap: the adapter
4301  * @mbox: mailbox to use for the FW command
4302  * @pf: the PF owning the queue
4303  * @vf: the VF owning the queue
4304  * @eqid: egress queue id
4305  *
4306  * Frees an Ethernet egress queue.
4307  */
4308 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4309 		   unsigned int vf, unsigned int eqid)
4310 {
4311 	struct fw_eq_eth_cmd c;
4312 
4313 	memset(&c, 0, sizeof(c));
4314 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
4315 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
4316 	if (is_pf4(adap))
4317 		c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4318 					   V_FW_IQ_CMD_VFN(vf));
4319 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4320 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
4321 	if (is_pf4(adap))
4322 		return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4323 	else
4324 		return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4325 }
4326 
/**
 * t4_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a human-readable string for the given Link Down Reason Code,
 * or "Bad Reason Code" for values outside the known range.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};
	const size_t nreasons = sizeof(reason) / sizeof(reason[0]);

	return link_down_rc < nreasons ? reason[link_down_rc]
				       : "Bad Reason Code";
}
4351 
4352 static u32 t4_speed_to_fwcap(u32 speed)
4353 {
4354 	switch (speed) {
4355 	case 100000:
4356 		return FW_PORT_CAP32_SPEED_100G;
4357 	case 50000:
4358 		return FW_PORT_CAP32_SPEED_50G;
4359 	case 40000:
4360 		return FW_PORT_CAP32_SPEED_40G;
4361 	case 25000:
4362 		return FW_PORT_CAP32_SPEED_25G;
4363 	case 10000:
4364 		return FW_PORT_CAP32_SPEED_10G;
4365 	case 1000:
4366 		return FW_PORT_CAP32_SPEED_1G;
4367 	case 100:
4368 		return FW_PORT_CAP32_SPEED_100M;
4369 	default:
4370 		break;
4371 	}
4372 
4373 	return 0;
4374 }
4375 
4376 /* Return the highest speed set in the port capabilities, in Mb/s. */
4377 unsigned int t4_fwcap_to_speed(u32 caps)
4378 {
4379 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
4380 	do { \
4381 		if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
4382 			return __speed; \
4383 	} while (0)
4384 
4385 	TEST_SPEED_RETURN(100G, 100000);
4386 	TEST_SPEED_RETURN(50G,   50000);
4387 	TEST_SPEED_RETURN(40G,   40000);
4388 	TEST_SPEED_RETURN(25G,   25000);
4389 	TEST_SPEED_RETURN(10G,   10000);
4390 	TEST_SPEED_RETURN(1G,     1000);
4391 	TEST_SPEED_RETURN(100M,    100);
4392 
4393 #undef TEST_SPEED_RETURN
4394 
4395 	return 0;
4396 }
4397 
4398 static void t4_set_link_autoneg_speed(struct port_info *pi, u32 *new_caps)
4399 {
4400 	struct link_config *lc = &pi->link_cfg;
4401 	u32 caps = *new_caps;
4402 
4403 	caps &= ~V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
4404 	caps |= G_FW_PORT_CAP32_SPEED(lc->acaps);
4405 
4406 	*new_caps = caps;
4407 }
4408 
4409 int t4_set_link_speed(struct port_info *pi, u32 speed, u32 *new_caps)
4410 {
4411 	u32 fw_speed_cap = t4_speed_to_fwcap(speed);
4412 	struct link_config *lc = &pi->link_cfg;
4413 	u32 caps = *new_caps;
4414 
4415 	if (!(lc->pcaps & fw_speed_cap))
4416 		return -EOPNOTSUPP;
4417 
4418 	caps &= ~V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
4419 	caps |= fw_speed_cap;
4420 
4421 	*new_caps = caps;
4422 
4423 	return 0;
4424 }
4425 
/**
 * t4_set_link_pause - derive pause (flow-control) link capabilities
 * @pi: the port info
 * @autoneg: non-zero to request autonegotiation
 * @pause_tx: non-zero to request Tx pause
 * @pause_rx: non-zero to request Rx pause
 * @new_caps: link capabilities word, updated in place
 *
 * Rewrites the autonegotiation, speed, flow-control and 802.3 pause
 * fields of @new_caps according to the requested settings.  Returns 0 on
 * success, or -EINVAL if autonegotiation is requested but the port does
 * not support it.
 */
int t4_set_link_pause(struct port_info *pi, u8 autoneg, u8 pause_tx,
		      u8 pause_rx, u32 *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	u32 caps = *new_caps;
	u32 max_speed;

	/* Speed from the current link capabilities; 0 if the link is down. */
	max_speed = t4_fwcap_to_speed(lc->link_caps);

	if (autoneg) {
		if (!(lc->pcaps & FW_PORT_CAP32_ANEG))
			return -EINVAL;

		caps |= FW_PORT_CAP32_ANEG;
		t4_set_link_autoneg_speed(pi, &caps);
	} else {
		/* Link down: fall back to the highest advertised speed. */
		if (!max_speed)
			max_speed = t4_fwcap_to_speed(lc->acaps);

		caps &= ~FW_PORT_CAP32_ANEG;
		t4_set_link_speed(pi, max_speed, &caps);
	}

	if (lc->pcaps & FW_PORT_CAP32_MDIAUTO)
		caps |= V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);

	/* Rebuild the flow-control and 802.3 pause fields from scratch. */
	caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
	caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);
	if (pause_tx && pause_rx) {
		/* Symmetric pause. */
		caps |= FW_PORT_CAP32_FC_TX | FW_PORT_CAP32_FC_RX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
			caps |= FW_PORT_CAP32_802_3_PAUSE;
	} else if (pause_tx) {
		/* Asymmetric pause, Tx direction only. */
		caps |= FW_PORT_CAP32_FC_TX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			caps |= FW_PORT_CAP32_802_3_ASM_DIR;
	} else if (pause_rx) {
		/* Asymmetric pause, Rx direction only. */
		caps |= FW_PORT_CAP32_FC_RX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
			caps |= FW_PORT_CAP32_802_3_PAUSE;

		if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			caps |= FW_PORT_CAP32_802_3_ASM_DIR;
	}

	*new_caps = caps;

	return 0;
}
4475 
/**
 * t4_set_link_fec - derive FEC link capabilities from requested modes
 * @pi: the port info
 * @fec_rs: non-zero to request Reed-Solomon FEC
 * @fec_baser: non-zero to request BaseR FEC
 * @fec_none: non-zero to request that FEC be disabled
 * @new_caps: link capabilities word, updated in place
 *
 * Validates the requested FEC modes against the port's capabilities and
 * the current (or, if the link is down, advertised) link speed, then
 * rewrites the FEC field of @new_caps.  Returns 0 on success or
 * -EOPNOTSUPP if the port has no FEC support or a requested mode is not
 * valid at the link speed.
 */
int t4_set_link_fec(struct port_info *pi, u8 fec_rs, u8 fec_baser,
		    u8 fec_none, u32 *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	u32 max_speed, caps = *new_caps;

	/* Bail out early if the hardware has no FEC capability at all. */
	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	/* Link might be down. In that case consider the max
	 * speed advertised
	 */
	max_speed = t4_fwcap_to_speed(lc->link_caps);
	if (!max_speed)
		max_speed = t4_fwcap_to_speed(lc->acaps);

	/* Start from a clean FEC field and OR in each validated mode. */
	caps &= ~V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
	if (fec_rs) {
		/* RS-FEC is only accepted at 100G and 25G. */
		switch (max_speed) {
		case 100000:
		case 25000:
			caps |= FW_PORT_CAP32_FEC_RS;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (fec_baser) {
		/* BaseR FEC is only accepted at 50G and 25G. */
		switch (max_speed) {
		case 50000:
		case 25000:
			caps |= FW_PORT_CAP32_FEC_BASER_RS;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (fec_none)
		caps |= FW_PORT_CAP32_FEC_NO_FEC;

	if (!(caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC))) {
		/* No explicit encoding is requested.
		 * So, default back to AUTO.
		 */
		switch (max_speed) {
		case 100000:
			caps |= FW_PORT_CAP32_FEC_RS |
				FW_PORT_CAP32_FEC_NO_FEC;
			break;
		case 50000:
			caps |= FW_PORT_CAP32_FEC_BASER_RS |
				FW_PORT_CAP32_FEC_NO_FEC;
			break;
		case 25000:
			caps |= FW_PORT_CAP32_FEC_RS |
				FW_PORT_CAP32_FEC_BASER_RS |
				FW_PORT_CAP32_FEC_NO_FEC;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	*new_caps = caps;

	return 0;
}
4545 
4546 /**
4547  * t4_handle_get_port_info - process a FW reply message
4548  * @pi: the port info
4549  * @rpl: start of the FW message
4550  *
4551  * Processes a GET_PORT_INFO FW reply message.
4552  */
4553 static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
4554 {
4555 	const struct fw_port_cmd *cmd = (const void *)rpl;
4556 	u8 link_ok, link_down_rc, mod_type, port_type;
4557 	u32 action, pcaps, acaps, link_caps, lstatus;
4558 	struct link_config *lc = &pi->link_cfg;
4559 	struct adapter *adapter = pi->adapter;
4560 	u8 mod_changed = 0;
4561 
4562 	/* Extract the various fields from the Port Information message.
4563 	 */
4564 	action = be32_to_cpu(cmd->action_to_len16);
4565 	if (G_FW_PORT_CMD_ACTION(action) != FW_PORT_ACTION_GET_PORT_INFO32) {
4566 		dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
4567 			 action);
4568 		return;
4569 	}
4570 
4571 	lstatus = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
4572 	link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS32) ? 1 : 0;
4573 	link_down_rc = G_FW_PORT_CMD_LINKDNRC32(lstatus);
4574 	port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus);
4575 	mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus);
4576 
4577 	pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
4578 	acaps = be32_to_cpu(cmd->u.info32.acaps32);
4579 	link_caps = be32_to_cpu(cmd->u.info32.linkattr32);
4580 
4581 	if (mod_type != lc->mod_type) {
4582 		t4_init_link_config(pi, pcaps, acaps, lc->mdio_addr,
4583 				    port_type, mod_type);
4584 		t4_os_portmod_changed(adapter, pi->pidx);
4585 		mod_changed = 1;
4586 	}
4587 	if (link_ok != lc->link_ok || acaps != lc->acaps ||
4588 	    link_caps != lc->link_caps) { /* something changed */
4589 		if (!link_ok && lc->link_ok) {
4590 			lc->link_down_rc = link_down_rc;
4591 			dev_warn(adap, "Port %d link down, reason: %s\n",
4592 				 pi->port_id,
4593 				 t4_link_down_rc_str(link_down_rc));
4594 		}
4595 		lc->link_ok = link_ok;
4596 		lc->acaps = acaps;
4597 		lc->link_caps = link_caps;
4598 		t4_os_link_changed(adapter, pi->pidx);
4599 	}
4600 
4601 	if (mod_changed) {
4602 		u32 mod_caps = lc->admin_caps;
4603 		int ret;
4604 
4605 		ret = t4_link_l1cfg_ns(pi, mod_caps);
4606 		if (ret != FW_SUCCESS)
4607 			dev_warn(adapter,
4608 				 "Attempt to update new Transceiver Module settings %#x failed with error: %d\n",
4609 				 mod_caps, ret);
4610 	}
4611 }
4612 
4613 /**
4614  * t4_ctrl_eq_free - free a control egress queue
4615  * @adap: the adapter
4616  * @mbox: mailbox to use for the FW command
4617  * @pf: the PF owning the queue
4618  * @vf: the VF owning the queue
4619  * @eqid: egress queue id
4620  *
4621  * Frees a control egress queue.
4622  */
4623 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4624 		    unsigned int vf, unsigned int eqid)
4625 {
4626 	struct fw_eq_ctrl_cmd c;
4627 
4628 	memset(&c, 0, sizeof(c));
4629 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
4630 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4631 				  V_FW_EQ_CTRL_CMD_PFN(pf) |
4632 				  V_FW_EQ_CTRL_CMD_VFN(vf));
4633 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4634 	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
4635 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4636 }
4637 
/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.  Only
 * 32-bit GET_PORT_INFO port commands are handled; anything else is
 * logged and rejected with -EINVAL.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	/*
	 * This might be a port command ... this simplifies the following
	 * conditionals ...  We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO32) {
		/* link/module state change message */
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;
		int i;

		/* Map the FW channel back to the owning port.
		 * NOTE(review): if no port's tx_chan matches, pi is left
		 * pointing at the last port visited (or NULL on an adapter
		 * with zero ports) — presumably FW only reports channels
		 * this function owns; verify against callers.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}

		t4_handle_get_port_info(pi, rpl);
	} else {
		dev_warn(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
4678 
4679 void t4_reset_link_config(struct adapter *adap, int idx)
4680 {
4681 	struct port_info *pi = adap2pinfo(adap, idx);
4682 	struct link_config *lc = &pi->link_cfg;
4683 
4684 	lc->link_ok = 0;
4685 	lc->link_down_rc = 0;
4686 	lc->link_caps = 0;
4687 }
4688 
/**
 * t4_init_link_config - initialize a link's SW state
 * @pi: the port info
 * @pcaps: link Port Capabilities
 * @acaps: link current Advertised Port Capabilities
 * @mdio_addr : address of the PHY
 * @port_type : firmware port type
 * @mod_type  : firmware module type
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
void t4_init_link_config(struct port_info *pi, u32 pcaps, u32 acaps,
			 u8 mdio_addr, u8 port_type, u8 mod_type)
{
	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
	struct link_config *lc = &pi->link_cfg;

	lc->pcaps = pcaps;
	lc->acaps = acaps;
	/* Administrative settings start from whatever is advertised. */
	lc->admin_caps = acaps;
	lc->link_caps = 0;

	lc->mdio_addr = mdio_addr;
	lc->port_type = port_type;
	lc->mod_type = mod_type;

	lc->link_ok = 0;
	lc->link_down_rc = 0;

	/* Turn Tx and Rx pause off by default */
	lc->admin_caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
	lc->admin_caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);
	if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
		lc->admin_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;

	/* Reset FEC caps to default values */
	if (lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) {
		/* Choose a single default mode from the advertised set,
		 * preferring RS, then BaseR, else no FEC.
		 */
		if (lc->acaps & FW_PORT_CAP32_FEC_RS)
			fec_rs = 1;
		else if (lc->acaps & FW_PORT_CAP32_FEC_BASER_RS)
			fec_baser = 1;
		else
			fec_none = 1;

		lc->admin_caps &= ~V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
		t4_set_link_fec(pi, fec_rs, fec_baser, fec_none,
				&lc->admin_caps);
	}

	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
		lc->admin_caps &= ~FW_PORT_CAP32_FORCE_FEC;

	/* Reset MDI to AUTO */
	if (lc->pcaps & FW_PORT_CAP32_MDIAUTO) {
		lc->admin_caps &= ~V_FW_PORT_CAP32_MDI(M_FW_PORT_CAP32_MDI);
		lc->admin_caps |= V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
	}
}
4748 
4749 /**
4750  * t4_wait_dev_ready - wait till to reads of registers work
4751  *
4752  * Right after the device is RESET is can take a small amount of time
4753  * for it to respond to register reads.  Until then, all reads will
4754  * return either 0xff...ff or 0xee...ee.  Return an error if reads
4755  * don't work within a reasonable time frame.
4756  */
4757 static int t4_wait_dev_ready(struct adapter *adapter)
4758 {
4759 	u32 whoami;
4760 
4761 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4762 
4763 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4764 		return 0;
4765 
4766 	msleep(500);
4767 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4768 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4769 		return 0;
4770 
4771 	dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
4772 		whoami);
4773 	return -EIO;
4774 }
4775 
/* Describes one explicitly-supported serial Flash part. */
struct flash_desc {
	u32 vendor_and_model_id;	/* raw ID returned by the Read ID command */
	u32 size_mb;	/* total part size in bytes (e.g. 4 << 20), despite the name */
};
4780 
/**
 * t4_get_flash_params - read the serial Flash ID and derive its geometry
 * @adapter: the adapter
 *
 * Issues a Read ID command to the serial Flash, decodes the part's size
 * from the returned manufacturer/density bytes (falling back to a 4MB
 * assumption for unknown parts) and stores the results in
 * adapter->params.sf_size and adapter->params.sf_nsec.  Returns 0 on
 * success or a negative error from the serial Flash access routines.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;

	/**
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but
	 * many Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
	if (ret < 0)
		return ret;

	/**
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}
	}

	/**
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/**
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14:
			size = 1 << 20; /* 1MB */
			break;
		case 0x15:
			size = 1 << 21; /* 2MB */
			break;
		case 0x16:
			size = 1 << 22; /* 4MB */
			break;
		case 0x17:
			size = 1 << 23; /* 8MB */
			break;
		case 0x18:
			size = 1 << 24; /* 16MB */
			break;
		case 0x19:
			size = 1 << 25; /* 32MB */
			break;
		case 0x20:
			size = 1 << 26; /* 64MB */
			break;
		case 0x21:
			size = 1 << 27; /* 128MB */
			break;
		case 0x22:
			size = 1 << 28; /* 256MB */
			break;
		}
		break;
	}

	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/**
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16:
			size = 1 << 25; /* 32MB */
			break;
		case 0x17:
			size = 1 << 26; /* 64MB */
			break;
		}
		break;
	}

	case 0xc2: { /* Macronix */
		/**
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17:
			size = 1 << 23; /* 8MB */
			break;
		case 0x18:
			size = 1 << 24; /* 16MB */
			break;
		}
		break;
	}

	case 0xef: { /* Winbond */
		/**
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17:
			size = 1 << 23; /* 8MB */
			break;
		case 0x18:
			size = 1 << 24; /* 16MB */
			break;
		}
		break;
	}
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		dev_warn(adapter,
			 "Unknown Flash Part, ID = %#x, assuming 4MB\n",
			 flashid);
		size = 1 << 22;
	}

	/**
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

found:
	/*
	 * We should reject adapters with FLASHes which are too small. So, emit
	 * a warning.
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
4953 
4954 static void set_pcie_completion_timeout(struct adapter *adapter,
4955 					u8 range)
4956 {
4957 	u32 pcie_cap;
4958 	u16 val;
4959 
4960 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4961 	if (pcie_cap) {
4962 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4963 		val &= 0xfff0;
4964 		val |= range;
4965 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4966 	}
4967 }
4968 
4969 /**
4970  * t4_get_chip_type - Determine chip type from device ID
4971  * @adap: the adapter
4972  * @ver: adapter version
4973  */
4974 int t4_get_chip_type(struct adapter *adap, int ver)
4975 {
4976 	enum chip_type chip = 0;
4977 	u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
4978 
4979 	/* Retrieve adapter's device ID */
4980 	switch (ver) {
4981 	case CHELSIO_T5:
4982 		chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4983 		break;
4984 	case CHELSIO_T6:
4985 		chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4986 		break;
4987 	default:
4988 		dev_err(adap, "Device %d is not supported\n",
4989 			adap->params.pci.device_id);
4990 		return -EINVAL;
4991 	}
4992 
4993 	return chip;
4994 }
4995 
4996 /**
4997  * t4_prep_adapter - prepare SW and HW for operation
4998  * @adapter: the adapter
4999  *
5000  * Initialize adapter SW state for the various HW modules, set initial
5001  * values for some adapter tunables, take PHYs out of reset, and
5002  * initialize the MDIO interface.
5003  */
5004 int t4_prep_adapter(struct adapter *adapter)
5005 {
5006 	int ret, ver;
5007 	u32 pl_rev;
5008 
5009 	ret = t4_wait_dev_ready(adapter);
5010 	if (ret < 0)
5011 		return ret;
5012 
5013 	pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
5014 	adapter->params.pci.device_id = adapter->pdev->id.device_id;
5015 	adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
5016 
5017 	/*
5018 	 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
5019 	 * ADAPTER (VERSION << 4 | REVISION)
5020 	 */
5021 	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
5022 	adapter->params.chip = 0;
5023 	switch (ver) {
5024 	case CHELSIO_T5:
5025 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5026 		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
5027 		adapter->params.arch.mps_tcam_size =
5028 						NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5029 		adapter->params.arch.mps_rplc_size = 128;
5030 		adapter->params.arch.nchan = NCHAN;
5031 		adapter->params.arch.vfcount = 128;
5032 		/* Congestion map is for 4 channels so that
5033 		 * MPS can have 4 priority per port.
5034 		 */
5035 		adapter->params.arch.cng_ch_bits_log = 2;
5036 		break;
5037 	case CHELSIO_T6:
5038 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
5039 		adapter->params.arch.sge_fl_db = 0;
5040 		adapter->params.arch.mps_tcam_size =
5041 						NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5042 		adapter->params.arch.mps_rplc_size = 256;
5043 		adapter->params.arch.nchan = 2;
5044 		adapter->params.arch.vfcount = 256;
5045 		/* Congestion map is for 2 channels so that
5046 		 * MPS can have 8 priority per port.
5047 		 */
5048 		adapter->params.arch.cng_ch_bits_log = 3;
5049 		break;
5050 	default:
5051 		dev_err(adapter, "%s: Device %d is not supported\n",
5052 			__func__, adapter->params.pci.device_id);
5053 		return -EINVAL;
5054 	}
5055 
5056 	adapter->params.pci.vpd_cap_addr =
5057 		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5058 
5059 	ret = t4_get_flash_params(adapter);
5060 	if (ret < 0) {
5061 		dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
5062 			-ret);
5063 		return ret;
5064 	}
5065 
5066 	adapter->params.cim_la_size = CIMLA_SIZE;
5067 
5068 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5069 
5070 	/*
5071 	 * Default port and clock for debugging in case we can't reach FW.
5072 	 */
5073 	adapter->params.nports = 1;
5074 	adapter->params.portvec = 1;
5075 	adapter->params.vpd.cclk = 50000;
5076 
5077 	/* Set pci completion timeout value to 4 seconds. */
5078 	set_pcie_completion_timeout(adapter, 0xd);
5079 	return 0;
5080 }
5081 
5082 /**
5083  * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
5084  * @adapter: the adapter
5085  * @qid: the Queue ID
5086  * @qtype: the Ingress or Egress type for @qid
5087  * @pbar2_qoffset: BAR2 Queue Offset
5088  * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
5089  *
5090  * Returns the BAR2 SGE Queue Registers information associated with the
5091  * indicated Absolute Queue ID.  These are passed back in return value
5092  * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
5093  * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
5094  *
5095  * This may return an error which indicates that BAR2 SGE Queue
5096  * registers aren't available.  If an error is not returned, then the
5097  * following values are returned:
5098  *
5099  *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
5100  *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
5101  *
5102  * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
5103  * require the "Inferred Queue ID" ability may be used.  E.g. the
5104  * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
5105  * then these "Inferred Queue ID" register may not be used.
5106  */
5107 int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
5108 		      enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
5109 		      unsigned int *pbar2_qid)
5110 {
5111 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
5112 	u64 bar2_page_offset, bar2_qoffset;
5113 	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
5114 
5115 	/*
5116 	 * T4 doesn't support BAR2 SGE Queue registers.
5117 	 */
5118 	if (is_t4(adapter->params.chip))
5119 		return -EINVAL;
5120 
5121 	/*
5122 	 * Get our SGE Page Size parameters.
5123 	 */
5124 	page_shift = adapter->params.sge.hps + 10;
5125 	page_size = 1 << page_shift;
5126 
5127 	/*
5128 	 * Get the right Queues per Page parameters for our Queue.
5129 	 */
5130 	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
5131 			      adapter->params.sge.eq_qpp :
5132 			      adapter->params.sge.iq_qpp);
5133 	qpp_mask = (1 << qpp_shift) - 1;
5134 
5135 	/*
5136 	 * Calculate the basics of the BAR2 SGE Queue register area:
5137 	 *  o The BAR2 page the Queue registers will be in.
5138 	 *  o The BAR2 Queue ID.
5139 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
5140 	 */
5141 	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
5142 	bar2_qid = qid & qpp_mask;
5143 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
5144 
5145 	/*
5146 	 * If the BAR2 Queue ID Offset is less than the Page Size, then the
5147 	 * hardware will infer the Absolute Queue ID simply from the writes to
5148 	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
5149 	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
5150 	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
5151 	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
5152 	 * from the BAR2 Page and BAR2 Queue ID.
5153 	 *
5154 	 * One important censequence of this is that some BAR2 SGE registers
5155 	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
5156 	 * there.  But other registers synthesize the SGE Queue ID purely
5157 	 * from the writes to the registers -- the Write Combined Doorbell
5158 	 * Buffer is a good example.  These BAR2 SGE Registers are only
5159 	 * available for those BAR2 SGE Register areas where the SGE Absolute
5160 	 * Queue ID can be inferred from simple writes.
5161 	 */
5162 	bar2_qoffset = bar2_page_offset;
5163 	bar2_qinferred = (bar2_qid_offset < page_size);
5164 	if (bar2_qinferred) {
5165 		bar2_qoffset += bar2_qid_offset;
5166 		bar2_qid = 0;
5167 	}
5168 
5169 	*pbar2_qoffset = bar2_qoffset;
5170 	*pbar2_qid = bar2_qid;
5171 	return 0;
5172 }
5173 
5174 /**
5175  * t4_init_sge_params - initialize adap->params.sge
5176  * @adapter: the adapter
5177  *
5178  * Initialize various fields of the adapter's SGE Parameters structure.
5179  */
5180 int t4_init_sge_params(struct adapter *adapter)
5181 {
5182 	struct sge_params *sge_params = &adapter->params.sge;
5183 	u32 hps, qpp;
5184 	unsigned int s_hps, s_qpp;
5185 
5186 	/*
5187 	 * Extract the SGE Page Size for our PF.
5188 	 */
5189 	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
5190 	s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
5191 		 adapter->pf);
5192 	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
5193 
5194 	/*
5195 	 * Extract the SGE Egress and Ingess Queues Per Page for our PF.
5196 	 */
5197 	s_qpp = (S_QUEUESPERPAGEPF0 +
5198 		 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
5199 	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
5200 	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
5201 	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
5202 	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
5203 
5204 	return 0;
5205 }
5206 
5207 /**
5208  * t4_init_tp_params - initialize adap->params.tp
5209  * @adap: the adapter
5210  *
5211  * Initialize various fields of the adapter's TP Parameters structure.
5212  */
5213 int t4_init_tp_params(struct adapter *adap)
5214 {
5215 	int chan, ret;
5216 	u32 param, v;
5217 
5218 	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5219 	adap->params.tp.tre = G_TIMERRESOLUTION(v);
5220 	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5221 
5222 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5223 	for (chan = 0; chan < NCHAN; chan++)
5224 		adap->params.tp.tx_modq[chan] = chan;
5225 
5226 	/*
5227 	 * Cache the adapter's Compressed Filter Mode/Mask and global Ingress
5228 	 * Configuration.
5229 	 */
5230 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5231 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
5232 		 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));
5233 
5234 	/* Read current value */
5235 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5236 			      1, &param, &v);
5237 	if (!ret) {
5238 		dev_info(adap, "Current filter mode/mask 0x%x:0x%x\n",
5239 			 G_FW_PARAMS_PARAM_FILTER_MODE(v),
5240 			 G_FW_PARAMS_PARAM_FILTER_MASK(v));
5241 		adap->params.tp.vlan_pri_map =
5242 			G_FW_PARAMS_PARAM_FILTER_MODE(v);
5243 		adap->params.tp.filter_mask =
5244 			G_FW_PARAMS_PARAM_FILTER_MASK(v);
5245 	} else {
5246 		dev_info(adap,
5247 			 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
5248 
5249 		/* In case of older-fw (which doesn't expose the api
5250 		 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
5251 		 * the fw api) combination, fall-back to older method of reading
5252 		 * the filter mode from indirect-register
5253 		 */
5254 		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5255 				 &adap->params.tp.vlan_pri_map, 1,
5256 				 A_TP_VLAN_PRI_MAP);
5257 
5258 		/* With the older-fw and newer-driver combination we might run
5259 		 * into an issue when user wants to use hash filter region but
5260 		 * the filter_mask is zero, in this case filter_mask validation
5261 		 * is tough. To avoid that we set the filter_mask same as filter
5262 		 * mode, which will behave exactly as the older way of ignoring
5263 		 * the filter mask validation.
5264 		 */
5265 		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
5266 	}
5267 
5268 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5269 			 &adap->params.tp.ingress_config, 1,
5270 			 A_TP_INGRESS_CONFIG);
5271 
5272 	/* For T6, cache the adapter's compressed error vector
5273 	 * and passing outer header info for encapsulated packets.
5274 	 */
5275 	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
5276 		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
5277 		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
5278 	}
5279 
5280 	/*
5281 	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5282 	 * shift positions of several elements of the Compressed Filter Tuple
5283 	 * for this adapter which we need frequently ...
5284 	 */
5285 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5286 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5287 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5288 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
5289 							       F_PROTOCOL);
5290 	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
5291 								F_ETHERTYPE);
5292 	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
5293 							       F_MACMATCH);
5294 	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
5295 
5296 	v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
5297 	adap->params.tp.hash_filter_mask = v;
5298 	v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
5299 	adap->params.tp.hash_filter_mask |= ((u64)v << 32);
5300 
5301 	return 0;
5302 }
5303 
5304 /**
5305  * t4_filter_field_shift - calculate filter field shift
5306  * @adap: the adapter
5307  * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5308  *
5309  * Return the shift position of a filter field within the Compressed
5310  * Filter Tuple.  The filter field is specified via its selection bit
5311  * within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
5312  */
5313 int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
5314 {
5315 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5316 	unsigned int sel;
5317 	int field_shift;
5318 
5319 	if ((filter_mode & filter_sel) == 0)
5320 		return -1;
5321 
5322 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5323 		switch (filter_mode & sel) {
5324 		case F_FCOE:
5325 			field_shift += W_FT_FCOE;
5326 			break;
5327 		case F_PORT:
5328 			field_shift += W_FT_PORT;
5329 			break;
5330 		case F_VNIC_ID:
5331 			field_shift += W_FT_VNIC_ID;
5332 			break;
5333 		case F_VLAN:
5334 			field_shift += W_FT_VLAN;
5335 			break;
5336 		case F_TOS:
5337 			field_shift += W_FT_TOS;
5338 			break;
5339 		case F_PROTOCOL:
5340 			field_shift += W_FT_PROTOCOL;
5341 			break;
5342 		case F_ETHERTYPE:
5343 			field_shift += W_FT_ETHERTYPE;
5344 			break;
5345 		case F_MACMATCH:
5346 			field_shift += W_FT_MACMATCH;
5347 			break;
5348 		case F_MPSHITTYPE:
5349 			field_shift += W_FT_MPSHITTYPE;
5350 			break;
5351 		case F_FRAGMENTATION:
5352 			field_shift += W_FT_FRAGMENTATION;
5353 			break;
5354 		}
5355 	}
5356 	return field_shift;
5357 }
5358 
5359 int t4_init_rss_mode(struct adapter *adap, int mbox)
5360 {
5361 	int i, ret;
5362 	struct fw_rss_vi_config_cmd rvc;
5363 
5364 	memset(&rvc, 0, sizeof(rvc));
5365 
5366 	for_each_port(adap, i) {
5367 		struct port_info *p = adap2pinfo(adap, i);
5368 
5369 		rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5370 				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5371 				       V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
5372 		rvc.retval_len16 = htonl(FW_LEN16(rvc));
5373 		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
5374 		if (ret)
5375 			return ret;
5376 		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
5377 	}
5378 	return 0;
5379 }
5380 
/**
 * t4_port_init - initialize the ports of an adapter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW commands
 * @pf: the PF passed to the firmware commands
 * @vf: the VF passed to the firmware commands
 *
 * Enables 32-bit port capability reporting in the firmware, then for
 * each port: issues a GET_PORT_INFO32 port command, allocates a virtual
 * interface, stores the returned MAC address, and initializes the
 * port's link configuration from the reported capabilities.  Returns 0
 * on success or a negative error from the firmware interaction.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u32 param, val, pcaps, acaps;
	enum fw_port_type port_type;
	struct fw_port_cmd cmd;
	u8 vivld = 0, vin = 0;
	int ret, i, j = 0;
	int mdio_addr;
	u8 addr[6];

	/* Ask the firmware to report port capabilities in the 32-bit
	 * format (needed for the GET_PORT_INFO32 action used below).
	 */
	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
	val = 1;
	ret = t4_set_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret < 0)
		return ret;

	memset(&cmd, 0, sizeof(cmd));

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);
		unsigned int rss_size = 0;
		u32 lstatus32;

		/* Advance j to the next physical port present in the
		 * adapter's port bitmap.
		 */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		/* Query the firmware for this port's information. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
					       F_FW_CMD_REQUEST |
					       F_FW_CMD_READ |
					       V_FW_PORT_CMD_PORTID(j));
		val = FW_PORT_ACTION_GET_PORT_INFO32;
		cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(val) |
						  FW_LEN16(cmd));
		ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
		if (ret)
			return ret;

		/* Extract the various fields from the Port Information
		 * message.
		 */
		lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		/* MDIO address is only meaningful when the MDIO-capable
		 * bit is set; -1 means no MDIO.
		 */
		mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
			    (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : -1;
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);

		/* Allocate the port's virtual interface; the returned
		 * value is the VI ID and @addr receives the MAC address.
		 */
		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size,
				  &vivld, &vin);
		if (ret < 0)
			return ret;

		pi->viid = ret;
		pi->tx_chan = j;
		pi->rss_size = rss_size;
		t4_os_set_hw_addr(adap, i, addr);

		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */
		if (adap->params.viid_smt_extn_support) {
			pi->vivld = vivld;
			pi->vin = vin;
		} else {
			/* Retrieve the values from VIID */
			pi->vivld = G_FW_VIID_VIVLD(pi->viid);
			pi->vin =  G_FW_VIID_VIN(pi->viid);
		}

		t4_init_link_config(pi, pcaps, acaps, mdio_addr, port_type,
				    FW_PORT_MOD_TYPE_NA);
		j++;
	}
	return 0;
}
5459 
5460 /**
5461  * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
5462  * @adap: the adapter
5463  * @win: PCI-E Memory Window to use
5464  * @addr: address within adapter memory
5465  * @len: amount of memory to transfer
5466  * @hbuf: host memory buffer
5467  * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5468  *
5469  * Reads/writes an [almost] arbitrary memory region in the firmware: the
5470  * firmware memory address and host buffer must be aligned on 32-bit
5471  * boudaries; the length may be arbitrary.
5472  *
5473  * NOTES:
5474  *  1. The memory is transferred as a raw byte sequence from/to the
5475  *     firmware's memory.  If this memory contains data structures which
5476  *     contain multi-byte integers, it's the caller's responsibility to
5477  *     perform appropriate byte order conversions.
5478  *
5479  *  2. It is the Caller's responsibility to ensure that no other code
5480  *     uses the specified PCI-E Memory Window while this routine is
5481  *     using it.  This is typically done via the use of OS-specific
5482  *     locks, etc.
5483  */
5484 int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
5485 		      u32 len, void *hbuf, int dir)
5486 {
5487 	u32 pos, offset, resid;
5488 	u32 win_pf, mem_reg, mem_aperture, mem_base;
5489 	u32 *buf;
5490 
5491 	/* Argument sanity checks ...*/
5492 	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
5493 		return -EINVAL;
5494 	buf = (u32 *)hbuf;
5495 
5496 	/* It's convenient to be able to handle lengths which aren't a
5497 	 * multiple of 32-bits because we often end up transferring files to
5498 	 * the firmware.  So we'll handle that by normalizing the length here
5499 	 * and then handling any residual transfer at the end.
5500 	 */
5501 	resid = len & 0x3;
5502 	len -= resid;
5503 
5504 	/* Each PCI-E Memory Window is programmed with a window size -- or
5505 	 * "aperture" -- which controls the granularity of its mapping onto
5506 	 * adapter memory.  We need to grab that aperture in order to know
5507 	 * how to use the specified window.  The window is also programmed
5508 	 * with the base address of the Memory Window in BAR0's address
5509 	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
5510 	 * the address is relative to BAR0.
5511 	 */
5512 	mem_reg = t4_read_reg(adap,
5513 			      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
5514 						  win));
5515 	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
5516 	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
5517 
5518 	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
5519 
5520 	/* Calculate our initial PCI-E Memory Window Position and Offset into
5521 	 * that Window.
5522 	 */
5523 	pos = addr & ~(mem_aperture - 1);
5524 	offset = addr - pos;
5525 
5526 	/* Set up initial PCI-E Memory Window to cover the start of our
5527 	 * transfer.  (Read it back to ensure that changes propagate before we
5528 	 * attempt to use the new value.)
5529 	 */
5530 	t4_write_reg(adap,
5531 		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
5532 		     pos | win_pf);
5533 	t4_read_reg(adap,
5534 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
5535 
5536 	/* Transfer data to/from the adapter as long as there's an integral
5537 	 * number of 32-bit transfers to complete.
5538 	 *
5539 	 * A note on Endianness issues:
5540 	 *
5541 	 * The "register" reads and writes below from/to the PCI-E Memory
5542 	 * Window invoke the standard adapter Big-Endian to PCI-E Link
5543 	 * Little-Endian "swizzel."  As a result, if we have the following
5544 	 * data in adapter memory:
5545 	 *
5546 	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
5547 	 *     Address:      i+0  i+1  i+2  i+3
5548 	 *
5549 	 * Then a read of the adapter memory via the PCI-E Memory Window
5550 	 * will yield:
5551 	 *
5552 	 *     x = readl(i)
5553 	 *         31                  0
5554 	 *         [ b3 | b2 | b1 | b0 ]
5555 	 *
5556 	 * If this value is stored into local memory on a Little-Endian system
5557 	 * it will show up correctly in local memory as:
5558 	 *
5559 	 *     ( ..., b0, b1, b2, b3, ... )
5560 	 *
5561 	 * But on a Big-Endian system, the store will show up in memory
5562 	 * incorrectly swizzled as:
5563 	 *
5564 	 *     ( ..., b3, b2, b1, b0, ... )
5565 	 *
5566 	 * So we need to account for this in the reads and writes to the
5567 	 * PCI-E Memory Window below by undoing the register read/write
5568 	 * swizzels.
5569 	 */
5570 	while (len > 0) {
5571 		if (dir == T4_MEMORY_READ)
5572 			*buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
5573 								 mem_base +
5574 								 offset));
5575 		else
5576 			t4_write_reg(adap, mem_base + offset,
5577 				     (u32)cpu_to_le32(*buf++));
5578 		offset += sizeof(__be32);
5579 		len -= sizeof(__be32);
5580 
5581 		/* If we've reached the end of our current window aperture,
5582 		 * move the PCI-E Memory Window on to the next.  Note that
5583 		 * doing this here after "len" may be 0 allows us to set up
5584 		 * the PCI-E Memory Window for a possible final residual
5585 		 * transfer below ...
5586 		 */
5587 		if (offset == mem_aperture) {
5588 			pos += mem_aperture;
5589 			offset = 0;
5590 			t4_write_reg(adap,
5591 				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
5592 						    win), pos | win_pf);
5593 			t4_read_reg(adap,
5594 				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
5595 						    win));
5596 		}
5597 	}
5598 
5599 	/* If the original transfer had a length which wasn't a multiple of
5600 	 * 32-bits, now's where we need to finish off the transfer of the
5601 	 * residual amount.  The PCI-E Memory Window has already been moved
5602 	 * above (if necessary) to cover this final transfer.
5603 	 */
5604 	if (resid) {
5605 		union {
5606 			u32 word;
5607 			char byte[4];
5608 		} last;
5609 		unsigned char *bp;
5610 		int i;
5611 
5612 		if (dir == T4_MEMORY_READ) {
5613 			last.word = le32_to_cpu((__le32)t4_read_reg(adap,
5614 								    mem_base +
5615 								    offset));
5616 			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
5617 				bp[i] = last.byte[i];
5618 		} else {
5619 			last.word = *buf;
5620 			for (i = resid; i < 4; i++)
5621 				last.byte[i] = 0;
5622 			t4_write_reg(adap, mem_base + offset,
5623 				     (u32)cpu_to_le32(last.word));
5624 		}
5625 	}
5626 
5627 	return 0;
5628 }
5629 
5630 /**
5631  * t4_memory_rw_mtype -read/write EDC 0, EDC 1 or MC via PCIE memory window
5632  * @adap: the adapter
5633  * @win: PCI-E Memory Window to use
5634  * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
5635  * @maddr: address within indicated memory type
5636  * @len: amount of memory to transfer
5637  * @hbuf: host memory buffer
5638  * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5639  *
5640  * Reads/writes adapter memory using t4_memory_rw_addr().  This routine
5641  * provides an (memory type, address within memory type) interface.
5642  */
5643 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
5644 		       u32 len, void *hbuf, int dir)
5645 {
5646 	u32 mtype_offset;
5647 	u32 edc_size, mc_size;
5648 
5649 	/* Offset into the region of memory which is being accessed
5650 	 * MEM_EDC0 = 0
5651 	 * MEM_EDC1 = 1
5652 	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
5653 	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
5654 	 */
5655 	edc_size  = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
5656 	if (mtype != MEM_MC1) {
5657 		mtype_offset = (mtype * (edc_size * 1024 * 1024));
5658 	} else {
5659 		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
5660 						      A_MA_EXT_MEMORY0_BAR));
5661 		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
5662 	}
5663 
5664 	return t4_memory_rw_addr(adap, win,
5665 				 mtype_offset + maddr, len,
5666 				 hbuf, dir);
5667 }
5668