/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #include <linux/delay.h>
41 * t4_wait_op_done_val - wait until an operation is completed
42 * @adapter: the adapter performing the operation
43 * @reg: the register to check for completion
44 * @mask: a single-bit field within @reg that indicates completion
45 * @polarity: the value of the field when the operation is completed
46 * @attempts: number of check iterations
47 * @delay: delay in usecs between iterations
48 * @valp: where to store the value of the register at completion time
50 * Wait until an operation is completed by checking a bit in a register
51 * up to @attempts times. If @valp is not NULL the value of the register
52 * at the time it indicated completion is stored there. Returns 0 if the
53 * operation completes and -EAGAIN otherwise.
55 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
56 int polarity, int attempts, int delay, u32 *valp)
59 u32 val = t4_read_reg(adapter, reg);
61 if (!!(val & mask) == polarity) {
73 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
74 int polarity, int attempts, int delay)
76 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
81 * t4_set_reg_field - set a register field to a value
82 * @adapter: the adapter to program
83 * @addr: the register address
84 * @mask: specifies the portion of the register to modify
85 * @val: the new value for the register field
87 * Sets a register field specified by the supplied mask to the
90 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
93 u32 v = t4_read_reg(adapter, addr) & ~mask;
95 t4_write_reg(adapter, addr, v | val);
96 (void) t4_read_reg(adapter, addr); /* flush */
100 * t4_read_indirect - read indirectly addressed registers
102 * @addr_reg: register holding the indirect address
103 * @data_reg: register holding the value of the indirect register
104 * @vals: where the read register values are stored
105 * @nregs: how many indirect registers to read
106 * @start_idx: index of first indirect register to read
108 * Reads registers that are accessed indirectly through an address/data
111 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
112 unsigned int data_reg, u32 *vals,
113 unsigned int nregs, unsigned int start_idx)
116 t4_write_reg(adap, addr_reg, start_idx);
117 *vals++ = t4_read_reg(adap, data_reg);
123 * t4_write_indirect - write indirectly addressed registers
125 * @addr_reg: register holding the indirect addresses
126 * @data_reg: register holding the value for the indirect registers
127 * @vals: values to write
128 * @nregs: how many indirect registers to write
129 * @start_idx: address of first indirect register to write
131 * Writes a sequential block of registers that are accessed indirectly
132 * through an address/data register pair.
134 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
135 unsigned int data_reg, const u32 *vals,
136 unsigned int nregs, unsigned int start_idx)
139 t4_write_reg(adap, addr_reg, start_idx++);
140 t4_write_reg(adap, data_reg, *vals++);
145 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
146 * mechanism. This guarantees that we get the real value even if we're
147 * operating within a Virtual Machine and the Hypervisor is trapping our
148 * Configuration Space accesses.
150 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
152 u32 req = ENABLE | FUNCTION(adap->fn) | reg;
154 if (is_t4(adap->params.chip))
157 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
158 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
160 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
161 * Configuration Space read. (None of the other fields matter when
162 * ENABLE is 0 so a simple register write is easier than a
163 * read-modify-write via t4_set_reg_field().)
165 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
169 * t4_report_fw_error - report firmware error
172 * The adapter firmware can indicate error conditions to the host.
173 * If the firmware has indicated an error, print out the reason for
174 * the firmware error.
176 static void t4_report_fw_error(struct adapter *adap)
178 static const char *const reason[] = {
179 "Crash", /* PCIE_FW_EVAL_CRASH */
180 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
181 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
182 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
183 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
184 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
185 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
186 "Reserved", /* reserved */
190 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
191 if (pcie_fw & FW_PCIE_FW_ERR)
192 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
193 reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
197 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
199 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
202 for ( ; nflit; nflit--, mbox_addr += 8)
203 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
207 * Handle a FW assertion reported in a mailbox.
209 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
211 struct fw_debug_cmd asrt;
213 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
214 dev_alert(adap->pdev_dev,
215 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
216 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
217 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
220 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
222 dev_err(adap->pdev_dev,
223 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
224 (unsigned long long)t4_read_reg64(adap, data_reg),
225 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
226 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
227 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
228 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
229 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
230 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
231 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
235 * t4_wr_mbox_meat - send a command to FW through the given mailbox
237 * @mbox: index of the mailbox to use
238 * @cmd: the command to write
239 * @size: command length in bytes
240 * @rpl: where to optionally store the reply
241 * @sleep_ok: if true we may sleep while awaiting command completion
243 * Sends the given command to FW through the selected mailbox and waits
244 * for the FW to execute the command. If @rpl is not %NULL it is used to
245 * store the FW's reply to the command. The command and its optional
246 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
247 * to respond. @sleep_ok determines whether we may sleep while awaiting
248 * the response. If sleeping is allowed we use progressive backoff
251 * The return value is 0 on success or a negative errno on failure. A
252 * failure can happen either because we are not able to execute the
253 * command or FW executes it but signals an error. In the latter case
254 * the return value is the error code indicated by FW (negated).
256 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
257 void *rpl, bool sleep_ok)
259 static const int delay[] = {
260 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
265 int i, ms, delay_idx;
266 const __be64 *p = cmd;
267 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
268 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
270 if ((size & 15) || size > MBOX_LEN)
274 * If the device is off-line, as in EEH, commands will time out.
275 * Fail them early so we don't waste time waiting.
277 if (adap->pdev->error_state != pci_channel_io_normal)
280 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
281 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
282 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
284 if (v != MBOX_OWNER_DRV)
285 return v ? -EBUSY : -ETIMEDOUT;
287 for (i = 0; i < size; i += 8)
288 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
290 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
291 t4_read_reg(adap, ctl_reg); /* flush write */
296 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
298 ms = delay[delay_idx]; /* last element may repeat */
299 if (delay_idx < ARRAY_SIZE(delay) - 1)
305 v = t4_read_reg(adap, ctl_reg);
306 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
307 if (!(v & MBMSGVALID)) {
308 t4_write_reg(adap, ctl_reg, 0);
312 res = t4_read_reg64(adap, data_reg);
313 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
314 fw_asrt(adap, data_reg);
315 res = FW_CMD_RETVAL_V(EIO);
317 get_mbox_rpl(adap, rpl, size / 8, data_reg);
320 if (FW_CMD_RETVAL_G((int)res))
321 dump_mbox(adap, mbox, data_reg);
322 t4_write_reg(adap, ctl_reg, 0);
323 return -FW_CMD_RETVAL_G((int)res);
327 dump_mbox(adap, mbox, data_reg);
328 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
329 *(const u8 *)cmd, mbox);
330 t4_report_fw_error(adap);
335 * t4_mc_read - read from MC through backdoor accesses
337 * @addr: address of first byte requested
338 * @idx: which MC to access
339 * @data: 64 bytes of data containing the requested address
340 * @ecc: where to store the corresponding 64-bit ECC word
342 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
343 * that covers the requested address @addr. If @parity is not %NULL it
344 * is assigned the 64-bit ECC word for the read data.
346 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
349 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
350 u32 mc_bist_status_rdata, mc_bist_data_pattern;
352 if (is_t4(adap->params.chip)) {
353 mc_bist_cmd = MC_BIST_CMD;
354 mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
355 mc_bist_cmd_len = MC_BIST_CMD_LEN;
356 mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
357 mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
359 mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
360 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
361 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
362 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
363 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
366 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
368 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
369 t4_write_reg(adap, mc_bist_cmd_len, 64);
370 t4_write_reg(adap, mc_bist_data_pattern, 0xc);
371 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
373 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
377 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
379 for (i = 15; i >= 0; i--)
380 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
382 *ecc = t4_read_reg64(adap, MC_DATA(16));
388 * t4_edc_read - read from EDC through backdoor accesses
390 * @idx: which EDC to access
391 * @addr: address of first byte requested
392 * @data: 64 bytes of data containing the requested address
393 * @ecc: where to store the corresponding 64-bit ECC word
395 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
396 * that covers the requested address @addr. If @parity is not %NULL it
397 * is assigned the 64-bit ECC word for the read data.
399 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
402 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
403 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
405 if (is_t4(adap->params.chip)) {
406 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
407 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
408 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
409 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
411 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
414 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
415 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
416 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
417 edc_bist_cmd_data_pattern =
418 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
419 edc_bist_status_rdata =
420 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
423 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
425 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
426 t4_write_reg(adap, edc_bist_cmd_len, 64);
427 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
428 t4_write_reg(adap, edc_bist_cmd,
429 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
430 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
434 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
436 for (i = 15; i >= 0; i--)
437 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
439 *ecc = t4_read_reg64(adap, EDC_DATA(16));
445 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
447 * @win: PCI-E Memory Window to use
448 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
449 * @addr: address within indicated memory type
450 * @len: amount of memory to transfer
451 * @buf: host memory buffer
452 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
454 * Reads/writes an [almost] arbitrary memory region in the firmware: the
455 * firmware memory address and host buffer must be aligned on 32-bit
456 * boudaries; the length may be arbitrary. The memory is transferred as
457 * a raw byte sequence from/to the firmware's memory. If this memory
458 * contains data structures which contain multi-byte integers, it's the
459 * caller's responsibility to perform appropriate byte order conversions.
461 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
462 u32 len, __be32 *buf, int dir)
464 u32 pos, offset, resid, memoffset;
465 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
467 /* Argument sanity checks ...
472 /* It's convenient to be able to handle lengths which aren't a
473 * multiple of 32-bits because we often end up transferring files to
474 * the firmware. So we'll handle that by normalizing the length here
475 * and then handling any residual transfer at the end.
480 /* Offset into the region of memory which is being accessed
484 * MEM_MC0 = 2 -- For T5
485 * MEM_MC1 = 3 -- For T5
487 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
488 if (mtype != MEM_MC1)
489 memoffset = (mtype * (edc_size * 1024 * 1024));
491 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
492 MA_EXT_MEMORY1_BAR_A));
493 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
496 /* Determine the PCIE_MEM_ACCESS_OFFSET */
497 addr = addr + memoffset;
499 /* Each PCI-E Memory Window is programmed with a window size -- or
500 * "aperture" -- which controls the granularity of its mapping onto
501 * adapter memory. We need to grab that aperture in order to know
502 * how to use the specified window. The window is also programmed
503 * with the base address of the Memory Window in BAR0's address
504 * space. For T4 this is an absolute PCI-E Bus Address. For T5
505 * the address is relative to BAR0.
507 mem_reg = t4_read_reg(adap,
508 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
510 mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
511 mem_base = GET_PCIEOFST(mem_reg) << 10;
512 if (is_t4(adap->params.chip))
513 mem_base -= adap->t4_bar0;
514 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
516 /* Calculate our initial PCI-E Memory Window Position and Offset into
519 pos = addr & ~(mem_aperture-1);
522 /* Set up initial PCI-E Memory Window to cover the start of our
523 * transfer. (Read it back to ensure that changes propagate before we
524 * attempt to use the new value.)
527 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
530 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
532 /* Transfer data to/from the adapter as long as there's an integral
533 * number of 32-bit transfers to complete.
536 if (dir == T4_MEMORY_READ)
537 *buf++ = (__force __be32) t4_read_reg(adap,
540 t4_write_reg(adap, mem_base + offset,
541 (__force u32) *buf++);
542 offset += sizeof(__be32);
543 len -= sizeof(__be32);
545 /* If we've reached the end of our current window aperture,
546 * move the PCI-E Memory Window on to the next. Note that
547 * doing this here after "len" may be 0 allows us to set up
548 * the PCI-E Memory Window for a possible final residual
551 if (offset == mem_aperture) {
555 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
558 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
563 /* If the original transfer had a length which wasn't a multiple of
564 * 32-bits, now's where we need to finish off the transfer of the
565 * residual amount. The PCI-E Memory Window has already been moved
566 * above (if necessary) to cover this final transfer.
576 if (dir == T4_MEMORY_READ) {
577 last.word = (__force __be32) t4_read_reg(adap,
579 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
580 bp[i] = last.byte[i];
583 for (i = resid; i < 4; i++)
585 t4_write_reg(adap, mem_base + offset,
586 (__force u32) last.word);
/* serial EEPROM / VPD constants */
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define CHELSIO_VPD_UNIQUE_ID 0x82
600 * t4_seeprom_wp - enable/disable EEPROM write protection
601 * @adapter: the adapter
602 * @enable: whether to enable or disable write protection
604 * Enables or disables write protection on the serial EEPROM.
606 int t4_seeprom_wp(struct adapter *adapter, bool enable)
608 unsigned int v = enable ? 0xc : 0;
609 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
610 return ret < 0 ? ret : 0;
614 * get_vpd_params - read VPD parameters from VPD EEPROM
615 * @adapter: adapter to read
616 * @p: where to store the parameters
618 * Reads card parameters stored in VPD EEPROM.
620 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
622 u32 cclk_param, cclk_val;
626 unsigned int vpdr_len, kw_offset, id_len;
628 vpd = vmalloc(VPD_LEN);
632 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
636 /* The VPD shall have a unique identifier specified by the PCI SIG.
637 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
638 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
639 * is expected to automatically put this entry at the
640 * beginning of the VPD.
642 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
644 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
648 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
649 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
654 id_len = pci_vpd_lrdt_size(vpd);
658 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
660 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
665 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
666 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
667 if (vpdr_len + kw_offset > VPD_LEN) {
668 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
673 #define FIND_VPD_KW(var, name) do { \
674 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
676 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
680 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
683 FIND_VPD_KW(i, "RV");
684 for (csum = 0; i >= 0; i--)
688 dev_err(adapter->pdev_dev,
689 "corrupted VPD EEPROM, actual csum %u\n", csum);
694 FIND_VPD_KW(ec, "EC");
695 FIND_VPD_KW(sn, "SN");
696 FIND_VPD_KW(pn, "PN");
699 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
701 memcpy(p->ec, vpd + ec, EC_LEN);
703 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
704 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
706 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
707 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
711 * Ask firmware for the Core Clock since it knows how to translate the
712 * Reference Clock ('V2') VPD field into a Core Clock value ...
714 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
715 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
716 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
717 1, &cclk_param, &cclk_val);
728 /* serial flash and firmware constants */
730 SF_ATTEMPTS = 10, /* max retries for SF operations */
732 /* flash command opcodes */
733 SF_PROG_PAGE = 2, /* program page */
734 SF_WR_DISABLE = 4, /* disable writes */
735 SF_RD_STATUS = 5, /* read status register */
736 SF_WR_ENABLE = 6, /* enable writes */
737 SF_RD_DATA_FAST = 0xb, /* read flash */
738 SF_RD_ID = 0x9f, /* read ID */
739 SF_ERASE_SECTOR = 0xd8, /* erase sector */
741 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
745 * sf1_read - read data from the serial flash
746 * @adapter: the adapter
747 * @byte_cnt: number of bytes to read
748 * @cont: whether another operation will be chained
749 * @lock: whether to lock SF for PL access only
750 * @valp: where to store the read data
752 * Reads up to 4 bytes of data from the serial flash. The location of
753 * the read needs to be specified prior to calling this by issuing the
754 * appropriate commands to the serial flash.
756 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
761 if (!byte_cnt || byte_cnt > 4)
763 if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
765 cont = cont ? SF_CONT : 0;
766 lock = lock ? SF_LOCK : 0;
767 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
768 ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
770 *valp = t4_read_reg(adapter, SF_DATA);
775 * sf1_write - write data to the serial flash
776 * @adapter: the adapter
777 * @byte_cnt: number of bytes to write
778 * @cont: whether another operation will be chained
779 * @lock: whether to lock SF for PL access only
780 * @val: value to write
782 * Writes up to 4 bytes of data to the serial flash. The location of
783 * the write needs to be specified prior to calling this by issuing the
784 * appropriate commands to the serial flash.
786 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
789 if (!byte_cnt || byte_cnt > 4)
791 if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
793 cont = cont ? SF_CONT : 0;
794 lock = lock ? SF_LOCK : 0;
795 t4_write_reg(adapter, SF_DATA, val);
796 t4_write_reg(adapter, SF_OP, lock |
797 cont | BYTECNT(byte_cnt - 1) | OP_WR);
798 return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
802 * flash_wait_op - wait for a flash operation to complete
803 * @adapter: the adapter
804 * @attempts: max number of polls of the status register
805 * @delay: delay between polls in ms
807 * Wait for a flash operation to complete by polling the status register.
809 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
815 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
816 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
828 * t4_read_flash - read words from serial flash
829 * @adapter: the adapter
830 * @addr: the start address for the read
831 * @nwords: how many 32-bit words to read
832 * @data: where to store the read data
833 * @byte_oriented: whether to store data as bytes or as words
835 * Read the specified number of 32-bit words from the serial flash.
836 * If @byte_oriented is set the read data is stored as a byte array
837 * (i.e., big-endian), otherwise as 32-bit words in the platform's
840 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
841 unsigned int nwords, u32 *data, int byte_oriented)
845 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
848 addr = swab32(addr) | SF_RD_DATA_FAST;
850 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
851 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
854 for ( ; nwords; nwords--, data++) {
855 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
857 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
861 *data = (__force __u32) (htonl(*data));
867 * t4_write_flash - write up to a page of data to the serial flash
868 * @adapter: the adapter
869 * @addr: the start address to write
870 * @n: length of data to write in bytes
871 * @data: the data to write
873 * Writes up to a page of data (256 bytes) to the serial flash starting
874 * at the given address. All the data must be written to the same page.
876 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
877 unsigned int n, const u8 *data)
881 unsigned int i, c, left, val, offset = addr & 0xff;
883 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
886 val = swab32(addr) | SF_PROG_PAGE;
888 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
889 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
892 for (left = n; left; left -= c) {
894 for (val = 0, i = 0; i < c; ++i)
895 val = (val << 8) + *data++;
897 ret = sf1_write(adapter, c, c != left, 1, val);
901 ret = flash_wait_op(adapter, 8, 1);
905 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
907 /* Read the page to verify the write succeeded */
908 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
912 if (memcmp(data - n, (u8 *)buf + offset, n)) {
913 dev_err(adapter->pdev_dev,
914 "failed to correctly write the flash page at %#x\n",
921 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
926 * t4_get_fw_version - read the firmware version
927 * @adapter: the adapter
928 * @vers: where to place the version
930 * Reads the FW version from flash.
932 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
934 return t4_read_flash(adapter, FLASH_FW_START +
935 offsetof(struct fw_hdr, fw_ver), 1,
940 * t4_get_tp_version - read the TP microcode version
941 * @adapter: the adapter
942 * @vers: where to place the version
944 * Reads the TP microcode version from flash.
946 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
948 return t4_read_flash(adapter, FLASH_FW_START +
949 offsetof(struct fw_hdr, tp_microcode_ver),
953 /* Is the given firmware API compatible with the one the driver was compiled
956 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
959 /* short circuit if it's the exact same firmware version */
960 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
963 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
964 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
965 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
972 /* The firmware in the filesystem is usable, but should it be installed?
973 * This routine explains itself in detail if it indicates the filesystem
974 * firmware should be installed.
976 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
981 if (!card_fw_usable) {
982 reason = "incompatible or unusable";
987 reason = "older than the version supported with this driver";
994 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
995 "installing firmware %u.%u.%u.%u on card.\n",
996 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
997 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
998 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
999 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1004 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1005 const u8 *fw_data, unsigned int fw_size,
1006 struct fw_hdr *card_fw, enum dev_state state,
1009 int ret, card_fw_usable, fs_fw_usable;
1010 const struct fw_hdr *fs_fw;
1011 const struct fw_hdr *drv_fw;
1013 drv_fw = &fw_info->fw_hdr;
1015 /* Read the header of the firmware on the card */
1016 ret = -t4_read_flash(adap, FLASH_FW_START,
1017 sizeof(*card_fw) / sizeof(uint32_t),
1018 (uint32_t *)card_fw, 1);
1020 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
1022 dev_err(adap->pdev_dev,
1023 "Unable to read card's firmware header: %d\n", ret);
1027 if (fw_data != NULL) {
1028 fs_fw = (const void *)fw_data;
1029 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
1035 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
1036 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
1037 /* Common case: the firmware on the card is an exact match and
1038 * the filesystem one is an exact match too, or the filesystem
1039 * one is absent/incompatible.
1041 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
1042 should_install_fs_fw(adap, card_fw_usable,
1043 be32_to_cpu(fs_fw->fw_ver),
1044 be32_to_cpu(card_fw->fw_ver))) {
1045 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
1048 dev_err(adap->pdev_dev,
1049 "failed to install firmware: %d\n", ret);
1053 /* Installed successfully, update the cached header too. */
1054 memcpy(card_fw, fs_fw, sizeof(*card_fw));
1056 *reset = 0; /* already reset as part of load_fw */
1059 if (!card_fw_usable) {
1062 d = be32_to_cpu(drv_fw->fw_ver);
1063 c = be32_to_cpu(card_fw->fw_ver);
1064 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
1066 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
1068 "driver compiled with %d.%d.%d.%d, "
1069 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1071 FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
1072 FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
1073 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
1074 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
1075 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
1076 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1081 /* We're using whatever's on the card and it's known to be good. */
1082 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
1083 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
1090 * t4_flash_erase_sectors - erase a range of flash sectors
1091 * @adapter: the adapter
1092 * @start: the first sector to erase
1093 * @end: the last sector to erase
1095 * Erases the sectors in the given inclusive range.
1097 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1101 if (end >= adapter->params.sf_nsec)
1104 while (start <= end) {
1105 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1106 (ret = sf1_write(adapter, 4, 0, 1,
1107 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1108 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1109 dev_err(adapter->pdev_dev,
1110 "erase of flash sector %d failed, error %d\n",
1116 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
1121 * t4_flash_cfg_addr - return the address of the flash configuration file
1122 * @adapter: the adapter
1124 * Return the address within the flash where the Firmware Configuration
1127 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
1129 if (adapter->params.sf_size == 0x100000)
1130 return FLASH_FPGA_CFG_START;
1132 return FLASH_CFG_START;
1136 * t4_load_fw - download firmware
1137 * @adap: the adapter
1138 * @fw_data: the firmware image to write
1141 * Write the supplied firmware image to the card's serial flash.
1143 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1148 u8 first_page[SF_PAGE_SIZE];
1149 const __be32 *p = (const __be32 *)fw_data;
1150 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1151 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1152 unsigned int fw_img_start = adap->params.sf_fw_start;
1153 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
/* Sanity-check the image: non-empty, 512-byte granular, header-consistent
 * size, and within the flash region reserved for firmware. */
1156 dev_err(adap->pdev_dev, "FW image has no data\n");
1160 dev_err(adap->pdev_dev,
1161 "FW image size not multiple of 512 bytes\n");
1164 if (ntohs(hdr->len512) * 512 != size) {
1165 dev_err(adap->pdev_dev,
1166 "FW image size differs from size in FW header\n");
1169 if (size > FW_MAX_SIZE) {
1170 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
/* A valid image sums to 0xffffffff when added up as 32-bit words. */
1175 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1176 csum += ntohl(p[i]);
1178 if (csum != 0xffffffff) {
1179 dev_err(adap->pdev_dev,
1180 "corrupted firmware image, checksum %#x\n", csum);
1184 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1185 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1190 * We write the correct version at the end so the driver can see a bad
1191 * version if the FW write fails. Start by writing a copy of the
1192 * first page with a bad version.
1194 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1195 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1196 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
/* Write the remaining pages, then commit by flashing the real version. */
1200 addr = fw_img_start;
1201 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1202 addr += SF_PAGE_SIZE;
1203 fw_data += SF_PAGE_SIZE;
1204 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1209 ret = t4_write_flash(adap,
1210 fw_img_start + offsetof(struct fw_hdr, fw_ver),
1211 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1214 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1219 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1220 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1224 * t4_link_start - apply link configuration to MAC/PHY
1225 * @adap: the adapter
1226 * @mbox: the Firmware Mailbox to use
 * @port: the Port ID
1227 * @lc: the requested link configuration
1229 * Set up a port's MAC and PHY according to a desired link configuration.
1230 * - If the PHY can auto-negotiate first decide what to advertise, then
1231 * enable/disable auto-negotiation as desired, and reset.
1232 * - If the PHY does not auto-negotiate just reset it.
1233 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1234 * otherwise do it later based on the outcome of auto-negotiation.
1236 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1237 struct link_config *lc)
1239 struct fw_port_cmd c;
1240 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
/* Translate the requested pause settings into FW capability bits. */
1243 if (lc->requested_fc & PAUSE_RX)
1244 fc |= FW_PORT_CAP_FC_RX;
1245 if (lc->requested_fc & PAUSE_TX)
1246 fc |= FW_PORT_CAP_FC_TX;
1248 memset(&c, 0, sizeof(c));
1249 c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
1250 FW_CMD_EXEC_F | FW_PORT_CMD_PORTID(port));
1251 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* No autoneg capability: advertise the fixed supported set; autoneg
 * disabled: request the configured speed; otherwise advertise. */
1254 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1255 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1256 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1257 } else if (lc->autoneg == AUTONEG_DISABLE) {
1258 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1259 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1261 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1263 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1267 * t4_restart_aneg - restart autonegotiation
1268 * @adap: the adapter
1269 * @mbox: mbox to use for the FW command
1270 * @port: the port id
1272 * Restarts autonegotiation for the selected port.
1274 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1276 struct fw_port_cmd c;
1278 memset(&c, 0, sizeof(c));
1279 c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
1280 FW_CMD_EXEC_F | FW_PORT_CMD_PORTID(port));
1281 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1283 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1284 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1287 typedef void (*int_handler_t)(struct adapter *adap);
/* One row of an interrupt-decode table consumed by t4_handle_intr_status():
 * a status mask, an optional message, an optional stat counter, a fatality
 * flag and an optional callback. */
1290 unsigned int mask; /* bits to check in interrupt status */
1291 const char *msg; /* message to print or NULL */
1292 short stat_idx; /* stat counter to increment or -1 */
1293 unsigned short fatal; /* whether the condition reported is fatal */
1294 int_handler_t int_handler; /* platform-specific int handler */
1298 * t4_handle_intr_status - table driven interrupt handler
1299 * @adapter: the adapter that generated the interrupt
1300 * @reg: the interrupt status register to process
1301 * @acts: table of interrupt actions
1303 * A table driven interrupt handler that applies a set of masks to an
1304 * interrupt status word and performs the corresponding actions if the
1305 * interrupts described by the mask have occurred. The actions include
1306 * optionally emitting a warning or alert message. The table is terminated
1307 * by an entry specifying mask 0. Returns the number of fatal interrupt
1310 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1311 const struct intr_info *acts)
1314 unsigned int mask = 0;
1315 unsigned int status = t4_read_reg(adapter, reg);
/* Walk the table until the mask == 0 terminator entry. */
1317 for ( ; acts->mask; ++acts) {
1318 if (!(status & acts->mask))
/* Fatal conditions are always alerted; non-fatal ones are rate-limited. */
1322 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1323 status & acts->mask);
1324 } else if (acts->msg && printk_ratelimit())
1325 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1326 status & acts->mask);
1327 if (acts->int_handler)
1328 acts->int_handler(adapter);
1332 if (status) /* clear processed interrupts */
1333 t4_write_reg(adapter, reg, status);
1338 * Interrupt handler for the PCIE module.
1340 static void pcie_intr_handler(struct adapter *adapter)
1342 static const struct intr_info sysbus_intr_info[] = {
1343 { RNPP, "RXNP array parity error", -1, 1 },
1344 { RPCP, "RXPC array parity error", -1, 1 },
1345 { RCIP, "RXCIF array parity error", -1, 1 },
1346 { RCCP, "Rx completions control array parity error", -1, 1 },
1347 { RFTP, "RXFT array parity error", -1, 1 },
1350 static const struct intr_info pcie_port_intr_info[] = {
1351 { TPCP, "TXPC array parity error", -1, 1 },
1352 { TNPP, "TXNP array parity error", -1, 1 },
1353 { TFTP, "TXFT array parity error", -1, 1 },
1354 { TCAP, "TXCA array parity error", -1, 1 },
1355 { TCIP, "TXCIF array parity error", -1, 1 },
1356 { RCAP, "RXCA array parity error", -1, 1 },
1357 { OTDD, "outbound request TLP discarded", -1, 1 },
1358 { RDPE, "Rx data parity error", -1, 1 },
1359 { TDUE, "Tx uncorrectable data error", -1, 1 },
1362 static const struct intr_info pcie_intr_info[] = {
1363 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1364 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1365 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1366 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1367 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1368 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1369 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1370 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1371 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1372 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1373 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1374 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1375 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1376 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1377 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1378 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1379 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1380 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1381 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1382 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1383 { FIDPERR, "PCI FID parity error", -1, 1 },
1384 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1385 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1386 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1387 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1388 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1389 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1390 { PCIESINT, "PCI core secondary fault", -1, 1 },
1391 { PCIEPINT, "PCI core primary fault", -1, 1 },
1392 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
/* T5 has a different PCIE cause-bit layout, hence a separate table. */
1396 static struct intr_info t5_pcie_intr_info[] = {
1397 { MSTGRPPERR, "Master Response Read Queue parity error",
1399 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1400 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1401 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1402 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1403 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1404 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1405 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1407 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1409 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1410 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1411 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1412 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1413 { DREQWRPERR, "PCI DMA channel write request parity error",
1415 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1416 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
/* NOTE(review): message says "count" but HREQWRPERR is the HMA
 * write-request parity bit -- confirm the wording. */
1417 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
1418 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1419 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1420 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1421 { FIDPERR, "PCI FID parity error", -1, 1 },
1422 { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
1423 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1424 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1425 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1427 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1428 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1429 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1430 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1431 { READRSPERR, "Outbound read error", -1, 0 },
/* T4 decodes three cause registers; T5 has a single consolidated one. */
1437 if (is_t4(adapter->params.chip))
1438 fat = t4_handle_intr_status(adapter,
1439 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1441 t4_handle_intr_status(adapter,
1442 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1443 pcie_port_intr_info) +
1444 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1447 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1451 t4_fatal_err(adapter);
1455 * TP interrupt handler.
/* Any TP parity error or Tx-page exhaustion is treated as fatal. */
1457 static void tp_intr_handler(struct adapter *adapter)
1459 static const struct intr_info tp_intr_info[] = {
1460 { 0x3fffffff, "TP parity error", -1, 1 },
1461 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1465 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1466 t4_fatal_err(adapter);
1470 * SGE interrupt handler.
1472 static void sge_intr_handler(struct adapter *adapter)
1476 static const struct intr_info sge_intr_info[] = {
1477 { ERR_CPL_EXCEED_IQE_SIZE,
1478 "SGE received CPL exceeding IQE size", -1, 1 },
1479 { ERR_INVALID_CIDX_INC,
1480 "SGE GTS CIDX increment too large", -1, 0 },
1481 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
/* Doorbell FIFO conditions are handled by callbacks, not messages. */
1482 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1483 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1484 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1485 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1486 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1487 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1489 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1491 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1493 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1495 { ERR_ING_CTXT_PRIO,
1496 "SGE too many priority ingress contexts", -1, 0 },
1497 { ERR_EGR_CTXT_PRIO,
1498 "SGE too many priority egress contexts", -1, 0 },
1499 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1500 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
/* INT_CAUSE1/2 hold a 64-bit parity-error vector; any set bit is fatal. */
1504 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1505 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1507 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1508 (unsigned long long)v);
1509 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1510 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1513 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1515 t4_fatal_err(adapter);
1519 * CIM interrupt handler.
1521 static void cim_intr_handler(struct adapter *adapter)
1523 static const struct intr_info cim_intr_info[] = {
1524 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1525 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1526 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1527 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1528 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1529 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1530 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
/* Illegal uP accesses to the various address spaces. */
1533 static const struct intr_info cim_upintr_info[] = {
1534 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1535 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1536 { ILLWRINT, "CIM illegal write", -1, 1 },
1537 { ILLRDINT, "CIM illegal read", -1, 1 },
1538 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1539 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1540 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1541 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1542 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1543 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1544 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1545 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1546 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1547 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1548 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1549 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1550 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1551 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1552 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1553 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1554 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1555 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1556 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1557 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1558 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1559 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1560 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1561 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
/* If the firmware flagged its own error, report it before decoding
 * the CIM cause registers. */
1567 if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
1568 t4_report_fw_error(adapter);
1570 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1572 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1575 t4_fatal_err(adapter);
1579 * ULP RX interrupt handler.
/* All ULP-RX context/parity errors are fatal. */
1581 static void ulprx_intr_handler(struct adapter *adapter)
1583 static const struct intr_info ulprx_intr_info[] = {
1584 { 0x1800000, "ULPRX context error", -1, 1 },
1585 { 0x7fffff, "ULPRX parity error", -1, 1 },
1589 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1590 t4_fatal_err(adapter);
1594 * ULP TX interrupt handler.
/* Per-channel PBL bound violations plus a parity-error catch-all. */
1596 static void ulptx_intr_handler(struct adapter *adapter)
1598 static const struct intr_info ulptx_intr_info[] = {
1599 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1601 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1603 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1605 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1607 { 0xfffffff, "ULPTX parity error", -1, 1 },
1611 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1612 t4_fatal_err(adapter);
1616 * PM TX interrupt handler.
/* Payload Manager TX: oversized pcmds, framing and parity errors. */
1618 static void pmtx_intr_handler(struct adapter *adapter)
1620 static const struct intr_info pmtx_intr_info[] = {
1621 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1622 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1623 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1624 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1625 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1626 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1627 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1628 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1629 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1633 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1634 t4_fatal_err(adapter);
1638 * PM RX interrupt handler.
/* Payload Manager RX: framing and parity errors, all fatal. */
1640 static void pmrx_intr_handler(struct adapter *adapter)
1642 static const struct intr_info pmrx_intr_info[] = {
1643 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1644 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1645 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1646 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1647 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1648 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1652 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1653 t4_fatal_err(adapter);
1657 * CPL switch interrupt handler.
/* CPL switch errors (overflow, framing, parity) are all fatal. */
1659 static void cplsw_intr_handler(struct adapter *adapter)
1661 static const struct intr_info cplsw_intr_info[] = {
1662 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1663 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1664 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1665 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1666 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1667 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1671 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1672 t4_fatal_err(adapter);
1676 * LE interrupt handler.
/* Lookup Engine: LIP misses are non-fatal; parity/command errors are. */
1678 static void le_intr_handler(struct adapter *adap)
1680 static const struct intr_info le_intr_info[] = {
1681 { LIPMISS, "LE LIP miss", -1, 0 },
1682 { LIP0, "LE 0 LIP error", -1, 0 },
1683 { PARITYERR, "LE parity error", -1, 1 },
1684 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1685 { REQQPARERR, "LE request queue parity error", -1, 1 },
1689 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1694 * MPS interrupt handler.
/* MPS has one cause register per sub-block; decode each in turn. */
1696 static void mps_intr_handler(struct adapter *adapter)
1698 static const struct intr_info mps_rx_intr_info[] = {
1699 { 0xffffff, "MPS Rx parity error", -1, 1 },
1702 static const struct intr_info mps_tx_intr_info[] = {
1703 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1704 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1705 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1706 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1707 { BUBBLE, "MPS Tx underflow", -1, 1 },
1708 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1709 { FRMERR, "MPS Tx framing error", -1, 1 },
1712 static const struct intr_info mps_trc_intr_info[] = {
1713 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1714 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1715 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1718 static const struct intr_info mps_stat_sram_intr_info[] = {
1719 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1722 static const struct intr_info mps_stat_tx_intr_info[] = {
1723 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1726 static const struct intr_info mps_stat_rx_intr_info[] = {
1727 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1730 static const struct intr_info mps_cls_intr_info[] = {
1731 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1732 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1733 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1739 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1741 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1743 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1744 mps_trc_intr_info) +
1745 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1746 mps_stat_sram_intr_info) +
1747 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1748 mps_stat_tx_intr_info) +
1749 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1750 mps_stat_rx_intr_info) +
1751 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
/* Clear the consolidated top-level cause bits and flush the write. */
1754 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1755 RXINT | TXINT | STATINT);
1756 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1758 t4_fatal_err(adapter);
1761 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1764 * EDC/MC interrupt handler.
/* @idx selects the memory: MEM_EDC0/MEM_EDC1/MEM_MC(/MEM_MC1 on T5). */
1766 static void mem_intr_handler(struct adapter *adapter, int idx)
1768 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
1770 unsigned int addr, cnt_addr, v;
/* Pick the INT_CAUSE / ECC_STATUS register pair for the chosen memory;
 * the MC registers moved between T4 and T5. */
1772 if (idx <= MEM_EDC1) {
1773 addr = EDC_REG(EDC_INT_CAUSE, idx);
1774 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1775 } else if (idx == MEM_MC) {
1776 if (is_t4(adapter->params.chip)) {
1777 addr = MC_INT_CAUSE;
1778 cnt_addr = MC_ECC_STATUS;
1780 addr = MC_P_INT_CAUSE;
1781 cnt_addr = MC_P_ECC_STATUS;
1784 addr = MC_REG(MC_P_INT_CAUSE, 1);
1785 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
1788 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1789 if (v & PERR_INT_CAUSE)
1790 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
/* Correctable ECC: report (rate-limited) and reset the error counter. */
1792 if (v & ECC_CE_INT_CAUSE) {
1793 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1795 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1796 if (printk_ratelimit())
1797 dev_warn(adapter->pdev_dev,
1798 "%u %s correctable ECC data error%s\n",
1799 cnt, name[idx], cnt > 1 ? "s" : "");
1801 if (v & ECC_UE_INT_CAUSE)
1802 dev_alert(adapter->pdev_dev,
1803 "%s uncorrectable ECC data error\n", name[idx]);
/* Parity and uncorrectable ECC errors are fatal; clear the cause first. */
1805 t4_write_reg(adapter, addr, v);
1806 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1807 t4_fatal_err(adapter);
1811 * MA interrupt handler.
1813 static void ma_intr_handler(struct adapter *adap)
1815 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
/* Parity error: dump the parity status (T5 has a second status reg). */
1817 if (status & MEM_PERR_INT_CAUSE) {
1818 dev_alert(adap->pdev_dev,
1819 "MA parity error, parity status %#x\n",
1820 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1821 if (is_t5(adap->params.chip))
1822 dev_alert(adap->pdev_dev,
1823 "MA parity error, parity status %#x\n",
1825 MA_PARITY_ERROR_STATUS2));
/* Wrap-around: decode which client wrapped and the faulting address. */
1827 if (status & MEM_WRAP_INT_CAUSE) {
1828 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1829 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1830 "client %u to address %#x\n",
1831 MEM_WRAP_CLIENT_NUM_GET(v),
1832 MEM_WRAP_ADDRESS_GET(v) << 4);
1834 t4_write_reg(adap, MA_INT_CAUSE, status);
1839 * SMB interrupt handler.
/* SMBus block: all FIFO parity errors are fatal. */
1841 static void smb_intr_handler(struct adapter *adap)
1843 static const struct intr_info smb_intr_info[] = {
1844 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1845 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1846 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1850 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1855 * NC-SI interrupt handler.
/* NC-SI sideband interface: parity errors only, all fatal. */
1857 static void ncsi_intr_handler(struct adapter *adap)
1859 static const struct intr_info ncsi_intr_info[] = {
1860 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1861 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1862 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1863 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1867 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1872 * XGMAC interrupt handler.
1874 static void xgmac_intr_handler(struct adapter *adap, int port)
1876 u32 v, int_cause_reg;
1878 if (is_t4(adap->params.chip))
1879 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1881 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1883 v = t4_read_reg(adap, int_cause_reg);
1885 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1889 if (v & TXFIFO_PRTY_ERR)
1890 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1892 if (v & RXFIFO_PRTY_ERR)
1893 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1895 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1900 * PL interrupt handler.
/* PL (top-level) block: fatal parity and VFID map parity errors. */
1902 static void pl_intr_handler(struct adapter *adap)
1904 static const struct intr_info pl_intr_info[] = {
1905 { FATALPERR, "T4 fatal parity error", -1, 1 },
1906 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1910 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1914 #define PF_INTR_MASK (PFSW)
1915 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1916 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1917 CPL_SWITCH | SGE | ULP_TX)
1920 * t4_slow_intr_handler - control path interrupt handler
1921 * @adapter: the adapter
1923 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1924 * The designation 'slow' is because it involves register reads, while
1925 * data interrupts typically don't involve any MMIOs.
1927 int t4_slow_intr_handler(struct adapter *adapter)
/* Read the top-level cause and bail if no global bit we own is set. */
1929 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1931 if (!(cause & GLBL_INTR_MASK))
/* Dispatch each asserted cause bit to its per-module handler. */
1934 cim_intr_handler(adapter);
1936 mps_intr_handler(adapter);
1938 ncsi_intr_handler(adapter);
1940 pl_intr_handler(adapter);
1942 smb_intr_handler(adapter);
1944 xgmac_intr_handler(adapter, 0);
1946 xgmac_intr_handler(adapter, 1);
1947 if (cause & XGMAC_KR0)
1948 xgmac_intr_handler(adapter, 2);
1949 if (cause & XGMAC_KR1)
1950 xgmac_intr_handler(adapter, 3);
1952 pcie_intr_handler(adapter);
1954 mem_intr_handler(adapter, MEM_MC);
1955 if (!is_t4(adapter->params.chip) && (cause & MC1))
1956 mem_intr_handler(adapter, MEM_MC1);
1958 mem_intr_handler(adapter, MEM_EDC0);
1960 mem_intr_handler(adapter, MEM_EDC1);
1962 le_intr_handler(adapter);
1964 tp_intr_handler(adapter);
1966 ma_intr_handler(adapter);
1968 pmtx_intr_handler(adapter);
1970 pmrx_intr_handler(adapter);
1972 ulprx_intr_handler(adapter);
1973 if (cause & CPL_SWITCH)
1974 cplsw_intr_handler(adapter);
1976 sge_intr_handler(adapter);
1978 ulptx_intr_handler(adapter);
1980 /* Clear the interrupts just processed for which we are the master. */
1981 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1982 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1987 * t4_intr_enable - enable interrupts
1988 * @adapter: the adapter whose interrupts should be enabled
1990 * Enable PF-specific interrupts for the calling function and the top-level
1991 * interrupt concentrator for global interrupts. Interrupts are already
1992 * enabled at each module, here we just enable the roots of the interrupt
1995 * Note: this function should be called only when the driver manages
1996 * non PF-specific interrupts from the various HW modules. Only one PCI
1997 * function at a time should be doing this.
1999 void t4_intr_enable(struct adapter *adapter)
2001 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
/* Unmask the SGE error conditions this driver decodes in
 * sge_intr_handler(). */
2003 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
2004 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
2005 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
2006 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
2007 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
2008 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
2009 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
2010 DBFIFO_HP_INT | DBFIFO_LP_INT |
/* Enable the PF's own interrupt line and add it to the global map. */
2012 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
2013 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
2017 * t4_intr_disable - disable interrupts
2018 * @adapter: the adapter whose interrupts should be disabled
2020 * Disable interrupts. We only disable the top-level interrupt
2021 * concentrators. The caller must be a PCI function managing global
2024 void t4_intr_disable(struct adapter *adapter)
2026 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2028 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
2029 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
2033 * hash_mac_addr - return the hash value of a MAC address
2034 * @addr: the 48-bit Ethernet MAC address
2036 * Hashes a MAC address according to the hash function used by HW inexact
2037 * (hash) address matching.
2039 static int hash_mac_addr(const u8 *addr)
/* Pack the upper and lower three bytes of the MAC into two 24-bit words,
 * which are then XOR-folded down to the hash value. */
2041 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2042 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2050 * t4_config_rss_range - configure a portion of the RSS mapping table
2051 * @adapter: the adapter
2052 * @mbox: mbox to use for the FW command
2053 * @viid: virtual interface whose RSS subtable is to be written
2054 * @start: start entry in the table to write
2055 * @n: how many table entries to write
2056 * @rspq: values for the response queue lookup table
2057 * @nrspq: number of values in @rspq
2059 * Programs the selected part of the VI's RSS mapping table with the
2060 * provided values. If @nrspq < @n the supplied values are used repeatedly
2061 * until the full table range is populated.
2063 * The caller must ensure the values in @rspq are in the range allowed for
2066 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2067 int start, int n, const u16 *rspq, unsigned int nrspq)
2070 const u16 *rsp = rspq;
2071 const u16 *rsp_end = rspq + nrspq;
2072 struct fw_rss_ind_tbl_cmd cmd;
2074 memset(&cmd, 0, sizeof(cmd));
2075 cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
2076 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
2077 FW_RSS_IND_TBL_CMD_VIID(viid));
2078 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2080 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2082 int nq = min(n, 32);
2083 __be32 *qp = &cmd.iq0_to_iq2;
2085 cmd.niqid = htons(nq);
2086 cmd.startidx = htons(start);
/* Pack three queue ids per 32-bit word, recycling @rspq whenever the
 * cursor reaches @rsp_end (per the kernel-doc contract above). */
2094 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2095 if (++rsp >= rsp_end)
2097 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2098 if (++rsp >= rsp_end)
2100 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2101 if (++rsp >= rsp_end)
2108 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2116 * t4_config_glbl_rss - configure the global RSS mode
2117 * @adapter: the adapter
2118 * @mbox: mbox to use for the FW command
2119 * @mode: global RSS mode
2120 * @flags: mode-specific flags
2122 * Sets the global RSS mode.
2124 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2127 struct fw_rss_glb_config_cmd c;
2129 memset(&c, 0, sizeof(c));
2130 c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
2131 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
2132 c.retval_len16 = htonl(FW_LEN16(c));
/* Only MANUAL and BASICVIRTUAL modes build a command body here;
 * NOTE(review): other modes are presumably rejected -- confirm. */
2133 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2134 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2135 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2136 c.u.basicvirtual.mode_pkd =
2137 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2138 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2141 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2145 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2146 * @adap: the adapter
2147 * @v4: holds the TCP/IP counter values
2148 * @v6: holds the TCP/IPv6 counter values
2150 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2151 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2153 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2154 struct tp_tcp_stats *v6)
2156 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
/* Helper macros to index the MIB window relative to OUT_RST and to
 * reassemble 64-bit counters from their HI/LO halves. */
2158 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2159 #define STAT(x) val[STAT_IDX(x)]
2160 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2163 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2164 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2165 v4->tcpOutRsts = STAT(OUT_RST);
2166 v4->tcpInSegs = STAT64(IN_SEG);
2167 v4->tcpOutSegs = STAT64(OUT_SEG);
2168 v4->tcpRetransSegs = STAT64(RXT_SEG);
/* Same layout for IPv6, starting at the V6OUT_RST base. */
2171 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2172 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2173 v6->tcpOutRsts = STAT(OUT_RST);
2174 v6->tcpInSegs = STAT64(IN_SEG);
2175 v6->tcpOutSegs = STAT64(OUT_SEG);
2176 v6->tcpRetransSegs = STAT64(RXT_SEG);
2184 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2185 * @adap: the adapter
2186 * @mtus: where to store the MTU values
2187 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2189 * Reads the HW path MTU table.
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
	for (i = 0; i < NMTUS; ++i) {
		/* Index 0xff appears to select read-back of entry i via the
		 * value field -- NOTE(review): confirm against TP_MTU_TABLE
		 * register documentation.
		 */
		t4_write_reg(adap, TP_MTU_TABLE,
			     MTUINDEX(0xff) | MTUVALUE(i));
		v = t4_read_reg(adap, TP_MTU_TABLE);
		mtus[i] = MTUVALUE_GET(v);
		/* Optionally also return the base-2 log width of the entry. */
		mtu_log[i] = MTUWIDTH_GET(v);
2207 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2208 * @adap: the adapter
2209 * @addr: the indirect TP register address
2210 * @mask: specifies the field within the register to modify
2211 * @val: new value for the field
2213 * Sets a field of an indirect TP register to the given value.
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
	/* Select the indirect TP register ... */
	t4_write_reg(adap, TP_PIO_ADDR, addr);
	/* ... then read-modify-write: keep bits outside @mask, set @val. */
	val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, TP_PIO_DATA, val);
2224 * init_cong_ctrl - initialize congestion control parameters
2225 * @a: the alpha values for congestion control
2226 * @b: the beta values for congestion control
2228 * Initialize the congestion control parameters.
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
	/* Alpha (additive increment) values: flat for small windows. */
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	/* Beta (multiplicative decrement shift) values: grow stepwise
	 * with the congestion window index.
	 */
	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2267 /* The minimum additive increment value for the congestion control table */
2268 #define CC_MIN_INCR 2U
2271 * t4_load_mtus - write the MTU and congestion control HW tables
2272 * @adap: the adapter
2273 * @mtus: the values for the MTU table
2274 * @alpha: the values for the congestion control alpha parameter
2275 * @beta: the values for the congestion control beta parameter
2277 * Write the HW MTU table with the supplied MTUs and the high-speed
2278 * congestion control table with the supplied alpha, beta, and MTUs.
2279 * We write the two tables together because the additive increments
2280 * depend on the MTUs.
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
	/* Expected average packets per congestion-control window index. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
		/* Program MTU table entry i with the value and its width. */
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));

		/* For each window, derive the additive increment from the
		 * MTU payload (mtu - 40 assumes TCP/IP headers) scaled by
		 * alpha and the expected packet count.
		 */
		for (w = 0; w < NCCTRL_WIN; ++w) {
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
2315 * get_mps_bg_map - return the buffer groups associated with a port
2316 * @adap: the adapter
2317 * @idx: the port index
2319 * Returns a bitmap indicating which MPS buffer groups are associated
2320 * with the given port. Bit i is set if buffer group i is used by the
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
	/* NUMPORTS field of MPS_CMN_CTL encodes the port configuration. */
	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));

	/* Single-port config: port 0 owns all four buffer groups. */
	return idx == 0 ? 0xf : 0;
	/* Dual-port config: each port owns two adjacent buffer groups. */
	return idx < 2 ? (3 << (2 * idx)) : 0;
2335 * t4_get_port_type_description - return Port Type string description
2336 * @port_type: firmware Port Type enumeration
const char *t4_get_port_type_description(enum fw_port_type port_type)
	/* Table indexed directly by the firmware port-type enumeration. */
	static const char *const port_type_description[] = {
	/* Bounds-check before indexing; unknown types fall through. */
	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
2365 * t4_get_port_stats - collect port statistics
2366 * @adap: the adapter
2367 * @idx: the port index
2368 * @p: the stats structure to fill
2370 * Collect statistics related to the given port from HW.
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
	/* Which MPS buffer groups belong to this port (for BG counters). */
	u32 bgmap = get_mps_bg_map(adap, idx);

/* Per-port 64-bit statistic; the register base differs on T4 vs T5. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
/* Common (non-per-port) 64-bit statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	/* --- TX counters --- */
	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	/* TX frame-size histogram. */
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	/* TX pause / per-priority pause (PPP) counters. */
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	/* --- RX counters --- */
	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	/* RX error counters. */
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	/* RX frame-size histogram. */
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	/* RX pause / per-priority pause (PPP) counters. */
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* Per-buffer-group drop/truncate counters, reported only for the
	 * buffer groups this port actually owns (see bgmap above).
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2448 * t4_wol_magic_enable - enable/disable magic packet WoL
2449 * @adap: the adapter
2450 * @port: the physical port index
2451 * @addr: MAC address expected in magic packets, %NULL to disable
2453 * Enables/disables magic packet wake-on-LAN for the selected port.
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* The magic-MAC-ID and port-config registers live at different
	 * addresses on T4 (XGMAC) vs T5 (MAC) -- pick the right set.
	 */
	if (is_t4(adap->params.chip)) {
		mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
		mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
	/* Program the expected magic-packet MAC: bytes 2-5 in the LO
	 * register, bytes 0-1 in the HI register.
	 */
	t4_write_reg(adap, mag_id_reg_l,
		     (addr[2] << 24) | (addr[3] << 16) |
		     (addr[4] << 8) | addr[5]);
	t4_write_reg(adap, mag_id_reg_h,
		     (addr[0] << 8) | addr[1]);

	/* Enable magic-packet WoL when an address was given, else disable. */
	t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
			 addr ? MAGICEN : 0);
2482 * t4_wol_pat_enable - enable/disable pattern-based WoL
2483 * @adap: the adapter
2484 * @port: the physical port index
2485 * @map: bitmap of which HW pattern filters to set
2486 * @mask0: byte mask for bytes 0-63 of a packet
2487 * @mask1: byte mask for bytes 64-127 of a packet
2488 * @crc: Ethernet CRC for selected bytes
2489 * @enable: enable/disable switch
2491 * Sets the pattern filters indicated in @map to mask out the bytes
2492 * specified in @mask0/@mask1 in received packets and compare the CRC of
2493 * the resulting packet against @crc. If @enable is %true pattern-based
2494 * WoL is enabled, otherwise disabled.
2496 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2497 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2502 if (is_t4(adap->params.chip))
2503 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2505 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2508 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
2514 #define EPIO_REG(name) \
2515 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2516 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2518 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2519 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2520 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2522 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2526 /* write byte masks */
2527 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2528 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2529 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2530 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2534 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2535 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2536 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2537 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2542 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2546 /* t4_mk_filtdelwr - create a delete filter WR
2547 * @ftid: the filter ID
2548 * @wr: the filter work request to populate
2549 * @qid: ingress queue to receive the delete notification
2551 * Creates a filter work request to delete the supplied filter. If @qid is
2552 * negative the delete notification is suppressed.
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	/* Work-request length is expressed in 16-byte units. */
	wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
	/* A negative qid suppresses the delete-completion notification. */
	wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
			      FW_FILTER_WR_NOREPLY_V(qid < 0));
	wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
	/* Ingress queue that receives the delete reply (when enabled). */
	wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
/* Initialize a simple firmware command structure: set the opcode for
 * FW_<cmd>_CMD, mark it as a request with the given READ/WRITE direction,
 * and fill in the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* LDST command targeting the firmware address space. */
	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	/* Address/value pair to write. */
	c.u.addrval.addr = htonl(addr);
	c.u.addrval.val = htonl(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2589 * t4_mdio_rd - read a PHY register through MDIO
2590 * @adap: the adapter
2591 * @mbox: mailbox to use for the FW command
2592 * @phy_addr: the PHY address
2593 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2594 * @reg: the register to read
2595 * @valp: where to store the value
2597 * Issues a FW command through the given mailbox to read a PHY register.
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp)
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* LDST READ command targeting the MDIO address space. */
	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
		FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	/* PHY address and MMD select the device; raddr is the register. */
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
				   FW_LDST_CMD_MMD_V(mmd));
	c.u.mdio.raddr = htons(reg);

	/* The reply is written back into the same command buffer. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		*valp = ntohs(c.u.mdio.rval);
2620 * t4_mdio_wr - write a PHY register through MDIO
2621 * @adap: the adapter
2622 * @mbox: mailbox to use for the FW command
2623 * @phy_addr: the PHY address
2624 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2625 * @reg: the register to write
2626 * @valp: value to write
2628 * Issues a FW command through the given mailbox to write a PHY register.
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val)
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* LDST WRITE command targeting the MDIO address space. */
	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
		FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	/* PHY address and MMD select the device; raddr/rval carry the
	 * target register and the value to write.
	 */
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
				   FW_LDST_CMD_MMD_V(mmd));
	c.u.mdio.raddr = htons(reg);
	c.u.mdio.rval = htons(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2648 * t4_sge_decode_idma_state - decode the idma state
2649 * @adap: the adapter
2650 * @state: the state idma is stuck in
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
	/* Human-readable names for the T4 SGE IDMA state machine,
	 * indexed by the raw state value.
	 */
	static const char * const t4_decode[] = {
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	/* T5 state machine has a slightly different set of states. */
	static const char * const t5_decode[] = {
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		SGE_DEBUG_DATA_LOW_INDEX_2,
		SGE_DEBUG_DATA_LOW_INDEX_3,
		SGE_DEBUG_DATA_HIGH_INDEX_10,
	const char **sge_idma_decode;
	int sge_idma_decode_nstates;

	/* Select the decode table for the chip generation. */
	if (is_t4(adapter->params.chip)) {
		sge_idma_decode = (const char **)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		sge_idma_decode = (const char **)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);

	/* Log the symbolic state when in range, else the raw value. */
	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2754 * t4_fw_hello - establish communication with FW
2755 * @adap: the adapter
2756 * @mbox: mailbox to use for the FW command
2757 * @evt_mbox: mailbox to receive async FW events
2758 * @master: specifies the caller's willingness to be the device master
2759 * @state: returns the current device state (if non-NULL)
2761 * Issues a command to establish communication with FW. Returns either
2762 * an error (negative integer) or the mailbox of the Master PF.
2764 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2765 enum dev_master master, enum dev_state *state)
2768 struct fw_hello_cmd c;
2770 unsigned int master_mbox;
2771 int retries = FW_CMD_HELLO_RETRIES;
2774 memset(&c, 0, sizeof(c));
2775 INIT_CMD(c, HELLO, WRITE);
2776 c.err_to_clearinit = htonl(
2777 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
2778 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
2779 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
2780 FW_HELLO_CMD_MBMASTER_M) |
2781 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
2782 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
2783 FW_HELLO_CMD_CLEARINIT_F);
2786 * Issue the HELLO command to the firmware. If it's not successful
2787 * but indicates that we got a "busy" or "timeout" condition, retry
2788 * the HELLO until we exhaust our retry limit. If we do exceed our
2789 * retry limit, check to see if the firmware left us any error
2790 * information and report that if so.
2792 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2794 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2796 if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
2797 t4_report_fw_error(adap);
2801 v = ntohl(c.err_to_clearinit);
2802 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
2804 if (v & FW_HELLO_CMD_ERR_F)
2805 *state = DEV_STATE_ERR;
2806 else if (v & FW_HELLO_CMD_INIT_F)
2807 *state = DEV_STATE_INIT;
2809 *state = DEV_STATE_UNINIT;
2813 * If we're not the Master PF then we need to wait around for the
2814 * Master PF Driver to finish setting up the adapter.
2816 * Note that we also do this wait if we're a non-Master-capable PF and
2817 * there is no current Master PF; a Master PF may show up momentarily
2818 * and we wouldn't want to fail pointlessly. (This can happen when an
2819 * OS loads lots of different drivers rapidly at the same time). In
2820 * this case, the Master PF returned by the firmware will be
2821 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2823 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
2824 master_mbox != mbox) {
2825 int waiting = FW_CMD_HELLO_TIMEOUT;
2828 * Wait for the firmware to either indicate an error or
2829 * initialized state. If we see either of these we bail out
2830 * and report the issue to the caller. If we exhaust the
2831 * "hello timeout" and we haven't exhausted our retries, try
2832 * again. Otherwise bail with a timeout error.
2841 * If neither Error nor Initialialized are indicated
2842 * by the firmware keep waiting till we exaust our
2843 * timeout ... and then retry if we haven't exhausted
2846 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2847 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2858 * We either have an Error or Initialized condition
2859 * report errors preferentially.
2862 if (pcie_fw & FW_PCIE_FW_ERR)
2863 *state = DEV_STATE_ERR;
2864 else if (pcie_fw & FW_PCIE_FW_INIT)
2865 *state = DEV_STATE_INIT;
2869 * If we arrived before a Master PF was selected and
2870 * there's not a valid Master PF, grab its identity
2873 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2874 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2875 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2884 * t4_fw_bye - end communication with FW
2885 * @adap: the adapter
2886 * @mbox: mailbox to use for the FW command
2888 * Issues a command to terminate communication with FW.
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	/* Plain BYE write command; no reply payload expected. */
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2900 * t4_init_cmd - ask FW to initialize the device
2901 * @adap: the adapter
2902 * @mbox: mailbox to use for the FW command
2904 * Issues a command to FW to partially initialize the device. This
2905 * performs initialization that generally doesn't depend on user input.
int t4_early_init(struct adapter *adap, unsigned int mbox)
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	/* Plain INITIALIZE write command; no reply payload expected. */
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2917 * t4_fw_reset - issue a reset to FW
2918 * @adap: the adapter
2919 * @mbox: mailbox to use for the FW command
2920 * @reset: specifies the type of reset to perform
2922 * Issues a reset command of the specified type to FW.
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	/* The reset type is passed through verbatim in the command value. */
	c.val = htonl(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2935 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2936 * @adap: the adapter
2937 * @mbox: mailbox to use for the FW RESET command (if desired)
2938 * @force: force uP into RESET even if FW RESET command fails
2940 * Issues a RESET command to firmware (if desired) with a HALT indication
2941 * and then puts the microprocessor into RESET state. The RESET command
2942 * will only be issued if a legitimate mailbox is provided (mbox <=
2943 * FW_PCIE_FW_MASTER_MASK).
2945 * This is generally used in order for the host to safely manipulate the
2946 * adapter without fear of conflicting with whatever the firmware might
2947 * be doing. The only way out of this state is to RESTART the firmware
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		/* Full PIO reset with the HALT flag set. */
		c.val = htonl(PIORST | PIORSTMODE);
		c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		/* Put the uP into RESET and latch the HALT flag in PCIE_FW. */
		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
2995 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2996 * @adap: the adapter
2997 * @reset: if we want to do a RESET to restart things
2999 * Restart firmware previously halted by t4_fw_halt(). On successful
3000 * return the previous PF Master remains as the new PF Master and there
3001 * is no need to issue a new HELLO command, etc.
3003 * We do this in two ways:
3005 * 1. If we're dealing with newer firmware we'll simply want to take
3006 * the chip's microprocessor out of RESET. This will cause the
3007 * firmware to start up from its start vector. And then we'll loop
3008 * until the firmware indicates it's started again (PCIE_FW.HALT
3009 * reset to 0) or we timeout.
3011 * 2. If we're dealing with older firmware then we'll need to RESET
3012 * the chip since older firmware won't recognize the PCIE_FW.HALT
3013 * flag and automatically RESET itself on startup.
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
	/*
	 * Since we're directing the RESET instead of the firmware
	 * doing it automatically, we need to clear the PCIE_FW.HALT
	 * flag ourselves.
	 */
	t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);

	/*
	 * If we've been given a valid mailbox, first try to get the
	 * firmware to do the RESET.  If that works, great and we can
	 * return success.  Otherwise, if we haven't been given a
	 * valid mailbox or the RESET command failed, fall back to
	 * hitting the chip with a hammer.
	 */
	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
		/* Release the uP from RESET so it can process the command. */
		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
		if (t4_fw_reset(adap, mbox,
				PIORST | PIORSTMODE) == 0)

	/* Hammer: direct chip-level PIO reset. */
	t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);

	/* Newer-firmware path: take the uP out of RESET and poll until the
	 * firmware clears its HALT flag or we time out.
	 */
	t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
		if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
3058 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3059 * @adap: the adapter
3060 * @mbox: mailbox to use for the FW RESET command (if desired)
3061 * @fw_data: the firmware image to write
3063 * @force: force upgrade even if firmware doesn't cooperate
3065 * Perform all of the steps necessary for upgrading an adapter's
3066 * firmware image. Normally this requires the cooperation of the
3067 * existing firmware in order to halt all existing activities
3068 * but if an invalid mailbox token is passed in we skip that step
3069 * (though we'll still put the adapter microprocessor into RESET in
3072 * On successful return the new firmware will have been loaded and
3073 * the adapter will have been fully RESET losing all previous setup
3074 * state. On unsuccessful return the adapter may be completely hosed ...
3075 * positive errno indicates that the adapter is ~probably~ intact, a
3076 * negative errno indicates that things are looking bad ...
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
	/* The firmware image begins with a header describing capabilities. */
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;

	/* Halt the running firmware first (honoring @force). */
	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)

	/* Flash the new firmware image. */
	ret = t4_load_fw(adap, fw_data, size);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
3105 * t4_fixup_host_params - fix up host-dependent parameters
3106 * @adap: the adapter
3107 * @page_size: the host's Base Page Size
3108 * @cache_line_size: the host's Cache Line Size
3110 * Various registers in T4 contain values which are dependent on the
3111 * host's Base Page and Cache Line Sizes. This function will fix all of
3112 * those registers with the appropriate values as passed in ...
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
	/* log2 of the host page size (page_size is assumed to be a power
	 * of two -- NOTE(review): not validated here, confirm callers).
	 */
	unsigned int page_shift = fls(page_size) - 1;
	/* SGE encodes the host page size as log2(size) - 10. */
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* Free-list alignment: at least 32 bytes, else the cache line. */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same host page size for all eight PFs. */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
		     HOSTPAGESIZEPF0(sge_hps) |
		     HOSTPAGESIZEPF1(sge_hps) |
		     HOSTPAGESIZEPF2(sge_hps) |
		     HOSTPAGESIZEPF3(sge_hps) |
		     HOSTPAGESIZEPF4(sge_hps) |
		     HOSTPAGESIZEPF5(sge_hps) |
		     HOSTPAGESIZEPF6(sge_hps) |
		     HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip)) {
		/* T4: a single ingress padding boundary derived from the
		 * free-list alignment (encoded as log2 - 5).
		 */
		t4_set_reg_field(adap, SGE_CONTROL,
				 INGPADBOUNDARY_MASK |
				 EGRSTATUSPAGESIZE_MASK,
				 INGPADBOUNDARY(fl_align_log - 5) |
				 EGRSTATUSPAGESIZE(stat_len != 64));
		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.  For T5 the smallest
		 * Padding Boundary which we can select is 32 bytes which is
		 * larger than any known Memory Controller Line Size so we'll
		 *
		 * T5 has a different interpretation of the "0" value for the
		 * Packing Boundary.  This corresponds to 16 bytes instead of
		 * the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes.
		 */
		if (fl_align <= 32) {
		t4_set_reg_field(adap, SGE_CONTROL,
				 INGPADBOUNDARY_MASK |
				 EGRSTATUSPAGESIZE_MASK,
				 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
				 EGRSTATUSPAGESIZE(stat_len != 64));
		/* T5 packing boundary is programmed separately. */
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(fl_align_log -
						   INGPACKBOUNDARY_SHIFT_X));
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 *
	 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
	/* Round the MTU buffer sizes up to the free-list alignment. */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)

	/* TDDP page size is encoded as log2(size) - 12. */
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3213 * t4_fw_initialize - ask FW to initialize the device
3214 * @adap: the adapter
3215 * @mbox: mailbox to use for the FW command
3217 * Issues a command to FW to partially initialize the device. This
3218 * performs initialization that generally doesn't depend on user input.
3220 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3222 struct fw_initialize_cmd c;
3224 memset(&c, 0, sizeof(c));
3225 INIT_CMD(c, INITIALIZE, WRITE);
3226 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3230 * t4_query_params - query FW or device parameters
3231 * @adap: the adapter
3232 * @mbox: mailbox to use for the FW command
3235 * @nparams: the number of parameters
3236 * @params: the parameter names
3237 * @val: the parameter values
3239 * Reads the value of FW or device parameters. Up to 7 parameters can be
3242 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3243 unsigned int vf, unsigned int nparams, const u32 *params,
3247 struct fw_params_cmd c;
3248 __be32 *p = &c.param[0].mnem;
3253 memset(&c, 0, sizeof(c));
3254 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
3255 FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
3256 FW_PARAMS_CMD_VFN_V(vf));
3257 c.retval_len16 = htonl(FW_LEN16(c));
3258 for (i = 0; i < nparams; i++, p += 2)
3259 *p = htonl(*params++);
3261 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3263 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3269 * t4_set_params_nosleep - sets FW or device parameters
3270 * @adap: the adapter
3271 * @mbox: mailbox to use for the FW command
3274 * @nparams: the number of parameters
3275 * @params: the parameter names
3276 * @val: the parameter values
3278 * Does not ever sleep
3279 * Sets the value of FW or device parameters. Up to 7 parameters can be
3280 * specified at once.
3282 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3283 unsigned int pf, unsigned int vf,
3284 unsigned int nparams, const u32 *params,
3287 struct fw_params_cmd c;
3288 __be32 *p = &c.param[0].mnem;
3293 memset(&c, 0, sizeof(c));
3294 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3295 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3296 FW_PARAMS_CMD_PFN_V(pf) |
3297 FW_PARAMS_CMD_VFN_V(vf));
3298 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3301 *p++ = cpu_to_be32(*params++);
3302 *p++ = cpu_to_be32(*val++);
3305 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3309 * t4_set_params - sets FW or device parameters
3310 * @adap: the adapter
3311 * @mbox: mailbox to use for the FW command
3314 * @nparams: the number of parameters
3315 * @params: the parameter names
3316 * @val: the parameter values
3318 * Sets the value of FW or device parameters. Up to 7 parameters can be
3319 * specified at once.
3321 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3322 unsigned int vf, unsigned int nparams, const u32 *params,
3325 struct fw_params_cmd c;
3326 __be32 *p = &c.param[0].mnem;
3331 memset(&c, 0, sizeof(c));
3332 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
3333 FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
3334 FW_PARAMS_CMD_VFN_V(vf));
3335 c.retval_len16 = htonl(FW_LEN16(c));
3337 *p++ = htonl(*params++);
3338 *p++ = htonl(*val++);
3341 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3345 * t4_cfg_pfvf - configure PF/VF resource limits
3346 * @adap: the adapter
3347 * @mbox: mailbox to use for the FW command
3348 * @pf: the PF being configured
3349 * @vf: the VF being configured
3350 * @txq: the max number of egress queues
3351 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
3352 * @rxqi: the max number of interrupt-capable ingress queues
3353 * @rxq: the max number of interruptless ingress queues
3354 * @tc: the PCI traffic class
3355 * @vi: the max number of virtual interfaces
3356 * @cmask: the channel access rights mask for the PF/VF
3357 * @pmask: the port access rights mask for the PF/VF
3358 * @nexact: the maximum number of exact MPS filters
3359 * @rcaps: read capabilities
3360 * @wxcaps: write/execute capabilities
3362 * Configures resource limits and capabilities for a physical or virtual
3365 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3366 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3367 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3368 unsigned int vi, unsigned int cmask, unsigned int pmask,
3369 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3371 struct fw_pfvf_cmd c;
3373 memset(&c, 0, sizeof(c));
3374 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
3375 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
3376 FW_PFVF_CMD_VFN_V(vf));
3377 c.retval_len16 = htonl(FW_LEN16(c));
3378 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
3379 FW_PFVF_CMD_NIQ_V(rxq));
3380 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
3381 FW_PFVF_CMD_PMASK_V(pmask) |
3382 FW_PFVF_CMD_NEQ_V(txq));
3383 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
3384 FW_PFVF_CMD_NEXACTF_V(nexact));
3385 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
3386 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
3387 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
3388 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3392 * t4_alloc_vi - allocate a virtual interface
3393 * @adap: the adapter
3394 * @mbox: mailbox to use for the FW command
3395 * @port: physical port associated with the VI
3396 * @pf: the PF owning the VI
3397 * @vf: the VF owning the VI
3398 * @nmac: number of MAC addresses needed (1 to 5)
3399 * @mac: the MAC addresses of the VI
3400 * @rss_size: size of RSS table slice associated with this VI
3402 * Allocates a virtual interface for the given physical port. If @mac is
3403 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3404 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3405 * stored consecutively so the space needed is @nmac * 6 bytes.
3406 * Returns a negative error number or the non-negative VI id.
3408 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3409 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3410 unsigned int *rss_size)
3415 memset(&c, 0, sizeof(c));
3416 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
3417 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3418 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3419 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3420 c.portid_pkd = FW_VI_CMD_PORTID(port);
3423 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3428 memcpy(mac, c.mac, sizeof(c.mac));
3431 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3433 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3435 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3437 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3441 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
3442 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
3446 * t4_set_rxmode - set Rx properties of a virtual interface
3447 * @adap: the adapter
3448 * @mbox: mailbox to use for the FW command
3450 * @mtu: the new MTU or -1
3451 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3452 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3453 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3454 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
3455 * @sleep_ok: if true we may sleep while awaiting command completion
3457 * Sets Rx properties of a virtual interface.
3459 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3460 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3463 struct fw_vi_rxmode_cmd c;
3465 /* convert to FW values */
3467 mtu = FW_RXMODE_MTU_NO_CHG;
3469 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3471 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3473 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
3475 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
3477 memset(&c, 0, sizeof(c));
3478 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
3479 FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID(viid));
3480 c.retval_len16 = htonl(FW_LEN16(c));
3481 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3482 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3483 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3484 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3485 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
3486 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3490 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3491 * @adap: the adapter
3492 * @mbox: mailbox to use for the FW command
3494 * @free: if true any existing filters for this VI id are first removed
3495 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3496 * @addr: the MAC address(es)
3497 * @idx: where to store the index of each allocated filter
3498 * @hash: pointer to hash address filter bitmap
3499 * @sleep_ok: call is allowed to sleep
3501 * Allocates an exact-match filter for each of the supplied addresses and
3502 * sets it to the corresponding address. If @idx is not %NULL it should
3503 * have at least @naddr entries, each of which will be set to the index of
3504 * the filter allocated for the corresponding MAC address. If a filter
3505 * could not be allocated for an address its index is set to 0xffff.
3506 * If @hash is not %NULL addresses that fail to allocate an exact filter
3507 * are hashed and update the hash filter bitmap pointed at by @hash.
3509 * Returns a negative error number or the number of filters allocated.
3511 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3512 unsigned int viid, bool free, unsigned int naddr,
3513 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3516 struct fw_vi_mac_cmd c;
3517 struct fw_vi_mac_exact *p;
3518 unsigned int max_naddr = is_t4(adap->params.chip) ?
3519 NUM_MPS_CLS_SRAM_L_INSTANCES :
3520 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3525 memset(&c, 0, sizeof(c));
3526 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
3527 FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
3528 FW_VI_MAC_CMD_VIID(viid));
3529 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3530 FW_CMD_LEN16_V((naddr + 2) / 2));
3532 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3533 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3534 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3535 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3538 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3542 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3543 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3546 idx[i] = index >= max_naddr ? 0xffff : index;
3547 if (index < max_naddr)
3550 *hash |= (1ULL << hash_mac_addr(addr[i]));
3556 * t4_change_mac - modifies the exact-match filter for a MAC address
3557 * @adap: the adapter
3558 * @mbox: mailbox to use for the FW command
3560 * @idx: index of existing filter for old value of MAC address, or -1
3561 * @addr: the new MAC address value
3562 * @persist: whether a new MAC allocation should be persistent
3563 * @add_smt: if true also add the address to the HW SMT
3565 * Modifies an exact-match filter and sets it to the new MAC address.
3566 * Note that in general it is not possible to modify the value of a given
3567 * filter so the generic way to modify an address filter is to free the one
3568 * being used by the old address value and allocate a new filter for the
3569 * new address value. @idx can be -1 if the address is a new addition.
3571 * Returns a negative error number or the index of the filter with the new
3574 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3575 int idx, const u8 *addr, bool persist, bool add_smt)
3578 struct fw_vi_mac_cmd c;
3579 struct fw_vi_mac_exact *p = c.u.exact;
3580 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
3581 NUM_MPS_CLS_SRAM_L_INSTANCES :
3582 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3584 if (idx < 0) /* new allocation */
3585 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3586 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3588 memset(&c, 0, sizeof(c));
3589 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
3590 FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID(viid));
3591 c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
3592 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3593 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3594 FW_VI_MAC_CMD_IDX(idx));
3595 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3597 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3599 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3600 if (ret >= max_mac_addr)
3607 * t4_set_addr_hash - program the MAC inexact-match hash filter
3608 * @adap: the adapter
3609 * @mbox: mailbox to use for the FW command
3611 * @ucast: whether the hash filter should also match unicast addresses
3612 * @vec: the value to be written to the hash filter
3613 * @sleep_ok: call is allowed to sleep
3615 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3617 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3618 bool ucast, u64 vec, bool sleep_ok)
3620 struct fw_vi_mac_cmd c;
3622 memset(&c, 0, sizeof(c));
3623 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
3624 FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID(viid));
3625 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3626 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3628 c.u.hash.hashvec = cpu_to_be64(vec);
3629 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3633 * t4_enable_vi_params - enable/disable a virtual interface
3634 * @adap: the adapter
3635 * @mbox: mailbox to use for the FW command
3637 * @rx_en: 1=enable Rx, 0=disable Rx
3638 * @tx_en: 1=enable Tx, 0=disable Tx
3639 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3641 * Enables/disables a virtual interface. Note that setting DCB Enable
3642 * only makes sense when enabling a Virtual Interface ...
3644 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3645 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3647 struct fw_vi_enable_cmd c;
3649 memset(&c, 0, sizeof(c));
3650 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
3651 FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID(viid));
3653 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3654 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
3655 FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
3656 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3660 * t4_enable_vi - enable/disable a virtual interface
3661 * @adap: the adapter
3662 * @mbox: mailbox to use for the FW command
3664 * @rx_en: 1=enable Rx, 0=disable Rx
3665 * @tx_en: 1=enable Tx, 0=disable Tx
3667 * Enables/disables a virtual interface.
3669 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3670 bool rx_en, bool tx_en)
3672 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
3676 * t4_identify_port - identify a VI's port by blinking its LED
3677 * @adap: the adapter
3678 * @mbox: mailbox to use for the FW command
3680 * @nblinks: how many times to blink LED at 2.5 Hz
3682 * Identifies a VI's port by blinking its LED.
3684 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3685 unsigned int nblinks)
3687 struct fw_vi_enable_cmd c;
3689 memset(&c, 0, sizeof(c));
3690 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
3691 FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID(viid));
3692 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3693 c.blinkdur = htons(nblinks);
3694 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3698 * t4_iq_free - free an ingress queue and its FLs
3699 * @adap: the adapter
3700 * @mbox: mailbox to use for the FW command
3701 * @pf: the PF owning the queues
3702 * @vf: the VF owning the queues
3703 * @iqtype: the ingress queue type
3704 * @iqid: ingress queue id
3705 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3706 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3708 * Frees an ingress queue and its associated FLs, if any.
3710 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3711 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3712 unsigned int fl0id, unsigned int fl1id)
3716 memset(&c, 0, sizeof(c));
3717 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
3718 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
3719 FW_IQ_CMD_VFN_V(vf));
3720 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
3721 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
3722 c.iqid = htons(iqid);
3723 c.fl0id = htons(fl0id);
3724 c.fl1id = htons(fl1id);
3725 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3729 * t4_eth_eq_free - free an Ethernet egress queue
3730 * @adap: the adapter
3731 * @mbox: mailbox to use for the FW command
3732 * @pf: the PF owning the queue
3733 * @vf: the VF owning the queue
3734 * @eqid: egress queue id
3736 * Frees an Ethernet egress queue.
3738 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3739 unsigned int vf, unsigned int eqid)
3741 struct fw_eq_eth_cmd c;
3743 memset(&c, 0, sizeof(c));
3744 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
3745 FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
3746 FW_EQ_ETH_CMD_VFN_V(vf));
3747 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
3748 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
3749 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3753 * t4_ctrl_eq_free - free a control egress queue
3754 * @adap: the adapter
3755 * @mbox: mailbox to use for the FW command
3756 * @pf: the PF owning the queue
3757 * @vf: the VF owning the queue
3758 * @eqid: egress queue id
3760 * Frees a control egress queue.
3762 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3763 unsigned int vf, unsigned int eqid)
3765 struct fw_eq_ctrl_cmd c;
3767 memset(&c, 0, sizeof(c));
3768 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
3769 FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
3770 FW_EQ_CTRL_CMD_VFN_V(vf));
3771 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
3772 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
3773 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3777 * t4_ofld_eq_free - free an offload egress queue
3778 * @adap: the adapter
3779 * @mbox: mailbox to use for the FW command
3780 * @pf: the PF owning the queue
3781 * @vf: the VF owning the queue
3782 * @eqid: egress queue id
3784 * Frees a control egress queue.
3786 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3787 unsigned int vf, unsigned int eqid)
3789 struct fw_eq_ofld_cmd c;
3791 memset(&c, 0, sizeof(c));
3792 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
3793 FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
3794 FW_EQ_OFLD_CMD_VFN_V(vf));
3795 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
3796 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
3797 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3801 * t4_handle_fw_rpl - process a FW reply message
3802 * @adap: the adapter
3803 * @rpl: start of the FW message
3805 * Processes a FW message, such as link state change messages.
3807 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3809 u8 opcode = *(const u8 *)rpl;
3811 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3812 int speed = 0, fc = 0;
3813 const struct fw_port_cmd *p = (void *)rpl;
3814 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3815 int port = adap->chan_map[chan];
3816 struct port_info *pi = adap2pinfo(adap, port);
3817 struct link_config *lc = &pi->link_cfg;
3818 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3819 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3820 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3822 if (stat & FW_PORT_CMD_RXPAUSE)
3824 if (stat & FW_PORT_CMD_TXPAUSE)
3826 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3828 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3830 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3832 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
3835 if (link_ok != lc->link_ok || speed != lc->speed ||
3836 fc != lc->fc) { /* something changed */
3837 lc->link_ok = link_ok;
3840 lc->supported = be16_to_cpu(p->u.info.pcap);
3841 t4_os_link_changed(adap, port, link_ok);
3843 if (mod != pi->mod_type) {
3845 t4_os_portmod_changed(adap, port);
3851 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3855 if (pci_is_pcie(adapter->pdev)) {
3856 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3857 p->speed = val & PCI_EXP_LNKSTA_CLS;
3858 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3863 * init_link_config - initialize a link's SW state
3864 * @lc: structure holding the link state
3865 * @caps: link capabilities
3867 * Initializes the SW state maintained for each link, including the link's
3868 * capabilities and default speed/flow-control/autonegotiation settings.
3870 static void init_link_config(struct link_config *lc, unsigned int caps)
3872 lc->supported = caps;
3873 lc->requested_speed = 0;
3875 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3876 if (lc->supported & FW_PORT_CAP_ANEG) {
3877 lc->advertising = lc->supported & ADVERT_MASK;
3878 lc->autoneg = AUTONEG_ENABLE;
3879 lc->requested_fc |= PAUSE_AUTONEG;
3881 lc->advertising = 0;
3882 lc->autoneg = AUTONEG_DISABLE;
3886 #define CIM_PF_NOACCESS 0xeeeeeeee
3888 int t4_wait_dev_ready(void __iomem *regs)
3892 whoami = readl(regs + PL_WHOAMI);
3893 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
3897 whoami = readl(regs + PL_WHOAMI);
3898 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
3902 u32 vendor_and_model_id;
3906 static int get_flash_params(struct adapter *adap)
3908 /* Table for non-Numonix supported flash parts. Numonix parts are left
3909 * to the preexisting code. All flash parts have 64KB sectors.
3911 static struct flash_desc supported_flash[] = {
3912 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
3918 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3920 ret = sf1_read(adap, 3, 0, 1, &info);
3921 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3925 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
3926 if (supported_flash[ret].vendor_and_model_id == info) {
3927 adap->params.sf_size = supported_flash[ret].size_mb;
3928 adap->params.sf_nsec =
3929 adap->params.sf_size / SF_SEC_SIZE;
3933 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3935 info >>= 16; /* log2 of size */
3936 if (info >= 0x14 && info < 0x18)
3937 adap->params.sf_nsec = 1 << (info - 16);
3938 else if (info == 0x18)
3939 adap->params.sf_nsec = 64;
3942 adap->params.sf_size = 1 << info;
3943 adap->params.sf_fw_start =
3944 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3946 if (adap->params.sf_size < FLASH_MIN_SIZE)
3947 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
3948 adap->params.sf_size, FLASH_MIN_SIZE);
3953 * t4_prep_adapter - prepare SW and HW for operation
3954 * @adapter: the adapter
3955 * @reset: if true perform a HW reset
3957 * Initialize adapter SW state for the various HW modules, set initial
3958 * values for some adapter tunables, take PHYs out of reset, and
3959 * initialize the MDIO interface.
3961 int t4_prep_adapter(struct adapter *adapter)
3967 get_pci_mode(adapter, &adapter->params.pci);
3968 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
3970 ret = get_flash_params(adapter);
3972 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3976 /* Retrieve adapter's device ID
3978 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3979 ver = device_id >> 12;
3980 adapter->params.chip = 0;
3983 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
3986 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
3989 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3994 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3997 * Default port for debugging in case we can't reach FW.
3999 adapter->params.nports = 1;
4000 adapter->params.portvec = 1;
4001 adapter->params.vpd.cclk = 50000;
4006 * t4_init_tp_params - initialize adap->params.tp
4007 * @adap: the adapter
4009 * Initialize various fields of the adapter's TP Parameters structure.
4011 int t4_init_tp_params(struct adapter *adap)
4016 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
4017 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
4018 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
4020 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4021 for (chan = 0; chan < NCHAN; chan++)
4022 adap->params.tp.tx_modq[chan] = chan;
4024 /* Cache the adapter's Compressed Filter Mode and global Incress
4027 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4028 &adap->params.tp.vlan_pri_map, 1,
4030 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4031 &adap->params.tp.ingress_config, 1,
4034 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
4035 * shift positions of several elements of the Compressed Filter Tuple
4036 * for this adapter which we need frequently ...
4038 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
4039 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
4040 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
4041 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
4044 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
4045 * represents the presense of an Outer VLAN instead of a VNIC ID.
4047 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
4048 adap->params.tp.vnic_shift = -1;
4054 * t4_filter_field_shift - calculate filter field shift
4055 * @adap: the adapter
4056 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
4058 * Return the shift position of a filter field within the Compressed
4059 * Filter Tuple. The filter field is specified via its selection bit
4060 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
4062 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
4064 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
4068 if ((filter_mode & filter_sel) == 0)
4071 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
4072 switch (filter_mode & sel) {
4074 field_shift += W_FT_FCOE;
4077 field_shift += W_FT_PORT;
4080 field_shift += W_FT_VNIC_ID;
4083 field_shift += W_FT_VLAN;
4086 field_shift += W_FT_TOS;
4089 field_shift += W_FT_PROTOCOL;
4092 field_shift += W_FT_ETHERTYPE;
4095 field_shift += W_FT_MACMATCH;
4098 field_shift += W_FT_MPSHITTYPE;
4100 case F_FRAGMENTATION:
4101 field_shift += W_FT_FRAGMENTATION;
4108 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
4112 struct fw_port_cmd c;
4113 struct fw_rss_vi_config_cmd rvc;
4115 memset(&c, 0, sizeof(c));
4116 memset(&rvc, 0, sizeof(rvc));
4118 for_each_port(adap, i) {
4119 unsigned int rss_size;
4120 struct port_info *p = adap2pinfo(adap, i);
4122 while ((adap->params.portvec & (1 << j)) == 0)
4125 c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
4126 FW_CMD_REQUEST_F | FW_CMD_READ_F |
4127 FW_PORT_CMD_PORTID(j));
4128 c.action_to_len16 = htonl(
4129 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4131 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4135 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4142 p->rss_size = rss_size;
4143 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
4144 adap->port[i]->dev_port = j;
4146 ret = ntohl(c.u.info.lstatus_to_modtype);
4147 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
4148 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
4149 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
4150 p->mod_type = FW_PORT_MOD_TYPE_NA;
4152 rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
4153 FW_CMD_REQUEST_F | FW_CMD_READ_F |
4154 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4155 rvc.retval_len16 = htonl(FW_LEN16(rvc));
4156 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4159 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4161 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));