2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/init.h>
36 #include <linux/delay.h>
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
/* NOTE(review): the retry loop, delay, and -EAGAIN exit are elided in this
 * view of the file; only the per-iteration completion test is visible. */
60 u32 val = t4_read_reg(adapter, reg);
/* Normalize the masked field to 0/1 and compare against desired polarity */
62 if (!!(val & mask) == polarity) {
/* t4_wait_op_done - wait for an operation without capturing the register
 * value; thin wrapper around t4_wait_op_done_val (the trailing NULL @valp
 * argument is presumably passed on the elided continuation line — confirm). */
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
88 * Sets a register field specified by the supplied mask to the
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* read-modify-write: clear the masked field, then OR in the new value */
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
101 * t4_read_indirect - read indirectly addressed registers
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
109 * Reads registers that are accessed indirectly through an address/data
112 static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
/* NOTE(review): the surrounding loop over @nregs is elided in this view;
 * each iteration writes the index then reads the data register. */
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
/* @nflit is the reply length in 64-bit words (flits); each flit is read
 * from consecutive 8-byte mailbox offsets. */
126 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
129 for ( ; nflit; nflit--, mbox_addr += 8)
130 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr))
134 * Handle a FW assertion reported in a mailbox.
/* Copies the FW_DEBUG reply out of the mailbox and logs the firmware's
 * assertion location and values at alert severity. */
136 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
138 struct fw_debug_cmd asrt;
140 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
141 dev_alert(adap->pdev_dev,
142 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
143 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
144 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
/* Dump the 64-byte contents of a mailbox (8 x 64-bit words) to the log,
 * used when a FW command fails or times out. */
147 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
149 dev_err(adap->pdev_dev,
150 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
151 (unsigned long long)t4_read_reg64(adap, data_reg),
152 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
153 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
154 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
155 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
156 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
157 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
158 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
162 * t4_wr_mbox_meat - send a command to FW through the given mailbox
164 * @mbox: index of the mailbox to use
165 * @cmd: the command to write
166 * @size: command length in bytes
167 * @rpl: where to optionally store the reply
168 * @sleep_ok: if true we may sleep while awaiting command completion
170 * Sends the given command to FW through the selected mailbox and waits
171 * for the FW to execute the command. If @rpl is not %NULL it is used to
172 * store the FW's reply to the command. The command and its optional
173 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
174 * to respond. @sleep_ok determines whether we may sleep while awaiting
175 * the response. If sleeping is allowed we use progressive backoff
178 * The return value is 0 on success or a negative errno on failure. A
179 * failure can happen either because we are not able to execute the
180 * command or FW executes it but signals an error. In the latter case
181 * the return value is the error code indicated by FW (negated).
183 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
184 void *rpl, bool sleep_ok)
/* Progressive backoff schedule in ms; the final entry repeats. */
186 static const int delay[] = {
187 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
192 int i, ms, delay_idx;
193 const __be64 *p = cmd;
194 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
195 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
197 if ((size & 15) || size > MBOX_LEN)
201 * If the device is off-line, as in EEH, commands will time out.
202 * Fail them early so we don't waste time waiting.
204 if (adap->pdev->error_state != pci_channel_io_normal)
/* Try up to 4 times to gain ownership of the mailbox. */
207 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
208 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
209 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
211 if (v != MBOX_OWNER_DRV)
212 return v ? -EBUSY : -ETIMEDOUT;
/* Copy the command into the mailbox 8 bytes at a time. */
214 for (i = 0; i < size; i += 8)
215 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
/* Hand the mailbox to FW and flush the posted write. */
217 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
218 t4_read_reg(adap, ctl_reg); /* flush write */
/* Poll for completion, backing off per the delay[] schedule. NOTE(review):
 * the sleep/udelay call between iterations is elided in this view. */
223 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
225 ms = delay[delay_idx]; /* last element may repeat */
226 if (delay_idx < ARRAY_SIZE(delay) - 1)
232 v = t4_read_reg(adap, ctl_reg);
233 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
234 if (!(v & MBMSGVALID)) {
235 t4_write_reg(adap, ctl_reg, 0);
239 res = t4_read_reg64(adap, data_reg);
/* A FW_DEBUG reply means the firmware asserted; log it and fail with EIO */
240 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
241 fw_asrt(adap, data_reg);
242 res = FW_CMD_RETVAL(EIO);
244 get_mbox_rpl(adap, rpl, size / 8, data_reg);
246 if (FW_CMD_RETVAL_GET((int)res))
247 dump_mbox(adap, mbox, data_reg);
248 t4_write_reg(adap, ctl_reg, 0);
249 return -FW_CMD_RETVAL_GET((int)res);
/* Timed out: dump the mailbox for diagnosis. */
253 dump_mbox(adap, mbox, data_reg);
254 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
255 *(const u8 *)cmd, mbox);
260 * t4_mc_read - read from MC through backdoor accesses
262 * @addr: address of first byte requested
263 * @data: 64 bytes of data containing the requested address
264 * @ecc: where to store the corresponding 64-bit ECC word
266 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
267 * that covers the requested address @addr. If @ecc is not %NULL it
268 * is assigned the 64-bit ECC word for the read data.
270 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
/* Fail if a BIST operation is already in flight. */
274 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
276 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
277 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
278 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
279 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
/* Wait for START_BIST to clear, indicating completion. */
281 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
285 #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
/* Copy the 16 data words out in reverse register order, big-endian. */
287 for (i = 15; i >= 0; i--)
288 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
290 *ecc = t4_read_reg64(adap, MC_DATA(16));
296 * t4_edc_read - read from EDC through backdoor accesses
298 * @idx: which EDC to access
299 * @addr: address of first byte requested
300 * @data: 64 bytes of data containing the requested address
301 * @ecc: where to store the corresponding 64-bit ECC word
303 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
304 * that covers the requested address @addr. If @ecc is not %NULL it
305 * is assigned the 64-bit ECC word for the read data.
307 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
/* NOTE(review): @idx is presumably scaled to the per-EDC register stride on
 * an elided line before these register accesses — confirm against full file. */
312 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
314 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
315 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
316 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
317 t4_write_reg(adap, EDC_BIST_CMD + idx,
318 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
319 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
323 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
/* Copy the 16 data words out in reverse register order, big-endian. */
325 for (i = 15; i >= 0; i--)
326 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
328 *ecc = t4_read_reg64(adap, EDC_DATA(16));
/* VPD offset of the EEPROM status word used for write protection. */
333 #define EEPROM_STAT_ADDR 0x7bfc
338 * t4_seeprom_wp - enable/disable EEPROM write protection
339 * @adapter: the adapter
340 * @enable: whether to enable or disable write protection
342 * Enables or disables write protection on the serial EEPROM.
344 int t4_seeprom_wp(struct adapter *adapter, bool enable)
/* 0xc presumably sets the EEPROM block-protect bits — confirm vs datasheet */
346 unsigned int v = enable ? 0xc : 0;
347 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
/* pci_write_vpd returns bytes written on success; normalize to 0. */
348 return ret < 0 ? ret : 0;
352 * get_vpd_params - read VPD parameters from VPD EEPROM
353 * @adapter: adapter to read
354 * @p: where to store the parameters
356 * Reads card parameters stored in VPD EEPROM.
358 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
362 u8 vpd[VPD_LEN], csum;
363 unsigned int vpdr_len, kw_offset, id_len;
365 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
/* The VPD must begin with a Large Resource ID String tag. */
369 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
370 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
374 id_len = pci_vpd_lrdt_size(vpd);
/* Locate the read-only (VPD-R) section containing the keywords. */
378 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
380 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
384 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
385 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
386 if (vpdr_len + kw_offset > VPD_LEN) {
387 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
/* Look up a keyword in VPD-R and leave its data offset in @var; the
 * error-exit path inside the macro is elided in this view. */
391 #define FIND_VPD_KW(var, name) do { \
392 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
394 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
397 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
/* Verify the RV checksum: all VPD-R bytes through RV must sum to 0. */
400 FIND_VPD_KW(i, "RV");
401 for (csum = 0; i >= 0; i--)
405 dev_err(adapter->pdev_dev,
406 "corrupted VPD EEPROM, actual csum %u\n", csum);
410 FIND_VPD_KW(ec, "EC");
411 FIND_VPD_KW(sn, "SN");
/* Copy ID string, EC level, and serial number into the params struct. */
414 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
416 memcpy(p->ec, vpd + ec, EC_LEN);
418 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
419 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
424 /* serial flash and firmware constants */
/* NOTE(review): the "enum {" opener for these constants is elided in this
 * view. Opcodes below are the standard SPI NOR flash command set. */
426 SF_ATTEMPTS = 10, /* max retries for SF operations */
428 /* flash command opcodes */
429 SF_PROG_PAGE = 2, /* program page */
430 SF_WR_DISABLE = 4, /* disable writes */
431 SF_RD_STATUS = 5, /* read status register */
432 SF_WR_ENABLE = 6, /* enable writes */
433 SF_RD_DATA_FAST = 0xb, /* read flash */
434 SF_RD_ID = 0x9f, /* read ID */
435 SF_ERASE_SECTOR = 0xd8, /* erase sector */
/* upper bound on a FW image accepted by t4_load_fw */
437 FW_MAX_SIZE = 512 * 1024,
441 * sf1_read - read data from the serial flash
442 * @adapter: the adapter
443 * @byte_cnt: number of bytes to read
444 * @cont: whether another operation will be chained
445 * @lock: whether to lock SF for PL access only
446 * @valp: where to store the read data
448 * Reads up to 4 bytes of data from the serial flash. The location of
449 * the read needs to be specified prior to calling this by issuing the
450 * appropriate commands to the serial flash.
452 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
457 if (!byte_cnt || byte_cnt > 4)
/* Reject if the serial flash interface is already busy. */
459 if (t4_read_reg(adapter, SF_OP) & BUSY)
/* Translate the boolean flags into their register bit encodings. */
461 cont = cont ? SF_CONT : 0;
462 lock = lock ? SF_LOCK : 0;
463 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
464 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
466 *valp = t4_read_reg(adapter, SF_DATA);
471 * sf1_write - write data to the serial flash
472 * @adapter: the adapter
473 * @byte_cnt: number of bytes to write
474 * @cont: whether another operation will be chained
475 * @lock: whether to lock SF for PL access only
476 * @val: value to write
478 * Writes up to 4 bytes of data to the serial flash. The location of
479 * the write needs to be specified prior to calling this by issuing the
480 * appropriate commands to the serial flash.
482 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
485 if (!byte_cnt || byte_cnt > 4)
/* Reject if the serial flash interface is already busy. */
487 if (t4_read_reg(adapter, SF_OP) & BUSY)
/* Translate the boolean flags into their register bit encodings. */
489 cont = cont ? SF_CONT : 0;
490 lock = lock ? SF_LOCK : 0;
/* Load the data first, then kick off the write via SF_OP. */
491 t4_write_reg(adapter, SF_DATA, val);
492 t4_write_reg(adapter, SF_OP, lock |
493 cont | BYTECNT(byte_cnt - 1) | OP_WR);
494 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
498 * flash_wait_op - wait for a flash operation to complete
499 * @adapter: the adapter
500 * @attempts: max number of polls of the status register
501 * @delay: delay between polls in ms
503 * Wait for a flash operation to complete by polling the status register.
505 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* NOTE(review): the enclosing poll loop, busy-bit test on @status, and the
 * delay between iterations are elided in this view. */
511 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
512 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
524 * t4_read_flash - read words from serial flash
525 * @adapter: the adapter
526 * @addr: the start address for the read
527 * @nwords: how many 32-bit words to read
528 * @data: where to store the read data
529 * @byte_oriented: whether to store data as bytes or as words
531 * Read the specified number of 32-bit words from the serial flash.
532 * If @byte_oriented is set the read data is stored as a byte array
533 * (i.e., big-endian), otherwise as 32-bit words in the platform's
536 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
537 unsigned int nwords, u32 *data, int byte_oriented)
/* Bounds/alignment check: reads must be 4-byte aligned and inside flash. */
541 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Build the SPI fast-read command: opcode in the low byte, byte-swapped
 * address in the upper bytes. */
544 addr = swab32(addr) | SF_RD_DATA_FAST;
546 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
547 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* Stream out the words; keep SF locked until the final read. */
550 for ( ; nwords; nwords--, data++) {
551 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
553 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
/* Big-endian byte order requested by the caller. */
557 *data = htonl(*data);
563 * t4_write_flash - write up to a page of data to the serial flash
564 * @adapter: the adapter
565 * @addr: the start address to write
566 * @n: length of data to write in bytes
567 * @data: the data to write
569 * Writes up to a page of data (256 bytes) to the serial flash starting
570 * at the given address. All the data must be written to the same page.
572 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
573 unsigned int n, const u8 *data)
577 unsigned int i, c, left, val, offset = addr & 0xff;
/* The write must stay inside flash and within a single 256-byte page. */
579 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* Program-page command: opcode in the low byte, byte-swapped address above */
582 val = swab32(addr) | SF_PROG_PAGE;
584 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
585 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Feed the data up to 4 bytes at a time, big-endian within each word. */
588 for (left = n; left; left -= c) {
590 for (val = 0, i = 0; i < c; ++i)
591 val = (val << 8) + *data++;
593 ret = sf1_write(adapter, c, c != left, 1, val);
597 ret = flash_wait_op(adapter, 8, 1);
601 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
603 /* Read the page to verify the write succeeded */
604 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
/* Compare what we wrote (data was advanced by n above) to what we read. */
608 if (memcmp(data - n, (u8 *)buf + offset, n)) {
609 dev_err(adapter->pdev_dev,
610 "failed to correctly write the flash page at %#x\n",
/* error path: make sure SF is unlocked before returning */
617 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
622 * get_fw_version - read the firmware version
623 * @adapter: the adapter
624 * @vers: where to place the version
626 * Reads the FW version from flash.
/* Reads one word at the fw_ver offset within the FW header in flash;
 * byte_oriented=0 so the value is in host order. */
628 static int get_fw_version(struct adapter *adapter, u32 *vers)
630 return t4_read_flash(adapter, adapter->params.sf_fw_start +
631 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
635 * get_tp_version - read the TP microcode version
636 * @adapter: the adapter
637 * @vers: where to place the version
639 * Reads the TP microcode version from flash.
/* Same pattern as get_fw_version but for the tp_microcode_ver header field */
641 static int get_tp_version(struct adapter *adapter, u32 *vers)
643 return t4_read_flash(adapter, adapter->params.sf_fw_start +
644 offsetof(struct fw_hdr, tp_microcode_ver),
649 * t4_check_fw_version - check if the FW is compatible with this driver
650 * @adapter: the adapter
652 * Checks if an adapter's FW is compatible with the driver. Returns 0
653 * if there's exact match, a negative error if the version could not be
654 * read or there's a major version mismatch, and a positive value if the
655 * expected major version is found but there's a minor version mismatch.
657 int t4_check_fw_version(struct adapter *adapter)
660 int ret, major, minor, micro;
/* Cache FW, TP, and NIC interface versions in adapter->params. */
662 ret = get_fw_version(adapter, &adapter->params.fw_vers);
664 ret = get_tp_version(adapter, &adapter->params.tp_vers);
666 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
667 offsetof(struct fw_hdr, intfver_nic),
672 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
673 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
674 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
675 memcpy(adapter->params.api_vers, api_vers,
676 sizeof(adapter->params.api_vers));
678 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
679 dev_err(adapter->pdev_dev,
680 "card FW has major version %u, driver wants %u\n",
681 major, FW_VERSION_MAJOR);
685 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
686 return 0; /* perfect match */
688 /* Minor/micro version mismatch. Report it but often it's OK. */
693 * t4_flash_erase_sectors - erase a range of flash sectors
694 * @adapter: the adapter
695 * @start: the first sector to erase
696 * @end: the last sector to erase
698 * Erases the sectors in the given inclusive range.
700 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
704 while (start <= end) {
/* Per sector: write-enable, issue ERASE with the sector number in the
 * address bytes, then poll (erase can take a long time: 14 x 500 ms). */
705 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
706 (ret = sf1_write(adapter, 4, 0, 1,
707 SF_ERASE_SECTOR | (start << 8))) != 0 ||
708 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
709 dev_err(adapter->pdev_dev,
710 "erase of flash sector %d failed, error %d\n",
716 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
721 * t4_load_fw - download firmware
723 * @fw_data: the firmware image to write
726 * Write the supplied firmware image to the card's serial flash.
728 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
733 u8 first_page[SF_PAGE_SIZE];
734 const u32 *p = (const u32 *)fw_data;
735 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
736 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
737 unsigned int fw_img_start = adap->params.sf_fw_start;
738 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
/* Validate the image: non-empty, 512-byte multiple, header-consistent
 * length, within FW_MAX_SIZE, and checksum over all words == 0xffffffff. */
741 dev_err(adap->pdev_dev, "FW image has no data\n");
745 dev_err(adap->pdev_dev,
746 "FW image size not multiple of 512 bytes\n");
749 if (ntohs(hdr->len512) * 512 != size) {
750 dev_err(adap->pdev_dev,
751 "FW image size differs from size in FW header\n");
754 if (size > FW_MAX_SIZE) {
755 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
760 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
763 if (csum != 0xffffffff) {
764 dev_err(adap->pdev_dev,
765 "corrupted firmware image, checksum %#x\n", csum);
769 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
770 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
775 * We write the correct version at the end so the driver can see a bad
776 * version if the FW write fails. Start by writing a copy of the
777 * first page with a bad version.
779 memcpy(first_page, fw_data, SF_PAGE_SIZE);
780 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
781 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
/* Write the remaining pages sequentially. */
786 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
787 addr += SF_PAGE_SIZE;
788 fw_data += SF_PAGE_SIZE;
789 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
/* Finally patch in the real version to mark the image valid. */
794 ret = t4_write_flash(adap,
795 fw_img_start + offsetof(struct fw_hdr, fw_ver),
796 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
799 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
/* Capabilities we may advertise during autonegotiation. */
804 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
805 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
808 * t4_link_start - apply link configuration to MAC/PHY
809 * @phy: the PHY to setup
810 * @mac: the MAC to setup
811 * @lc: the requested link configuration
813 * Set up a port's MAC and PHY according to a desired link configuration.
814 * - If the PHY can auto-negotiate first decide what to advertise, then
815 * enable/disable auto-negotiation as desired, and reset.
816 * - If the PHY does not auto-negotiate just reset it.
817 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
818 * otherwise do it later based on the outcome of auto-negotiation.
820 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
821 struct link_config *lc)
823 struct fw_port_cmd c;
824 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
/* Translate requested pause settings into FW capability bits. */
827 if (lc->requested_fc & PAUSE_RX)
828 fc |= FW_PORT_CAP_FC_RX;
829 if (lc->requested_fc & PAUSE_TX)
830 fc |= FW_PORT_CAP_FC_TX;
832 memset(&c, 0, sizeof(c));
833 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
834 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
835 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* Choose rcap: fixed caps if no ANEG support, forced speed if ANEG is
 * disabled, otherwise the advertised set. */
838 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
839 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
840 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
841 } else if (lc->autoneg == AUTONEG_DISABLE) {
842 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
843 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
845 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
847 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
851 * t4_restart_aneg - restart autonegotiation
853 * @mbox: mbox to use for the FW command
856 * Restarts autonegotiation for the selected port.
858 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
860 struct fw_port_cmd c;
/* Issue an L1_CFG port command advertising only the ANEG capability. */
862 memset(&c, 0, sizeof(c));
863 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
864 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
865 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
867 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
868 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* NOTE(review): the "struct intr_info {" opener is elided in this view.
 * One entry of the interrupt-action tables consumed by
 * t4_handle_intr_status; tables terminate with a mask of 0. */
872 unsigned int mask; /* bits to check in interrupt status */
873 const char *msg; /* message to print or NULL */
874 short stat_idx; /* stat counter to increment or -1 */
875 unsigned short fatal; /* whether the condition reported is fatal */
879 * t4_handle_intr_status - table driven interrupt handler
880 * @adapter: the adapter that generated the interrupt
881 * @reg: the interrupt status register to process
882 * @acts: table of interrupt actions
884 * A table driven interrupt handler that applies a set of masks to an
885 * interrupt status word and performs the corresponding actions if the
886 * interrupts described by the mask have occurred. The actions include
887 * optionally emitting a warning or alert message. The table is terminated
888 * by an entry specifying mask 0. Returns the number of fatal interrupt
891 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
892 const struct intr_info *acts)
895 unsigned int mask = 0;
896 unsigned int status = t4_read_reg(adapter, reg);
898 for ( ; acts->mask; ++acts) {
899 if (!(status & acts->mask))
/* Fatal entries alert unconditionally; non-fatal ones warn rate-limited */
903 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
904 status & acts->mask);
905 } else if (acts->msg && printk_ratelimit())
906 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
907 status & acts->mask);
911 if (status) /* clear processed interrupts */
912 t4_write_reg(adapter, reg, status);
917 * Interrupt handler for the PCIE module.
919 static void pcie_intr_handler(struct adapter *adapter)
/* Action tables for the three PCIE interrupt-cause registers; processed
 * below by t4_handle_intr_status. */
921 static const struct intr_info sysbus_intr_info[] = {
922 { RNPP, "RXNP array parity error", -1, 1 },
923 { RPCP, "RXPC array parity error", -1, 1 },
924 { RCIP, "RXCIF array parity error", -1, 1 },
925 { RCCP, "Rx completions control array parity error", -1, 1 },
926 { RFTP, "RXFT array parity error", -1, 1 },
929 static const struct intr_info pcie_port_intr_info[] = {
930 { TPCP, "TXPC array parity error", -1, 1 },
931 { TNPP, "TXNP array parity error", -1, 1 },
932 { TFTP, "TXFT array parity error", -1, 1 },
933 { TCAP, "TXCA array parity error", -1, 1 },
934 { TCIP, "TXCIF array parity error", -1, 1 },
935 { RCAP, "RXCA array parity error", -1, 1 },
936 { OTDD, "outbound request TLP discarded", -1, 1 },
937 { RDPE, "Rx data parity error", -1, 1 },
938 { TDUE, "Tx uncorrectable data error", -1, 1 },
941 static const struct intr_info pcie_intr_info[] = {
942 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
943 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
944 { MSIDATAPERR, "MSI data parity error", -1, 1 },
945 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
946 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
947 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
948 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
949 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
950 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
951 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
952 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
953 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
954 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
955 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
956 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
957 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
958 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
959 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
960 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
961 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
962 { FIDPERR, "PCI FID parity error", -1, 1 },
963 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
964 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
965 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
966 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
967 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
968 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
969 { PCIESINT, "PCI core secondary fault", -1, 1 },
970 { PCIEPINT, "PCI core primary fault", -1, 1 },
971 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
/* Sum fatal counts across all three cause registers; any fatal hit
 * triggers t4_fatal_err below. */
977 fat = t4_handle_intr_status(adapter,
978 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
980 t4_handle_intr_status(adapter,
981 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
982 pcie_port_intr_info) +
983 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
985 t4_fatal_err(adapter);
989 * TP interrupt handler.
991 static void tp_intr_handler(struct adapter *adapter)
/* Both TP conditions are fatal; escalate if either is seen. */
993 static const struct intr_info tp_intr_info[] = {
994 { 0x3fffffff, "TP parity error", -1, 1 },
995 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
999 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1000 t4_fatal_err(adapter);
1004 * SGE interrupt handler.
1006 static void sge_intr_handler(struct adapter *adapter)
1010 static const struct intr_info sge_intr_info[] = {
1011 { ERR_CPL_EXCEED_IQE_SIZE,
1012 "SGE received CPL exceeding IQE size", -1, 1 },
1013 { ERR_INVALID_CIDX_INC,
1014 "SGE GTS CIDX increment too large", -1, 0 },
1015 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
/* Doorbell FIFO thresholds: handled specially below, so no message here */
1016 { F_DBFIFO_LP_INT, NULL, -1, 0 },
1017 { F_DBFIFO_HP_INT, NULL, -1, 0 },
1018 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1019 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1020 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1021 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1023 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1025 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1027 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1029 { ERR_ING_CTXT_PRIO,
1030 "SGE too many priority ingress contexts", -1, 0 },
1031 { ERR_EGR_CTXT_PRIO,
1032 "SGE too many priority egress contexts", -1, 0 },
1033 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1034 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
/* SGE parity status is split across two 32-bit cause registers. */
1038 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1039 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1041 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1042 (unsigned long long)v);
1043 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1044 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
/* Doorbell FIFO full/drop conditions get dedicated recovery callbacks. */
1047 err = t4_read_reg(adapter, A_SGE_INT_CAUSE3);
1048 if (err & (F_DBFIFO_HP_INT|F_DBFIFO_LP_INT))
1049 t4_db_full(adapter);
1050 if (err & F_ERR_DROPPED_DB)
1051 t4_db_dropped(adapter);
1053 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1055 t4_fatal_err(adapter);
1059 * CIM interrupt handler.
1061 static void cim_intr_handler(struct adapter *adapter)
/* Host-side CIM interrupt causes. */
1063 static const struct intr_info cim_intr_info[] = {
1064 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1065 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1066 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1067 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1068 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1069 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1070 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
/* uP-access CIM interrupt causes (illegal accesses from the embedded uP) */
1073 static const struct intr_info cim_upintr_info[] = {
1074 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1075 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1076 { ILLWRINT, "CIM illegal write", -1, 1 },
1077 { ILLRDINT, "CIM illegal read", -1, 1 },
1078 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1079 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1080 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1081 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1082 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1083 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1084 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1085 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1086 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1087 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1088 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1089 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1090 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1091 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1092 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1093 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1094 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1095 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1096 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1097 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1098 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1099 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1100 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1101 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
/* Process both cause registers; escalate if any fatal condition fired. */
1107 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1109 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1112 t4_fatal_err(adapter);
1116 * ULP RX interrupt handler.
1118 static void ulprx_intr_handler(struct adapter *adapter)
1120 static const struct intr_info ulprx_intr_info[] = {
1121 { 0x1800000, "ULPRX context error", -1, 1 },
1122 { 0x7fffff, "ULPRX parity error", -1, 1 },
1126 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1127 t4_fatal_err(adapter);
1131 * ULP TX interrupt handler.
1133 static void ulptx_intr_handler(struct adapter *adapter)
1135 static const struct intr_info ulptx_intr_info[] = {
1136 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1138 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1140 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1142 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1144 { 0xfffffff, "ULPTX parity error", -1, 1 },
1148 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1149 t4_fatal_err(adapter);
1153 * PM TX interrupt handler.
1155 static void pmtx_intr_handler(struct adapter *adapter)
1157 static const struct intr_info pmtx_intr_info[] = {
1158 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1159 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1160 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1161 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1162 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1163 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1164 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1165 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1166 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1170 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1171 t4_fatal_err(adapter);
1175 * PM RX interrupt handler.
1177 static void pmrx_intr_handler(struct adapter *adapter)
1179 static const struct intr_info pmrx_intr_info[] = {
1180 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1181 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1182 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1183 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1184 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1185 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1189 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1190 t4_fatal_err(adapter);
1194 * CPL switch interrupt handler.
1196 static void cplsw_intr_handler(struct adapter *adapter)
1198 static const struct intr_info cplsw_intr_info[] = {
1199 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1200 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1201 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1202 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1203 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1204 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1208 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1209 t4_fatal_err(adapter);
1213 * LE interrupt handler.
1215 static void le_intr_handler(struct adapter *adap)
1217 static const struct intr_info le_intr_info[] = {
1218 { LIPMISS, "LE LIP miss", -1, 0 },
1219 { LIP0, "LE 0 LIP error", -1, 0 },
1220 { PARITYERR, "LE parity error", -1, 1 },
1221 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1222 { REQQPARERR, "LE request queue parity error", -1, 1 },
1226 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1231 * MPS interrupt handler.
1233 static void mps_intr_handler(struct adapter *adapter)
1235 static const struct intr_info mps_rx_intr_info[] = {
1236 { 0xffffff, "MPS Rx parity error", -1, 1 },
1239 static const struct intr_info mps_tx_intr_info[] = {
1240 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1241 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1242 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1243 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1244 { BUBBLE, "MPS Tx underflow", -1, 1 },
1245 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1246 { FRMERR, "MPS Tx framing error", -1, 1 },
1249 static const struct intr_info mps_trc_intr_info[] = {
1250 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1251 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1252 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1255 static const struct intr_info mps_stat_sram_intr_info[] = {
1256 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1259 static const struct intr_info mps_stat_tx_intr_info[] = {
1260 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1263 static const struct intr_info mps_stat_rx_intr_info[] = {
1264 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1267 static const struct intr_info mps_cls_intr_info[] = {
1268 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1269 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1270 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1276 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1278 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1280 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1281 mps_trc_intr_info) +
1282 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1283 mps_stat_sram_intr_info) +
1284 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1285 mps_stat_tx_intr_info) +
1286 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1287 mps_stat_rx_intr_info) +
1288 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1291 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1292 RXINT | TXINT | STATINT);
1293 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1295 t4_fatal_err(adapter);
1298 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1301 * EDC/MC interrupt handler.
1303 static void mem_intr_handler(struct adapter *adapter, int idx)
1305 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1307 unsigned int addr, cnt_addr, v;
1309 if (idx <= MEM_EDC1) {
1310 addr = EDC_REG(EDC_INT_CAUSE, idx);
1311 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1313 addr = MC_INT_CAUSE;
1314 cnt_addr = MC_ECC_STATUS;
1317 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1318 if (v & PERR_INT_CAUSE)
1319 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1321 if (v & ECC_CE_INT_CAUSE) {
1322 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1324 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1325 if (printk_ratelimit())
1326 dev_warn(adapter->pdev_dev,
1327 "%u %s correctable ECC data error%s\n",
1328 cnt, name[idx], cnt > 1 ? "s" : "");
1330 if (v & ECC_UE_INT_CAUSE)
1331 dev_alert(adapter->pdev_dev,
1332 "%s uncorrectable ECC data error\n", name[idx]);
1334 t4_write_reg(adapter, addr, v);
1335 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1336 t4_fatal_err(adapter);
1340 * MA interrupt handler.
1342 static void ma_intr_handler(struct adapter *adap)
1344 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1346 if (status & MEM_PERR_INT_CAUSE)
1347 dev_alert(adap->pdev_dev,
1348 "MA parity error, parity status %#x\n",
1349 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1350 if (status & MEM_WRAP_INT_CAUSE) {
1351 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1352 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1353 "client %u to address %#x\n",
1354 MEM_WRAP_CLIENT_NUM_GET(v),
1355 MEM_WRAP_ADDRESS_GET(v) << 4);
1357 t4_write_reg(adap, MA_INT_CAUSE, status);
1362 * SMB interrupt handler.
1364 static void smb_intr_handler(struct adapter *adap)
1366 static const struct intr_info smb_intr_info[] = {
1367 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1368 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1369 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1373 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1378 * NC-SI interrupt handler.
1380 static void ncsi_intr_handler(struct adapter *adap)
1382 static const struct intr_info ncsi_intr_info[] = {
1383 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1384 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1385 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1386 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1390 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1395 * XGMAC interrupt handler.
1397 static void xgmac_intr_handler(struct adapter *adap, int port)
1399 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1401 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1405 if (v & TXFIFO_PRTY_ERR)
1406 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1408 if (v & RXFIFO_PRTY_ERR)
1409 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1411 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1416 * PL interrupt handler.
1418 static void pl_intr_handler(struct adapter *adap)
1420 static const struct intr_info pl_intr_info[] = {
1421 { FATALPERR, "T4 fatal parity error", -1, 1 },
1422 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1426 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1430 #define PF_INTR_MASK (PFSW)
1431 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1432 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1433 CPL_SWITCH | SGE | ULP_TX)
1436 * t4_slow_intr_handler - control path interrupt handler
1437 * @adapter: the adapter
1439 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1440 * The designation 'slow' is because it involves register reads, while
1441 * data interrupts typically don't involve any MMIOs.
1443 int t4_slow_intr_handler(struct adapter *adapter)
1445 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1447 if (!(cause & GLBL_INTR_MASK))
1450 cim_intr_handler(adapter);
1452 mps_intr_handler(adapter);
1454 ncsi_intr_handler(adapter);
1456 pl_intr_handler(adapter);
1458 smb_intr_handler(adapter);
1460 xgmac_intr_handler(adapter, 0);
1462 xgmac_intr_handler(adapter, 1);
1463 if (cause & XGMAC_KR0)
1464 xgmac_intr_handler(adapter, 2);
1465 if (cause & XGMAC_KR1)
1466 xgmac_intr_handler(adapter, 3);
1468 pcie_intr_handler(adapter);
1470 mem_intr_handler(adapter, MEM_MC);
1472 mem_intr_handler(adapter, MEM_EDC0);
1474 mem_intr_handler(adapter, MEM_EDC1);
1476 le_intr_handler(adapter);
1478 tp_intr_handler(adapter);
1480 ma_intr_handler(adapter);
1482 pmtx_intr_handler(adapter);
1484 pmrx_intr_handler(adapter);
1486 ulprx_intr_handler(adapter);
1487 if (cause & CPL_SWITCH)
1488 cplsw_intr_handler(adapter);
1490 sge_intr_handler(adapter);
1492 ulptx_intr_handler(adapter);
1494 /* Clear the interrupts just processed for which we are the master. */
1495 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1496 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1501 * t4_intr_enable - enable interrupts
1502 * @adapter: the adapter whose interrupts should be enabled
1504 * Enable PF-specific interrupts for the calling function and the top-level
1505 * interrupt concentrator for global interrupts. Interrupts are already
1506 * enabled at each module, here we just enable the roots of the interrupt
1509 * Note: this function should be called only when the driver manages
1510 * non PF-specific interrupts from the various HW modules. Only one PCI
1511 * function at a time should be doing this.
1513 void t4_intr_enable(struct adapter *adapter)
1515 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1517 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1518 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1519 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1520 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1521 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1522 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1523 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1524 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
1526 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1527 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1531 * t4_intr_disable - disable interrupts
1532 * @adapter: the adapter whose interrupts should be disabled
1534 * Disable interrupts. We only disable the top-level interrupt
1535 * concentrators. The caller must be a PCI function managing global
1538 void t4_intr_disable(struct adapter *adapter)
1540 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1542 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1543 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1547 * hash_mac_addr - return the hash value of a MAC address
1548 * @addr: the 48-bit Ethernet MAC address
1550 * Hashes a MAC address according to the hash function used by HW inexact
1551 * (hash) address matching.
1553 static int hash_mac_addr(const u8 *addr)
1555 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1556 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1564 * t4_config_rss_range - configure a portion of the RSS mapping table
1565 * @adapter: the adapter
1566 * @mbox: mbox to use for the FW command
1567 * @viid: virtual interface whose RSS subtable is to be written
1568 * @start: start entry in the table to write
1569 * @n: how many table entries to write
1570 * @rspq: values for the response queue lookup table
1571 * @nrspq: number of values in @rspq
1573 * Programs the selected part of the VI's RSS mapping table with the
1574 * provided values. If @nrspq < @n the supplied values are used repeatedly
1575 * until the full table range is populated.
1577 * The caller must ensure the values in @rspq are in the range allowed for
1580 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1581 int start, int n, const u16 *rspq, unsigned int nrspq)
1584 const u16 *rsp = rspq;
1585 const u16 *rsp_end = rspq + nrspq;
1586 struct fw_rss_ind_tbl_cmd cmd;
1588 memset(&cmd, 0, sizeof(cmd));
1589 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1590 FW_CMD_REQUEST | FW_CMD_WRITE |
1591 FW_RSS_IND_TBL_CMD_VIID(viid));
1592 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1594 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1596 int nq = min(n, 32);
1597 __be32 *qp = &cmd.iq0_to_iq2;
1599 cmd.niqid = htons(nq);
1600 cmd.startidx = htons(start);
1608 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1609 if (++rsp >= rsp_end)
1611 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1612 if (++rsp >= rsp_end)
1614 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1615 if (++rsp >= rsp_end)
1622 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1630 * t4_config_glbl_rss - configure the global RSS mode
1631 * @adapter: the adapter
1632 * @mbox: mbox to use for the FW command
1633 * @mode: global RSS mode
1634 * @flags: mode-specific flags
1636 * Sets the global RSS mode.
1638 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1641 struct fw_rss_glb_config_cmd c;
1643 memset(&c, 0, sizeof(c));
1644 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1645 FW_CMD_REQUEST | FW_CMD_WRITE);
1646 c.retval_len16 = htonl(FW_LEN16(c));
1647 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1648 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1649 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1650 c.u.basicvirtual.mode_pkd =
1651 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1652 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1655 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1659 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1660 * @adap: the adapter
1661 * @v4: holds the TCP/IP counter values
1662 * @v6: holds the TCP/IPv6 counter values
1664 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1665 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1667 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1668 struct tp_tcp_stats *v6)
1670 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1672 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1673 #define STAT(x) val[STAT_IDX(x)]
1674 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1677 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1678 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1679 v4->tcpOutRsts = STAT(OUT_RST);
1680 v4->tcpInSegs = STAT64(IN_SEG);
1681 v4->tcpOutSegs = STAT64(OUT_SEG);
1682 v4->tcpRetransSegs = STAT64(RXT_SEG);
1685 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1686 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1687 v6->tcpOutRsts = STAT(OUT_RST);
1688 v6->tcpInSegs = STAT64(IN_SEG);
1689 v6->tcpOutSegs = STAT64(OUT_SEG);
1690 v6->tcpRetransSegs = STAT64(RXT_SEG);
1698 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1699 * @adap: the adapter
1700 * @mtus: where to store the MTU values
1701 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1703 * Reads the HW path MTU table.
1705 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1710 for (i = 0; i < NMTUS; ++i) {
1711 t4_write_reg(adap, TP_MTU_TABLE,
1712 MTUINDEX(0xff) | MTUVALUE(i));
1713 v = t4_read_reg(adap, TP_MTU_TABLE);
1714 mtus[i] = MTUVALUE_GET(v);
1716 mtu_log[i] = MTUWIDTH_GET(v);
1721 * init_cong_ctrl - initialize congestion control parameters
1722 * @a: the alpha values for congestion control
1723 * @b: the beta values for congestion control
1725 * Initialize the congestion control parameters.
1727 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1729 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1754 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1757 b[13] = b[14] = b[15] = b[16] = 3;
1758 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1759 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1764 /* The minimum additive increment value for the congestion control table */
1765 #define CC_MIN_INCR 2U
1768 * t4_load_mtus - write the MTU and congestion control HW tables
1769 * @adap: the adapter
1770 * @mtus: the values for the MTU table
1771 * @alpha: the values for the congestion control alpha parameter
1772 * @beta: the values for the congestion control beta parameter
1774 * Write the HW MTU table with the supplied MTUs and the high-speed
1775 * congestion control table with the supplied alpha, beta, and MTUs.
1776 * We write the two tables together because the additive increments
1777 * depend on the MTUs.
1779 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1780 const unsigned short *alpha, const unsigned short *beta)
1782 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1783 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1784 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1785 28672, 40960, 57344, 81920, 114688, 163840, 229376
1790 for (i = 0; i < NMTUS; ++i) {
1791 unsigned int mtu = mtus[i];
1792 unsigned int log2 = fls(mtu);
1794 if (!(mtu & ((1 << log2) >> 2))) /* round */
1796 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1797 MTUWIDTH(log2) | MTUVALUE(mtu));
1799 for (w = 0; w < NCCTRL_WIN; ++w) {
1802 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1805 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1806 (w << 16) | (beta[w] << 13) | inc);
1812 * get_mps_bg_map - return the buffer groups associated with a port
1813 * @adap: the adapter
1814 * @idx: the port index
1816 * Returns a bitmap indicating which MPS buffer groups are associated
1817 * with the given port. Bit i is set if buffer group i is used by the
1820 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
1822 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
1825 return idx == 0 ? 0xf : 0;
1827 return idx < 2 ? (3 << (2 * idx)) : 0;
1832 * t4_get_port_stats - collect port statistics
1833 * @adap: the adapter
1834 * @idx: the port index
1835 * @p: the stats structure to fill
1837 * Collect statistics related to the given port from HW.
1839 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
1841 u32 bgmap = get_mps_bg_map(adap, idx);
1843 #define GET_STAT(name) \
1844 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
1845 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
1847 p->tx_octets = GET_STAT(TX_PORT_BYTES);
1848 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
1849 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
1850 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
1851 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
1852 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
1853 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
1854 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
1855 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
1856 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
1857 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
1858 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
1859 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
1860 p->tx_drop = GET_STAT(TX_PORT_DROP);
1861 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
1862 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
1863 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
1864 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
1865 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
1866 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
1867 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
1868 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
1869 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
1871 p->rx_octets = GET_STAT(RX_PORT_BYTES);
1872 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
1873 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
1874 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
1875 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
1876 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
1877 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
1878 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
1879 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
1880 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
1881 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
1882 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
1883 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
1884 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
1885 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
1886 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
1887 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
1888 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
1889 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
1890 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
1891 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
1892 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
1893 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
1894 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
1895 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
1896 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
1897 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
1899 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
1900 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
1901 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
1902 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
1903 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
1904 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
1905 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
1906 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
1913 * t4_wol_magic_enable - enable/disable magic packet WoL
1914 * @adap: the adapter
1915 * @port: the physical port index
1916 * @addr: MAC address expected in magic packets, %NULL to disable
1918 * Enables/disables magic packet wake-on-LAN for the selected port.
1920 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1924 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
1925 (addr[2] << 24) | (addr[3] << 16) |
1926 (addr[4] << 8) | addr[5]);
1927 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
1928 (addr[0] << 8) | addr[1]);
1930 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
1931 addr ? MAGICEN : 0);
1935 * t4_wol_pat_enable - enable/disable pattern-based WoL
1936 * @adap: the adapter
1937 * @port: the physical port index
1938 * @map: bitmap of which HW pattern filters to set
1939 * @mask0: byte mask for bytes 0-63 of a packet
1940 * @mask1: byte mask for bytes 64-127 of a packet
1941 * @crc: Ethernet CRC for selected bytes
1942 * @enable: enable/disable switch
1944 * Sets the pattern filters indicated in @map to mask out the bytes
1945 * specified in @mask0/@mask1 in received packets and compare the CRC of
1946 * the resulting packet against @crc. If @enable is %true pattern-based
1947 * WoL is enabled, otherwise disabled.
1949 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1950 u64 mask0, u64 mask1, unsigned int crc, bool enable)
1955 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
1962 #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
1964 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
1965 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
1966 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
1968 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
1972 /* write byte masks */
1973 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
1974 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
1975 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1976 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1980 t4_write_reg(adap, EPIO_REG(DATA0), crc);
1981 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
1982 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1983 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1988 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
/* Initialize the header of a zero-length FW command: opcode, request and
 * read/write flags, and the length-in-16-byte-units field.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
1999 * t4_mdio_rd - read a PHY register through MDIO
2000 * @adap: the adapter
2001 * @mbox: mailbox to use for the FW command
2002 * @phy_addr: the PHY address
2003 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2004 * @reg: the register to read
2005 * @valp: where to store the value
2007 * Issues a FW command through the given mailbox to read a PHY register.
2009 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2010 unsigned int mmd, unsigned int reg, u16 *valp)
2013 struct fw_ldst_cmd c;
2015 memset(&c, 0, sizeof(c));
2016 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2017 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2018 c.cycles_to_len16 = htonl(FW_LEN16(c));
2019 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2020 FW_LDST_CMD_MMD(mmd));
2021 c.u.mdio.raddr = htons(reg);
2023 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2025 *valp = ntohs(c.u.mdio.rval);
2030 * t4_mdio_wr - write a PHY register through MDIO
2031 * @adap: the adapter
2032 * @mbox: mailbox to use for the FW command
2033 * @phy_addr: the PHY address
2034 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2035 * @reg: the register to write
2036 * @valp: value to write
2038 * Issues a FW command through the given mailbox to write a PHY register.
2040 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2041 unsigned int mmd, unsigned int reg, u16 val)
2043 struct fw_ldst_cmd c;
2045 memset(&c, 0, sizeof(c));
2046 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2047 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2048 c.cycles_to_len16 = htonl(FW_LEN16(c));
2049 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2050 FW_LDST_CMD_MMD(mmd));
2051 c.u.mdio.raddr = htons(reg);
2052 c.u.mdio.rval = htons(val);
2054 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2058 * t4_fw_hello - establish communication with FW
2059 * @adap: the adapter
2060 * @mbox: mailbox to use for the FW command
2061 * @evt_mbox: mailbox to receive async FW events
2062 * @master: specifies the caller's willingness to be the device master
2063 * @state: returns the current device state
2065 * Issues a command to establish communication with FW.
2067 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2068 enum dev_master master, enum dev_state *state)
2071 struct fw_hello_cmd c;
2073 INIT_CMD(c, HELLO, WRITE);
2074 c.err_to_mbasyncnot = htonl(
2075 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2076 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2077 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2078 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2080 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2081 if (ret == 0 && state) {
2082 u32 v = ntohl(c.err_to_mbasyncnot);
2083 if (v & FW_HELLO_CMD_INIT)
2084 *state = DEV_STATE_INIT;
2085 else if (v & FW_HELLO_CMD_ERR)
2086 *state = DEV_STATE_ERR;
2088 *state = DEV_STATE_UNINIT;
2094 * t4_fw_bye - end communication with FW
2095 * @adap: the adapter
2096 * @mbox: mailbox to use for the FW command
2098 * Issues a command to terminate communication with FW.
2100 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2102 struct fw_bye_cmd c;
2104 INIT_CMD(c, BYE, WRITE);
2105 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2109 * t4_init_cmd - ask FW to initialize the device
2110 * @adap: the adapter
2111 * @mbox: mailbox to use for the FW command
2113 * Issues a command to FW to partially initialize the device. This
2114 * performs initialization that generally doesn't depend on user input.
2116 int t4_early_init(struct adapter *adap, unsigned int mbox)
2118 struct fw_initialize_cmd c;
2120 INIT_CMD(c, INITIALIZE, WRITE);
2121 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2125 * t4_fw_reset - issue a reset to FW
2126 * @adap: the adapter
2127 * @mbox: mailbox to use for the FW command
2128 * @reset: specifies the type of reset to perform
2130 * Issues a reset command of the specified type to FW.
2132 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2134 struct fw_reset_cmd c;
2136 INIT_CMD(c, RESET, WRITE);
2137 c.val = htonl(reset);
2138 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2142 * t4_query_params - query FW or device parameters
2143 * @adap: the adapter
2144 * @mbox: mailbox to use for the FW command
2147 * @nparams: the number of parameters
2148 * @params: the parameter names
2149 * @val: the parameter values
2151 * Reads the value of FW or device parameters. Up to 7 parameters can be
2154 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2155 unsigned int vf, unsigned int nparams, const u32 *params,
2159 struct fw_params_cmd c;
2160 __be32 *p = &c.param[0].mnem;
2165 memset(&c, 0, sizeof(c));
2166 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2167 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2168 FW_PARAMS_CMD_VFN(vf));
2169 c.retval_len16 = htonl(FW_LEN16(c));
2170 for (i = 0; i < nparams; i++, p += 2)
2171 *p = htonl(*params++);
2173 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2175 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2181 * t4_set_params - sets FW or device parameters
2182 * @adap: the adapter
2183 * @mbox: mailbox to use for the FW command
2186 * @nparams: the number of parameters
2187 * @params: the parameter names
2188 * @val: the parameter values
2190 * Sets the value of FW or device parameters. Up to 7 parameters can be
2191 * specified at once.
2193 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2194 unsigned int vf, unsigned int nparams, const u32 *params,
2197 struct fw_params_cmd c;
2198 __be32 *p = &c.param[0].mnem;
2203 memset(&c, 0, sizeof(c));
2204 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2205 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2206 FW_PARAMS_CMD_VFN(vf));
2207 c.retval_len16 = htonl(FW_LEN16(c));
2209 *p++ = htonl(*params++);
2210 *p++ = htonl(*val++);
2213 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2217 * t4_cfg_pfvf - configure PF/VF resource limits
2218 * @adap: the adapter
2219 * @mbox: mailbox to use for the FW command
2220 * @pf: the PF being configured
2221 * @vf: the VF being configured
2222 * @txq: the max number of egress queues
2223 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2224 * @rxqi: the max number of interrupt-capable ingress queues
2225 * @rxq: the max number of interruptless ingress queues
2226 * @tc: the PCI traffic class
2227 * @vi: the max number of virtual interfaces
2228 * @cmask: the channel access rights mask for the PF/VF
2229 * @pmask: the port access rights mask for the PF/VF
2230 * @nexact: the maximum number of exact MPS filters
2231 * @rcaps: read capabilities
2232 * @wxcaps: write/execute capabilities
2234 * Configures resource limits and capabilities for a physical or virtual
2237 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2238 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2239 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2240 unsigned int vi, unsigned int cmask, unsigned int pmask,
2241 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2243 struct fw_pfvf_cmd c;
2245 memset(&c, 0, sizeof(c));
2246 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2247 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2248 FW_PFVF_CMD_VFN(vf));
2249 c.retval_len16 = htonl(FW_LEN16(c));
2250 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2251 FW_PFVF_CMD_NIQ(rxq));
2252 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2253 FW_PFVF_CMD_PMASK(pmask) |
2254 FW_PFVF_CMD_NEQ(txq));
2255 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2256 FW_PFVF_CMD_NEXACTF(nexact));
2257 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2258 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2259 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2260 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2264 * t4_alloc_vi - allocate a virtual interface
2265 * @adap: the adapter
2266 * @mbox: mailbox to use for the FW command
2267 * @port: physical port associated with the VI
2268 * @pf: the PF owning the VI
2269 * @vf: the VF owning the VI
2270 * @nmac: number of MAC addresses needed (1 to 5)
2271 * @mac: the MAC addresses of the VI
2272 * @rss_size: size of RSS table slice associated with this VI
2274 * Allocates a virtual interface for the given physical port. If @mac is
2275 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2276 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2277 * stored consecutively so the space needed is @nmac * 6 bytes.
2278 * Returns a negative error number or the non-negative VI id.
2280 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2281 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2282 unsigned int *rss_size)
2287 memset(&c, 0, sizeof(c));
2288 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2289 FW_CMD_WRITE | FW_CMD_EXEC |
2290 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2291 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2292 c.portid_pkd = FW_VI_CMD_PORTID(port);
2295 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2300 memcpy(mac, c.mac, sizeof(c.mac));
2303 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2305 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2307 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2309 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2313 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2314 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
2318 * t4_set_rxmode - set Rx properties of a virtual interface
2319 * @adap: the adapter
2320 * @mbox: mailbox to use for the FW command
2322 * @mtu: the new MTU or -1
2323 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2324 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2325 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2326 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
2327 * @sleep_ok: if true we may sleep while awaiting command completion
2329 * Sets Rx properties of a virtual interface.
2331 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2332 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2335 struct fw_vi_rxmode_cmd c;
2337 /* convert to FW values */
2339 mtu = FW_RXMODE_MTU_NO_CHG;
2341 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2343 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2345 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2347 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
2349 memset(&c, 0, sizeof(c));
2350 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2351 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2352 c.retval_len16 = htonl(FW_LEN16(c));
2353 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2354 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2355 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2356 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2357 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2358 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2362 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2363 * @adap: the adapter
2364 * @mbox: mailbox to use for the FW command
2366 * @free: if true any existing filters for this VI id are first removed
2367 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2368 * @addr: the MAC address(es)
2369 * @idx: where to store the index of each allocated filter
2370 * @hash: pointer to hash address filter bitmap
2371 * @sleep_ok: call is allowed to sleep
2373 * Allocates an exact-match filter for each of the supplied addresses and
2374 * sets it to the corresponding address. If @idx is not %NULL it should
2375 * have at least @naddr entries, each of which will be set to the index of
2376 * the filter allocated for the corresponding MAC address. If a filter
2377 * could not be allocated for an address its index is set to 0xffff.
2378 * If @hash is not %NULL addresses that fail to allocate an exact filter
2379 * are hashed and update the hash filter bitmap pointed at by @hash.
2381 * Returns a negative error number or the number of filters allocated.
2383 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2384 unsigned int viid, bool free, unsigned int naddr,
2385 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2388 struct fw_vi_mac_cmd c;
2389 struct fw_vi_mac_exact *p;
2394 memset(&c, 0, sizeof(c));
2395 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2396 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2397 FW_VI_MAC_CMD_VIID(viid));
2398 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2399 FW_CMD_LEN16((naddr + 2) / 2));
2401 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2402 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2403 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2404 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2407 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2411 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2412 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2415 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2416 if (index < NEXACT_MAC)
2419 *hash |= (1ULL << hash_mac_addr(addr[i]));
2425 * t4_change_mac - modifies the exact-match filter for a MAC address
2426 * @adap: the adapter
2427 * @mbox: mailbox to use for the FW command
2429 * @idx: index of existing filter for old value of MAC address, or -1
2430 * @addr: the new MAC address value
2431 * @persist: whether a new MAC allocation should be persistent
2432 * @add_smt: if true also add the address to the HW SMT
2434 * Modifies an exact-match filter and sets it to the new MAC address.
2435 * Note that in general it is not possible to modify the value of a given
2436 * filter so the generic way to modify an address filter is to free the one
2437 * being used by the old address value and allocate a new filter for the
2438 * new address value. @idx can be -1 if the address is a new addition.
2440 * Returns a negative error number or the index of the filter with the new
2443 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2444 int idx, const u8 *addr, bool persist, bool add_smt)
2447 struct fw_vi_mac_cmd c;
2448 struct fw_vi_mac_exact *p = c.u.exact;
2450 if (idx < 0) /* new allocation */
2451 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2452 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2454 memset(&c, 0, sizeof(c));
2455 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2456 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2457 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2458 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2459 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2460 FW_VI_MAC_CMD_IDX(idx));
2461 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2463 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2465 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2466 if (ret >= NEXACT_MAC)
2473 * t4_set_addr_hash - program the MAC inexact-match hash filter
2474 * @adap: the adapter
2475 * @mbox: mailbox to use for the FW command
2477 * @ucast: whether the hash filter should also match unicast addresses
2478 * @vec: the value to be written to the hash filter
2479 * @sleep_ok: call is allowed to sleep
2481 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2483 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2484 bool ucast, u64 vec, bool sleep_ok)
2486 struct fw_vi_mac_cmd c;
2488 memset(&c, 0, sizeof(c));
2489 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2490 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2491 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2492 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2494 c.u.hash.hashvec = cpu_to_be64(vec);
2495 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2499 * t4_enable_vi - enable/disable a virtual interface
2500 * @adap: the adapter
2501 * @mbox: mailbox to use for the FW command
2503 * @rx_en: 1=enable Rx, 0=disable Rx
2504 * @tx_en: 1=enable Tx, 0=disable Tx
2506 * Enables/disables a virtual interface.
2508 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2509 bool rx_en, bool tx_en)
2511 struct fw_vi_enable_cmd c;
2513 memset(&c, 0, sizeof(c));
2514 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2515 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2516 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2517 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2518 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2522 * t4_identify_port - identify a VI's port by blinking its LED
2523 * @adap: the adapter
2524 * @mbox: mailbox to use for the FW command
2526 * @nblinks: how many times to blink LED at 2.5 Hz
2528 * Identifies a VI's port by blinking its LED.
2530 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2531 unsigned int nblinks)
2533 struct fw_vi_enable_cmd c;
2535 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2536 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2537 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2538 c.blinkdur = htons(nblinks);
2539 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2543 * t4_iq_free - free an ingress queue and its FLs
2544 * @adap: the adapter
2545 * @mbox: mailbox to use for the FW command
2546 * @pf: the PF owning the queues
2547 * @vf: the VF owning the queues
2548 * @iqtype: the ingress queue type
2549 * @iqid: ingress queue id
2550 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2551 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2553 * Frees an ingress queue and its associated FLs, if any.
2555 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2556 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2557 unsigned int fl0id, unsigned int fl1id)
2561 memset(&c, 0, sizeof(c));
2562 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2563 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2565 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2566 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2567 c.iqid = htons(iqid);
2568 c.fl0id = htons(fl0id);
2569 c.fl1id = htons(fl1id);
2570 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2574 * t4_eth_eq_free - free an Ethernet egress queue
2575 * @adap: the adapter
2576 * @mbox: mailbox to use for the FW command
2577 * @pf: the PF owning the queue
2578 * @vf: the VF owning the queue
2579 * @eqid: egress queue id
2581 * Frees an Ethernet egress queue.
2583 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2584 unsigned int vf, unsigned int eqid)
2586 struct fw_eq_eth_cmd c;
2588 memset(&c, 0, sizeof(c));
2589 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2590 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2591 FW_EQ_ETH_CMD_VFN(vf));
2592 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2593 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2594 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2598 * t4_ctrl_eq_free - free a control egress queue
2599 * @adap: the adapter
2600 * @mbox: mailbox to use for the FW command
2601 * @pf: the PF owning the queue
2602 * @vf: the VF owning the queue
2603 * @eqid: egress queue id
2605 * Frees a control egress queue.
2607 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2608 unsigned int vf, unsigned int eqid)
2610 struct fw_eq_ctrl_cmd c;
2612 memset(&c, 0, sizeof(c));
2613 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2614 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2615 FW_EQ_CTRL_CMD_VFN(vf));
2616 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2617 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2618 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2622 * t4_ofld_eq_free - free an offload egress queue
2623 * @adap: the adapter
2624 * @mbox: mailbox to use for the FW command
2625 * @pf: the PF owning the queue
2626 * @vf: the VF owning the queue
2627 * @eqid: egress queue id
2629 * Frees a control egress queue.
2631 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2632 unsigned int vf, unsigned int eqid)
2634 struct fw_eq_ofld_cmd c;
2636 memset(&c, 0, sizeof(c));
2637 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2638 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2639 FW_EQ_OFLD_CMD_VFN(vf));
2640 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2641 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2642 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2646 * t4_handle_fw_rpl - process a FW reply message
2647 * @adap: the adapter
2648 * @rpl: start of the FW message
2650 * Processes a FW message, such as link state change messages.
2652 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2654 u8 opcode = *(const u8 *)rpl;
2656 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2657 int speed = 0, fc = 0;
2658 const struct fw_port_cmd *p = (void *)rpl;
2659 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2660 int port = adap->chan_map[chan];
2661 struct port_info *pi = adap2pinfo(adap, port);
2662 struct link_config *lc = &pi->link_cfg;
2663 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2664 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2665 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2667 if (stat & FW_PORT_CMD_RXPAUSE)
2669 if (stat & FW_PORT_CMD_TXPAUSE)
2671 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2673 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2675 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2676 speed = SPEED_10000;
2678 if (link_ok != lc->link_ok || speed != lc->speed ||
2679 fc != lc->fc) { /* something changed */
2680 lc->link_ok = link_ok;
2683 t4_os_link_changed(adap, port, link_ok);
2685 if (mod != pi->mod_type) {
2687 t4_os_portmod_changed(adap, port);
2693 static void __devinit get_pci_mode(struct adapter *adapter,
2694 struct pci_params *p)
2697 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
2700 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
2702 p->speed = val & PCI_EXP_LNKSTA_CLS;
2703 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
2708 * init_link_config - initialize a link's SW state
2709 * @lc: structure holding the link state
2710 * @caps: link capabilities
2712 * Initializes the SW state maintained for each link, including the link's
2713 * capabilities and default speed/flow-control/autonegotiation settings.
2715 static void __devinit init_link_config(struct link_config *lc,
2718 lc->supported = caps;
2719 lc->requested_speed = 0;
2721 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
2722 if (lc->supported & FW_PORT_CAP_ANEG) {
2723 lc->advertising = lc->supported & ADVERT_MASK;
2724 lc->autoneg = AUTONEG_ENABLE;
2725 lc->requested_fc |= PAUSE_AUTONEG;
2727 lc->advertising = 0;
2728 lc->autoneg = AUTONEG_DISABLE;
2732 int t4_wait_dev_ready(struct adapter *adap)
2734 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
2737 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
2740 static int __devinit get_flash_params(struct adapter *adap)
2745 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
2747 ret = sf1_read(adap, 3, 0, 1, &info);
2748 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
2752 if ((info & 0xff) != 0x20) /* not a Numonix flash */
2754 info >>= 16; /* log2 of size */
2755 if (info >= 0x14 && info < 0x18)
2756 adap->params.sf_nsec = 1 << (info - 16);
2757 else if (info == 0x18)
2758 adap->params.sf_nsec = 64;
2761 adap->params.sf_size = 1 << info;
2762 adap->params.sf_fw_start =
2763 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
2768 * t4_prep_adapter - prepare SW and HW for operation
2769 * @adapter: the adapter
2770 * @reset: if true perform a HW reset
2772 * Initialize adapter SW state for the various HW modules, set initial
2773 * values for some adapter tunables, take PHYs out of reset, and
2774 * initialize the MDIO interface.
2776 int __devinit t4_prep_adapter(struct adapter *adapter)
2780 ret = t4_wait_dev_ready(adapter);
2784 get_pci_mode(adapter, &adapter->params.pci);
2785 adapter->params.rev = t4_read_reg(adapter, PL_REV);
2787 ret = get_flash_params(adapter);
2789 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
2793 ret = get_vpd_params(adapter, &adapter->params.vpd);
2797 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2800 * Default port for debugging in case we can't reach FW.
2802 adapter->params.nports = 1;
2803 adapter->params.portvec = 1;
2807 int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
2811 struct fw_port_cmd c;
2812 struct fw_rss_vi_config_cmd rvc;
2814 memset(&c, 0, sizeof(c));
2815 memset(&rvc, 0, sizeof(rvc));
2817 for_each_port(adap, i) {
2818 unsigned int rss_size;
2819 struct port_info *p = adap2pinfo(adap, i);
2821 while ((adap->params.portvec & (1 << j)) == 0)
2824 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
2825 FW_CMD_REQUEST | FW_CMD_READ |
2826 FW_PORT_CMD_PORTID(j));
2827 c.action_to_len16 = htonl(
2828 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
2830 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2834 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
2841 p->rss_size = rss_size;
2842 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
2843 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
2844 adap->port[i]->dev_id = j;
2846 ret = ntohl(c.u.info.lstatus_to_modtype);
2847 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
2848 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
2849 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
2850 p->mod_type = FW_PORT_MOD_TYPE_NA;
2852 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2853 FW_CMD_REQUEST | FW_CMD_READ |
2854 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
2855 rvc.retval_len16 = htonl(FW_LEN16(rvc));
2856 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
2859 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
2861 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));