3 * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
5 * Copyright © 2005 Agere Systems Inc.
9 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
11 *------------------------------------------------------------------------------
15 * This software is provided subject to the following terms and conditions,
16 * which you should read carefully before using the software. Using this
17 * software indicates your acceptance of these terms and conditions. If you do
18 * not agree with these terms and conditions, do not use the software.
20 * Copyright © 2005 Agere Systems Inc.
21 * All rights reserved.
23 * Redistribution and use in source or binary forms, with or without
24 * modifications, are permitted provided that the following conditions are met:
26 * . Redistributions of source code must retain the above copyright notice, this
27 * list of conditions and the following Disclaimer as comments in the code as
28 * well as in the documentation and/or other materials provided with the
31 * . Redistributions in binary form must reproduce the above copyright notice,
32 * this list of conditions and the following Disclaimer in the documentation
33 * and/or other materials provided with the distribution.
35 * . Neither the name of Agere Systems Inc. nor the names of the contributors
36 * may be used to endorse or promote products derived from this software
37 * without specific prior written permission.
41 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
42 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
44 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
45 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
46 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
47 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
48 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
49 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
51 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
56 #include <linux/pci.h>
57 #include <linux/init.h>
58 #include <linux/module.h>
59 #include <linux/types.h>
60 #include <linux/kernel.h>
62 #include <linux/sched.h>
63 #include <linux/ptrace.h>
64 #include <linux/slab.h>
65 #include <linux/ctype.h>
66 #include <linux/string.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
70 #include <linux/delay.h>
71 #include <linux/bitops.h>
73 #include <asm/system.h>
75 #include <linux/netdevice.h>
76 #include <linux/etherdevice.h>
77 #include <linux/skbuff.h>
78 #include <linux/if_arp.h>
79 #include <linux/ioport.h>
80 #include <linux/crc32.h>
81 #include <linux/random.h>
82 #include <linux/phy.h>
86 MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
87 MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
88 MODULE_LICENSE("Dual BSD/GPL");
89 MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
90 "for the ET1310 by Agere Systems");
93 #define MAX_NUM_REGISTER_POLLS 1000
94 #define MAX_NUM_WRITE_RETRIES 2
97 #define COUNTER_WRAP_16_BIT 0x10000
98 #define COUNTER_WRAP_12_BIT 0x1000
101 #define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */
102 #define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */
106 * For interrupts, normal running is:
107 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
108 * watchdog_interrupt & txdma_xfer_done
110 * In both cases, when flow control is enabled for either Tx or bi-direction,
111 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
112 * buffer rings are running low.
114 #define INT_MASK_DISABLE 0xffffffff
116 /* NOTE: Masking out MAC_STAT Interrupt for now...
117 * #define INT_MASK_ENABLE 0xfff6bf17
118 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7
120 #define INT_MASK_ENABLE 0xfffebf17
121 #define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
123 /* General defines */
124 /* Packet and header sizes */
125 #define NIC_MIN_PACKET_SIZE 60
127 /* Multicast list size */
128 #define NIC_MAX_MCAST_LIST 128
130 /* Supported Filters */
131 #define ET131X_PACKET_TYPE_DIRECTED 0x0001
132 #define ET131X_PACKET_TYPE_MULTICAST 0x0002
133 #define ET131X_PACKET_TYPE_BROADCAST 0x0004
134 #define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
135 #define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
138 #define ET131X_TX_TIMEOUT (1 * HZ)
139 #define NIC_SEND_HANG_THRESHOLD 0
142 #define fMP_DEST_MULTI 0x00000001
143 #define fMP_DEST_BROAD 0x00000002
145 /* MP_ADAPTER flags */
146 #define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004
147 #define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
149 /* MP_SHARED flags */
150 #define fMP_ADAPTER_LOWER_POWER 0x00200000
152 #define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
153 #define fMP_ADAPTER_HARDWARE_ERROR 0x04000000
155 #define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
157 /* Some offsets in PCI config space that are actually used. */
158 #define ET1310_PCI_MAX_PYLD 0x4C
159 #define ET1310_PCI_MAC_ADDRESS 0xA4
160 #define ET1310_PCI_EEPROM_STATUS 0xB2
161 #define ET1310_PCI_ACK_NACK 0xC0
162 #define ET1310_PCI_REPLAY 0xC2
163 #define ET1310_PCI_L0L1LATENCY 0xCF
165 /* PCI Product IDs */
166 #define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */
167 #define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */
169 /* Define order of magnitude converter */
170 #define NANO_IN_A_MICRO 1000
172 #define PARM_RX_NUM_BUFS_DEF 4
173 #define PARM_RX_TIME_INT_DEF 10
174 #define PARM_RX_MEM_END_DEF 0x2bc
175 #define PARM_TX_TIME_INT_DEF 40
176 #define PARM_TX_NUM_BUFS_DEF 4
177 #define PARM_DMA_CACHE_DEF 0
182 #define FBR_CHUNKS 32
184 #define MAX_DESC_PER_RING_RX 1024
186 /* number of RFDs - default and min */
188 #define RFD_LOW_WATER_MARK 40
189 #define NIC_DEFAULT_NUM_RFD 1024
192 #define RFD_LOW_WATER_MARK 20
193 #define NIC_DEFAULT_NUM_RFD 256
197 #define NIC_MIN_NUM_RFD 64
199 #define NUM_PACKETS_HANDLED 256
201 #define ALCATEL_MULTICAST_PKT 0x01000000
202 #define ALCATEL_BROADCAST_PKT 0x02000000
204 /* typedefs for Free Buffer Descriptors */
208 u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */
211 /* Packet Status Ring Descriptors
215 * top 16 bits are from the Alcatel Status Word as enumerated in
216 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
219 * 1: ipa IP checksum assist
220 * 2: ipp IP checksum pass
221 * 3: tcpa TCP checksum assist
222 * 4: tcpp TCP checksum pass
224 * 6: rxmac_error RXMAC Error Indicator
225 * 7: drop Drop packet
226 * 8: ft Frame Truncated
230 * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
231 * 17: asw_RX_DV_event short receive event detected
232 * 18: asw_false_carrier_event bad carrier since last good packet
233 * 19: asw_code_err one or more nibbles signalled as errors
234 * 20: asw_CRC_err CRC error
235 * 21: asw_len_chk_err frame length field incorrect
236 * 22: asw_too_long frame length > 1518 bytes
237 * 23: asw_OK valid CRC + no code error
238 * 24: asw_multicast has a multicast address
239 * 25: asw_broadcast has a broadcast address
240 * 26: asw_dribble_nibble spurious bits after EOP
241 * 27: asw_control_frame is a control frame
242 * 28: asw_pause_frame is a pause frame
243 * 29: asw_unsupported_op unsupported OP code
244 * 30: asw_VLAN_tag VLAN tag detected
245 * 31: asw_long_evt Rx long event
248 * 0-15: length length in bytes
249 * 16-25: bi Buffer Index
250 * 26-27: ri Ring Index
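 *
 * Illustrative sketch (not part of the original driver): with the word 1
 * layout above, the fields could be pulled out as below; the helper name
 * and its placement here are hypothetical.
 */
static inline void pkt_stat_word1_decode(u32 word1, u32 *len, u32 *bi, u32 *ri)
{
	*len = word1 & 0xFFFF;		/* bits 0-15: length in bytes */
	*bi  = (word1 >> 16) & 0x3FF;	/* bits 16-25: buffer index */
	*ri  = (word1 >> 26) & 0x3;	/* bits 26-27: ring index */
}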
254 struct pkt_stat_desc {
259 /* Typedefs for the RX DMA status word */
262 * rx status word 0 holds part of the status bits of the Rx DMA engine
263 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
264 * which contains the Free Buffer ring 0 and 1 available offset.
266 * bit 0-9 FBR1 offset
267 * bit 10 Wrap flag for FBR1
268 * bit 16-25 FBR0 offset
269 * bit 26 Wrap flag for FBR0
273 * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
274 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
275 * which contains the Packet Status Ring available offset.
278 * bit 16-27 PSRoffset
284 * struct rx_status_block is a structure representing the status of the Rx
285 * DMA engine; it sits in free memory and is pointed to by 0x101c / 0x1020
287 struct rx_status_block {
293 * Structure for look-up table holding free buffer ring pointers, addresses
297 void *virt[MAX_DESC_PER_RING_RX];
298 void *buffer1[MAX_DESC_PER_RING_RX];
299 void *buffer2[MAX_DESC_PER_RING_RX];
300 u32 bus_high[MAX_DESC_PER_RING_RX];
301 u32 bus_low[MAX_DESC_PER_RING_RX];
303 dma_addr_t ring_physaddr;
304 void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
305 dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
306 uint64_t real_physaddr;
314 * struct rx_ring is the structure representing the adapter's local
315 * reference(s) to the rings
317 ******************************************************************************
318 * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
319 * and index 1 to refer to FBR0
320 ******************************************************************************
323 struct fbr_lookup *fbr[NUM_FBRS];
324 void *ps_ring_virtaddr;
325 dma_addr_t ps_ring_physaddr;
329 struct rx_status_block *rx_status_block;
330 dma_addr_t rx_status_bus;
333 struct list_head recv_list;
338 bool unfinished_receives;
340 /* lookaside lists */
341 struct kmem_cache *recv_lookaside;
346 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
348 * 0-15: length of packet
351 * 29-31: VLAN priority
353 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
355 * 0: last packet in the sequence
356 * 1: first packet in the sequence
357 * 2: interrupt the processor when this pkt sent
358 * 3: Control word - no packet data
359 * 4: Issue half-duplex backpressure : XON/XOFF
360 * 5: send pause frame
361 * 6: Tx frame has error
365 * 10: Packet is a Huge packet
366 * 11: append VLAN tag
367 * 12: IP checksum assist
368 * 13: TCP checksum assist
369 * 14: UDP checksum assist
372 /* struct tx_desc represents each descriptor on the ring */
376 u32 len_vlan; /* control words how to xmit the */
377 u32 flags; /* data (detailed above) */
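/* Illustrative sketch only (not part of the original driver): for a frame
 * that fits in a single descriptor, the control words described above could
 * be filled roughly as below. The helper name and the chosen flag bits are
 * hypothetical examples of the documented layout.
 */
static inline void tx_desc_fill_example(struct tx_desc *desc, u32 len)
{
	desc->len_vlan = len & 0xFFFF;	/* word 2, bits 0-15: packet length */
	desc->flags = (1 << 0) |	/* bit 0: last packet in the sequence */
		      (1 << 1) |	/* bit 1: first packet in the sequence */
		      (1 << 2);		/* bit 2: interrupt processor when sent */
}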
381 * The status of the Tx DMA engine; it sits in free memory and is pointed to
382 * by 0x101c / 0x1020. This is a DMA10 type
385 /* TCB (Transmit Control Block: Host Side) */
387 struct tcb *next; /* Next entry in ring */
388 u32 flags; /* Our flags for the packet */
389 u32 count; /* Used to spot stuck/lost packets */
390 u32 stale; /* Used to spot stuck/lost packets */
391 struct sk_buff *skb; /* Network skb we are tied to */
392 u32 index; /* Ring indexes */
396 /* Structure representing our local reference(s) to the ring */
398 /* TCB (Transmit Control Block) memory and lists */
399 struct tcb *tcb_ring;
401 /* List of TCBs that are ready to be used */
402 struct tcb *tcb_qhead;
403 struct tcb *tcb_qtail;
405 /* list of TCBs that are currently being sent. NOTE that access to all
406 * three of these (including used) is controlled via the
407 * TCBSendQLock. This lock should be secured prior to incrementing /
408 * decrementing used, or any queue manipulation on send_head /
411 struct tcb *send_head;
412 struct tcb *send_tail;
415 /* The actual descriptor ring */
416 struct tx_desc *tx_desc_ring;
417 dma_addr_t tx_desc_ring_pa;
419 /* send_idx indicates where we last wrote to in the descriptor ring. */
422 /* The location of the write-back status block */
424 dma_addr_t tx_status_pa;
426 /* Packets since the last IRQ: used for interrupt coalescing */
430 /* ADAPTER defines */
432 * Do not change these values: if changed, then change also in respective
433 * TXdma and Rxdma engines
435 #define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
439 * These values are all superseded by registry entries to facilitate tuning.
440 * Once the desired performance has been achieved, the optimal registry values
441 * should be re-populated to these #defines:
443 #define TX_ERROR_PERIOD 1000
445 #define LO_MARK_PERCENT_FOR_PSR 15
446 #define LO_MARK_PERCENT_FOR_RX 15
448 /* RFD (Receive Frame Descriptor) */
450 struct list_head list_node;
452 u32 len; /* total size of receive frame */
459 #define FLOW_TXONLY 1
460 #define FLOW_RXONLY 2
463 /* Struct to define some device statistics */
467 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
468 * MUST have 32, then we'll need another way to perform atomic
471 u32 unicast_pkts_rcvd;
472 atomic_t unicast_pkts_xmtd;
473 u32 multicast_pkts_rcvd;
474 atomic_t multicast_pkts_xmtd;
475 u32 broadcast_pkts_rcvd;
476 atomic_t broadcast_pkts_xmtd;
477 u32 rcvd_pkts_dropped;
483 u32 tx_excessive_collisions;
484 u32 tx_first_collisions;
485 u32 tx_late_collisions;
495 u32 rx_code_violations;
498 u32 synchronous_iterations;
499 u32 interrupt_status;
502 /* The private adapter structure */
503 struct et131x_adapter {
504 struct net_device *netdev;
505 struct pci_dev *pdev;
506 struct mii_bus *mii_bus;
507 struct phy_device *phydev;
508 struct work_struct task;
510 /* Flags that indicate current state of the adapter */
513 /* local link state, to determine if a state change has occurred */
517 u8 rom_addr[ETH_ALEN];
525 spinlock_t tcb_send_qlock;
526 spinlock_t tcb_ready_qlock;
527 spinlock_t send_hw_lock;
530 spinlock_t rcv_pend_lock;
535 /* Packet Filter and look ahead size */
539 u32 multicast_addr_count;
540 u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
542 /* Pointer to the device's PCI register space */
543 struct address_map __iomem *regs;
545 /* Registry parameters */
546 u8 wanted_flow; /* Flow we want for 802.3x flow control */
547 u32 registry_jumbo_packet; /* Max supported ethernet packet size */
549 /* Derived from the registry: */
550 u8 flowcontrol; /* flow control validated by the far-end */
552 /* Minimize init-time */
553 struct timer_list error_timer;
555 /* variable for putting the phy into coma mode when booting up with no cable
556 * plugged in, after 5 seconds
560 /* Next two used to save power information at power down. This
561 * information will be used during power up to set up parts of Power
562 * Management in JAGCore
567 /* Tx Memory Variables */
568 struct tx_ring tx_ring;
570 /* Rx Memory Variables */
571 struct rx_ring rx_ring;
574 struct ce_stats stats;
576 struct net_device_stats net_stats;
579 void et131x_adapter_setup(struct et131x_adapter *adapter);
580 void et131x_soft_reset(struct et131x_adapter *adapter);
581 void et131x_isr_handler(struct work_struct *work);
582 void et1310_setup_device_for_multicast(struct et131x_adapter *adapter);
583 void et1310_setup_device_for_unicast(struct et131x_adapter *adapter);
584 void et131x_up(struct net_device *netdev);
585 void et131x_down(struct net_device *netdev);
586 struct net_device *et131x_device_alloc(void);
587 void et131x_enable_txrx(struct net_device *netdev);
588 void et131x_disable_txrx(struct net_device *netdev);
589 int et1310_in_phy_coma(struct et131x_adapter *adapter);
590 void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
592 u16 regnum, u16 bitnum, u8 *value);
593 int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
595 int32_t et131x_mii_write(struct et131x_adapter *adapter,
597 void et131x_rx_dma_memory_free(struct et131x_adapter *adapter);
598 void et131x_rx_dma_disable(struct et131x_adapter *adapter);
599 void et131x_rx_dma_enable(struct et131x_adapter *adapter);
600 void et131x_init_send(struct et131x_adapter *adapter);
601 void et131x_tx_dma_enable(struct et131x_adapter *adapter);
603 /* EEPROM functions */
605 static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
611 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
612 * bits 7 and 1:0 all equal to 1, at least once after reset.
613 * Subsequent operations need only check that bits 1:0 are equal
614 * to 1 prior to starting a single byte read/write
617 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
618 /* Read registers grouped in DWORD1 */
619 if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
622 /* I2C idle and Phy Queue Avail both true */
623 if ((reg & 0x3000) == 0x3000) {
634 * eeprom_write - Write a byte to the ET1310's EEPROM
635 * @adapter: pointer to our private adapter structure
636 * @addr: the address to write
637 * @data: the value to write
639 * Returns 0 on success, -EIO on failure.
641 static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
643 struct pci_dev *pdev = adapter->pdev;
653 * For an EEPROM, an I2C single byte write is defined as a START
654 * condition followed by the device address, EEPROM address, one byte
655 * of data and a STOP condition. The STOP condition will trigger the
656 * EEPROM's internally timed write cycle to the nonvolatile memory.
657 * All inputs are disabled during this write cycle and the EEPROM will
658 * not respond to any access until the internal write is complete.
661 err = eeprom_wait_ready(pdev, NULL);
666 * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
667 * and bits 1:0 both =0. Bit 5 should be set according to the
668 * type of EEPROM being accessed (1=two byte addressing, 0=one
671 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
672 LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
677 /* Prepare EEPROM address for Step 3 */
679 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
680 /* Write the address to the LBCIF Address Register */
681 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
684 * Write the data to the LBCIF Data Register (the I2C write
687 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
690 * Monitor bit 1:0 of the LBCIF Status Register. When bits
691 * 1:0 are both equal to 1, the I2C write has completed and the
692 * internal write cycle of the EEPROM is about to start.
693 * (bits 1:0 = 01 is a legal state while waiting from both
694 * equal to 1, but bits 1:0 = 10 is invalid and implies that
695 * something is broken).
697 err = eeprom_wait_ready(pdev, &status);
702 * Check bit 3 of the LBCIF Status Register. If equal to 1,
703 * an error has occurred. Don't break here if we are revision
704 * 1; this is so we do a blind write to work around the load bug.
706 if ((status & LBCIF_STATUS_GENERAL_ERROR)
707 && adapter->pdev->revision == 0)
711 * Check bit 2 of the LBCIF Status Register. If equal to 1 an
712 * ACK error has occurred on the address phase of the write.
713 * This could be due to an actual hardware failure or the
714 * EEPROM may still be in its internal write cycle from a
715 * previous write. This write operation was ignored and must be
718 if (status & LBCIF_STATUS_ACK_ERROR) {
720 * This could be due to an actual hardware failure
721 * or the EEPROM may still be in its internal write
722 * cycle from a previous write. This write operation
723 * was ignored and must be repeated later.
734 * Set bit 6 of the LBCIF Control Register = 0.
739 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
740 LBCIF_CONTROL_LBCIF_ENABLE))
743 /* Do read until internal ACK_ERROR goes away meaning write
747 pci_write_config_dword(pdev,
748 LBCIF_ADDRESS_REGISTER,
751 pci_read_config_dword(pdev,
752 LBCIF_DATA_REGISTER, &val);
753 } while ((val & 0x00010000) == 0);
754 } while (val & 0x00040000);
756 if ((val & 0xFF00) != 0xC000 || index == 10000)
760 return writeok ? 0 : -EIO;
764 * eeprom_read - Read a byte from the ET1310's EEPROM
765 * @adapter: pointer to our private adapter structure
766 * @addr: the address from which to read
767 * @pdata: a pointer to a byte in which to store the value of the read
771 * Returns 0 on success, -EIO on failure
773 static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
775 struct pci_dev *pdev = adapter->pdev;
780 * A single byte read is similar to the single byte write, with the
781 * exception of the data flow:
784 err = eeprom_wait_ready(pdev, NULL);
788 * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
789 * and bits 1:0 both =0. Bit 5 should be set according to the type
790 * of EEPROM being accessed (1=two byte addressing, 0=one byte
793 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
794 LBCIF_CONTROL_LBCIF_ENABLE))
797 * Write the address to the LBCIF Address Register (I2C read will
800 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
803 * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
804 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
807 err = eeprom_wait_ready(pdev, &status);
811 * Regardless of error status, read data byte from LBCIF Data
816 * Check bit 2 of the LBCIF Status Register. If = 1,
817 * then an error has occurred.
819 return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
822 int et131x_init_eeprom(struct et131x_adapter *adapter)
824 struct pci_dev *pdev = adapter->pdev;
827 /* We first need to check the EEPROM Status code located at offset
828 * 0xB2 of config space
830 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
833 /* THIS IS A WORKAROUND:
834 * I need to call this function twice to get my card in a
835 * LG M1 Express Dual running. I also tried an msleep before this
836 * function, because I thought there could be some timing conditions,
837 * but it didn't work. Calling the whole function twice also works.
839 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
841 "Could not read PCI config space for EEPROM Status\n");
845 /* Determine if the error(s) we care about are present. If they are
846 * present we need to fail.
848 if (eestatus & 0x4C) {
849 int write_failed = 0;
850 if (pdev->revision == 0x01) {
852 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
854 /* Re-write the first 4 bytes if we have an eeprom
855 * present and the revision id is 1, this fixes the
856 * corruption seen with 1310 B Silicon
858 for (i = 0; i < 4; i++)
859 if (eeprom_write(adapter, i, eedata[i]) < 0)
862 if (pdev->revision != 0x01 || write_failed) {
864 "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
866 /* This error could mean that there was an error
867 * reading the eeprom or that the eeprom doesn't exist.
868 * We will treat each case the same and not try to
869 * gather additional information that normally would
870 * come from the eeprom, like MAC Address
872 adapter->has_eeprom = 0;
876 adapter->has_eeprom = 1;
878 /* Read the EEPROM for information regarding LED behavior. Refer to
879 * ET1310_phy.c, et131x_xcvr_init(), for its use.
881 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
882 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
884 if (adapter->eeprom_data[0] != 0xcd)
885 /* Disable all optional features */
886 adapter->eeprom_data[1] = 0x00;
894 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
895 * @adapter: pointer to our adapter structure
897 void et1310_config_mac_regs1(struct et131x_adapter *adapter)
899 struct mac_regs __iomem *macregs = &adapter->regs->mac;
904 /* First we need to reset everything. Write to MAC configuration
905 * register 1 to perform reset.
907 writel(0xC00F0000, &macregs->cfg1);
909 /* Next let's configure the MAC Inter-packet gap register */
910 ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
911 ipg |= 0x50 << 8; /* ifg enforce 0x50 */
912 writel(ipg, &macregs->ipg);
914 /* Next let's configure the MAC Half Duplex register */
915 /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
916 writel(0x00A1F037, &macregs->hfdp);
918 /* Next let's configure the MAC Interface Control register */
919 writel(0, &macregs->if_ctrl);
921 /* Let's move on to setting up the mii management configuration */
922 writel(0x07, &macregs->mii_mgmt_cfg); /* Clock reset 0x7 */
924 /* Next let's configure the MAC Station Address register. These
925 * values are read from the EEPROM during initialization and stored
926 * in the adapter structure. We write what is stored in the adapter
927 * structure to the MAC Station Address registers high and low. This
928 * station address is used for generating and checking pause control
931 station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
932 (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
933 station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
934 (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
935 (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
937 writel(station1, &macregs->station_addr_1);
938 writel(station2, &macregs->station_addr_2);
940 /* Max ethernet packet in bytes that will be passed by the MAC without
941 * being truncated. Allow the MAC to pass 4 more than our max packet
942 * size. This is 4 for the Ethernet CRC.
944 * Packets larger than (registry_jumbo_packet) that do not contain a
945 * VLAN ID will be dropped by the Rx function.
947 writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
949 /* clear out MAC config reset */
950 writel(0, &macregs->cfg1);
954 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
955 * @adapter: pointer to our adapter structure
957 void et1310_config_mac_regs2(struct et131x_adapter *adapter)
960 struct mac_regs __iomem *mac = &adapter->regs->mac;
961 struct phy_device *phydev = adapter->phydev;
967 ctl = readl(&adapter->regs->txmac.ctl);
968 cfg1 = readl(&mac->cfg1);
969 cfg2 = readl(&mac->cfg2);
970 ifctrl = readl(&mac->if_ctrl);
972 /* Set up the if mode bits */
974 if (phydev && phydev->speed == SPEED_1000) {
977 ifctrl &= ~(1 << 24);
983 /* We need to enable Rx/Tx */
984 cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
985 /* Initialize loop back to off */
986 cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
987 if (adapter->flowcontrol == FLOW_RXONLY ||
988 adapter->flowcontrol == FLOW_BOTH)
989 cfg1 |= CFG1_RX_FLOW;
990 writel(cfg1, &mac->cfg1);
992 /* Now we need to initialize the MAC Configuration 2 register */
993 /* preamble 7, check length, huge frame off, pad crc, crc enable
998 /* Turn on duplex if needed */
999 if (phydev && phydev->duplex == DUPLEX_FULL)
1002 ifctrl &= ~(1 << 26);
1003 if (phydev && phydev->duplex == DUPLEX_HALF)
1004 ifctrl |= (1<<26); /* Enable ghd */
1006 writel(ifctrl, &mac->if_ctrl);
1007 writel(cfg2, &mac->cfg2);
1012 cfg1 = readl(&mac->cfg1);
1013 } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
1016 dev_warn(&adapter->pdev->dev,
1017 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
1022 ctl |= 0x09; /* TX mac enable, FC disable */
1023 writel(ctl, &adapter->regs->txmac.ctl);
1025 /* Ready to start the RXDMA/TXDMA engine */
1026 if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
1027 et131x_rx_dma_enable(adapter);
1028 et131x_tx_dma_enable(adapter);
1032 void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1034 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1035 struct phy_device *phydev = adapter->phydev;
1040 /* Disable the MAC while it is being configured (also disable WOL) */
1041 writel(0x8, &rxmac->ctrl);
1043 /* Initialize WOL to disabled. */
1044 writel(0, &rxmac->crc0);
1045 writel(0, &rxmac->crc12);
1046 writel(0, &rxmac->crc34);
1048 /* We need to set the WOL mask0 - mask4 next. We initialize it to
1049 * its default value of 0x00000000 because there are no WOL masks
1052 writel(0, &rxmac->mask0_word0);
1053 writel(0, &rxmac->mask0_word1);
1054 writel(0, &rxmac->mask0_word2);
1055 writel(0, &rxmac->mask0_word3);
1057 writel(0, &rxmac->mask1_word0);
1058 writel(0, &rxmac->mask1_word1);
1059 writel(0, &rxmac->mask1_word2);
1060 writel(0, &rxmac->mask1_word3);
1062 writel(0, &rxmac->mask2_word0);
1063 writel(0, &rxmac->mask2_word1);
1064 writel(0, &rxmac->mask2_word2);
1065 writel(0, &rxmac->mask2_word3);
1067 writel(0, &rxmac->mask3_word0);
1068 writel(0, &rxmac->mask3_word1);
1069 writel(0, &rxmac->mask3_word2);
1070 writel(0, &rxmac->mask3_word3);
1072 writel(0, &rxmac->mask4_word0);
1073 writel(0, &rxmac->mask4_word1);
1074 writel(0, &rxmac->mask4_word2);
1075 writel(0, &rxmac->mask4_word3);
1077 /* Lets setup the WOL Source Address */
1078 sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
1079 (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
1080 (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
1082 writel(sa_lo, &rxmac->sa_lo);
1084 sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
1086 writel(sa_hi, &rxmac->sa_hi);
1088 /* Disable all Packet Filtering */
1089 writel(0, &rxmac->pf_ctrl);
1091 /* Let's initialize the Unicast Packet filtering address */
1092 if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1093 et1310_setup_device_for_unicast(adapter);
1094 pf_ctrl |= 4; /* Unicast filter */
1096 writel(0, &rxmac->uni_pf_addr1);
1097 writel(0, &rxmac->uni_pf_addr2);
1098 writel(0, &rxmac->uni_pf_addr3);
1101 /* Let's initialize the Multicast hash */
1102 if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1103 pf_ctrl |= 2; /* Multicast filter */
1104 et1310_setup_device_for_multicast(adapter);
1107 /* Runt packet filtering. Didn't work in version A silicon. */
1108 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
1109 pf_ctrl |= 8; /* Fragment filter */
1111 if (adapter->registry_jumbo_packet > 8192)
1112 /* In order to transmit jumbo packets greater than 8k, the
1113 * FIFO between RxMAC and RxDMA needs to be reduced in size
1114 * to (16k - Jumbo packet size). In order to implement this,
1115 * we must use "cut through" mode in the RxMAC, which chops
1116 * packets down into segments which are (max_size * 16). In
1117 * this case we selected 256 bytes, since this is the size of
1118 * the PCI-Express TLP's that the 1310 uses.
1120 * seg_en on, fc_en off, size 0x10
1122 writel(0x41, &rxmac->mcif_ctrl_max_seg);
1124 writel(0, &rxmac->mcif_ctrl_max_seg);
1126 /* Initialize the MCIF water marks */
1127 writel(0, &rxmac->mcif_water_mark);
1129 /* Initialize the MIF control */
1130 writel(0, &rxmac->mif_ctrl);
1132 /* Initialize the Space Available Register */
1133 writel(0, &rxmac->space_avail);
1135 /* Initialize the mif_ctrl register
1136 * bit 3: Receive code error. One or more nibbles were signaled as
1137 * errors during the reception of the packet. Clear this
1138 * bit in Gigabit, set it in 100Mbit. This was derived
1139 * experimentally at UNH.
1140 * bit 4: Receive CRC error. The packet's CRC did not match the
1141 * internally generated CRC.
1142 * bit 5: Receive length check error. Indicates that frame length
1143 * field value in the packet does not match the actual data
1144 * byte length and is not a type field.
1145 * bit 16: Receive frame truncated.
1146 * bit 17: Drop packet enable
1148 if (phydev && phydev->speed == SPEED_100)
1149 writel(0x30038, &rxmac->mif_ctrl);
1151 writel(0x30030, &rxmac->mif_ctrl);
1153 /* Finally we initialize RxMac to be enabled & WOL disabled. Packet
1154 * filter is always enabled since it is where the runt packets are
1155 * supposed to be dropped. For version A silicon, runt packet
1156 * dropping doesn't work, so it is disabled in the pf_ctrl register,
1157 * but we still leave the packet filter on.
1159 writel(pf_ctrl, &rxmac->pf_ctrl);
1160 writel(0x9, &rxmac->ctrl);
1163 void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1165 struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1167 /* We need to update the Control Frame Parameters
1168 * cfpt - control frame pause timer set to 64 (0x40)
1169 * cfep - control frame extended pause timer set to 0x0
1171 if (adapter->flowcontrol == FLOW_NONE)
1172 writel(0, &txmac->cf_param);
1174 writel(0x40, &txmac->cf_param);
1177 void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1179 struct macstat_regs __iomem *macstat =
1180 &adapter->regs->macstat;
1182 /* Next we need to initialize all the macstat registers to zero on
1185 writel(0, &macstat->txrx_0_64_byte_frames);
1186 writel(0, &macstat->txrx_65_127_byte_frames);
1187 writel(0, &macstat->txrx_128_255_byte_frames);
1188 writel(0, &macstat->txrx_256_511_byte_frames);
1189 writel(0, &macstat->txrx_512_1023_byte_frames);
1190 writel(0, &macstat->txrx_1024_1518_byte_frames);
1191 writel(0, &macstat->txrx_1519_1522_gvln_frames);
1193 writel(0, &macstat->rx_bytes);
1194 writel(0, &macstat->rx_packets);
1195 writel(0, &macstat->rx_fcs_errs);
1196 writel(0, &macstat->rx_multicast_packets);
1197 writel(0, &macstat->rx_broadcast_packets);
1198 writel(0, &macstat->rx_control_frames);
1199 writel(0, &macstat->rx_pause_frames);
1200 writel(0, &macstat->rx_unknown_opcodes);
1201 writel(0, &macstat->rx_align_errs);
1202 writel(0, &macstat->rx_frame_len_errs);
1203 writel(0, &macstat->rx_code_errs);
1204 writel(0, &macstat->rx_carrier_sense_errs);
1205 writel(0, &macstat->rx_undersize_packets);
1206 writel(0, &macstat->rx_oversize_packets);
1207 writel(0, &macstat->rx_fragment_packets);
1208 writel(0, &macstat->rx_jabbers);
1209 writel(0, &macstat->rx_drops);
1211 writel(0, &macstat->tx_bytes);
1212 writel(0, &macstat->tx_packets);
1213 writel(0, &macstat->tx_multicast_packets);
1214 writel(0, &macstat->tx_broadcast_packets);
1215 writel(0, &macstat->tx_pause_frames);
1216 writel(0, &macstat->tx_deferred);
1217 writel(0, &macstat->tx_excessive_deferred);
1218 writel(0, &macstat->tx_single_collisions);
1219 writel(0, &macstat->tx_multiple_collisions);
1220 writel(0, &macstat->tx_late_collisions);
1221 writel(0, &macstat->tx_excessive_collisions);
1222 writel(0, &macstat->tx_total_collisions);
1223 writel(0, &macstat->tx_pause_honored_frames);
1224 writel(0, &macstat->tx_drops);
1225 writel(0, &macstat->tx_jabbers);
1226 writel(0, &macstat->tx_fcs_errs);
1227 writel(0, &macstat->tx_control_frames);
1228 writel(0, &macstat->tx_oversize_frames);
1229 writel(0, &macstat->tx_undersize_frames);
1230 writel(0, &macstat->tx_fragments);
1231 writel(0, &macstat->carry_reg1);
1232 writel(0, &macstat->carry_reg2);
1234 /* Unmask any counters that we want to track the overflow of.
1235 * Initially this will be all counters. It may become clear later
1236 * that we do not need to track all counters.
1238 writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1239 writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1242 void et1310_config_flow_control(struct et131x_adapter *adapter)
1244 struct phy_device *phydev = adapter->phydev;
1246 if (phydev->duplex == DUPLEX_HALF) {
1247 adapter->flowcontrol = FLOW_NONE;
1249 char remote_pause, remote_async_pause;
1251 et1310_phy_access_mii_bit(adapter,
1252 TRUEPHY_BIT_READ, 5, 10, &remote_pause);
1253 et1310_phy_access_mii_bit(adapter,
1254 TRUEPHY_BIT_READ, 5, 11,
1255 &remote_async_pause);
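/* Note (added for clarity, not in the original source): the reads above
 * fetch the link partner's PAUSE (reg 5, bit 10) and ASM_DIR (reg 5,
 * bit 11) advertisement bits; the cases below resolve them against
 * wanted_flow in the spirit of IEEE 802.3 Annex 28B.
 */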
1257 if ((remote_pause == TRUEPHY_BIT_SET) &&
1258 (remote_async_pause == TRUEPHY_BIT_SET)) {
1259 adapter->flowcontrol = adapter->wanted_flow;
1260 } else if ((remote_pause == TRUEPHY_BIT_SET) &&
1261 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1262 if (adapter->wanted_flow == FLOW_BOTH)
1263 adapter->flowcontrol = FLOW_BOTH;
1265 adapter->flowcontrol = FLOW_NONE;
1266 } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
1267 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1268 adapter->flowcontrol = FLOW_NONE;
1269 } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
1270 remote_async_pause == TRUEPHY_SET_BIT) */
1271 if (adapter->wanted_flow == FLOW_BOTH)
1272 adapter->flowcontrol = FLOW_RXONLY;
1274 adapter->flowcontrol = FLOW_NONE;
1280 * et1310_update_macstat_host_counters - Update the local copy of the statistics
1281 * @adapter: pointer to the adapter structure
1283 void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1285 struct ce_stats *stats = &adapter->stats;
1286 struct macstat_regs __iomem *macstat =
1287 &adapter->regs->macstat;
1289 stats->tx_collisions += readl(&macstat->tx_total_collisions);
1290 stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
1291 stats->tx_deferred += readl(&macstat->tx_deferred);
1292 stats->tx_excessive_collisions +=
1293 readl(&macstat->tx_multiple_collisions);
1294 stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
1295 stats->tx_underflows += readl(&macstat->tx_undersize_frames);
1296 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);
1298 stats->rx_align_errs += readl(&macstat->rx_align_errs);
1299 stats->rx_crc_errs += readl(&macstat->rx_code_errs);
1300 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
1301 stats->rx_overflows += readl(&macstat->rx_oversize_packets);
1302 stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
1303 stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
1304 stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
1308 * et1310_handle_macstat_interrupt
1309 * @adapter: pointer to the adapter structure
1311 * One of the MACSTAT counters has wrapped. Update the local copy of
1312 * the statistics held in the adapter structure, checking the "wrap"
1313 * bit for each counter.
1315 void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1320 /* Read the interrupt bits from the register(s). These are Clear On
1323 carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1324 carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1326 writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1327 writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1329 /* We need to update the host copy of all the MAC_STAT counters.
1330 * For each counter, check its overflow bit. If the overflow bit is
1331 * set, then increment the host version of the count by one complete
1332 * revolution of the counter. This routine is called when the counter
1333 * block indicates that one of the counters has wrapped.
1335 if (carry_reg1 & (1 << 14))
1336 adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
1337 if (carry_reg1 & (1 << 8))
1338 adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
1339 if (carry_reg1 & (1 << 7))
1340 adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
1341 if (carry_reg1 & (1 << 2))
1342 adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
1343 if (carry_reg1 & (1 << 6))
1344 adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
1345 if (carry_reg1 & (1 << 3))
1346 adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
1347 if (carry_reg1 & (1 << 0))
1348 adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
1349 if (carry_reg2 & (1 << 16))
1350 adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
1351 if (carry_reg2 & (1 << 15))
1352 adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
1353 if (carry_reg2 & (1 << 6))
1354 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1355 if (carry_reg2 & (1 << 8))
1356 adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
1357 if (carry_reg2 & (1 << 5))
1358 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1359 if (carry_reg2 & (1 << 4))
1360 adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
1361 if (carry_reg2 & (1 << 2))
1362 adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
1365 void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
1367 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1376 /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
1377 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
1378 * specified) then we should pass NO multi-cast addresses to the
1381 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
1382 /* Loop through our multicast array and set up the device */
1383 for (nIndex = 0; nIndex < adapter->multicast_addr_count;
1385 result = ether_crc(6, adapter->multicast_list[nIndex]);
1387 result = (result & 0x3F800000) >> 23;
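/* Note (added for clarity, not in the original source): bits 23-29 of
 * the CRC form a 7-bit hash bucket (0-127); buckets 0-31 set a bit in
 * multi_hash1, 32-63 in multi_hash2, 64-95 in multi_hash3 and 96-127
 * in multi_hash4 below.
 */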
1390 hash1 |= (1 << result);
1391 } else if ((31 < result) && (result < 64)) {
1393 hash2 |= (1 << result);
1394 } else if ((63 < result) && (result < 96)) {
1396 hash3 |= (1 << result);
1399 hash4 |= (1 << result);
1404 /* Write out the new hash to the device */
1405 pm_csr = readl(&adapter->regs->global.pm_csr);
1406 if (!et1310_in_phy_coma(adapter)) {
1407 writel(hash1, &rxmac->multi_hash1);
1408 writel(hash2, &rxmac->multi_hash2);
1409 writel(hash3, &rxmac->multi_hash3);
1410 writel(hash4, &rxmac->multi_hash4);
1414 void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
1416 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1422 /* Set up unicast packet filter reg 3 to be the first two octets of
1423 * the MAC address for both addresses
1425 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
1426 * MAC address for the second address
1428 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
1429 * MAC address for the first address
1431 uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
1432 (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
1433 (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
1436 uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
1437 (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
1438 (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
1441 uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
1442 (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
1443 (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
1446 pm_csr = readl(&adapter->regs->global.pm_csr);
1447 if (!et1310_in_phy_coma(adapter)) {
1448 writel(uni_pf1, &rxmac->uni_pf_addr1);
1449 writel(uni_pf2, &rxmac->uni_pf_addr2);
1450 writel(uni_pf3, &rxmac->uni_pf_addr3);
1456 int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1458 struct net_device *netdev = bus->priv;
1459 struct et131x_adapter *adapter = netdev_priv(netdev);
1463 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1471 int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
1473 struct net_device *netdev = bus->priv;
1474 struct et131x_adapter *adapter = netdev_priv(netdev);
1476 return et131x_mii_write(adapter, reg, value);
1479 int et131x_mdio_reset(struct mii_bus *bus)
1481 struct net_device *netdev = bus->priv;
1482 struct et131x_adapter *adapter = netdev_priv(netdev);
1484 et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1489 int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1491 struct phy_device *phydev = adapter->phydev;
1496 return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1500 * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
1501 * @adapter: pointer to our private adapter structure
1502 * @addr: the address of the transceiver
1503 * @reg: the register to read
1504 * @value: pointer to a 16-bit value in which the value will be stored
1506 * Returns 0 on success, errno on failure (as defined in errno.h)
1508 int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1511 struct mac_regs __iomem *mac = &adapter->regs->mac;
1518 /* Save a local copy of the registers we are dealing with so we can
1521 mii_addr = readl(&mac->mii_mgmt_addr);
1522 mii_cmd = readl(&mac->mii_mgmt_cmd);
1524 /* Stop the current operation */
1525 writel(0, &mac->mii_mgmt_cmd);
1527 /* Set up the register we need to read from on the correct PHY */
1528 writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1530 writel(0x1, &mac->mii_mgmt_cmd);
1535 mii_indicator = readl(&mac->mii_mgmt_indicator);
1536 } while ((mii_indicator & MGMT_WAIT) && delay < 50);
1538 /* If we hit the max delay, we could not read the register */
1540 dev_warn(&adapter->pdev->dev,
1541 "reg 0x%08x could not be read\n", reg);
1542 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1548 /* If we hit here we were able to read the register and we need to
1549 * return the value to the caller */
1550 *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
1552 /* Stop the read operation */
1553 writel(0, &mac->mii_mgmt_cmd);
1555 /* set the registers we touched back to the state at which we entered
1558 writel(mii_addr, &mac->mii_mgmt_addr);
1559 writel(mii_cmd, &mac->mii_mgmt_cmd);
1565 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
1566 * @adapter: pointer to our private adapter structure
1567 * @reg: the register to read
1568 * @value: 16-bit value to write
1570 * FIXME: one caller in netdev still
1572 * Return 0 on success, errno on failure (as defined in errno.h)
1574 int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
1576 struct mac_regs __iomem *mac = &adapter->regs->mac;
1577 struct phy_device *phydev = adapter->phydev;
1588 addr = phydev->addr;
1590 /* Save a local copy of the registers we are dealing with so we can
1593 mii_addr = readl(&mac->mii_mgmt_addr);
1594 mii_cmd = readl(&mac->mii_mgmt_cmd);
1596 /* Stop the current operation */
1597 writel(0, &mac->mii_mgmt_cmd);
1599 /* Set up the register we need to write to on the correct PHY */
1600 writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1602 /* Add the value to write to the registers to the mac */
1603 writel(value, &mac->mii_mgmt_ctrl);
1608 mii_indicator = readl(&mac->mii_mgmt_indicator);
1609 } while ((mii_indicator & MGMT_BUSY) && delay < 100);
1611 /* If we hit the max delay, we could not write the register */
1615 dev_warn(&adapter->pdev->dev,
1616 "reg 0x%08x could not be written", reg);
1617 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1619 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
1620 readl(&mac->mii_mgmt_cmd));
1622 et131x_mii_read(adapter, reg, &tmp);
1626 /* Stop the write operation */
1627 writel(0, &mac->mii_mgmt_cmd);
1630 * set the registers we touched back to the state at which we entered
1633 writel(mii_addr, &mac->mii_mgmt_addr);
1634 writel(mii_cmd, &mac->mii_mgmt_cmd);
1640 * et1310_phy_power_down - PHY power control
1641 * @adapter: device to control
1642 * @down: true for off/false for back on
1644 * one hundred, ten, one thousand megs
1645 * How would you like to have your LAN accessed
1646 * Can't you see that this code processed
1647 * Phy power, phy power..
1649 void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
1653 et131x_mii_read(adapter, MII_BMCR, &data);
1654 data &= ~BMCR_PDOWN;
1657 et131x_mii_write(adapter, MII_BMCR, data);
1660 /* Still used from _mac for BIT_READ */
1661 void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
1662 u16 regnum, u16 bitnum, u8 *value)
1665 u16 mask = 0x0001 << bitnum;
1667 /* Read the requested register */
1668 et131x_mii_read(adapter, regnum, &reg);
1671 case TRUEPHY_BIT_READ:
1672 *value = (reg & mask) >> bitnum;
1675 case TRUEPHY_BIT_SET:
1676 et131x_mii_write(adapter, regnum, reg | mask);
1679 case TRUEPHY_BIT_CLEAR:
1680 et131x_mii_write(adapter, regnum, reg & ~mask);
1689 * et131x_xcvr_init - Init the phy if we are setting it into force mode
1690 * @adapter: pointer to our private adapter structure
1693 void et131x_xcvr_init(struct et131x_adapter *adapter)
1699 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
1700 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
1702 /* Set the link status interrupt only. Bad behavior when link status
1703 * and auto-neg are both set, as we run into a nested interrupt problem
1705 imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
1706 ET_PHY_INT_MASK_LINKSTAT &
1707 ET_PHY_INT_MASK_ENABLE);
1709 et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1711 /* Set the LED behavior such that LED 1 indicates speed (off =
1712 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1713 * link and activity (on for link, blink off for activity).
1715 * NOTE: Some customizations have been added here for specific
1716 * vendors; The LED behavior is now determined by vendor data in the
1717 * EEPROM. However, the above description is the default.
1719 if ((adapter->eeprom_data[1] & 0x4) == 0) {
1720 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1722 lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
1723 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1725 if ((adapter->eeprom_data[1] & 0x8) == 0)
1726 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1728 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1730 et131x_mii_write(adapter, PHY_LED_2, lcr2);
1737 * et1310_in_phy_coma - check if the device is in phy coma
1738 * @adapter: pointer to our adapter structure
1740 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
1742 int et1310_in_phy_coma(struct et131x_adapter *adapter)
1746 pmcsr = readl(&adapter->regs->global.pm_csr);
1748 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
1752 * et1310_enable_phy_coma - called when network cable is unplugged
1753 * @adapter: pointer to our adapter structure
1755 * driver receives a phy status change interrupt while in D0 and checks that
1756 * phy_status is down.
1758 * -- gate off JAGCore;
1759 * -- set gigE PHY in Coma mode
1760 * -- wake on phy_interrupt; Perform software reset JAGCore,
1761 * re-initialize jagcore and gigE PHY
1763 * Add D0-ASPM-PhyLinkDown Support:
1764 * -- while in D0, when there is a phy_interrupt indicating phy link
1765 * down status, call the MPSetPhyComa routine to enter this active
1766 * state power saving mode
1767 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
1768 * indicating linkup status, call the MPDisablePhyComa routine to
1769 * restore JAGCore and gigE PHY
1771 void et1310_enable_phy_coma(struct et131x_adapter *adapter)
1773 unsigned long flags;
1776 pmcsr = readl(&adapter->regs->global.pm_csr);
1778 /* Save the GbE PHY speed and duplex modes. Need to restore this
1779 * when cable is plugged back in
1782 * TODO - when PM is re-enabled, check if we need to
1783 * perform a similar task as this -
1784 * adapter->pdown_speed = adapter->ai_force_speed;
1785 * adapter->pdown_duplex = adapter->ai_force_duplex;
1788 /* Stop sending packets. */
1789 spin_lock_irqsave(&adapter->send_hw_lock, flags);
1790 adapter->flags |= fMP_ADAPTER_LOWER_POWER;
1791 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
1793 /* Wait for outstanding Receive packets */
1795 et131x_disable_txrx(adapter->netdev);
1797 /* Gate off JAGCore 3 clock domains */
1798 pmcsr &= ~ET_PMCSR_INIT;
1799 writel(pmcsr, &adapter->regs->global.pm_csr);
1801 /* Program gigE PHY into Coma mode */
1802 pmcsr |= ET_PM_PHY_SW_COMA;
1803 writel(pmcsr, &adapter->regs->global.pm_csr);
1807 * et1310_disable_phy_coma - Disable the Phy Coma Mode
1808 * @adapter: pointer to our adapter structure
1810 void et1310_disable_phy_coma(struct et131x_adapter *adapter)
1814 pmcsr = readl(&adapter->regs->global.pm_csr);
1816 /* Disable phy_sw_coma register and re-enable JAGCore clocks */
1817 pmcsr |= ET_PMCSR_INIT;
1818 pmcsr &= ~ET_PM_PHY_SW_COMA;
1819 writel(pmcsr, &adapter->regs->global.pm_csr);
1821 /* Restore the GbE PHY speed and duplex modes;
1822 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
1824 /* TODO - when PM is re-enabled, check if we need to
1825 * perform a similar task as this -
1826 * adapter->ai_force_speed = adapter->pdown_speed;
1827 * adapter->ai_force_duplex = adapter->pdown_duplex;
1830 /* Re-initialize the send structures */
1831 et131x_init_send(adapter);
1833 /* Bring the device back to the state it was during init prior to
1834 * autonegotiation being complete. This way, when we get the auto-neg
1835 * complete interrupt, we can complete init by calling ConfigMacREGS2.
1837 et131x_soft_reset(adapter);
1839 /* setup et1310 as per the documentation ?? */
1840 et131x_adapter_setup(adapter);
1842 /* Allow Tx to restart */
1843 adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
1845 et131x_enable_txrx(adapter->netdev);
1850 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
1852 u32 tmp_free_buff_ring = *free_buff_ring;
1853 tmp_free_buff_ring++;
1854 /* This works for all cases where limit < 1024. The 1023 case
1855 works because 1023++ is 1024 which means the if condition is not
1856 taken but the carry of the bit into the wrap bit toggles the wrap
1858 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
1859 tmp_free_buff_ring &= ~ET_DMA10_MASK;
1860 tmp_free_buff_ring ^= ET_DMA10_WRAP;
1862 /* For the 1023 case */
1863 tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1864 *free_buff_ring = tmp_free_buff_ring;
1865 return tmp_free_buff_ring;
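/* Worked example (added for illustration, not in the original source):
 * with limit = 511 and a current value of 511, the increment gives 512;
 * 512 & ET_DMA10_MASK exceeds the limit, so the low ten bits are cleared
 * and ET_DMA10_WRAP is toggled - the index wraps to 0 with the wrap bit
 * flipped, which is what the hardware expects.
 */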
1869 * et131x_align_allocated_memory - Align allocated memory on a given boundary
1870 * @adapter: pointer to our adapter structure
1871 * @phys_addr: pointer to Physical address
1872 * @offset: pointer to the offset variable
1873 * @mask: correct mask
1875 void et131x_align_allocated_memory(struct et131x_adapter *adapter,
1876 uint64_t *phys_addr,
1877 uint64_t *offset, uint64_t mask)
1883 new_addr = *phys_addr & ~mask;
1885 if (new_addr != *phys_addr) {
1886 /* Move to next aligned block */
1887 new_addr += mask + 1;
1888 /* Return offset for adjusting virt addr */
1889 *offset = new_addr - *phys_addr;
1890 /* Return new physical address */
1891 *phys_addr = new_addr;
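/* Worked example (added for illustration, not in the original source):
 * with mask = 0x0FFF and *phys_addr = 0x12345ABC, new_addr starts as
 * 0x12345000; since that differs from the original address it is bumped
 * to the next 4K boundary, 0x12346000. *offset then becomes 0x544 and
 * *phys_addr is updated to the aligned address; the caller adds the same
 * offset to the corresponding virtual address.
 */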
1896 * et131x_rx_dma_memory_alloc
1897 * @adapter: pointer to our private adapter structure
1899 * Returns 0 on success and errno on failure (as defined in errno.h)
1901 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
1902 * and the Packet Status Ring.
1904 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
1908 u32 pktstat_ringsize, fbr_chunksize;
1909 struct rx_ring *rx_ring;
1911 /* Setup some convenience pointers */
1912 rx_ring = &adapter->rx_ring;
1914 /* Alloc memory for the lookup table */
1916 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
1918 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
1920 /* The first thing we will do is configure the sizes of the buffer
1921 * rings. These will change based on jumbo packet support. Larger
1922 * jumbo packets increases the size of each entry in FBR0, and the
1923 * number of entries in FBR0, while at the same time decreasing the
1924 * number of entries in FBR1.
1926 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
1927 * entries are huge in order to accommodate a "jumbo" frame, then it
1928 * will have fewer entries. Conversely, FBR1 will now be relied upon
1929 * to carry more "normal" frames, thus its entry size also increases
1930 * and the number of entries goes up too (since it now carries
1931 * "small" + "regular" packets).
1933 * In this scheme, we try to maintain 512 entries between the two
1934 * rings. Also, FBR1 remains a constant size - when its size doubles
1935 * the number of entries halves. FBR0 increases in size, however.
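 *
 * Summary of the sizing below (added restatement, not in the original
 * source); note that fbr[1] is FBR0 and fbr[0] is FBR1 per the note
 * further up:
 *   jumbo < 2048: fbr[1] 256 B x 512 entries,  fbr[0] 2048 B x 512
 *   jumbo < 4096: fbr[1] 512 B x 1024 entries, fbr[0] 4096 B x 512
 *   otherwise:    fbr[1] 1024 B x 768 entries, fbr[0] 16384 B x 128
 */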
1938 if (adapter->registry_jumbo_packet < 2048) {
1940 rx_ring->fbr[1]->buffsize = 256;
1941 rx_ring->fbr[1]->num_entries = 512;
1943 rx_ring->fbr[0]->buffsize = 2048;
1944 rx_ring->fbr[0]->num_entries = 512;
1945 } else if (adapter->registry_jumbo_packet < 4096) {
1947 rx_ring->fbr[1]->buffsize = 512;
1948 rx_ring->fbr[1]->num_entries = 1024;
1950 rx_ring->fbr[0]->buffsize = 4096;
1951 rx_ring->fbr[0]->num_entries = 512;
1954 rx_ring->fbr[1]->buffsize = 1024;
1955 rx_ring->fbr[1]->num_entries = 768;
1957 rx_ring->fbr[0]->buffsize = 16384;
1958 rx_ring->fbr[0]->num_entries = 128;
1962 adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[1]->num_entries +
1963 adapter->rx_ring.fbr[0]->num_entries;
1965 adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
1968 /* Allocate an area of memory for Free Buffer Ring 1 */
1969 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 0xfff;
1970 rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1972 &rx_ring->fbr[0]->ring_physaddr,
1974 if (!rx_ring->fbr[0]->ring_virtaddr) {
1975 dev_err(&adapter->pdev->dev,
1976 "Cannot alloc memory for Free Buffer Ring 1\n");
1980 /* Save physical address
1982 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
1983 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
1984 * are ever returned, make sure the high part is retrieved here
1985 * before storing the adjusted address.
1987 rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
1989 /* Align Free Buffer Ring 1 on a 4K boundary */
1990 et131x_align_allocated_memory(adapter,
1991 &rx_ring->fbr[0]->real_physaddr,
1992 &rx_ring->fbr[0]->offset, 0x0FFF);
1994 rx_ring->fbr[0]->ring_virtaddr =
1995 (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
1996 rx_ring->fbr[0]->offset);
1999 /* Allocate an area of memory for Free Buffer Ring 0 */
2000 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 0xfff;
2001 rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2003 &rx_ring->fbr[1]->ring_physaddr,
2005 if (!rx_ring->fbr[1]->ring_virtaddr) {
2006 dev_err(&adapter->pdev->dev,
2007 "Cannot alloc memory for Free Buffer Ring 0\n");
2011 /* Save physical address
2013 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2014 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2015 * are ever returned, make sure the high part is retrieved here before
2016 * storing the adjusted address.
2018 rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2020 /* Align Free Buffer Ring 0 on a 4K boundary */
2021 et131x_align_allocated_memory(adapter,
2022 &rx_ring->fbr[1]->real_physaddr,
2023 &rx_ring->fbr[1]->offset, 0x0FFF);
2025 rx_ring->fbr[1]->ring_virtaddr =
2026 (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2027 rx_ring->fbr[1]->offset);
2029 for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2031 u64 fbr1_tmp_physaddr;
2034 /* This code allocates an area of memory big enough for N
2035 * free buffers + (buffer_size - 1) so that the buffers can
2036 * be aligned on 4k boundaries. If each buffer were aligned
2037 * to a buffer_size boundary, the effect would be to double
2038 * the size of FBR0. By allocating N buffers at once, we
2039 * reduce this overhead.
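/* For example (hypothetical numbers): with a 2048-byte buffer size and
 * FBR_CHUNKS buffers per chunk, one chunk is FBR_CHUNKS * 2048 + 2047
 * bytes. Rounding the chunk start up to a 2048-byte boundary then leaves
 * every buffer in the chunk naturally aligned, at the cost of at most one
 * buffer's worth of padding per chunk rather than per buffer.
 */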
2041 if (rx_ring->fbr[0]->buffsize > 4096)
2042 fbr1_align = 4096;
2043 else
2044 fbr1_align = rx_ring->fbr[0]->buffsize;
2047 (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2048 rx_ring->fbr[0]->mem_virtaddrs[i] =
2049 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2050 &rx_ring->fbr[0]->mem_physaddrs[i], GFP_KERNEL);
2052 if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2053 dev_err(&adapter->pdev->dev,
2054 "Could not alloc memory\n");
2058 /* See NOTE in "Save Physical Address" comment above */
2059 fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2061 et131x_align_allocated_memory(adapter,
2063 &fbr1_offset, (fbr1_align - 1));
2065 for (j = 0; j < FBR_CHUNKS; j++) {
2066 u32 index = (i * FBR_CHUNKS) + j;
2068 /* Save the Virtual address of this index for quick
2069 * access later
2070 */
2071 rx_ring->fbr[0]->virt[index] =
2072 (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2073 (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2075 /* now store the physical address in the descriptor
2076 * so the device can access it
2078 rx_ring->fbr[0]->bus_high[index] =
2079 (u32) (fbr1_tmp_physaddr >> 32);
2080 rx_ring->fbr[0]->bus_low[index] =
2081 (u32) fbr1_tmp_physaddr;
2083 fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2085 rx_ring->fbr[0]->buffer1[index] =
2086 rx_ring->fbr[0]->virt[index];
2087 rx_ring->fbr[0]->buffer2[index] =
2088 rx_ring->fbr[0]->virt[index] - 4;
2093 /* Same for FBR0 (if in use) */
2094 for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2096 u64 fbr0_tmp_physaddr;
2099 ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2100 rx_ring->fbr[1]->mem_virtaddrs[i] =
2101 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2102 &rx_ring->fbr[1]->mem_physaddrs[i], GFP_KERNEL);
2104 if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2105 dev_err(&adapter->pdev->dev,
2106 "Could not alloc memory\n");
2110 /* See NOTE in "Save Physical Address" comment above */
2111 fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2113 et131x_align_allocated_memory(adapter,
2116 rx_ring->fbr[1]->buffsize - 1);
2118 for (j = 0; j < FBR_CHUNKS; j++) {
2119 u32 index = (i * FBR_CHUNKS) + j;
2121 rx_ring->fbr[1]->virt[index] =
2122 (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2123 (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2125 rx_ring->fbr[1]->bus_high[index] =
2126 (u32) (fbr0_tmp_physaddr >> 32);
2127 rx_ring->fbr[1]->bus_low[index] =
2128 (u32) fbr0_tmp_physaddr;
2130 fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2132 rx_ring->fbr[1]->buffer1[index] =
2133 rx_ring->fbr[1]->virt[index];
2134 rx_ring->fbr[1]->buffer2[index] =
2135 rx_ring->fbr[1]->virt[index] - 4;
2140 /* Allocate an area of memory for FIFO of Packet Status ring entries */
2142 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2144 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2146 &rx_ring->ps_ring_physaddr,
2149 if (!rx_ring->ps_ring_virtaddr) {
2150 dev_err(&adapter->pdev->dev,
2151 "Cannot alloc memory for Packet Status Ring\n");
2154 printk(KERN_INFO "Packet Status Ring %lx\n",
2155 (unsigned long) rx_ring->ps_ring_physaddr);
2158 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2159 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2160 * are ever returned, make sure the high part is retrieved here before
2161 * storing the adjusted address.
2164 /* Allocate an area of memory for writeback of status information */
2165 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2166 sizeof(struct rx_status_block),
2167 &rx_ring->rx_status_bus,
2169 if (!rx_ring->rx_status_block) {
2170 dev_err(&adapter->pdev->dev,
2171 "Cannot alloc memory for Status Block\n");
2174 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2175 printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
2178 * kmem_cache_create initializes a lookaside list. After successful
2179 * creation, nonpaged fixed-size blocks can be allocated from and
2180 * freed to the lookaside list.
2181 * RFDs will be allocated from this pool.
2183 rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2190 adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2192 /* The RFDs are going to be put on lists later on, so initialize the
2195 INIT_LIST_HEAD(&rx_ring->recv_list);
2200 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
2201 * @adapter: pointer to our private adapter structure
2203 void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2207 u32 pktstat_ringsize;
2209 struct rx_ring *rx_ring;
2211 /* Setup some convenience pointers */
2212 rx_ring = &adapter->rx_ring;
2214 /* Free RFDs and associated packet descriptors */
2215 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2217 while (!list_empty(&rx_ring->recv_list)) {
2218 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2219 struct rfd, list_node);
2221 list_del(&rfd->list_node);
2223 kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2226 /* Free Free Buffer Ring 1 */
2227 if (rx_ring->fbr[0]->ring_virtaddr) {
2228 /* First the packet memory */
2229 for (index = 0; index <
2230 (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2231 if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2234 if (rx_ring->fbr[0]->buffsize > 4096)
2235 fbr1_align = 4096;
2236 else
2237 fbr1_align = rx_ring->fbr[0]->buffsize;
2240 (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2243 dma_free_coherent(&adapter->pdev->dev,
2245 rx_ring->fbr[0]->mem_virtaddrs[index],
2246 rx_ring->fbr[0]->mem_physaddrs[index]);
2248 rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2252 /* Now the FIFO itself */
2253 rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2254 rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2256 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries)
2259 dma_free_coherent(&adapter->pdev->dev, bufsize,
2260 rx_ring->fbr[0]->ring_virtaddr,
2261 rx_ring->fbr[0]->ring_physaddr);
2263 rx_ring->fbr[0]->ring_virtaddr = NULL;
2267 /* Now the same for Free Buffer Ring 0 */
2268 if (rx_ring->fbr[1]->ring_virtaddr) {
2269 /* First the packet memory */
2270 for (index = 0; index <
2271 (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2272 if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2274 (rx_ring->fbr[1]->buffsize *
2275 (FBR_CHUNKS + 1)) - 1;
2277 dma_free_coherent(&adapter->pdev->dev,
2279 rx_ring->fbr[1]->mem_virtaddrs[index],
2280 rx_ring->fbr[1]->mem_physaddrs[index]);
2282 rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2286 /* Now the FIFO itself */
2287 rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2288 rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2290 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries)
2293 dma_free_coherent(&adapter->pdev->dev,
2295 rx_ring->fbr[1]->ring_virtaddr,
2296 rx_ring->fbr[1]->ring_physaddr);
2298 rx_ring->fbr[1]->ring_virtaddr = NULL;
2302 /* Free Packet Status Ring */
2303 if (rx_ring->ps_ring_virtaddr) {
2305 sizeof(struct pkt_stat_desc) *
2306 adapter->rx_ring.psr_num_entries;
2308 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2309 rx_ring->ps_ring_virtaddr,
2310 rx_ring->ps_ring_physaddr);
2312 rx_ring->ps_ring_virtaddr = NULL;
2315 /* Free area of memory for the writeback of status information */
2316 if (rx_ring->rx_status_block) {
2317 dma_free_coherent(&adapter->pdev->dev,
2318 sizeof(struct rx_status_block),
2319 rx_ring->rx_status_block, rx_ring->rx_status_bus);
2320 rx_ring->rx_status_block = NULL;
2323 /* Destroy the lookaside (RFD) pool */
2324 if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2325 kmem_cache_destroy(rx_ring->recv_lookaside);
2326 adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2329 /* Free the FBR Lookup Table */
2331 kfree(rx_ring->fbr[1]);
2334 kfree(rx_ring->fbr[0]);
2336 /* Reset Counters */
2337 rx_ring->num_ready_recv = 0;
2341 * et131x_init_recv - Initialize receive data structures.
2342 * @adapter: pointer to our private adapter structure
2344 * Returns 0 on success and errno on failure (as defined in errno.h)
2346 int et131x_init_recv(struct et131x_adapter *adapter)
2348 int status = -ENOMEM;
2349 struct rfd *rfd = NULL;
2352 struct rx_ring *rx_ring;
2354 /* Setup some convenience pointers */
2355 rx_ring = &adapter->rx_ring;
2357 /* Setup each RFD */
2358 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2359 rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2360 GFP_ATOMIC | GFP_DMA);
2363 dev_err(&adapter->pdev->dev,
2364 "Couldn't alloc RFD out of kmem_cache\n");
2371 /* Add this RFD to the recv_list */
2372 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2374 /* Increment both the available RFDs and the total RFDs. */
2375 rx_ring->num_ready_recv++;
2379 if (numrfd > NIC_MIN_NUM_RFD)
2382 rx_ring->num_rfd = numrfd;
2385 kmem_cache_free(rx_ring->recv_lookaside, rfd);
2386 dev_err(&adapter->pdev->dev,
2387 "Allocation problems in et131x_init_recv\n");
2393 * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
2394 * @adapter: pointer to our adapter structure
2396 void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
2398 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2399 struct rx_ring *rx_local = &adapter->rx_ring;
2400 struct fbr_desc *fbr_entry;
2403 unsigned long flags;
2405 /* Halt RXDMA to perform the reconfigure. */
2406 et131x_rx_dma_disable(adapter);
2408 /* Load the completion writeback physical address
2410 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2411 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2412 * are ever returned, make sure the high part is retrieved here
2413 * before storing the adjusted address.
2415 writel((u32) ((u64)rx_local->rx_status_bus >> 32),
2416 &rx_dma->dma_wb_base_hi);
2417 writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
2419 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
2421 /* Set the address and parameters of the packet status ring into the
2422 * 1310's registers
2423 */
2424 writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
2425 &rx_dma->psr_base_hi);
2426 writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
2427 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
2428 writel(0, &rx_dma->psr_full_offset);
2430 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
2431 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
2432 &rx_dma->psr_min_des);
2434 spin_lock_irqsave(&adapter->rcv_lock, flags);
2436 /* These local variables track the PSR in the adapter structure */
2437 rx_local->local_psr_full = 0;
2439 /* Now's the best time to initialize FBR1 contents */
2440 fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
2441 for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
2442 fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
2443 fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
2444 fbr_entry->word2 = entry;
2448 /* Set the address and parameters of Free buffer ring 1 (and 0 if
2449 * required) into the 1310's registers
2451 writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
2452 &rx_dma->fbr1_base_hi);
2453 writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
2454 writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
2455 writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
2457 /* This variable tracks the free buffer ring 1 full position, so it
2458 * has to match the above.
2460 rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
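/* Setting the full offset to ET_DMA10_WRAP (index 0 with the wrap bit set)
 * appears to mark the ring as completely full of free buffers: the write
 * pointer is exactly one lap ahead of the hardware's read index, which
 * starts at 0 without the wrap bit.
 */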
2462 ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
2463 &rx_dma->fbr1_min_des);
2466 /* Now's the best time to initialize FBR0 contents */
2467 fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
2468 for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
2469 fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
2470 fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
2471 fbr_entry->word2 = entry;
2475 writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
2476 &rx_dma->fbr0_base_hi);
2477 writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
2478 writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
2479 writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
2481 /* This variable tracks the free buffer ring 0 full position, so it
2482 * has to match the above.
2484 rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
2486 ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
2487 &rx_dma->fbr0_min_des);
2490 /* Program the number of packets we will receive before generating an
2491 * interrupt.
2492 * For version B silicon, this value gets updated once autoneg is
2493 * complete.
2494 */
2495 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
2497 /* The "time_done" is not working correctly to coalesce interrupts
2498 * after a given time period, but rather is giving us an interrupt
2499 * regardless of whether we have received packets.
2500 * This value gets updated once autoneg is complete.
2502 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
2504 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2508 * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2509 * @adapter: pointer to our adapter structure
2511 void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2513 struct phy_device *phydev = adapter->phydev;
2518 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2519 * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
2521 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2522 writel(0, &adapter->regs->rxdma.max_pkt_time);
2523 writel(1, &adapter->regs->rxdma.num_pkt_done);
2528 * nic_return_rfd - Recycle an RFD and put it back onto the receive list
2529 * @adapter: pointer to our adapter
2530 * @rfd: pointer to the RFD
2532 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2534 struct rx_ring *rx_local = &adapter->rx_ring;
2535 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2536 u16 buff_index = rfd->bufferindex;
2537 u8 ring_index = rfd->ringindex;
2538 unsigned long flags;
2540 /* We don't use any of the OOB data besides status. Otherwise, we
2541 * need to clean up OOB data
2545 (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2547 (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2548 spin_lock_irqsave(&adapter->fbr_lock, flags);
2550 if (ring_index == 1) {
2551 struct fbr_desc *next =
2552 (struct fbr_desc *) (rx_local->fbr[0]->ring_virtaddr) +
2553 INDEX10(rx_local->fbr[0]->local_full);
2555 /* Handle the Free Buffer Ring advancement here. Write
2556 * the PA / Buffer Index for the returned buffer into
2557 * the oldest (next to be freed) FBR entry
2559 next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2560 next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2561 next->word2 = buff_index;
2563 writel(bump_free_buff_ring(&rx_local->fbr[0]->local_full,
2564 rx_local->fbr[0]->num_entries - 1),
2565 &rx_dma->fbr1_full_offset);
2569 struct fbr_desc *next = (struct fbr_desc *)
2570 rx_local->fbr[1]->ring_virtaddr +
2571 INDEX10(rx_local->fbr[1]->local_full);
2573 /* Handle the Free Buffer Ring advancement here. Write
2574 * the PA / Buffer Index for the returned buffer into
2575 * the oldest (next to be freed) FBR entry
2577 next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2578 next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2579 next->word2 = buff_index;
2581 writel(bump_free_buff_ring(
2582 &rx_local->fbr[1]->local_full,
2583 rx_local->fbr[1]->num_entries - 1),
2584 &rx_dma->fbr0_full_offset);
2587 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2589 dev_err(&adapter->pdev->dev,
2590 "%s illegal Buffer Index returned\n", __func__);
2593 /* The processing on this RFD is done, so put it back on the tail of
2596 spin_lock_irqsave(&adapter->rcv_lock, flags);
2597 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2598 rx_local->num_ready_recv++;
2599 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2601 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2605 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
2606 * @adapter: pointer to our adapter structure
2608 void et131x_rx_dma_disable(struct et131x_adapter *adapter)
2611 /* Setup the receive dma configuration register */
2612 writel(0x00002001, &adapter->regs->rxdma.csr);
2613 csr = readl(&adapter->regs->rxdma.csr);
2614 if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */
2616 csr = readl(&adapter->regs->rxdma.csr);
2617 if ((csr & 0x00020000) == 0)
2618 dev_err(&adapter->pdev->dev,
2619 "RX Dma failed to enter halt state. CSR 0x%08x\n",
2625 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
2626 * @adapter: pointer to our adapter structure
2628 void et131x_rx_dma_enable(struct et131x_adapter *adapter)
2630 /* Setup the receive dma configuration register for normal operation */
2631 u32 csr = 0x2000; /* FBR1 enable */
2633 if (adapter->rx_ring.fbr[0]->buffsize == 4096)
2635 else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
2637 else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
2640 csr |= 0x0400; /* FBR0 enable */
2641 if (adapter->rx_ring.fbr[1]->buffsize == 256)
2643 else if (adapter->rx_ring.fbr[1]->buffsize == 512)
2645 else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
2648 writel(csr, &adapter->regs->rxdma.csr);
2650 csr = readl(&adapter->regs->rxdma.csr);
2651 if ((csr & 0x00020000) != 0) {
2653 csr = readl(&adapter->regs->rxdma.csr);
2654 if ((csr & 0x00020000) != 0) {
2655 dev_err(&adapter->pdev->dev,
2656 "RX Dma failed to exit halt state. CSR 0x%08x\n",
2663 static inline void add_10bit(u32 *v, int n)
2665 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
2668 static inline void add_12bit(u32 *v, int n)
2670 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
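/* Worked example of the index/wrap encoding used by these helpers
 * (assuming, as used elsewhere in this file, that INDEX10() masks to the
 * low 10 bits and ET_DMA10_WRAP is the bit just above them): for
 * v = ET_DMA10_WRAP | 0x3FE, add_10bit(&v, 3) yields ET_DMA10_WRAP | 0x001.
 * The index advances modulo 1024 while the wrap bit is preserved; toggling
 * of the wrap bit itself is done explicitly by the callers.
 */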
2674 * nic_rx_pkts - Checks the hardware for available packets
2675 * @adapter: pointer to our adapter
2677 * Returns rfd, a pointer to our MPRFD.
2679 * Checks the hardware for available packets, using completion ring
2680 * If packets are available, it gets an RFD from the recv_list, attaches
2681 * the packet to it, puts the RFD in the RecvPendList, and also returns
2682 * the pointer to the RFD.
2684 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2686 struct rx_ring *rx_local = &adapter->rx_ring;
2687 struct rx_status_block *status;
2688 struct pkt_stat_desc *psr;
2692 unsigned long flags;
2693 struct list_head *element;
2700 /* RX Status block is written by the DMA engine prior to every
2701 * interrupt. It contains the next to be used entry in the Packet
2702 * Status Ring, and also the two Free Buffer rings.
2704 status = rx_local->rx_status_block;
2705 word1 = status->word1 >> 16; /* Get the useful bits */
2707 /* If the PSR offset and wrap bit still match our local copy, the ring has not been updated yet */
2708 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2709 /* Looks like this ring is not updated yet */
2712 /* The packet status ring indicates that data is available. */
2713 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2714 (rx_local->local_psr_full & 0xFFF);
2716 /* Grab any information that is required once the PSR is
2717 * advanced, since we can no longer rely on the memory being
2720 len = psr->word1 & 0xFFFF;
2721 ring_index = (psr->word1 >> 26) & 0x03;
2722 buff_index = (psr->word1 >> 16) & 0x3FF;
2725 /* Indicate that we have used this PSR entry. */
2727 add_12bit(&rx_local->local_psr_full, 1);
2729 (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
2730 /* Clear psr full and toggle the wrap bit */
2731 rx_local->local_psr_full &= ~0xFFF;
2732 rx_local->local_psr_full ^= 0x1000;
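/* Example: with 1024 PSR entries the local index runs 0..1023; once it
 * reaches psr_num_entries, the low 12 bits are cleared and bit 12 (0x1000)
 * is toggled, so hardware and software can tell a completely full ring
 * apart from an empty one even though the 12-bit offsets are equal.
 */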
2735 writel(rx_local->local_psr_full,
2736 &adapter->regs->rxdma.psr_full_offset);
2739 if (ring_index != 1)
2744 if (ring_index > 1 ||
2746 buff_index > rx_local->fbr[1]->num_entries - 1) ||
2748 buff_index > rx_local->fbr[0]->num_entries - 1))
2750 if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
2753 /* Illegal buffer or ring index cannot be used by S/W */
2754 dev_err(&adapter->pdev->dev,
2755 "NICRxPkts PSR Entry %d indicates "
2756 "length of %d and/or bad bi(%d)\n",
2757 rx_local->local_psr_full & 0xFFF,
2762 /* Get and fill the RFD. */
2763 spin_lock_irqsave(&adapter->rcv_lock, flags);
2766 element = rx_local->recv_list.next;
2767 rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2770 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2774 list_del(&rfd->list_node);
2775 rx_local->num_ready_recv--;
2777 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2779 rfd->bufferindex = buff_index;
2780 rfd->ringindex = ring_index;
2782 /* In V1 silicon, there is a bug which screws up filtering of
2783 * runt packets. Therefore runt packet filtering is disabled
2784 * in the MAC and the packets are dropped here. They are
2785 * also counted here.
2787 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2788 adapter->stats.rx_other_errs++;
2793 /* Determine if this is a multicast packet coming in */
2794 if ((word0 & ALCATEL_MULTICAST_PKT) &&
2795 !(word0 & ALCATEL_BROADCAST_PKT)) {
2796 /* Promiscuous mode and Multicast mode are
2797 * not mutually exclusive as was first
2798 * thought. I guess Promiscuous is just
2799 * considered a super-set of the other
2800 * filters. Generally the filter is 0x2b when in
2801 * promiscuous mode.
2802 */
2803 if ((adapter->packet_filter &
2804 ET131X_PACKET_TYPE_MULTICAST)
2805 && !(adapter->packet_filter &
2806 ET131X_PACKET_TYPE_PROMISCUOUS)
2807 && !(adapter->packet_filter &
2808 ET131X_PACKET_TYPE_ALL_MULTICAST)) {
2809 /*
2810 * Note - ring_index for the fbr[] array is reversed:
2811 * ring_index 0 maps to fbr[1] (FBR0) and 1 maps to fbr[0] (FBR1)
2812 */
2813 buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
2816 /* Loop through our list to see if the
2817 * destination address of this packet
2818 * matches one in our list.
2820 for (i = 0; i < adapter->multicast_addr_count;
2823 adapter->multicast_list[i][0]
2825 adapter->multicast_list[i][1]
2827 adapter->multicast_list[i][2]
2829 adapter->multicast_list[i][3]
2831 adapter->multicast_list[i][4]
2833 adapter->multicast_list[i][5]) {
2838 /* If our index is equal to the number
2839 * of Multicast address we have, then
2840 * this means we did not find this
2841 * packet's matching address in our
2842 * list. Set the len to zero,
2843 * so we free our RFD when we return
2844 * from this function.
2846 if (i == adapter->multicast_addr_count)
2851 adapter->stats.multicast_pkts_rcvd++;
2852 } else if (word0 & ALCATEL_BROADCAST_PKT)
2853 adapter->stats.broadcast_pkts_rcvd++;
2855 /* Not sure what this counter measures in
2856 * promiscuous mode. Perhaps we should check
2857 * the MAC address to see if it is directed
2858 * to us in promiscuous mode.
2860 adapter->stats.unicast_pkts_rcvd++;
2864 struct sk_buff *skb = NULL;
2866 /*rfd->len = len - 4; */
2869 skb = dev_alloc_skb(rfd->len + 2);
2871 dev_err(&adapter->pdev->dev,
2872 "Couldn't alloc an SKB for Rx\n");
2876 adapter->net_stats.rx_bytes += rfd->len;
2878 /*
2879 * Note - ring_index for the fbr[] array is reversed,
2880 * 0 maps to fbr[1] (FBR0) and 1 maps to fbr[0] (FBR1)
2881 */
2882 memcpy(skb_put(skb, rfd->len),
2883 rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
2886 skb->dev = adapter->netdev;
2887 skb->protocol = eth_type_trans(skb, adapter->netdev);
2888 skb->ip_summed = CHECKSUM_NONE;
2895 nic_return_rfd(adapter, rfd);
2900 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
2901 * @adapter: pointer to our adapter
2903 * Assumption: the Rcv spinlock has been acquired.
2905 void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
2907 struct rfd *rfd = NULL;
2911 /* Process up to available RFD's */
2912 while (count < NUM_PACKETS_HANDLED) {
2913 if (list_empty(&adapter->rx_ring.recv_list)) {
2914 WARN_ON(adapter->rx_ring.num_ready_recv != 0);
2919 rfd = nic_rx_pkts(adapter);
2924 /* Do not receive any packets until a filter has been set.
2925 * Do not receive any packets until we have link.
2926 * If length is zero, return the RFD in order to advance the
2927 * Free buffer ring.
2928 */
2929 if (!adapter->packet_filter ||
2930 !netif_carrier_ok(adapter->netdev) ||
2934 /* Increment the number of packets we received */
2935 adapter->net_stats.rx_packets++;
2937 /* Set the status on the packet, either resources or success */
2938 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
2939 dev_warn(&adapter->pdev->dev,
2940 "RFD's are running out\n");
2945 if (count == NUM_PACKETS_HANDLED || !done) {
2946 adapter->rx_ring.unfinished_receives = true;
2947 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2948 &adapter->regs->global.watchdog_timer);
2950 /* Watchdog timer will disable itself if appropriate. */
2951 adapter->rx_ring.unfinished_receives = false;
2957 * et131x_tx_dma_memory_alloc
2958 * @adapter: pointer to our private adapter structure
2960 * Returns 0 on success and errno on failure (as defined in errno.h).
2962 * Allocates memory that will be visible both to the device and to the CPU.
2963 * The OS will pass us packets, pointers to which we will insert in the Tx
2964 * Descriptor queue. The device will read this queue to find the packets in
2965 * memory. The device will update the "status" in memory each time it xmits a
2966 * packet.
2968 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2971 struct tx_ring *tx_ring = &adapter->tx_ring;
2973 /* Allocate memory for the TCB's (Transmit Control Block) */
2974 adapter->tx_ring.tcb_ring =
2975 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
2976 if (!adapter->tx_ring.tcb_ring) {
2977 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
2981 /* Allocate enough memory for the Tx descriptor ring, and allocate
2982 * some extra so that the ring can be aligned on a 4k boundary.
2984 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
2985 tx_ring->tx_desc_ring =
2986 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, desc_size,
2987 &tx_ring->tx_desc_ring_pa, GFP_KERNEL);
2988 if (!adapter->tx_ring.tx_desc_ring) {
2989 dev_err(&adapter->pdev->dev,
2990 "Cannot alloc memory for Tx Ring\n");
2994 /* Save physical address
2996 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2997 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2998 * are ever returned, make sure the high part is retrieved here before
2999 * storing the adjusted address.
3001 /* Allocate memory for the Tx status block */
3002 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3004 &tx_ring->tx_status_pa,
3006 if (!adapter->tx_ring.tx_status) {
3007 dev_err(&adapter->pdev->dev,
3008 "Cannot alloc memory for Tx status block\n");
3015 * et131x_tx_dma_memory_free - Free all memory allocated within this module
3016 * @adapter: pointer to our private adapter structure
3020 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3024 if (adapter->tx_ring.tx_desc_ring) {
3025 /* Free memory relating to Tx rings here */
3026 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3028 dma_free_coherent(&adapter->pdev->dev,
3030 adapter->tx_ring.tx_desc_ring,
3031 adapter->tx_ring.tx_desc_ring_pa);
3032 adapter->tx_ring.tx_desc_ring = NULL;
3035 /* Free memory for the Tx status block */
3036 if (adapter->tx_ring.tx_status) {
3037 dma_free_coherent(&adapter->pdev->dev,
3039 adapter->tx_ring.tx_status,
3040 adapter->tx_ring.tx_status_pa);
3042 adapter->tx_ring.tx_status = NULL;
3044 /* Free the memory for the tcb structures */
3045 kfree(adapter->tx_ring.tcb_ring);
3049 * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
3050 * @adapter: pointer to our private adapter structure
3052 * Configure the transmit engine with the ring buffers we have created
3053 * and prepare it for use.
3055 void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
3057 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
3059 /* Load the hardware with the start of the transmit descriptor ring. */
3060 writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
3061 &txdma->pr_base_hi);
3062 writel((u32) adapter->tx_ring.tx_desc_ring_pa,
3063 &txdma->pr_base_lo);
3065 /* Initialise the transmit DMA engine */
3066 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
3068 /* Load the completion writeback physical address */
3069 writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
3070 &txdma->dma_wb_base_hi);
3071 writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
3073 *adapter->tx_ring.tx_status = 0;
3075 writel(0, &txdma->service_request);
3076 adapter->tx_ring.send_idx = 0;
3080 * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
3081 * @adapter: pointer to our adapter structure
3083 void et131x_tx_dma_disable(struct et131x_adapter *adapter)
3085 /* Setup the transmit dma configuration register */
3086 writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
3087 &adapter->regs->txdma.csr);
3091 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
3092 * @adapter: pointer to our adapter structure
3094 * Mainly used after a return to the D0 (full-power) state from a lower state.
3096 void et131x_tx_dma_enable(struct et131x_adapter *adapter)
3098 /* Setup the transmit dma configuration register for normal
3099 * operation
3100 */
3101 writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
3102 &adapter->regs->txdma.csr);
3106 * et131x_init_send - Initialize send data structures
3107 * @adapter: pointer to our private adapter structure
3109 void et131x_init_send(struct et131x_adapter *adapter)
3113 struct tx_ring *tx_ring;
3115 /* Setup some convenience pointers */
3116 tx_ring = &adapter->tx_ring;
3117 tcb = adapter->tx_ring.tcb_ring;
3119 tx_ring->tcb_qhead = tcb;
3121 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
3123 /* Go through and set up each TCB */
3124 for (ct = 0; ct++ < NUM_TCB; tcb++)
3125 /* Set the link pointer in HW TCB to the next TCB in the
3126 * chain
3127 */
3128 tcb->next = tcb + 1;
3130 /* Set the tail pointer */
3132 tx_ring->tcb_qtail = tcb;
3134 /* Curr send queue should now be empty */
3135 tx_ring->send_head = NULL;
3136 tx_ring->send_tail = NULL;
3140 * nic_send_packet - NIC specific send handler for version B silicon.
3141 * @adapter: pointer to our adapter
3142 * @tcb: pointer to struct tcb
3144 * Returns 0 or errno.
3146 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3149 struct tx_desc desc[24]; /* 24 x 16 byte */
3151 u32 thiscopy, remainder;
3152 struct sk_buff *skb = tcb->skb;
3153 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3154 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3155 unsigned long flags;
3156 struct phy_device *phydev = adapter->phydev;
3158 /* Part of the optimizations of this send routine restricts us to
3159 * sending 24 fragments per pass. In practice we should never see
3160 * more than 5 fragments.
3162 * NOTE: The older version of this function (below) can handle any
3163 * number of fragments. If needed, we can call this function,
3164 * although it is less efficient.
3169 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3171 for (i = 0; i < nr_frags; i++) {
3172 /* If there is something in this element, lets get a
3173 * descriptor from the ring and get the necessary data
3176 /* If the fragments are smaller than a standard MTU,
3177 * then map them to a single descriptor in the Tx
3178 * Desc ring. However, if they're larger, as is
3179 * possible with support for jumbo packets, then
3180 * split them each across 2 descriptors.
3182 * This will work until we determine why the hardware
3183 * doesn't seem to like large fragments.
3185 if ((skb->len - skb->data_len) <= 1514) {
3186 desc[frag].addr_hi = 0;
3187 /* Low 16bits are length, high is vlan and
3188 unused currently so zero */
3189 desc[frag].len_vlan =
3190 skb->len - skb->data_len;
3192 /* NOTE: Here, the dma_addr_t returned from
3193 * dma_map_single() is implicitly cast as a
3194 * u32. Although dma_addr_t can be
3195 * 64-bit, the address returned by
3196 * dma_map_single() is always 32-bit
3197 * addressable (as defined by the pci/dma
3200 desc[frag++].addr_lo =
3201 dma_map_single(&adapter->pdev->dev,
3207 desc[frag].addr_hi = 0;
3208 desc[frag].len_vlan =
3209 (skb->len - skb->data_len) / 2;
3211 /* NOTE: Here, the dma_addr_t returned from
3212 * dma_map_single() is implicitly cast as a
3213 * u32. Although dma_addr_t can be
3214 * 64-bit, the address returned by
3215 * dma_map_single() is always 32-bit
3216 * addressable (as defined by the pci/dma
3219 desc[frag++].addr_lo =
3220 dma_map_single(&adapter->pdev->dev,
3223 skb->data_len) / 2),
3225 desc[frag].addr_hi = 0;
3227 desc[frag].len_vlan =
3228 (skb->len - skb->data_len) / 2;
3230 /* NOTE: Here, the dma_addr_t returned from
3231 * dma_map_single() is implicitly cast as a
3232 * u32. Although dma_addr_t can be
3233 * 64-bit, the address returned by
3234 * dma_map_single() is always 32-bit
3235 * addressable (as defined by the pci/dma
3238 desc[frag++].addr_lo =
3239 dma_map_single(&adapter->pdev->dev,
3242 skb->data_len) / 2),
3244 skb->data_len) / 2),
3248 desc[frag].addr_hi = 0;
3249 desc[frag].len_vlan =
3252 /* NOTE: Here, the dma_addr_t returned from
3253 * dma_map_page() is implicitly cast as a u32.
3254 * Although dma_addr_t can be 64-bit, the address
3255 * returned by dma_map_page() is always 32-bit
3256 * addressable (as defined by the pci/dma subsystem)
3258 desc[frag++].addr_lo =
3259 dma_map_page(&adapter->pdev->dev,
3261 frags[i - 1].page_offset,
3270 if (phydev && phydev->speed == SPEED_1000) {
3271 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3272 /* Last element & Interrupt flag */
3273 desc[frag - 1].flags = 0x5;
3274 adapter->tx_ring.since_irq = 0;
3275 } else { /* Last element */
3276 desc[frag - 1].flags = 0x1;
3279 desc[frag - 1].flags = 0x5;
3281 desc[0].flags |= 2; /* First element flag */
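/* Flag usage above, inferred from the values written and the comments on
 * them: bit 0 marks the last fragment of a packet, bit 1 the first
 * fragment, and bit 2 requests an interrupt on completion (so 0x5 = last
 * fragment + interrupt, 0x1 = last fragment only, 0x2 = first fragment).
 */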
3283 tcb->index_start = adapter->tx_ring.send_idx;
3286 spin_lock_irqsave(&adapter->send_hw_lock, flags);
3288 thiscopy = NUM_DESC_PER_RING_TX -
3289 INDEX10(adapter->tx_ring.send_idx);
3291 if (thiscopy >= frag) {
3295 remainder = frag - thiscopy;
3298 memcpy(adapter->tx_ring.tx_desc_ring +
3299 INDEX10(adapter->tx_ring.send_idx), desc,
3300 sizeof(struct tx_desc) * thiscopy);
3302 add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3304 if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3305 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3306 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3307 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3311 memcpy(adapter->tx_ring.tx_desc_ring,
3313 sizeof(struct tx_desc) * remainder);
3315 add_10bit(&adapter->tx_ring.send_idx, remainder);
3318 if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3319 if (adapter->tx_ring.send_idx)
3320 tcb->index = NUM_DESC_PER_RING_TX - 1;
3322 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3324 tcb->index = adapter->tx_ring.send_idx - 1;
3326 spin_lock(&adapter->tcb_send_qlock);
3328 if (adapter->tx_ring.send_tail)
3329 adapter->tx_ring.send_tail->next = tcb;
3331 adapter->tx_ring.send_head = tcb;
3333 adapter->tx_ring.send_tail = tcb;
3335 WARN_ON(tcb->next != NULL);
3337 adapter->tx_ring.used++;
3339 spin_unlock(&adapter->tcb_send_qlock);
3341 /* Write the new write pointer back to the device. */
3342 writel(adapter->tx_ring.send_idx,
3343 &adapter->regs->txdma.service_request);
3345 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
3346 * timer to wake us up if this packet isn't followed by N more.
3348 if (phydev && phydev->speed == SPEED_1000) {
3349 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3350 &adapter->regs->global.watchdog_timer);
3352 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3358 * send_packet - Do the work to send a packet
3359 * @skb: the packet(s) to send
3360 * @adapter: a pointer to the device's private adapter structure
3362 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
3364 * Assumption: Send spinlock has been acquired
3366 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3369 struct tcb *tcb = NULL;
3371 unsigned long flags;
3373 /* All packets must have at least a MAC address and a protocol type */
3374 if (skb->len < ETH_HLEN)
3377 /* Get a TCB for this packet */
3378 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3380 tcb = adapter->tx_ring.tcb_qhead;
3383 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3387 adapter->tx_ring.tcb_qhead = tcb->next;
3389 if (adapter->tx_ring.tcb_qhead == NULL)
3390 adapter->tx_ring.tcb_qtail = NULL;
3392 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3396 if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3397 shbufva = (u16 *) skb->data;
3399 if ((shbufva[0] == 0xffff) &&
3400 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3401 tcb->flags |= fMP_DEST_BROAD;
3402 } else if ((shbufva[0] & 0x3) == 0x0001) {
3403 tcb->flags |= fMP_DEST_MULTI;
3409 /* Call the NIC specific send handler. */
3410 status = nic_send_packet(adapter, tcb);
3413 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3415 if (adapter->tx_ring.tcb_qtail)
3416 adapter->tx_ring.tcb_qtail->next = tcb;
3418 /* Apparently ready Q is empty. */
3419 adapter->tx_ring.tcb_qhead = tcb;
3421 adapter->tx_ring.tcb_qtail = tcb;
3422 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3425 WARN_ON(adapter->tx_ring.used > NUM_TCB);
3430 * et131x_send_packets - This function is called by the OS to send packets
3431 * @skb: the packet(s) to send
3432 * @netdev: device on which to TX the above packet(s)
3434 * Return 0 in almost all cases; non-zero value in extreme hard failure only
3436 int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3439 struct et131x_adapter *adapter = netdev_priv(netdev);
3441 /* Send these packets
3443 * NOTE: The Linux Tx entry point is only given one packet at a time
3444 * to Tx, so the PacketCount and its associated array make no sense here
3447 /* TCB is not available */
3448 if (adapter->tx_ring.used >= NUM_TCB) {
3449 /* NOTE: If there's an error on send, no need to queue the
3450 * packet under Linux; if we just send an error up to the
3451 * netif layer, it will resend the skb to us.
3455 /* We need to see if the link is up; if it's not, make the
3456 * netif layer think we're good and drop the packet
3458 if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3459 !netif_carrier_ok(netdev)) {
3460 dev_kfree_skb_any(skb);
3463 adapter->net_stats.tx_dropped++;
3465 status = send_packet(skb, adapter);
3466 if (status != 0 && status != -ENOMEM) {
3467 /* On any other error, make netif think we're
3468 * OK and drop the packet
3470 dev_kfree_skb_any(skb);
3472 adapter->net_stats.tx_dropped++;
3480 * free_send_packet - Recycle a struct tcb
3481 * @adapter: pointer to our adapter
3482 * @tcb: pointer to struct tcb
3484 * Complete the packet if necessary
3485 * Assumption - Send spinlock has been acquired
3487 static inline void free_send_packet(struct et131x_adapter *adapter,
3490 unsigned long flags;
3491 struct tx_desc *desc = NULL;
3492 struct net_device_stats *stats = &adapter->net_stats;
3494 if (tcb->flags & fMP_DEST_BROAD)
3495 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3496 else if (tcb->flags & fMP_DEST_MULTI)
3497 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3499 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3502 stats->tx_bytes += tcb->skb->len;
3504 /* Iterate through the TX descriptors on the ring
3505 * corresponding to this packet and unmap the fragments
3509 desc = (struct tx_desc *)
3510 (adapter->tx_ring.tx_desc_ring +
3511 INDEX10(tcb->index_start));
3513 dma_unmap_single(&adapter->pdev->dev,
3515 desc->len_vlan, DMA_TO_DEVICE);
3517 add_10bit(&tcb->index_start, 1);
3518 if (INDEX10(tcb->index_start) >=
3519 NUM_DESC_PER_RING_TX) {
3520 tcb->index_start &= ~ET_DMA10_MASK;
3521 tcb->index_start ^= ET_DMA10_WRAP;
3523 } while (desc != (adapter->tx_ring.tx_desc_ring +
3524 INDEX10(tcb->index)));
3526 dev_kfree_skb_any(tcb->skb);
3529 memset(tcb, 0, sizeof(struct tcb));
3531 /* Add the TCB to the Ready Q */
3532 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3534 adapter->net_stats.tx_packets++;
3536 if (adapter->tx_ring.tcb_qtail)
3537 adapter->tx_ring.tcb_qtail->next = tcb;
3539 /* Apparently ready Q is empty. */
3540 adapter->tx_ring.tcb_qhead = tcb;
3542 adapter->tx_ring.tcb_qtail = tcb;
3544 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3545 WARN_ON(adapter->tx_ring.used < 0);
3549 * et131x_free_busy_send_packets - Free and complete the stopped active sends
3550 * @adapter: pointer to our adapter
3552 * Assumption - Send spinlock has been acquired
3554 void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3557 unsigned long flags;
3560 /* Any packets being sent? Check the first TCB on the send list */
3561 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3563 tcb = adapter->tx_ring.send_head;
3565 while (tcb != NULL && freed < NUM_TCB) {
3566 struct tcb *next = tcb->next;
3568 adapter->tx_ring.send_head = next;
3571 adapter->tx_ring.send_tail = NULL;
3573 adapter->tx_ring.used--;
3575 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3578 free_send_packet(adapter, tcb);
3580 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3582 tcb = adapter->tx_ring.send_head;
3585 WARN_ON(freed == NUM_TCB);
3587 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3589 adapter->tx_ring.used = 0;
3593 * et131x_handle_send_interrupt - Interrupt handler for sending processing
3594 * @adapter: pointer to our adapter
3596 * Re-claim the send resources, complete sends and get more to send from
3597 * the send wait queue.
3599 * Assumption - Send spinlock has been acquired
3601 void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3603 unsigned long flags;
3608 serviced = readl(&adapter->regs->txdma.new_service_complete);
3609 index = INDEX10(serviced);
3611 /* Has the ring wrapped? Process any descriptors that do not have
3612 * the same "wrap" indicator as the current completion indicator
3614 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3616 tcb = adapter->tx_ring.send_head;
3619 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3620 index < INDEX10(tcb->index)) {
3621 adapter->tx_ring.used--;
3622 adapter->tx_ring.send_head = tcb->next;
3623 if (tcb->next == NULL)
3624 adapter->tx_ring.send_tail = NULL;
3626 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3627 free_send_packet(adapter, tcb);
3628 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3630 /* Go to the next packet */
3631 tcb = adapter->tx_ring.send_head;
3634 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3635 && index > (tcb->index & ET_DMA10_MASK)) {
3636 adapter->tx_ring.used--;
3637 adapter->tx_ring.send_head = tcb->next;
3638 if (tcb->next == NULL)
3639 adapter->tx_ring.send_tail = NULL;
3641 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3642 free_send_packet(adapter, tcb);
3643 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3645 /* Go to the next packet */
3646 tcb = adapter->tx_ring.send_head;
3649 /* Wake up the queue when we hit a low-water mark */
3650 if (adapter->tx_ring.used <= NUM_TCB / 3)
3651 netif_wake_queue(adapter->netdev);
3653 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3656 /* ETHTOOL functions */
3658 static int et131x_get_settings(struct net_device *netdev,
3659 struct ethtool_cmd *cmd)
3661 struct et131x_adapter *adapter = netdev_priv(netdev);
3663 return phy_ethtool_gset(adapter->phydev, cmd);
3666 static int et131x_set_settings(struct net_device *netdev,
3667 struct ethtool_cmd *cmd)
3669 struct et131x_adapter *adapter = netdev_priv(netdev);
3671 return phy_ethtool_sset(adapter->phydev, cmd);
3674 static int et131x_get_regs_len(struct net_device *netdev)
3676 #define ET131X_REGS_LEN 256
3677 return ET131X_REGS_LEN * sizeof(u32);
3680 static void et131x_get_regs(struct net_device *netdev,
3681 struct ethtool_regs *regs, void *regs_data)
3683 struct et131x_adapter *adapter = netdev_priv(netdev);
3684 struct address_map __iomem *aregs = adapter->regs;
3685 u32 *regs_buff = regs_data;
3688 memset(regs_data, 0, et131x_get_regs_len(netdev));
3690 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3691 adapter->pdev->device;
3694 et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3695 et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3696 et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3697 et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3698 et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3699 et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3700 et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3701 /* Autoneg next page transmit reg */
3702 et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3703 /* Link partner next page reg */
3704 et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3705 et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3706 et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3707 et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3708 et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3709 et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3710 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3711 (u16 *)&regs_buff[num++]);
3712 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3713 (u16 *)&regs_buff[num++]);
3714 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3715 (u16 *)&regs_buff[num++]);
3716 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3717 (u16 *)&regs_buff[num++]);
3718 et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3719 et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3720 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3721 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3722 (u16 *)&regs_buff[num++]);
3723 et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3724 et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3725 et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3728 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3729 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3730 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3731 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3732 regs_buff[num++] = readl(&aregs->global.pm_csr);
3733 regs_buff[num++] = adapter->stats.interrupt_status;
3734 regs_buff[num++] = readl(&aregs->global.int_mask);
3735 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3736 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3737 regs_buff[num++] = readl(&aregs->global.sw_reset);
3738 regs_buff[num++] = readl(&aregs->global.slv_timer);
3739 regs_buff[num++] = readl(&aregs->global.msi_config);
3740 regs_buff[num++] = readl(&aregs->global.loopback);
3741 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3744 regs_buff[num++] = readl(&aregs->txdma.csr);
3745 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3746 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3747 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3748 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3749 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3750 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3751 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3752 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3753 regs_buff[num++] = readl(&aregs->txdma.service_request);
3754 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3755 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3756 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3757 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3758 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3759 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3760 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3761 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3762 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3763 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3764 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3765 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3766 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3767 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3768 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3769 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3772 regs_buff[num++] = readl(&aregs->rxdma.csr);
3773 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3774 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3775 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3776 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3777 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3778 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3779 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3780 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3781 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3782 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3783 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3784 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3785 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3786 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3787 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3788 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3789 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3790 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3791 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3792 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3793 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3794 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3795 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3796 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3797 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3798 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3799 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3800 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3803 #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
3804 static void et131x_get_drvinfo(struct net_device *netdev,
3805 struct ethtool_drvinfo *info)
3807 struct et131x_adapter *adapter = netdev_priv(netdev);
3809 strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3810 strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3811 strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3814 static struct ethtool_ops et131x_ethtool_ops = {
3815 .get_settings = et131x_get_settings,
3816 .set_settings = et131x_set_settings,
3817 .get_drvinfo = et131x_get_drvinfo,
3818 .get_regs_len = et131x_get_regs_len,
3819 .get_regs = et131x_get_regs,
3820 .get_link = ethtool_op_get_link,
3823 void et131x_set_ethtool_ops(struct net_device *netdev)
3825 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
3831 * et131x_hwaddr_init - set up the MAC Address on the ET1310
3832 * @adapter: pointer to our private adapter structure
3834 void et131x_hwaddr_init(struct et131x_adapter *adapter)
3836 /* If we have our default MAC address from init and no MAC address from
3837 * EEPROM, then we need to generate the last octet and set it on the
3838 * device.
3839 */
3840 if (adapter->rom_addr[0] == 0x00 &&
3841 adapter->rom_addr[1] == 0x00 &&
3842 adapter->rom_addr[2] == 0x00 &&
3843 adapter->rom_addr[3] == 0x00 &&
3844 adapter->rom_addr[4] == 0x00 &&
3845 adapter->rom_addr[5] == 0x00) {
3847 * We need to randomly generate the last octet so we
3848 * decrease our chances of setting the mac address to
3849 * the same as another one of our cards in the system
3851 get_random_bytes(&adapter->addr[5], 1);
3853 * We have the default value in the register we are
3854 * working with so we need to copy the current
3855 * address into the permanent address
3857 memcpy(adapter->rom_addr,
3858 adapter->addr, ETH_ALEN);
3860 /* We do not have an override address, so set the
3861 * current address to the permanent address and add
3864 memcpy(adapter->addr,
3865 adapter->rom_addr, ETH_ALEN);
3870 * et131x_pci_init - initial PCI setup
3871 * @adapter: pointer to our private adapter structure
3872 * @pdev: our PCI device
3874 * Perform the initial setup of PCI registers and if possible initialise
3875 * the MAC address. At this point the I/O registers have yet to be mapped
3877 static int et131x_pci_init(struct et131x_adapter *adapter,
3878 struct pci_dev *pdev)
3884 if (et131x_init_eeprom(adapter) < 0)
3887 /* Let's set up the PORT LOGIC Register. First we need to know what
3888 * the max_payload_size is
3890 if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
3892 "Could not read PCI config space for Max Payload Size\n");
3896 /* Program the Ack/Nak latency and replay timers */
3897 max_payload &= 0x07; /* Only the lower 3 bits are valid */
3899 if (max_payload < 2) {
3900 static const u16 acknak[2] = { 0x76, 0xD0 };
3901 static const u16 replay[2] = { 0x1E0, 0x2ED };
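/* The table index is the max_payload value read above. If max_payload
 * follows the usual PCIe encoding (0 = 128-byte, 1 = 256-byte payload),
 * then a 128-byte payload programs an Ack/Nak latency of 0x76 and a replay
 * timer of 0x1E0, and a 256-byte payload uses 0xD0 and 0x2ED; larger
 * payload sizes are left at the hardware defaults, hence the
 * max_payload < 2 check.
 */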
3903 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3904 acknak[max_payload])) {
3906 "Could not write PCI config space for ACK/NAK\n");
3909 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3910 replay[max_payload])) {
3912 "Could not write PCI config space for Replay Timer\n");
3917 /* l0s and l1 latency timers. We are using default values.
3918 * Representing 001 for L0s and 010 for L1
3920 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3922 "Could not write PCI config space for Latency Timers\n");
3926 /* Change the max read size to 2k */
3927 if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
3929 "Could not read PCI config space for Max read size\n");
3933 read_size_reg &= 0x8f;
3934 read_size_reg |= 0x40;
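/* Assuming the usual PCIe-style encoding for this vendor-specific register
 * (size = 128 << field), clearing bits 4-6 and then setting 0x40 puts the
 * 3-bit field at 100b, i.e. a 2048-byte maximum read size, which matches
 * the "2k" in the comment above.
 */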
3936 if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
3938 "Could not write PCI config space for Max read size\n");
3942 /* Get MAC address from config space if an eeprom exists, otherwise
3943 * the MAC address there will not be valid
3945 if (!adapter->has_eeprom) {
3946 et131x_hwaddr_init(adapter);
3950 for (i = 0; i < ETH_ALEN; i++) {
3951 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3952 adapter->rom_addr + i)) {
3953 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3957 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
3962 * et131x_enable_interrupts - enable interrupt
3963 * @adapter: et131x device
3965 * Enable the appropriate interrupts on the ET131x, according to our
3966 * configuration
3967 */
3968 void et131x_enable_interrupts(struct et131x_adapter *adapter)
3972 /* Enable all global interrupts */
3973 if (adapter->flowcontrol == FLOW_TXONLY ||
3974 adapter->flowcontrol == FLOW_BOTH)
3975 mask = INT_MASK_ENABLE;
3977 mask = INT_MASK_ENABLE_NO_FLOW;
3979 writel(mask, &adapter->regs->global.int_mask);
3983 * et131x_error_timer_handler
3984 * @data: timer-specific variable; here a pointer to our adapter structure
3986 * The routine called when the error timer expires, to track the number of
3987 * recurring errors.
3988 */
3989 void et131x_error_timer_handler(unsigned long data)
3991 struct et131x_adapter *adapter = (struct et131x_adapter *) data;
3992 struct phy_device *phydev = adapter->phydev;
3994 if (et1310_in_phy_coma(adapter)) {
3995 /* Bring the device immediately out of coma, to
3996 * prevent it from sleeping indefinitely, this
3997 * mechanism could be improved! */
3998 et1310_disable_phy_coma(adapter);
3999 adapter->boot_coma = 20;
4001 et1310_update_macstat_host_counters(adapter);
4004 if (!phydev->link && adapter->boot_coma < 11)
4005 adapter->boot_coma++;
4007 if (adapter->boot_coma == 10) {
4008 if (!phydev->link) {
4009 if (!et1310_in_phy_coma(adapter)) {
4010 /* NOTE - This was originally a 'sync with
4011 * interrupt'. How to do that under Linux?
4013 et131x_enable_interrupts(adapter);
4014 et1310_enable_phy_coma(adapter);
4019 /* This is a periodic timer, so reschedule */
4020 mod_timer(&adapter->error_timer, jiffies +
4021 TX_ERROR_PERIOD * HZ / 1000);
4025 * et131x_configure_global_regs - configure JAGCore global regs
4026 * @adapter: pointer to our adapter structure
4028 * Used to configure the global registers on the JAGCore
4030 void et131x_configure_global_regs(struct et131x_adapter *adapter)
4032 struct global_regs __iomem *regs = &adapter->regs->global;
4034 writel(0, &regs->rxq_start_addr);
4035 writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
4037 if (adapter->registry_jumbo_packet < 2048) {
4038 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
4039 * block of RAM that the driver can split between Tx
* and Rx as it desires. Our default is to split it 50/50.
writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
4045 } else if (adapter->registry_jumbo_packet < 8192) {
4046 /* For jumbo packets > 2k but < 8k, split 50-50. */
writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
4050 /* 9216 is the only packet size greater than 8k that
4051 * is available. The Tx buffer has to be big enough
4052 * for one whole packet on the Tx side. We'll make
4053 * the Tx 9408, and give the rest to Rx
writel(0x01b3, &regs->rxq_end_addr);
writel(0x01b4, &regs->txq_start_addr);
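/* Assuming the 1024 16-byte words implied by INTERNAL_MEM_SIZE, words
 * 0-0x1b3 (436 words, 6976 bytes) go to Rx and the remaining 588 words
 * provide the 9408-byte Tx buffer described above.
 */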
4059 /* Initialize the loopback register. Disable all loopbacks. */
writel(0, &regs->loopback);
writel(0, &regs->msi_config);
4065 /* By default, disable the watchdog timer. It will be enabled when
4066 * a packet is queued.
writel(0, &regs->watchdog_timer);
4072 * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
4073 * @adapter: pointer to our private adapter structure
4075 * Returns 0 on success, errno on failure (as defined in errno.h)
4077 void et131x_adapter_setup(struct et131x_adapter *adapter)
4079 /* Configure the JAGCore */
4080 et131x_configure_global_regs(adapter);
4082 et1310_config_mac_regs1(adapter);
4084 /* Configure the MMC registers */
4085 /* All we need to do is initialize the Memory Control Register */
4086 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
4088 et1310_config_rxmac_regs(adapter);
4089 et1310_config_txmac_regs(adapter);
4091 et131x_config_rx_dma_regs(adapter);
4092 et131x_config_tx_dma_regs(adapter);
4094 et1310_config_macstat_regs(adapter);
4096 et1310_phy_power_down(adapter, 0);
4097 et131x_xcvr_init(adapter);
4101 * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
4102 * @adapter: pointer to our private adapter structure
4104 void et131x_soft_reset(struct et131x_adapter *adapter)
4106 /* Disable MAC Core */
4107 writel(0xc00f0000, &adapter->regs->mac.cfg1);
4109 /* Set everything to a reset value */
4110 writel(0x7F, &adapter->regs->global.sw_reset);
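/* 0x7F presumably asserts a reset bit for each of the seven JAGCore
 * sub-blocks; the two cfg1 writes below appear to hold and then release
 * the MAC-level reset bits (interpretation of the magic values, not from
 * a datasheet).
 */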
4111 writel(0x000f0000, &adapter->regs->mac.cfg1);
4112 writel(0x00000000, &adapter->regs->mac.cfg1);
4116 * et131x_adapter_memory_alloc
4117 * @adapter: pointer to our private adapter structure
4119 * Returns 0 on success, errno on failure (as defined in errno.h).
4121 * Allocate all the memory blocks for send, receive and others.
4123 int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4127 /* Allocate memory for the Tx Ring */
4128 status = et131x_tx_dma_memory_alloc(adapter);
4130 dev_err(&adapter->pdev->dev,
4131 "et131x_tx_dma_memory_alloc FAILED\n");
4134 /* Receive buffer memory allocation */
4135 status = et131x_rx_dma_memory_alloc(adapter);
4137 dev_err(&adapter->pdev->dev,
4138 "et131x_rx_dma_memory_alloc FAILED\n");
4139 et131x_tx_dma_memory_free(adapter);
4143 /* Init receive data structures */
4144 status = et131x_init_recv(adapter);
4146 dev_err(&adapter->pdev->dev,
4147 "et131x_init_recv FAILED\n");
4148 et131x_tx_dma_memory_free(adapter);
4149 et131x_rx_dma_memory_free(adapter);
4155 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
4156 * @adapter: pointer to our private adapter structure
4158 void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4160 /* Free DMA memory */
4161 et131x_tx_dma_memory_free(adapter);
4162 et131x_rx_dma_memory_free(adapter);
4165 static void et131x_adjust_link(struct net_device *netdev)
4167 struct et131x_adapter *adapter = netdev_priv(netdev);
4168 struct phy_device *phydev = adapter->phydev;
4170 if (netif_carrier_ok(netdev)) {
4171 adapter->boot_coma = 20;
4173 if (phydev && phydev->speed == SPEED_10) {
* NOTE - Is there a way to query this without TruePHY?
4177 * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
4178 * EMI_TRUEPHY_A13O) {
4182 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4184 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4186 et131x_mii_write(adapter, PHY_INDEX_REG,
4187 register18 | 0x8402);
4188 et131x_mii_write(adapter, PHY_DATA_REG,
4190 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4194 et1310_config_flow_control(adapter);
4196 if (phydev && phydev->speed == SPEED_1000 &&
4197 adapter->registry_jumbo_packet > 2048) {
u16 reg;

et131x_mii_read(adapter, PHY_CONFIG, &reg);
4201 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4202 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4203 et131x_mii_write(adapter, PHY_CONFIG, reg);
4206 et131x_set_rx_dma_timer(adapter);
4207 et1310_config_mac_regs2(adapter);
4210 if (phydev && phydev->link != adapter->link) {
4212 * Check to see if we are in coma mode and if
4213 * so, disable it because we will not be able
4214 * to read PHY values until we are out.
4216 if (et1310_in_phy_coma(adapter))
4217 et1310_disable_phy_coma(adapter);
4220 adapter->boot_coma = 20;
4222 dev_warn(&adapter->pdev->dev,
4223 "Link down - cable problem ?\n");
4224 adapter->boot_coma = 0;
4226 if (phydev->speed == SPEED_10) {
/* NOTE - Is there a way to query this without TruePHY?
4229 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
4234 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4236 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4238 et131x_mii_write(adapter, PHY_INDEX_REG,
4239 register18 | 0x8402);
4240 et131x_mii_write(adapter, PHY_DATA_REG,
4242 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4246 /* Free the packets being actively sent & stopped */
4247 et131x_free_busy_send_packets(adapter);
4249 /* Re-initialize the send structures */
4250 et131x_init_send(adapter);
4253 * Bring the device back to the state it was during
4254 * init prior to autonegotiation being complete. This
4255 * way, when we get the auto-neg complete interrupt,
4256 * we can complete init by calling config_mac_regs2.
4258 et131x_soft_reset(adapter);
4260 /* Setup ET1310 as per the documentation */
4261 et131x_adapter_setup(adapter);
4263 /* perform reset of tx/rx */
4264 et131x_disable_txrx(netdev);
4265 et131x_enable_txrx(netdev);
4268 adapter->link = phydev->link;
4270 phy_print_status(phydev);
4274 static int et131x_mii_probe(struct net_device *netdev)
4276 struct et131x_adapter *adapter = netdev_priv(netdev);
4277 struct phy_device *phydev = NULL;
4279 phydev = phy_find_first(adapter->mii_bus);
4281 dev_err(&adapter->pdev->dev, "no PHY found\n");
4285 phydev = phy_connect(netdev, dev_name(&phydev->dev),
4286 &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4288 if (IS_ERR(phydev)) {
4289 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4290 return PTR_ERR(phydev);
4293 phydev->supported &= (SUPPORTED_10baseT_Half
4294 | SUPPORTED_10baseT_Full
4295 | SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full
| SUPPORTED_Autoneg
| SUPPORTED_MII
| SUPPORTED_TP);
4301 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4302 phydev->supported |= SUPPORTED_1000baseT_Full;
4304 phydev->advertising = phydev->supported;
4305 adapter->phydev = phydev;
4307 dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
4308 "(mii_bus:phy_addr=%s)\n",
4309 phydev->drv->name, dev_name(&phydev->dev));
4315 * et131x_adapter_init
* @netdev: pointer to the net_device struct to be initialized
4317 * @pdev: pointer to the PCI device
4319 * Initialize the data structures for the et131x_adapter object and link
4320 * them together with the platform provided device structures.
4322 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4323 struct pci_dev *pdev)
4325 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4327 struct et131x_adapter *adapter;
4329 /* Allocate private adapter struct and copy in relevant information */
4330 adapter = netdev_priv(netdev);
4331 adapter->pdev = pci_dev_get(pdev);
4332 adapter->netdev = netdev;
4334 /* Do the same for the netdev struct */
4335 netdev->irq = pdev->irq;
4336 netdev->base_addr = pci_resource_start(pdev, 0);
4338 /* Initialize spinlocks here */
4339 spin_lock_init(&adapter->lock);
4340 spin_lock_init(&adapter->tcb_send_qlock);
4341 spin_lock_init(&adapter->tcb_ready_qlock);
4342 spin_lock_init(&adapter->send_hw_lock);
4343 spin_lock_init(&adapter->rcv_lock);
4344 spin_lock_init(&adapter->rcv_pend_lock);
4345 spin_lock_init(&adapter->fbr_lock);
4346 spin_lock_init(&adapter->phy_lock);
4348 adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
4350 /* Set the MAC address to a default */
4351 memcpy(adapter->addr, default_mac, ETH_ALEN);
4357 * et131x_disable_interrupts - interrupt disable
4358 * @adapter: et131x device
4360 * Block all interrupts from the et131x device at the device itself
4362 void et131x_disable_interrupts(struct et131x_adapter *adapter)
4364 /* Disable all global interrupts */
4365 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
4369 * et131x_pci_setup - Perform device initialization
4370 * @pdev: a pointer to the device's pci_dev structure
4371 * @ent: this device's entry in the pci_device_id table
4373 * Returns 0 on success, errno on failure (as defined in errno.h)
4375 * Registered in the pci_driver structure, this function is called when the
4376 * PCI subsystem finds a new PCI device which matches the information
4377 * contained in the pci_device_id table. This routine is the equivalent to
4378 * a device insertion routine.
4380 static int __devinit et131x_pci_setup(struct pci_dev *pdev,
4381 const struct pci_device_id *ent)
4384 struct net_device *netdev;
4385 struct et131x_adapter *adapter;
4388 result = pci_enable_device(pdev);
4390 dev_err(&pdev->dev, "pci_enable_device() failed\n");
4394 /* Perform some basic PCI checks */
4395 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4396 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
4400 if (pci_request_regions(pdev, DRIVER_NAME)) {
4401 dev_err(&pdev->dev, "Can't get PCI resources\n");
4405 pci_set_master(pdev);
4407 /* Check the DMA addressing support of this device */
4408 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (result) {
dev_err(&pdev->dev,
"Unable to obtain 64 bit DMA for consistent allocations\n");
goto err_release_res;
}
4415 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (result) {
dev_err(&pdev->dev,
"Unable to obtain 32 bit DMA for consistent allocations\n");
goto err_release_res;
}
4423 dev_err(&pdev->dev, "No usable DMA addressing method\n");
4425 goto err_release_res;
4428 /* Allocate netdev and private adapter structs */
4429 netdev = et131x_device_alloc();
4431 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
4433 goto err_release_res;
4436 SET_NETDEV_DEV(netdev, &pdev->dev);
4437 et131x_set_ethtool_ops(netdev);
4439 adapter = et131x_adapter_init(netdev, pdev);
4441 /* Initialise the PCI setup for the device */
4442 et131x_pci_init(adapter, pdev);
4444 /* Map the bus-relative registers to system virtual memory */
4445 adapter->regs = pci_ioremap_bar(pdev, 0);
4446 if (!adapter->regs) {
4447 dev_err(&pdev->dev, "Cannot map device registers\n");
4452 /* If Phy COMA mode was enabled when we went down, disable it here. */
4453 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
4455 /* Issue a global reset to the et1310 */
4456 et131x_soft_reset(adapter);
4458 /* Disable all interrupts (paranoid) */
4459 et131x_disable_interrupts(adapter);
4461 /* Allocate DMA memory */
4462 result = et131x_adapter_memory_alloc(adapter);
4464 dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
4468 /* Init send data structures */
4469 et131x_init_send(adapter);
4471 /* Set up the task structure for the ISR's deferred handler */
4472 INIT_WORK(&adapter->task, et131x_isr_handler);
4474 /* Copy address into the net_device struct */
4475 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4477 /* Init variable for counting how long we do not have link status */
4478 adapter->boot_coma = 0;
4479 et1310_disable_phy_coma(adapter);
4481 /* Setup the mii_bus struct */
4482 adapter->mii_bus = mdiobus_alloc();
4483 if (!adapter->mii_bus) {
4484 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
4488 adapter->mii_bus->name = "et131x_eth_mii";
4489 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
4490 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4491 adapter->mii_bus->priv = netdev;
4492 adapter->mii_bus->read = et131x_mdio_read;
4493 adapter->mii_bus->write = et131x_mdio_write;
4494 adapter->mii_bus->reset = et131x_mdio_reset;
4495 adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
4496 if (!adapter->mii_bus->irq) {
4497 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
4501 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4502 adapter->mii_bus->irq[ii] = PHY_POLL;
4504 if (mdiobus_register(adapter->mii_bus)) {
4505 dev_err(&pdev->dev, "failed to register MII bus\n");
4506 mdiobus_free(adapter->mii_bus);
4507 goto err_mdio_free_irq;
4510 if (et131x_mii_probe(netdev)) {
4511 dev_err(&pdev->dev, "failed to probe MII bus\n");
4512 goto err_mdio_unregister;
4515 /* Setup et1310 as per the documentation */
4516 et131x_adapter_setup(adapter);
4518 /* We can enable interrupts now
4520 * NOTE - Because registration of interrupt handler is done in the
* device's open(), defer enabling device interrupts to that point.
4525 /* Register the net_device struct with the Linux network layer */
4526 result = register_netdev(netdev);
4528 dev_err(&pdev->dev, "register_netdev() failed\n");
4529 goto err_mdio_unregister;
4532 /* Register the net_device struct with the PCI subsystem. Save a copy
4533 * of the PCI config space for this device now that the device has
4534 * been initialized, just in case it needs to be quickly restored.
4536 pci_set_drvdata(pdev, netdev);
4537 pci_save_state(adapter->pdev);
4541 err_mdio_unregister:
4542 mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
kfree(adapter->mii_bus->irq);
4546 mdiobus_free(adapter->mii_bus);
4548 et131x_adapter_memory_free(adapter);
4550 iounmap(adapter->regs);
4553 free_netdev(netdev);
err_release_res:
pci_release_regions(pdev);
4557 pci_disable_device(pdev);
4564 * @pdev: a pointer to the device's pci_dev structure
4566 * Registered in the pci_driver structure, this function is called when the
4567 * PCI subsystem detects that a PCI device which matches the information
4568 * contained in the pci_device_id table has been removed.
4570 static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4572 struct net_device *netdev = pci_get_drvdata(pdev);
4573 struct et131x_adapter *adapter = netdev_priv(netdev);
4575 unregister_netdev(netdev);
4576 mdiobus_unregister(adapter->mii_bus);
4577 kfree(adapter->mii_bus->irq);
4578 mdiobus_free(adapter->mii_bus);
4580 et131x_adapter_memory_free(adapter);
4581 iounmap(adapter->regs);
4584 free_netdev(netdev);
4585 pci_release_regions(pdev);
4586 pci_disable_device(pdev);
4589 #ifdef CONFIG_PM_SLEEP
4590 static int et131x_suspend(struct device *dev)
4592 struct pci_dev *pdev = to_pci_dev(dev);
4593 struct net_device *netdev = pci_get_drvdata(pdev);
4595 if (netif_running(netdev)) {
4596 netif_device_detach(netdev);
4597 et131x_down(netdev);
4598 pci_save_state(pdev);
4604 static int et131x_resume(struct device *dev)
4606 struct pci_dev *pdev = to_pci_dev(dev);
4607 struct net_device *netdev = pci_get_drvdata(pdev);
4609 if (netif_running(netdev)) {
pci_restore_state(pdev);
et131x_up(netdev);
4612 netif_device_attach(netdev);
4618 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4619 #define ET131X_PM_OPS (&et131x_pm_ops)
#else
#define ET131X_PM_OPS NULL
#endif
4624 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
4625 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
{0,}
};
4629 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4631 static struct pci_driver et131x_driver = {
4632 .name = DRIVER_NAME,
4633 .id_table = et131x_pci_table,
4634 .probe = et131x_pci_setup,
4635 .remove = __devexit_p(et131x_pci_remove),
4636 .driver.pm = ET131X_PM_OPS,
4640 * et131x_init_module - The "main" entry point called on driver initialization
4642 * Returns 0 on success, errno on failure (as defined in errno.h)
4644 static int __init et131x_init_module(void)
4646 return pci_register_driver(&et131x_driver);
4650 * et131x_cleanup_module - The entry point called on driver cleanup
4652 static void __exit et131x_cleanup_module(void)
4654 pci_unregister_driver(&et131x_driver);
4657 module_init(et131x_init_module);
4658 module_exit(et131x_cleanup_module);
4663 * et131x_isr - The Interrupt Service Routine for the driver.
4664 * @irq: the IRQ on which the interrupt was received.
4665 * @dev_id: device-specific info (here a pointer to a net_device struct)
4667 * Returns a value indicating if the interrupt was handled.
4669 irqreturn_t et131x_isr(int irq, void *dev_id)
4671 bool handled = true;
4672 struct net_device *netdev = (struct net_device *)dev_id;
4673 struct et131x_adapter *adapter = NULL;
4676 if (!netif_device_present(netdev)) {
4681 adapter = netdev_priv(netdev);
4683 /* If the adapter is in low power state, then it should not
4684 * recognize any interrupt
4687 /* Disable Device Interrupts */
4688 et131x_disable_interrupts(adapter);
4690 /* Get a copy of the value in the interrupt status register
4691 * so we can process the interrupting section
4693 status = readl(&adapter->regs->global.int_status);
4695 if (adapter->flowcontrol == FLOW_TXONLY ||
4696 adapter->flowcontrol == FLOW_BOTH) {
4697 status &= ~INT_MASK_ENABLE;
4699 status &= ~INT_MASK_ENABLE_NO_FLOW;
4702 /* Make sure this is our interrupt */
4705 et131x_enable_interrupts(adapter);
4709 /* This is our interrupt, so process accordingly */
4711 if (status & ET_INTR_WATCHDOG) {
4712 struct tcb *tcb = adapter->tx_ring.send_head;
4715 if (++tcb->stale > 1)
4716 status |= ET_INTR_TXDMA_ISR;
4718 if (adapter->rx_ring.unfinished_receives)
4719 status |= ET_INTR_RXDMA_XFR_DONE;
4720 else if (tcb == NULL)
4721 writel(0, &adapter->regs->global.watchdog_timer);
4723 status &= ~ET_INTR_WATCHDOG;
4727 /* This interrupt has in some way been "handled" by
4728 * the ISR. Either it was a spurious Rx interrupt, or
* it was a Tx interrupt that has been filtered by the ISR.
4732 et131x_enable_interrupts(adapter);
4736 /* We need to save the interrupt status value for use in our
* DPC. We will clear the software copy of that in that routine.
4740 adapter->stats.interrupt_status = status;
4742 /* Schedule the ISR handler as a bottom-half task in the
* kernel's tq_immediate queue, and mark the queue for execution.
4746 schedule_work(&adapter->task);
4748 return IRQ_RETVAL(handled);
4752 * et131x_isr_handler - The ISR handler
* @work: pointer to the work_struct embedded in our private adapter structure
4755 * scheduled to run in a deferred context by the ISR. This is where the ISR's
4756 * work actually gets done.
4758 void et131x_isr_handler(struct work_struct *work)
4760 struct et131x_adapter *adapter =
4761 container_of(work, struct et131x_adapter, task);
4762 u32 status = adapter->stats.interrupt_status;
4763 struct address_map __iomem *iomem = adapter->regs;
4766 * These first two are by far the most common. Once handled, we clear
4767 * their two bits in the status word. If the word is now zero, we
4770 /* Handle all the completed Transmit interrupts */
4771 if (status & ET_INTR_TXDMA_ISR)
4772 et131x_handle_send_interrupt(adapter);
4774 /* Handle all the completed Receives interrupts */
4775 if (status & ET_INTR_RXDMA_XFR_DONE)
4776 et131x_handle_recv_interrupt(adapter);
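/* 0xffffffd7 is ~(0x08 | 0x20), i.e. it clears the two bits handled above,
 * assuming ET_INTR_TXDMA_ISR = 0x08 and ET_INTR_RXDMA_XFR_DONE = 0x20 as
 * used elsewhere in this driver.
 */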
4778 status &= 0xffffffd7;
4781 /* Handle the TXDMA Error interrupt */
4782 if (status & ET_INTR_TXDMA_ERR) {
4785 /* Following read also clears the register (COR) */
4786 txdma_err = readl(&iomem->txdma.tx_dma_error);
4788 dev_warn(&adapter->pdev->dev,
4789 "TXDMA_ERR interrupt, error = %d\n",
4793 /* Handle Free Buffer Ring 0 and 1 Low interrupt */
if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4797 * This indicates the number of unused buffers in
4798 * RXDMA free buffer ring 0 is <= the limit you
4799 * programmed. Free buffer resources need to be
4800 * returned. Free buffers are consumed as packets
4801 * are passed from the network to the host. The host
4802 * becomes aware of the packets from the contents of
4803 * the packet status ring. This ring is queried when
4804 * the packet done interrupt occurs. Packets are then
4805 * passed to the OS. When the OS is done with the
4806 * packets the resources can be returned to the
4807 * ET1310 for re-use. This interrupt is one method of
4808 * returning resources.
4811 /* If the user has flow control on, then we will
4812 * send a pause packet, otherwise just exit
4814 if (adapter->flowcontrol == FLOW_TXONLY ||
4815 adapter->flowcontrol == FLOW_BOTH) {
4818 /* Tell the device to send a pause packet via
* the back pressure register (bp req and bp xon/xoff):
4822 pm_csr = readl(&iomem->global.pm_csr);
4823 if (!et1310_in_phy_coma(adapter))
4824 writel(3, &iomem->txmac.bp_ctrl);
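/* (The value 3 presumably sets both the "bp req" and "bp xon/xoff" bits
 * named in the comment above.)
 */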
4828 /* Handle Packet Status Ring Low Interrupt */
4829 if (status & ET_INTR_RXDMA_STAT_LOW) {
4832 * Same idea as with the two Free Buffer Rings.
4833 * Packets going from the network to the host each
4834 * consume a free buffer resource and a packet status
* resource. These resources are passed to the OS.
4836 * When the OS is done with the resources, they need
4837 * to be returned to the ET1310. This is one method
4838 * of returning the resources.
4842 /* Handle RXDMA Error Interrupt */
4843 if (status & ET_INTR_RXDMA_ERR) {
4845 * The rxdma_error interrupt is sent when a time-out
4846 * on a request issued by the JAGCore has occurred or
4847 * a completion is returned with an un-successful
4848 * status. In both cases the request is considered
4849 * complete. The JAGCore will automatically re-try the
4850 * request in question. Normally information on events
4851 * like these are sent to the host using the "Advanced
4852 * Error Reporting" capability. This interrupt is
4853 * another way of getting similar information. The
4854 * only thing required is to clear the interrupt by
4855 * reading the ISR in the global resources. The
4856 * JAGCore will do a re-try on the request. Normally
4857 * you should never see this interrupt. If you start
4858 * to see this interrupt occurring frequently then
* something bad has occurred. A reset might be the thing to do.
4864 dev_warn(&adapter->pdev->dev,
4865 "RxDMA_ERR interrupt, error %x\n",
4866 readl(&iomem->txmac.tx_test));
4869 /* Handle the Wake on LAN Event */
4870 if (status & ET_INTR_WOL) {
4872 * This is a secondary interrupt for wake on LAN.
4873 * The driver should never see this, if it does,
4874 * something serious is wrong. We will TRAP the
* message when we are in DBG mode, otherwise we ignore it.
4878 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4881 /* Let's move on to the TxMac */
4882 if (status & ET_INTR_TXMAC) {
4883 u32 err = readl(&iomem->txmac.err);
4886 * When any of the errors occur and TXMAC generates
4887 * an interrupt to report these errors, it usually
4888 * means that TXMAC has detected an error in the data
4889 * stream retrieved from the on-chip Tx Q. All of
4890 * these errors are catastrophic and TXMAC won't be
4891 * able to recover data when these errors occur. In
4892 * a nutshell, the whole Tx path will have to be reset
4893 * and re-configured afterwards.
4895 dev_warn(&adapter->pdev->dev,
4896 "TXMAC interrupt, error 0x%08x\n",
4899 /* If we are debugging, we want to see this error,
* otherwise we just want the device to be reset and continue on.
4905 /* Handle RXMAC Interrupt */
4906 if (status & ET_INTR_RXMAC) {
4908 * These interrupts are catastrophic to the device,
4909 * what we need to do is disable the interrupts and
* set the flag to cause us to reset so we can solve the problem.
4913 /* MP_SET_FLAG( adapter,
4914 fMP_ADAPTER_HARDWARE_ERROR); */
4916 dev_warn(&adapter->pdev->dev,
4917 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4918 readl(&iomem->rxmac.err_reg));
4920 dev_warn(&adapter->pdev->dev,
4921 "Enable 0x%08x, Diag 0x%08x\n",
4922 readl(&iomem->rxmac.ctrl),
4923 readl(&iomem->rxmac.rxq_diag));
4926 * If we are debugging, we want to see this error,
* otherwise we just want the device to be reset and continue on.
4932 /* Handle MAC_STAT Interrupt */
4933 if (status & ET_INTR_MAC_STAT) {
4935 * This means at least one of the un-masked counters
4936 * in the MAC_STAT block has rolled over. Use this
* to maintain the top, software managed bits of the counter(s).
4940 et1310_handle_macstat_interrupt(adapter);
4943 /* Handle SLV Timeout Interrupt */
4944 if (status & ET_INTR_SLV_TIMEOUT) {
4946 * This means a timeout has occurred on a read or
4947 * write request to one of the JAGCore registers. The
4948 * Global Resources block has terminated the request
4949 * and on a read request, returned a "fake" value.
4950 * The most likely reasons are: Bad Address or the
* addressed module is in a power-down state and can't respond.
4956 et131x_enable_interrupts(adapter);
4959 /* NETDEV functions */
4962 * et131x_stats - Return the current device statistics.
4963 * @netdev: device whose stats are being queried
4965 * Returns 0 on success, errno on failure (as defined in errno.h)
4967 static struct net_device_stats *et131x_stats(struct net_device *netdev)
4969 struct et131x_adapter *adapter = netdev_priv(netdev);
4970 struct net_device_stats *stats = &adapter->net_stats;
4971 struct ce_stats *devstat = &adapter->stats;
4973 stats->rx_errors = devstat->rx_length_errs +
4974 devstat->rx_align_errs +
4975 devstat->rx_crc_errs +
4976 devstat->rx_code_violations +
4977 devstat->rx_other_errs;
4978 stats->tx_errors = devstat->tx_max_pkt_errs;
4979 stats->multicast = devstat->multicast_pkts_rcvd;
4980 stats->collisions = devstat->tx_collisions;
4982 stats->rx_length_errors = devstat->rx_length_errs;
4983 stats->rx_over_errors = devstat->rx_overflows;
4984 stats->rx_crc_errors = devstat->rx_crc_errs;
4986 /* NOTE: These stats don't have corresponding values in CE_STATS,
* so we're going to have to update these directly from within the TX/RX code.
4990 /* stats->rx_bytes = 20; devstat->; */
4991 /* stats->tx_bytes = 20; devstat->; */
4992 /* stats->rx_dropped = devstat->; */
4993 /* stats->tx_dropped = devstat->; */
4995 /* NOTE: Not used, can't find analogous statistics */
4996 /* stats->rx_frame_errors = devstat->; */
4997 /* stats->rx_fifo_errors = devstat->; */
4998 /* stats->rx_missed_errors = devstat->; */
5000 /* stats->tx_aborted_errors = devstat->; */
5001 /* stats->tx_carrier_errors = devstat->; */
5002 /* stats->tx_fifo_errors = devstat->; */
5003 /* stats->tx_heartbeat_errors = devstat->; */
5004 /* stats->tx_window_errors = devstat->; */
5009 * et131x_enable_txrx - Enable tx/rx queues
5010 * @netdev: device to be enabled
5012 void et131x_enable_txrx(struct net_device *netdev)
5014 struct et131x_adapter *adapter = netdev_priv(netdev);
5016 /* Enable the Tx and Rx DMA engines (if not already enabled) */
5017 et131x_rx_dma_enable(adapter);
5018 et131x_tx_dma_enable(adapter);
5020 /* Enable device interrupts */
5021 if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
5022 et131x_enable_interrupts(adapter);
5024 /* We're ready to move some data, so start the queue */
5025 netif_start_queue(netdev);
5029 * et131x_disable_txrx - Disable tx/rx queues
5030 * @netdev: device to be disabled
5032 void et131x_disable_txrx(struct net_device *netdev)
5034 struct et131x_adapter *adapter = netdev_priv(netdev);
5036 /* First thing is to stop the queue */
5037 netif_stop_queue(netdev);
5039 /* Stop the Tx and Rx DMA engines */
5040 et131x_rx_dma_disable(adapter);
5041 et131x_tx_dma_disable(adapter);
5043 /* Disable device interrupts */
5044 et131x_disable_interrupts(adapter);
5048 * et131x_up - Bring up a device for use.
5049 * @netdev: device to be opened
5051 void et131x_up(struct net_device *netdev)
5053 struct et131x_adapter *adapter = netdev_priv(netdev);
5055 et131x_enable_txrx(netdev);
5056 phy_start(adapter->phydev);
5060 * et131x_open - Open the device for use.
5061 * @netdev: device to be opened
5063 * Returns 0 on success, errno on failure (as defined in errno.h)
5065 int et131x_open(struct net_device *netdev)
5068 struct et131x_adapter *adapter = netdev_priv(netdev);
5070 /* Start the timer to track NIC errors */
5071 init_timer(&adapter->error_timer);
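/* TX_ERROR_PERIOD is evidently expressed in milliseconds; the HZ / 1000
 * factor below converts it to jiffies.
 */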
5072 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
5073 adapter->error_timer.function = et131x_error_timer_handler;
5074 adapter->error_timer.data = (unsigned long)adapter;
5075 add_timer(&adapter->error_timer);
5077 /* Register our IRQ */
5078 result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
5079 netdev->name, netdev);
dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
netdev->irq);
5086 adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
5094 * et131x_down - Bring down the device
* @netdev: device to be brought down
5097 void et131x_down(struct net_device *netdev)
5099 struct et131x_adapter *adapter = netdev_priv(netdev);
5101 /* Save the timestamp for the TX watchdog, prevent a timeout */
5102 netdev->trans_start = jiffies;
5104 phy_stop(adapter->phydev);
5105 et131x_disable_txrx(netdev);
5109 * et131x_close - Close the device
5110 * @netdev: device to be closed
5112 * Returns 0 on success, errno on failure (as defined in errno.h)
5114 int et131x_close(struct net_device *netdev)
5116 struct et131x_adapter *adapter = netdev_priv(netdev);
5118 et131x_down(netdev);
5120 adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
5121 free_irq(netdev->irq, netdev);
5123 /* Stop the error timer */
5124 return del_timer_sync(&adapter->error_timer);
5128 * et131x_ioctl - The I/O Control handler for the driver
5129 * @netdev: device on which the control request is being made
5130 * @reqbuf: a pointer to the IOCTL request buffer
5131 * @cmd: the IOCTL command code
5133 * Returns 0 on success, errno on failure (as defined in errno.h)
5135 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
5137 struct et131x_adapter *adapter = netdev_priv(netdev);
if (!adapter->phydev)
return -EINVAL;
5142 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
5146 * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
5147 * @adapter: pointer to our private adapter structure
5149 * FIXME: lot of dups with MAC code
5151 * Returns 0 on success, errno on failure
5153 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
5156 uint32_t filter = adapter->packet_filter;
5160 ctrl = readl(&adapter->regs->rxmac.ctrl);
5161 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
5163 /* Default to disabled packet filtering. Enable it in the individual
5164 * case statements that require the device to filter something
5168 /* Set us to be in promiscuous mode so we receive everything, this
5169 * is also true when we get a packet filter of 0
5171 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
5172 pf_ctrl &= ~7; /* Clear filter bits */
5175 * Set us up with Multicast packet filtering. Three cases are
5176 * possible - (1) we have a multi-cast list, (2) we receive ALL
5177 * multicast entries or (3) we receive none.
5179 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
5180 pf_ctrl &= ~2; /* Multicast filter bit */
5182 et1310_setup_device_for_multicast(adapter);
5187 /* Set us up with Unicast packet filtering */
5188 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
5189 et1310_setup_device_for_unicast(adapter);
5194 /* Set us up with Broadcast packet filtering */
5195 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
5196 pf_ctrl |= 1; /* Broadcast filter bit */
5201 /* Setup the receive mac configuration registers - Packet
5202 * Filter control + the enable / disable for packet filter
5203 * in the control reg.
5205 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
5206 writel(ctrl, &adapter->regs->rxmac.ctrl);
5212 * et131x_multicast - The handler to configure multicasting on the interface
5213 * @netdev: a pointer to a net_device struct representing the device
5215 static void et131x_multicast(struct net_device *netdev)
5217 struct et131x_adapter *adapter = netdev_priv(netdev);
5218 uint32_t packet_filter = 0;
5219 unsigned long flags;
5220 struct netdev_hw_addr *ha;
5223 spin_lock_irqsave(&adapter->lock, flags);
5225 /* Before we modify the platform-independent filter flags, store them
5226 * locally. This allows us to determine if anything's changed and if
5227 * we even need to bother the hardware
5229 packet_filter = adapter->packet_filter;
5231 /* Clear the 'multicast' flag locally; because we only have a single
5232 * flag to check multicast, and multiple multicast addresses can be
5233 * set, this is the easiest way to determine if more than one
5234 * multicast address is being set.
5236 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
5238 /* Check the net_device flags and set the device independent flags
5242 if (netdev->flags & IFF_PROMISC)
5243 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
5245 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
5247 if (netdev->flags & IFF_ALLMULTI)
5248 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
5250 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
5251 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
5253 if (netdev_mc_count(netdev) < 1) {
5254 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
5255 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
5257 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
5259 /* Set values in the private adapter struct */
5261 netdev_for_each_mc_addr(ha, netdev) {
5262 if (i == NIC_MAX_MCAST_LIST)
5264 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
5266 adapter->multicast_addr_count = i;
5268 /* Are the new flags different from the previous ones? If not, then no
5269 * action is required
5271 * NOTE - This block will always update the multicast_list with the
5272 * hardware, even if the addresses aren't the same.
5274 if (packet_filter != adapter->packet_filter) {
5275 /* Call the device's filter function */
5276 et131x_set_packet_filter(adapter);
5278 spin_unlock_irqrestore(&adapter->lock, flags);
5282 * et131x_tx - The handler to tx a packet on the device
5283 * @skb: data to be Tx'd
5284 * @netdev: device on which data is to be Tx'd
5286 * Returns 0 on success, errno on failure (as defined in errno.h)
5288 static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
5291 struct et131x_adapter *adapter = netdev_priv(netdev);
5293 /* stop the queue if it's getting full */
if (adapter->tx_ring.used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
5295 netif_stop_queue(netdev);
5297 /* Save the timestamp for the TX timeout watchdog */
5298 netdev->trans_start = jiffies;
5300 /* Call the device-specific data Tx routine */
5301 status = et131x_send_packets(skb, netdev);
5303 /* Check status and manage the netif queue if necessary */
5305 if (status == -ENOMEM) {
5306 status = NETDEV_TX_BUSY;
} else {
status = NETDEV_TX_OK;
}

return status;
5315 * et131x_tx_timeout - Timeout handler
5316 * @netdev: a pointer to a net_device struct representing the device
5318 * The handler called when a Tx request times out. The timeout period is
* specified by the 'tx_timeo' element in the net_device structure (see
5320 * et131x_alloc_device() to see how this value is set).
5322 static void et131x_tx_timeout(struct net_device *netdev)
5324 struct et131x_adapter *adapter = netdev_priv(netdev);
5326 unsigned long flags;
5328 /* If the device is closed, ignore the timeout */
if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
5332 /* Any nonrecoverable hardware error?
5333 * Checks adapter->flags for any failure in phy reading
5335 if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
5338 /* Hardware failure? */
5339 if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
5340 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
5344 /* Is send stuck? */
5345 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
5347 tcb = adapter->tx_ring.send_head;
5352 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
5353 spin_unlock_irqrestore(&adapter->tcb_send_qlock,
5356 dev_warn(&adapter->pdev->dev,
5357 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
5361 adapter->net_stats.tx_errors++;
5363 /* perform reset of tx/rx */
5364 et131x_disable_txrx(netdev);
5365 et131x_enable_txrx(netdev);
5370 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
5374 * et131x_change_mtu - The handler called to change the MTU for the device
5375 * @netdev: device whose MTU is to be changed
5376 * @new_mtu: the desired MTU
5378 * Returns 0 on success, errno on failure (as defined in errno.h)
5380 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
5383 struct et131x_adapter *adapter = netdev_priv(netdev);
5385 /* Make sure the requested MTU is valid */
if (new_mtu < 64 || new_mtu > 9216)
return -EINVAL;
5389 et131x_disable_txrx(netdev);
5390 et131x_handle_send_interrupt(adapter);
5391 et131x_handle_recv_interrupt(adapter);
5393 /* Set the new MTU */
5394 netdev->mtu = new_mtu;
5396 /* Free Rx DMA memory */
5397 et131x_adapter_memory_free(adapter);
5399 /* Set the config parameter for Jumbo Packet support */
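/* The extra 14 bytes account for the Ethernet header (ETH_HLEN). */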
5400 adapter->registry_jumbo_packet = new_mtu + 14;
5401 et131x_soft_reset(adapter);
5403 /* Alloc and init Rx DMA memory */
5404 result = et131x_adapter_memory_alloc(adapter);
5406 dev_warn(&adapter->pdev->dev,
5407 "Change MTU failed; couldn't re-alloc DMA memory\n");
5411 et131x_init_send(adapter);
5413 et131x_hwaddr_init(adapter);
5414 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5416 /* Init the device with the new settings */
5417 et131x_adapter_setup(adapter);
5419 et131x_enable_txrx(netdev);
5425 * et131x_set_mac_addr - handler to change the MAC address for the device
5426 * @netdev: device whose MAC is to be changed
5427 * @new_mac: the desired MAC address
5429 * Returns 0 on success, errno on failure (as defined in errno.h)
5431 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
5433 static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
5436 struct et131x_adapter *adapter = netdev_priv(netdev);
5437 struct sockaddr *address = new_mac;
5441 if (adapter == NULL)
5444 /* Make sure the requested MAC is valid */
5445 if (!is_valid_ether_addr(address->sa_data))
5448 et131x_disable_txrx(netdev);
5449 et131x_handle_send_interrupt(adapter);
5450 et131x_handle_recv_interrupt(adapter);
5452 /* Set the new MAC */
5453 /* netdev->set_mac_address = &new_mac; */
5455 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
5457 printk(KERN_INFO "%s: Setting MAC address to %pM\n",
5458 netdev->name, netdev->dev_addr);
5460 /* Free Rx DMA memory */
5461 et131x_adapter_memory_free(adapter);
5463 et131x_soft_reset(adapter);
5465 /* Alloc and init Rx DMA memory */
5466 result = et131x_adapter_memory_alloc(adapter);
5468 dev_err(&adapter->pdev->dev,
5469 "Change MAC failed; couldn't re-alloc DMA memory\n");
5473 et131x_init_send(adapter);
5475 et131x_hwaddr_init(adapter);
5477 /* Init the device with the new settings */
5478 et131x_adapter_setup(adapter);
5480 et131x_enable_txrx(netdev);
5485 static const struct net_device_ops et131x_netdev_ops = {
5486 .ndo_open = et131x_open,
5487 .ndo_stop = et131x_close,
5488 .ndo_start_xmit = et131x_tx,
5489 .ndo_set_multicast_list = et131x_multicast,
5490 .ndo_tx_timeout = et131x_tx_timeout,
5491 .ndo_change_mtu = et131x_change_mtu,
5492 .ndo_set_mac_address = et131x_set_mac_addr,
5493 .ndo_validate_addr = eth_validate_addr,
5494 .ndo_get_stats = et131x_stats,
5495 .ndo_do_ioctl = et131x_ioctl,
5499 * et131x_device_alloc
5501 * Returns pointer to the allocated and initialized net_device struct for
* Create instances of net_device and et131x_adapter for the new adapter and
5505 * register the device's entry points in the net_device structure.
5507 struct net_device *et131x_device_alloc(void)
5509 struct net_device *netdev;
5511 /* Alloc net_device and adapter structs */
5512 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
5515 printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
* Setup the function registration table (and other data) for a net_device.
5523 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
5524 netdev->netdev_ops = &et131x_netdev_ops;
5527 /* netdev->poll = &et131x_poll; */
5528 /* netdev->poll_controller = &et131x_poll_controller; */