/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
#include "mac.h"

#define EFX_MAX_MTU (9 * 1024)

/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
 * workqueue, there is nothing to be gained in making it per NIC.
 */
static struct workqueue_struct *refill_workqueue;

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-NIC work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0644);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;
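
/* Assuming the standard ethtool coalescing interface is wired up via this
 * driver's ethtool operations, these defaults can be changed at runtime
 * with e.g. "ethtool -C ethX rx-usecs 60 tx-usecs 150" (illustrative
 * command and interface name).
 */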

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
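
/* Spelling out the arithmetic in the comment above: draining from full
 * (1024 descriptors) to the half-full restart point frees 512 descriptors,
 * i.e. at worst 512 / 3 ~= 170 packets; a 1500-byte frame occupies a 10G
 * wire for ~1.2 usec, so the queue empties in ~170 * 1.2 ~= 205 usec. A
 * 150 usec moderation interval therefore guarantees an interrupt arrives
 * before the queue can drain completely and the link goes idle.
 */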

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache).
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_flush_lro(channel);
	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		/* There is no race here; although napi_disable() will
		 * only wait for netif_rx_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		netif_rx_complete(napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
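
/* A note on the NAPI contract relied on above: returning a value smaller
 * than the budget tells the core we are finished, so the poll must be
 * completed (netif_rx_complete()) and the event queue re-armed; returning
 * exactly the budget leaves the channel scheduled for another poll.
 */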

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally re-enable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_queues) {
			if (channel->channel < efx->n_rx_queues) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_queues;
			}
		}
		snprintf(channel->name, sizeof(channel->name),
			 "%s%s-%d", efx->name, type, number);
	}
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions.
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);
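
	/* Rough worked example (exact overheads depend on EFX_MAX_FRAME_LEN,
	 * defined elsewhere): a 1500-byte MTU needs a little over 1.5KB per
	 * buffer, so get_order() returns 0 (single pages), while a 9000-byte
	 * jumbo MTU pushes the length past 8KB and needs order-2 allocations.
	 */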

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link-status-driven stop on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
			 efx->link_speed, efx->link_fd ? "full" : "half",
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}

/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_deconfigure_mac_wrapper(efx);

	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
	efx->phy_op->reconfigure(efx);

	if (falcon_switch_mac(efx))
		goto fail;

	efx->mac_op->reconfigure(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
	return;

fail:
	EFX_ERR(efx, "failed to reconfigure MAC\n");
	efx->phy_op->fini(efx);
	efx->port_initialized = false;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_phy_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	rc = efx->phy_op->init(efx);
	if (rc)
		return rc;
	efx->phy_op->reconfigure(efx);

	mutex_lock(&efx->mac_lock);
	rc = falcon_switch_mac(efx);
	mutex_unlock(&efx->mac_lock);
	if (rc)
		goto fail;
	efx->mac_op->reconfigure(efx);

	efx->port_initialized = true;
	efx->stats_enabled = true;
	return 0;

fail:
	efx->phy_op->fini(efx);
	return rc;
}

/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_phy_work()/efx_mac_work() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
 * and efx_mac_work may still be scheduled via NAPI processing until
 * efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}

static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, efx->type->mem_bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_t core_mask;
	int count;
	int cpu;

	cpus_clear(core_mask);
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpu_isset(cpu, core_mask)) {
			++count;
			cpus_or(core_mask, core_mask,
				topology_core_siblings(cpu));
		}
	}

	return count;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;
		int rx_queues;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
		wanted_ints = min(wanted_ints, max_channels);

		for (i = 0; i < wanted_ints; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
		if (rc > 0) {
			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
				" available (%d < %d).\n", rc, wanted_ints);
			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
			wanted_ints = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     wanted_ints);
		}

		if (rc == 0) {
			efx->n_rx_queues = min(rx_queues, wanted_ints);
			efx->n_channels = wanted_ints;
			for (i = 0; i < wanted_ints; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
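
/* To summarise the fallback ladder above: MSI-X (multiple channels, RSS
 * across CPU packages) -> single-channel MSI -> legacy line interrupt.
 * The starting rung is chosen by the interrupt_mode module parameter
 * declared near the bottom of this file (0=>MSIX 1=>MSI 2=>legacy).
 */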

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (separate_tx_channels)
			tx_queue->channel = &efx->channel[efx->n_channels-1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}
	efx_set_channel_names(efx);

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
	cancel_work_sync(&efx->phy_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}

static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock))
		goto out_requeue;
	if (!efx->port_enabled)
		goto out_unlock;
	rc = efx->board_info.monitor(efx);
	if (rc) {
		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
			(rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		falcon_sim_phy_event(efx);
	}
	efx->phy_op->poll(efx);
	efx->mac_op->poll(efx);

out_unlock:
	mutex_unlock(&efx->mac_lock);
out_requeue:
	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		rc = efx_lro_init(&channel->lro_mgr, efx);
		if (rc)
			goto err;
	}
	return 0;

 err:
	efx_fini_napi(efx);
	return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_lro_fini(&channel->lro_mgr);
		channel->napi_dev = NULL;
	}
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
		efx_fini_channels(efx);
		efx_init_channels(efx);
	}

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them (or resetting the NIC); slightly stale
	 * stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->stats_enabled) {
		efx->mac_op->update_stats(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
		" resetting channels\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}
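
/* Illustrative trigger (not part of the driver): "ip link set ethX mtu
 * 9000" arrives here via the rtnl path. Anything above EFX_MAX_MTU
 * (9 * 1024 = 9216 bytes) is rejected with -EINVAL; an accepted change
 * bounces the channels so RX buffers are re-sized for the new frame length.
 */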

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->phy_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
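
/* Note on the hash built above: each multicast address sets a single bit,
 * indexed by the low-order bits of the little-endian CRC32 of the address
 * (EFX_MCAST_HASH_ENTRIES bits in total), so the hardware filter is
 * approximate and may pass some multicast frames that were not requested.
 */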

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats		= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list = efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}

	rtnl_lock();
	efx_update_name(efx);
	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		EFX_ERR(efx, "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

 fail_registered:
	unregister_netdev(net_dev);
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	efx->stats_enabled = false;
	spin_unlock(&efx->stats_lock);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);

	efx->phy_op->get_settings(efx, ecmd);

	efx_fini_channels(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (efx->phy_op->set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->spi_lock);
	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx->stats_enabled = true;
	}
	return rc;
}

/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc = 0;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto out_unlock;
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto out_disable;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		efx_reset_up(efx, &ecmd, false);
		rc = -EIO;
	} else {
		rc = efx_reset_up(efx, &ecmd, true);
	}

out_disable:
	if (rc) {
		EFX_ERR(efx, "has been disabled\n");
		efx->state = STATE_DISABLED;
		dev_close(efx->net_dev);
	} else {
		EFX_LOG(efx, "reset complete\n");
	}

out_unlock:
	rtnl_unlock();
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}

static struct efx_mac_operations efx_dummy_mac_operations = {
	.reconfigure	= efx_port_dummy_op_void,
	.poll		= efx_port_dummy_op_void,
	.irq		= efx_port_dummy_op_void,
};

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
	.poll		 = efx_port_dummy_op_void,
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
	.init		= efx_port_dummy_op_int,
	.init_leds	= efx_port_dummy_op_int,
	.set_fault_led	= efx_port_dummy_op_blink,
	.monitor	= efx_port_dummy_op_int,
	.blink		= efx_port_dummy_op_blink,
	.fini		= efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->mac_op = &efx_dummy_mac_operations;
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mii.dev = net_dev;
	INIT_WORK(&efx->phy_work, efx_phy_work);
	INIT_WORK(&efx->mac_work, efx_mac_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* Sanity-check NIC type */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
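
	/* Aside on the checks above: a ring mask is well-formed iff it has
	 * the form 2^n - 1, in which case "mask & (mask + 1)" is zero, and
	 * an event queue size y is a power of two iff "y & (y - 1)" is zero;
	 * that is all these EFX_BUG_ON_PARANOID() expressions test.
	 */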

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		return -ENOMEM;

	return 0;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Skip everything if we never obtained a valid membase */
	if (!efx->membase)
		return;

	efx_fini_channels(efx);
	efx_fini_port(efx);

	/* Shutdown the board, then the NIC and board state */
	efx->board_info.fini(efx);
	falcon_fini_interrupt(efx);

	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	if (efx->membase == NULL)
		goto out;

	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_device's have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

 out:
	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail4;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail5;
	}

	efx_init_channels(efx);

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	efx_fini_channels(efx);
	efx_fini_port(efx);
 fail5:
 fail4:
	efx->board_info.fini(efx);
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO);
	if (lro)
		net_dev->features |= NETIF_F_LRO;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_TSO);
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		if (rc == 0) {
			if (efx->reset_pending != RESET_TYPE_NONE) {
				/* If there was a scheduled reset during
				 * probe, the NIC is probably hosed anyway */
				efx_pci_remove_main(efx);
				rc = -EIO;
			} else {
				break;
			}
		}

		/* Retry if a recoverably reset event has been scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}

	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail4;
	}

	/* Switch to the running state before we expose the device to
	 * the OS. This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	efx->state = STATE_RUNNING;

	efx_mtd_probe(efx); /* allowed to fail */

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	EFX_LOG(efx, "initialisation successful\n");
	return 0;

 fail5:
	efx_pci_remove_main(efx);
 fail4:
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}

static struct pci_driver efx_pci_driver = {
	.name		= EFX_DRIVER_NAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	destroy_workqueue(refill_workqueue);
 err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);