struct pci_dev *pdev;
struct mii_bus *mii_bus;
struct phy_device *phydev;
- struct work_struct task;
+ struct napi_struct napi;
/* Flags that indicate current state of the adapter */
u32 flags;
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->ip_summed = CHECKSUM_NONE;
- netif_rx_ni(skb);
+ netif_receive_skb(skb);
out:
nic_return_rfd(adapter, rfd);
return rfd;
}
-/* et131x_handle_recv_interrupt - Interrupt handler for receive processing
+/* et131x_handle_recv_pkts - Handle received packets on behalf of the NAPI poll routine
*
* Assumption, Rcv spinlock has been acquired.
*/
-static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
+static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
{
struct rfd *rfd = NULL;
- u32 count = 0;
+ int count = 0;
+ int limit = budget;
bool done = true;
struct rx_ring *rx_ring = &adapter->rx_ring;
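+ /* Never process more than MAX_PACKETS_HANDLED per call, even if the NAPI budget is larger */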
+ if (budget > MAX_PACKETS_HANDLED)
+ limit = MAX_PACKETS_HANDLED;
+
/* Process up to available RFD's */
- while (count < MAX_PACKETS_HANDLED) {
+ while (count < limit) {
if (list_empty(&rx_ring->recv_list)) {
WARN_ON(rx_ring->num_ready_recv != 0);
done = false;
count++;
}
- if (count == MAX_PACKETS_HANDLED || !done) {
+ if (count == limit || !done) {
rx_ring->unfinished_receives = true;
writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
&adapter->regs->global.watchdog_timer);
} else
/* Watchdog timer will disable itself if appropriate. */
rx_ring->unfinished_receives = false;
+
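+ /* Report how many packets were processed so the poll routine can honour its budget */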
+ return count;
}
/* et131x_tx_dma_memory_alloc
tx_ring->used = 0;
}
-/* et131x_handle_send_interrupt - Interrupt handler for sending processing
+/* et131x_handle_send_pkts - Handle completed transmits on behalf of the NAPI poll routine
*
* Re-claim the send resources, complete sends and get more to send from
* the send wait queue.
*
* Assumption - Send spinlock has been acquired
*/
-static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
+static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
{
unsigned long flags;
u32 serviced;
struct et131x_adapter *adapter = netdev_priv(netdev);
unregister_netdev(netdev);
+ netif_napi_del(&adapter->napi);
phy_disconnect(adapter->phydev);
mdiobus_unregister(adapter->mii_bus);
- cancel_work_sync(&adapter->task);
kfree(adapter->mii_bus->irq);
mdiobus_free(adapter->mii_bus);
bool handled = true;
struct net_device *netdev = (struct net_device *)dev_id;
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct address_map __iomem *iomem = adapter->regs;
struct rx_ring *rx_ring = &adapter->rx_ring;
struct tx_ring *tx_ring = &adapter->tx_ring;
u32 status;
}
/* This is our interrupt, so process accordingly */
-
if (status & ET_INTR_WATCHDOG) {
struct tcb *tcb = tx_ring->send_head;
status &= ~ET_INTR_WATCHDOG;
}
- if (!status) {
- /* This interrupt has in some way been "handled" by
- * the ISR. Either it was a spurious Rx interrupt, or
- * it was a Tx interrupt that has been filtered by
- * the ISR.
- */
- et131x_enable_interrupts(adapter);
- goto out;
- }
-
- /* We need to save the interrupt status value for use in our
- * DPC. We will clear the software copy of that in that
- * routine.
- */
- adapter->stats.interrupt_status = status;
-
- /* Schedule the ISR handler as a bottom-half task in the
- * kernel's tq_immediate queue, and mark the queue for
- * execution
- */
- schedule_work(&adapter->task);
-out:
- return IRQ_RETVAL(handled);
-}
-
-/* et131x_isr_handler - The ISR handler
- *
- * scheduled to run in a deferred context by the ISR. This is where the ISR's
- * work actually gets done.
- */
-static void et131x_isr_handler(struct work_struct *work)
-{
- struct et131x_adapter *adapter =
- container_of(work, struct et131x_adapter, task);
- u32 status = adapter->stats.interrupt_status;
- struct address_map __iomem *iomem = adapter->regs;
-
- /* These first two are by far the most common. Once handled, we clear
- * their two bits in the status word. If the word is now zero, we
- * exit.
- */
- /* Handle all the completed Transmit interrupts */
- if (status & ET_INTR_TXDMA_ISR)
- et131x_handle_send_interrupt(adapter);
-
- /* Handle all the completed Receives interrupts */
- if (status & ET_INTR_RXDMA_XFR_DONE)
- et131x_handle_recv_interrupt(adapter);
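+ /* Defer RX and TX completion handling to the NAPI poll routine */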
+ if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR))
+ napi_schedule(&adapter->napi);
status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
* addressed module is in a power-down state and can't respond.
*/
}
+
+ if (!status) {
+ /* This interrupt has in some way been "handled" by
+ * the ISR. Either it was a spurious Rx interrupt, or
+ * it was a Tx interrupt that has been filtered by
+ * the ISR.
+ */
+ et131x_enable_interrupts(adapter);
+ }
+
out:
- et131x_enable_interrupts(adapter);
+ return IRQ_RETVAL(handled);
+}
+
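+/* et131x_poll - NAPI poll handler: process RX/TX completions within the given budget */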
+static int et131x_poll(struct napi_struct *napi, int budget)
+{
+ struct et131x_adapter *adapter =
+ container_of(napi, struct et131x_adapter, napi);
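+ /* Receive processing is bounded by the NAPI budget */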
+ int work_done = et131x_handle_recv_pkts(adapter, budget);
+
+ et131x_handle_send_pkts(adapter);
+
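+ /* Budget not exhausted - leave polled mode and re-enable device interrupts */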
+ if (work_done < budget) {
+ napi_complete(&adapter->napi);
+ et131x_enable_interrupts(adapter);
+ }
+
+ return work_done;
}
/* et131x_stats - Return the current device statistics */
adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
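+ /* Enable NAPI before the interface is brought up so the ISR can safely schedule the poll routine */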
+ napi_enable(&adapter->napi);
+
et131x_up(netdev);
return result;
struct et131x_adapter *adapter = netdev_priv(netdev);
et131x_down(netdev);
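+ /* napi_disable() waits for any in-progress poll to finish */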
+ napi_disable(&adapter->napi);
adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
free_irq(adapter->pdev->irq, netdev);
/* Init send data structures */
et131x_init_send(adapter);
- /* Set up the task structure for the ISR's deferred handler */
- INIT_WORK(&adapter->task, et131x_isr_handler);
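+ /* Register the NAPI poll handler; 64 is the conventional NAPI weight */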
+ netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
/* Copy address into the net_device struct */
memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);