Merge branch 'linux-2.6.30.y' of git://git.kernel.org/pub/scm/linux/kernel/git/inaky...
author David S. Miller <davem@davemloft.net>
Tue, 26 May 2009 05:56:11 +0000 (22:56 -0700)
committer David S. Miller <davem@davemloft.net>
Tue, 26 May 2009 05:56:11 +0000 (22:56 -0700)
drivers/isdn/gigaset/isocdata.c
drivers/net/Makefile
drivers/net/gianfar.c
drivers/net/mac8390.c
drivers/net/r8169.c
net/core/pktgen.c
net/ipv4/fib_trie.c
net/ipv4/tcp_vegas.c
net/rxrpc/ar-connection.c

diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index b171e75cb52e51f920125abc984648f22415e961..29808c4fb1cb658ddc9c91f9187ef4cadb39afa4 100644
@@ -175,7 +175,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
                return -EINVAL;
        }
        src = iwb->read;
-       if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
+       if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
                     (read < src && limit >= src))) {
                pr_err("isoc write buffer frame reservation violated\n");
                return -EFAULT;
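
The check this hunk tightens is a classic off-by-one: an index equal to the buffer's full size (BAS_OUTBUFSIZE + BAS_OUTBUFPAD, per the new test) is already one past the end, so it must be rejected with ">=" rather than ">". A minimal standalone C sketch of the same boundary condition, with made-up sizes and a hypothetical check_limit() helper rather than the driver's buffer logic:

    /* Illustrative only: an index into buf[BUFSIZE + PAD] is valid for
     * 0 <= idx < BUFSIZE + PAD, so idx == BUFSIZE + PAD must be rejected. */
    #include <stdio.h>

    #define BUFSIZE 4096
    #define PAD     44

    static int check_limit(int limit)
    {
        if (limit >= BUFSIZE + PAD) {   /* ">" alone would let the one-past-end value through */
            fprintf(stderr, "frame reservation violated\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", check_limit(BUFSIZE + PAD), check_limit(BUFSIZE + PAD - 1));  /* -1 0 */
        return 0;
    }
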
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1fc4602a6ff2c29912f64759207f0d38a8e17895..a1c25cb4669fb8b9a95b7a1410355423d46b5599 100644
@@ -102,7 +102,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
 obj-$(CONFIG_NET) += Space.o loopback.o
 obj-$(CONFIG_SEEQ8005) += seeq8005.o
 obj-$(CONFIG_NET_SB1000) += sb1000.o
-obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
+obj-$(CONFIG_MAC8390) += mac8390.o
 obj-$(CONFIG_APNE) += apne.o 8390.o
 obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
 obj-$(CONFIG_HP100) += hp100.o
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b2c49679bba78407fe51d58b57cc62344feb7b0d..a0519184e54ee7d7a30aed189a7ce13e5ab122f1 100644
@@ -1885,8 +1885,17 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
                        if (unlikely(!newskb))
                                newskb = skb;
-                       else if (skb)
+                       else if (skb) {
+                               /*
+                                * We need to reset ->data to what it
+                                * was before gfar_new_skb() re-aligned
+                                * it to an RXBUF_ALIGNMENT boundary
+                                * before we put the skb back on the
+                                * recycle list.
+                                */
+                               skb->data = skb->head + NET_SKB_PAD;
                                __skb_queue_head(&priv->rx_recycle, skb);
+                       }
                } else {
                        /* Increment the number of packets */
                        dev->stats.rx_packets++;
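
The comment added above states the intent: gfar_new_skb() realigns skb->data to an RXBUF_ALIGNMENT boundary, so the pointer has to be put back at head + NET_SKB_PAD before the skb is parked on the recycle list. A rough userspace analogue of that reset-before-reuse pattern, with hypothetical names (HEADROOM, struct buf, buf_recycle) standing in for the skb fields:

    /* Illustrative sketch: before a buffer goes back on a reuse pool,
     * reset its data pointer to the canonical headroom offset so the
     * next user does not inherit an earlier alignment adjustment. */
    #define HEADROOM 64     /* stands in for NET_SKB_PAD */

    struct buf {
        unsigned char *head;    /* start of the allocation */
        unsigned char *data;    /* payload pointer, possibly realigned per use */
    };

    void buf_recycle(struct buf *b)
    {
        b->data = b->head + HEADROOM;   /* undo any per-use realignment */
        /* ...then push b onto the free/recycle list... */
    }
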
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 8e884869a05bfd40995af322554e6601c6d129c5..f26667d5eaaec9a4c8837528259665cff1513a6f 100644
@@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit)
        if (!MACH_IS_MAC)
                return ERR_PTR(-ENODEV);
 
-       dev = alloc_ei_netdev();
+       dev = ____alloc_ei_netdev(0);
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
@@ -481,10 +481,10 @@ void cleanup_module(void)
 static const struct net_device_ops mac8390_netdev_ops = {
        .ndo_open               = mac8390_open,
        .ndo_stop               = mac8390_close,
-       .ndo_start_xmit         = ei_start_xmit,
-       .ndo_tx_timeout         = ei_tx_timeout,
-       .ndo_get_stats          = ei_get_stats,
-       .ndo_set_multicast_list = ei_set_multicast_list,
+       .ndo_start_xmit         = __ei_start_xmit,
+       .ndo_tx_timeout         = __ei_tx_timeout,
+       .ndo_get_stats          = __ei_get_stats,
+       .ndo_set_multicast_list = __ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0b6e8c89683581efdfc35925adaa018998468db5..8247a945a1d9b0df7f11aebc858eccb7552a02cd 100644
@@ -3554,54 +3554,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
        int handled = 0;
        int status;
 
+       /* loop handling interrupts until we have no new ones or
+        * we hit an invalid/hotplug case.
+        */
        status = RTL_R16(IntrStatus);
+       while (status && status != 0xffff) {
+               handled = 1;
 
-       /* hotplug/major error/no more work/shared irq */
-       if ((status == 0xffff) || !status)
-               goto out;
-
-       handled = 1;
+               /* Handle all of the error cases first. These will reset
+                * the chip, so just exit the loop.
+                */
+               if (unlikely(!netif_running(dev))) {
+                       rtl8169_asic_down(ioaddr);
+                       break;
+               }
 
-       if (unlikely(!netif_running(dev))) {
-               rtl8169_asic_down(ioaddr);
-               goto out;
-       }
+               /* Work around for rx fifo overflow */
+               if (unlikely(status & RxFIFOOver) &&
+               (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+                       netif_stop_queue(dev);
+                       rtl8169_tx_timeout(dev);
+                       break;
+               }
 
-       status &= tp->intr_mask;
-       RTL_W16(IntrStatus,
-               (status & RxFIFOOver) ? (status | RxOverflow) : status);
+               if (unlikely(status & SYSErr)) {
+                       rtl8169_pcierr_interrupt(dev);
+                       break;
+               }
 
-       if (!(status & tp->intr_event))
-               goto out;
+               if (status & LinkChg)
+                       rtl8169_check_link_status(dev, tp, ioaddr);
 
-       /* Work around for rx fifo overflow */
-       if (unlikely(status & RxFIFOOver) &&
-           (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
-               netif_stop_queue(dev);
-               rtl8169_tx_timeout(dev);
-               goto out;
-       }
+               /* We need to see the latest version of tp->intr_mask to
+                * avoid ignoring an MSI interrupt and having to wait for
+                * another event which may never come.
+                */
+               smp_rmb();
+               if (status & tp->intr_mask & tp->napi_event) {
+                       RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+                       tp->intr_mask = ~tp->napi_event;
+
+                       if (likely(napi_schedule_prep(&tp->napi)))
+                               __napi_schedule(&tp->napi);
+                       else if (netif_msg_intr(tp)) {
+                               printk(KERN_INFO "%s: interrupt %04x in poll\n",
+                               dev->name, status);
+                       }
+               }
 
-       if (unlikely(status & SYSErr)) {
-               rtl8169_pcierr_interrupt(dev);
-               goto out;
+               /* We only get a new MSI interrupt when all active irq
+                * sources on the chip have been acknowledged. So, ack
+                * everything we've seen and check if new sources have become
+                * active to avoid blocking all interrupts from the chip.
+                */
+               RTL_W16(IntrStatus,
+                       (status & RxFIFOOver) ? (status | RxOverflow) : status);
+               status = RTL_R16(IntrStatus);
        }
 
-       if (status & LinkChg)
-               rtl8169_check_link_status(dev, tp, ioaddr);
-
-       if (status & tp->napi_event) {
-               RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
-               tp->intr_mask = ~tp->napi_event;
-
-               if (likely(napi_schedule_prep(&tp->napi)))
-                       __napi_schedule(&tp->napi);
-               else if (netif_msg_intr(tp)) {
-                       printk(KERN_INFO "%s: interrupt %04x in poll\n",
-                              dev->name, status);
-               }
-       }
-out:
        return IRQ_RETVAL(handled);
 }
 
@@ -3617,13 +3627,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 
        if (work_done < budget) {
                napi_complete(napi);
-               tp->intr_mask = 0xffff;
-               /*
-                * 20040426: the barrier is not strictly required but the
-                * behavior of the irq handler could be less predictable
-                * without it. Btw, the lack of flush for the posted pci
-                * write is safe - FR
+
+               /* We need to force the visibility of tp->intr_mask
+                * for other CPUs, as we can lose an MSI interrupt
+                * and potentially wait for a retransmit timeout if we don't.
+                * The posted write to IntrMask is safe, as it will
+                * eventually make it to the chip and we won't lose anything
+                * until it does.
                 */
+               tp->intr_mask = 0xffff;
                smp_wmb();
                RTL_W16(IntrMask, tp->intr_event);
        }
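
Both r8169 hunks lean on a barrier pair: the poll routine publishes tp->intr_mask with smp_wmb() before re-enabling interrupts, and the interrupt handler issues smp_rmb() before testing it, so an MSI is never checked against a stale mask. A standalone C11 analogue of that publish/observe pattern (writer(), reader(), and the variable names are illustrative, not the driver's):

    /* Illustrative only: the release fence orders the mask update before the
     * "interrupts re-enabled" signal; the acquire fence on the reader side
     * guarantees a fresh mask is seen once that signal is observed. */
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int  mask;            /* analogue of tp->intr_mask */
    static atomic_bool irq_enabled;     /* analogue of the IntrMask register write */

    void writer(void)                   /* analogue of rtl8169_poll() */
    {
        atomic_store_explicit(&mask, 0xffff, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);          /* like smp_wmb() */
        atomic_store_explicit(&irq_enabled, true, memory_order_relaxed);
    }

    int reader(void)                    /* analogue of rtl8169_interrupt() */
    {
        if (!atomic_load_explicit(&irq_enabled, memory_order_relaxed))
            return 0;
        atomic_thread_fence(memory_order_acquire);          /* like smp_rmb() */
        return atomic_load_explicit(&mask, memory_order_relaxed);
    }
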
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3779c1438c11c84613f5ba9b40619907c9a18a9d..0666a827bc62d3a9cd067c744c650f4d6337d563 100644
@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
        if (pkt_dev->cflows) {
                /* let go of the SAs if we have them */
                int i = 0;
-               for (;  i < pkt_dev->nflows; i++){
+               for (;  i < pkt_dev->cflows; i++) {
                        struct xfrm_state *x = pkt_dev->flows[i].x;
                        if (x) {
                                xfrm_state_put(x);
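
The pktgen hunk bounds the SA cleanup loop by pkt_dev->cflows, the number of flow slots actually allocated in flows[], rather than nflows, a counter that need not match the array length. A toy sketch of keeping teardown bounded by the allocation count (struct slot, free_slots(), and free() standing in for xfrm_state_put() are made up):

    /* Illustrative sketch: walk exactly the slots that were allocated;
     * bounding the loop by a different counter risks running past the
     * end of the array or skipping held references. */
    #include <stdlib.h>

    struct slot { void *ref; };

    void free_slots(struct slot *slots, size_t allocated)
    {
        for (size_t i = 0; i < allocated; i++) {    /* bound by the allocation count */
            free(slots[i].ref);                     /* stand-in for xfrm_state_put() */
            slots[i].ref = NULL;
        }
    }
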
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ec0ae490f0b60306029126c0d848674cb2ee055d..33c7c85dfe40c5301e1953d3cddb7a45da305983 100644
@@ -986,9 +986,12 @@ fib_find_node(struct trie *t, u32 key)
 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 {
        int wasfull;
-       t_key cindex, key = tn->key;
+       t_key cindex, key;
        struct tnode *tp;
 
+       preempt_disable();
+       key = tn->key;
+
        while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
                cindex = tkey_extract_bits(key, tp->pos, tp->bits);
                wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
@@ -1007,6 +1010,7 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
        if (IS_TNODE(tn))
                tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
+       preempt_enable();
        return (struct node *)tn;
 }
 
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a453aac91bd3b740ef44363ef10e5be5390bf6dc..c6743eec9b7d0c555db5e32fdf73c82d6fa7422e 100644
@@ -158,6 +158,11 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 }
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
+static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
+{
+       return min(tp->snd_ssthresh, tp->snd_cwnd - 1);
+}
+
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -221,11 +226,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                         */
                        diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-                       if (diff > gamma && tp->snd_ssthresh > 2 ) {
+                       if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
                                /* Going too fast. Time to slow down
                                 * and switch to congestion avoidance.
                                 */
-                               tp->snd_ssthresh = 2;
 
                                /* Set cwnd to match the actual rate
                                 * exactly:
@@ -235,6 +239,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                                 * utilization.
                                 */
                                tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
+                               tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
                        } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
                                /* Slow start.  */
@@ -250,6 +255,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                                         * we slow down.
                                         */
                                        tp->snd_cwnd--;
+                                       tp->snd_ssthresh
+                                               = tcp_vegas_ssthresh(tp);
                                } else if (diff < alpha) {
                                        /* We don't have enough extra packets
                                         * in the network, so speed up.
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 0f1218b8d289859cb89062ed1f799a32e051c1f6..67e38a056240df73712c56a0ba81ddc5a645626b 100644
@@ -343,9 +343,9 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
                /* not yet present - create a candidate for a new connection
                 * and then redo the check */
                conn = rxrpc_alloc_connection(gfp);
-               if (IS_ERR(conn)) {
-                       _leave(" = %ld", PTR_ERR(conn));
-                       return PTR_ERR(conn);
+               if (!conn) {
+                       _leave(" = -ENOMEM");
+                       return -ENOMEM;
                }
 
                conn->trans = trans;
@@ -508,9 +508,9 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
                /* not yet present - create a candidate for a new connection and then
                 * redo the check */
                candidate = rxrpc_alloc_connection(gfp);
-               if (IS_ERR(candidate)) {
-                       _leave(" = %ld", PTR_ERR(candidate));
-                       return PTR_ERR(candidate);
+               if (!candidate) {
+                       _leave(" = -ENOMEM");
+                       return -ENOMEM;
                }
 
                candidate->trans = trans;
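
Both rxrpc hunks switch the callers to the allocator's actual failure convention: rxrpc_alloc_connection() fails by returning NULL, so IS_ERR()/PTR_ERR() never trigger and the old code would go on to dereference NULL (the very next line assigns conn->trans). A standalone sketch of the two conventions, using simplified local stand-ins for the kernel's ERR_PTR helpers and a hypothetical alloc_conn()/connect_one() pair:

    /* Illustrative only: simplified local copies of ERR_PTR()/IS_ERR(),
     * showing why a NULL-returning allocator must be checked with "!ptr". */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO     4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    void *alloc_conn(void)      /* fails by returning NULL, like a kzalloc()-based allocator */
    {
        return NULL;
    }

    int connect_one(void)
    {
        void *conn = alloc_conn();

        if (!conn)              /* IS_ERR(NULL) is false, so an IS_ERR() check misses this */
            return -ENOMEM;
        /* ... use conn ... */
        free(conn);
        return 0;
    }

    int main(void)
    {
        printf("IS_ERR(NULL)=%d IS_ERR(ERR_PTR(-ENOMEM))=%d connect_one()=%d\n",
               IS_ERR(NULL), IS_ERR(ERR_PTR(-ENOMEM)), connect_one());
        return 0;
    }
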