Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
author	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 9 Jun 2009 15:48:32 +0000 (08:48 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 9 Jun 2009 15:48:32 +0000 (08:48 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  kvm: fix kvm reboot crash when MAXSMP is used
  cpumask: alloc zeroed cpumask for static cpumask_var_ts
  cpumask: introduce zalloc_cpumask_var
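
A note on the cpumask items above: zalloc_cpumask_var is the zero-initialising
counterpart of alloc_cpumask_var.  As a rough sketch of the API rather than the
patch text itself, it reduces to passing __GFP_ZERO when the mask is allocated
off-stack, and to clearing the on-stack mask otherwise:

	/* CONFIG_CPUMASK_OFFSTACK=y: the mask is separately allocated */
	bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
	{
		return alloc_cpumask_var(mask, flags | __GFP_ZERO);
	}

	/* CONFIG_CPUMASK_OFFSTACK=n: cpumask_var_t is a plain array */
	static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
	{
		cpumask_clear(*mask);
		return true;
	}

Static cpumask_var_ts get the equivalent treatment of starting out zeroed, which
appears to be the property the MAXSMP kvm reboot fix depends on.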

block/bsg.c
drivers/md/raid5.c
drivers/net/r8169.c
net/sched/cls_cgroup.c

index 206060e795da4324732835bf5012afca4ac7d43f..dd81be455e00b3c258357cd2537d200d06455e27 100644 (file)
@@ -315,6 +315,7 @@ out:
        blk_put_request(rq);
        if (next_rq) {
                blk_rq_unmap_user(next_rq->bio);
+               next_rq->bio = NULL;
                blk_put_request(next_rq);
        }
        return ERR_PTR(ret);
@@ -448,6 +449,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
                hdr->dout_resid = rq->data_len;
                hdr->din_resid = rq->next_rq->data_len;
                blk_rq_unmap_user(bidi_bio);
+               rq->next_rq->bio = NULL;
                blk_put_request(rq->next_rq);
        } else if (rq_data_dir(rq) == READ)
                hdr->din_resid = rq->data_len;
@@ -466,6 +468,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
        blk_rq_unmap_user(bio);
        if (rq->cmd != rq->__cmd)
                kfree(rq->cmd);
+       rq->bio = NULL;
        blk_put_request(rq);
 
        return ret;
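
The three bsg.c hunks above apply one ownership rule: once blk_rq_unmap_user()
has released a bio, the request must not keep a stale pointer to it, so that the
following blk_put_request() does not see a bio that has already been released.
The same pattern in plain C, as a generic illustration rather than kernel code:

	#include <stdlib.h>

	struct req {
		char *bio;              /* resource the request may still own */
	};

	static void put_req(struct req *rq)
	{
		free(rq->bio);          /* generic teardown frees whatever is left */
		free(rq);
	}

	int main(void)
	{
		struct req *rq = calloc(1, sizeof(*rq));
		if (!rq)
			return 1;
		rq->bio = malloc(64);

		/* error path: the bio was already released by another layer */
		free(rq->bio);
		rq->bio = NULL;         /* the bsg fix: drop the stale pointer */
		put_req(rq);            /* safe now: free(NULL) is a no-op */
		return 0;
	}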
index 5d400aef8d9b40f9ec0a58749037bcbbf61977df..bb37fb1b2d82dd484e749ec3b18a9dcf9e1b8f55 100644 (file)
@@ -362,7 +362,7 @@ static void raid5_unplug_device(struct request_queue *q);
 
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
-                 int previous, int noblock)
+                 int previous, int noblock, int noquiesce)
 {
        struct stripe_head *sh;
 
@@ -372,7 +372,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 
        do {
                wait_event_lock_irq(conf->wait_for_stripe,
-                                   conf->quiesce == 0,
+                                   conf->quiesce == 0 || noquiesce,
                                    conf->device_lock, /* nothing */);
                sh = __find_stripe(conf, sector, conf->generation - previous);
                if (!sh) {
@@ -2671,7 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                        sector_t bn = compute_blocknr(sh, i, 1);
                        sector_t s = raid5_compute_sector(conf, bn, 0,
                                                          &dd_idx, NULL);
-                       sh2 = get_active_stripe(conf, s, 0, 1);
+                       sh2 = get_active_stripe(conf, s, 0, 1, 1);
                        if (sh2 == NULL)
                                /* so far only the early blocks of this stripe
                                 * have been requested.  When later blocks
@@ -2944,7 +2944,7 @@ static bool handle_stripe5(struct stripe_head *sh)
        /* Finish reconstruct operations initiated by the expansion process */
        if (sh->reconstruct_state == reconstruct_state_result) {
                struct stripe_head *sh2
-                       = get_active_stripe(conf, sh->sector, 1, 1);
+                       = get_active_stripe(conf, sh->sector, 1, 1, 1);
                if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
                        /* sh cannot be written until sh2 has been read.
                         * so arrange for sh to be delayed a little
@@ -3189,7 +3189,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 
        if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
                struct stripe_head *sh2
-                       = get_active_stripe(conf, sh->sector, 1, 1);
+                       = get_active_stripe(conf, sh->sector, 1, 1, 1);
                if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
                        /* sh cannot be written until sh2 has been read.
                         * so arrange for sh to be delayed a little
@@ -3288,7 +3288,7 @@ static void unplug_slaves(mddev_t *mddev)
        int i;
 
        rcu_read_lock();
-       for (i=0; i<mddev->raid_disks; i++) {
+       for (i = 0; i < conf->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
                        struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -3675,7 +3675,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
                        (unsigned long long)logical_sector);
 
                sh = get_active_stripe(conf, new_sector, previous,
-                                      (bi->bi_rw&RWA_MASK));
+                                      (bi->bi_rw&RWA_MASK), 0);
                if (sh) {
                        if (unlikely(previous)) {
                                /* expansion might have moved on while waiting for a
@@ -3873,7 +3873,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
        for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
                int j;
                int skipped = 0;
-               sh = get_active_stripe(conf, stripe_addr+i, 0, 0);
+               sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
                set_bit(STRIPE_EXPANDING, &sh->state);
                atomic_inc(&conf->reshape_stripes);
                /* If any of this stripe is beyond the end of the old
@@ -3916,13 +3916,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
                raid5_compute_sector(conf, stripe_addr*(new_data_disks),
                                     1, &dd_idx, NULL);
        last_sector =
-               raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512)
+               raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
                                            *(new_data_disks) - 1),
                                     1, &dd_idx, NULL);
        if (last_sector >= mddev->dev_sectors)
                last_sector = mddev->dev_sectors - 1;
        while (first_sector <= last_sector) {
-               sh = get_active_stripe(conf, first_sector, 1, 0);
+               sh = get_active_stripe(conf, first_sector, 1, 0, 1);
                set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
                set_bit(STRIPE_HANDLE, &sh->state);
                release_stripe(sh);
@@ -4022,9 +4022,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 
        bitmap_cond_end_sync(mddev->bitmap, sector_nr);
 
-       sh = get_active_stripe(conf, sector_nr, 0, 1);
+       sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
        if (sh == NULL) {
-               sh = get_active_stripe(conf, sector_nr, 0, 0);
+               sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
                /* make sure we don't swamp the stripe cache if someone else
                 * is trying to get access
                 */
@@ -4034,7 +4034,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
         * We don't need to check the 'failed' flag as when that gets set,
         * recovery aborts.
         */
-       for (i=0; i<mddev->raid_disks; i++)
+       for (i = 0; i < conf->raid_disks; i++)
                if (conf->disks[i].rdev == NULL)
                        still_degraded = 1;
 
@@ -4086,7 +4086,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
                        /* already done this stripe */
                        continue;
 
-               sh = get_active_stripe(conf, sector, 0, 1);
+               sh = get_active_stripe(conf, sector, 0, 1, 0);
 
                if (!sh) {
                        /* failed to get a stripe - must wait */
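
The raid5.c changes above thread a new noquiesce flag into get_active_stripe():
reshape/expansion callers pass 1 so they are not blocked on conf->quiesce, while
normal I/O, resync and the aligned-read retry path keep waiting; the last_sector
computation also switches to reshape_sectors, and the disk loops iterate over
conf->raid_disks.  The shape of the locking change, reduced to a user-space
pthreads sketch with assumed names (not the md code itself):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
	static bool quiesced;

	void get_resource(bool noquiesce)
	{
		pthread_mutex_lock(&lock);
		/* ordinary callers sleep while the device is quiesced;
		 * callers driving the reshape pass noquiesce=true and are
		 * let through so the operation can make progress */
		while (quiesced && !noquiesce)
			pthread_cond_wait(&wake, &lock);
		/* ... find or allocate the resource under the lock ... */
		pthread_mutex_unlock(&lock);
	}

	void set_quiesce(bool on)
	{
		pthread_mutex_lock(&lock);
		quiesced = on;
		if (!on)
			pthread_cond_broadcast(&wake);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		set_quiesce(true);
		get_resource(true);     /* reshape-style caller: not blocked */
		set_quiesce(false);
		get_resource(false);    /* normal caller: gate is open again */
		return 0;
	}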
index 8247a945a1d9b0df7f11aebc858eccb7552a02cd..3b19e0ce290fa4b6ba5f53a0b33c23b30ef098f8 100644 (file)
@@ -66,7 +66,6 @@ static const int multicast_filter_limit = 32;
 #define RX_DMA_BURST   6       /* Maximum PCI burst, '6' is 1024 */
 #define TX_DMA_BURST   6       /* Maximum PCI burst, '6' is 1024 */
 #define EarlyTxThld    0x3F    /* 0x3F means NO early transmit */
-#define RxPacketMaxSize        0x3FE8  /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
 #define SafeMtu                0x1c20  /* ... actually life sucks beyond ~7k */
 #define InterFrameGap  0x03    /* 3 means InterFrameGap = the shortest one */
 
@@ -2357,10 +2356,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
        return cmd;
 }
 
-static void rtl_set_rx_max_size(void __iomem *ioaddr)
+static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
 {
        /* Low hurts. Let's disable the filtering. */
-       RTL_W16(RxMaxSize, 16383);
+       RTL_W16(RxMaxSize, rx_buf_sz);
 }
 
 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
@@ -2407,7 +2406,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
 
        RTL_W8(EarlyTxThres, EarlyTxThld);
 
-       rtl_set_rx_max_size(ioaddr);
+       rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
 
        if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
            (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -2668,7 +2667,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
 
        RTL_W8(EarlyTxThres, EarlyTxThld);
 
-       rtl_set_rx_max_size(ioaddr);
+       rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
 
        tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
 
@@ -2846,7 +2845,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
 
        RTL_W8(EarlyTxThres, EarlyTxThld);
 
-       rtl_set_rx_max_size(ioaddr);
+       rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
 
        tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
 
index cc29b44b1500ef24a5115e044cd3478a602c6a61..e5becb92b3e7b0f17d43b3342a4621e51e162b4f 100644 (file)
@@ -167,6 +167,9 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
        struct tcf_exts e;
        int err;
 
+       if (!tca[TCA_OPTIONS])
+               return -EINVAL;
+
        if (head == NULL) {
                if (!handle)
                        return -EINVAL;
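
The cls_cgroup.c hunk is the usual "validate the netlink attributes before using
them" fix: a change request may simply omit TCA_OPTIONS, and parsing the missing
attribute later would dereference a NULL pointer, so it is rejected up front with
-EINVAL.  In generic form (illustrative only, not the kernel code):

	#include <stddef.h>
	#include <errno.h>

	struct attr {
		const void *data;
		size_t      len;
	};

	int change_entry(const struct attr *opts)
	{
		/* the caller may not have supplied the options attribute at
		 * all; bail out instead of dereferencing a NULL pointer in
		 * the parsing code further down */
		if (!opts)
			return -EINVAL;
		/* ... parse opts->data / opts->len ... */
		return 0;
	}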