block: disable entropy contributions for nonrot devices
author     Mike Snitzer <snitzer@redhat.com>
           Sat, 4 Oct 2014 16:55:32 +0000 (10:55 -0600)
committer  Jens Axboe <axboe@fb.com>
           Sat, 4 Oct 2014 16:55:32 +0000 (10:55 -0600)
Clear QUEUE_FLAG_ADD_RANDOM in all block drivers that set
QUEUE_FLAG_NONROT.

Historically, all block devices have automatically made entropy
contributions.  But as previously stated in commit e2e1a148 ("block: add
sysfs knob for turning off disk entropy contributions"):
    - On SSD disks, the completion times aren't as random as they
      are for rotational drives. So it's questionable whether they
      should contribute to the random pool in the first place.
    - Calling add_disk_randomness() has a lot of overhead.

There are more reliable sources for randomness than non-rotational block
devices.  From a security perspective it is better to err on the side of
caution than to allow entropy contributions from unreliable "random"
sources.
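
The per-driver change below follows the same two-line pattern; a minimal
sketch (with "q" standing in for whichever request_queue the driver owns)
looks like:

    /* mark the queue non-rotational... */
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
    /* ...and opt it out of entropy contributions */
    queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

Drivers that manipulate queue_flags directly (mtip32xx, bcache) use
clear_bit() on QUEUE_FLAG_ADD_RANDOM instead.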

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
14 files changed:
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/rsxx/dev.c
drivers/block/skd_main.c
drivers/block/zram/zram_drv.c
drivers/ide/ide-disk.c
drivers/md/bcache/super.c
drivers/mmc/card/queue.c
drivers/mtd/mtd_blkdevs.c
drivers/s390/block/scm_blk.c
drivers/s390/block/xpram.c
drivers/scsi/sd.c

diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index db1e9560d8a721c5165955651462c11d5b306f0a..936f8c160e4630fff48252b6aae8299341ca604e 100644
@@ -3952,6 +3952,7 @@ skip_create_disk:
 
        /* Set device limits. */
        set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
+       clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags);
        blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
        blk_queue_physical_block_size(dd->queue, 4096);
        blk_queue_max_hw_sectors(dd->queue, 0xffff);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index fb31b8ee4372323a4a93eddb0aa9141ea97d1021..4bc2a5cb9935fbc6f5256102bdf9c2c24b9e6d8c 100644
@@ -847,6 +847,7 @@ static int __init nbd_init(void)
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
                disk->queue->limits.discard_granularity = 512;
                disk->queue->limits.max_discard_sectors = UINT_MAX;
                disk->queue->limits.discard_zeroes_data = 0;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a3b042c4d448dcb4ad0a21272dd8ff83527f3b9d..b0d94b6973abf6ad5b170a9cdf667eb6edb60141 100644
@@ -507,6 +507,7 @@ static int null_add_dev(void)
 
        nullb->q->queuedata = nullb;
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
        disk = nullb->disk = alloc_disk_node(1, home_node);
        if (!disk)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 02351e2171651dc47585d4f5a25a9b5e85d303f3..e2bb8afbeae5902081f95c52da9e81e58d1d1693 100644
@@ -1916,6 +1916,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
        queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, ns->queue);
        blk_queue_make_request(ns->queue, nvme_make_request);
        ns->dev = dev;
        ns->queue->queuedata = ns;
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2839d37e5af77922051cb1d48561602ea3a3c2b7..40ee7705df63a061863539a03b8df5555f799c93 100644
@@ -307,6 +307,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
        blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
        if (rsxx_discard_supported(card)) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
                blk_queue_max_discard_sectors(card->queue,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 8fcdcfb4b47238469022146fe7b1861c904b7a2f..1e46eb2305c04a8e63e378befb95a905fdf01850 100644
@@ -4426,6 +4426,7 @@ static int skd_cons_disk(struct skd_device *skdev)
        q->limits.discard_zeroes_data = 1;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 
        spin_lock_irqsave(&skdev->lock, flags);
        pr_debug("%s:%s:%d stopping %s queue\n",
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index dfa4024c448a6222d8d12ffb2f05e1976652fca0..6dd2cef5b865e8f360add0c838068891cbe7e420 100644
@@ -925,6 +925,7 @@ static int create_device(struct zram *zram, int device_id)
        set_capacity(zram->disk, 0);
        /* zram devices sort of resembles non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index ee880382e3bce50b03775e9ab48504febb4ab6b3..56b9708894a5e294302e42066711274b6941616c 100644
@@ -685,8 +685,10 @@ static void ide_disk_setup(ide_drive_t *drive)
        printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
               queue_max_sectors(q) / 2);
 
-       if (ata_id_is_ssd(id))
+       if (ata_id_is_ssd(id)) {
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+       }
 
        /* calculate drive capacity, and select LBA if possible */
        ide_disk_get_capacity(drive);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index d4713d098a397c2f1b124f9fdd240d9907fa16d6..4dd2bb7167f05e94bda5d33b213dc67356fc9f5a 100644
@@ -842,6 +842,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
        q->limits.logical_block_size    = block_size;
        q->limits.physical_block_size   = block_size;
        set_bit(QUEUE_FLAG_NONROT,      &d->disk->queue->queue_flags);
+       clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
        set_bit(QUEUE_FLAG_DISCARD,     &d->disk->queue->queue_flags);
 
        blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3e049c13429cfbe730179724053796cc65ba62de..c19bfc1e565a6317770fca7682c5bf6c577e301f 100644
@@ -210,6 +210,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);
 
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 43e30992a3697a76f932bc778c76d8bcdf7080ce..d08229eb44d8a8911dc49c6cbdba51ec99cdbdf0 100644
@@ -417,6 +417,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
        blk_queue_logical_block_size(new->rq, tr->blksize);
 
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
 
        if (tr->discard) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 76bed1743db1c7ef23576282b13d9ef8f551aed8..56046ab3962946012c653c421f75d47f55caf3ce 100644
@@ -386,6 +386,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
        blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
        blk_queue_max_segments(rq, nr_max_blk);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
        scm_blk_dev_cluster_setup(bdev);
 
        bdev->gendisk = alloc_disk(SCM_NR_PARTS);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 6969d39f1e2eba7de41856cabc0d1557b7f3efe4..9e0de9c9a6fc73012e37871b3368d0191f1380f1 100644
@@ -346,6 +346,7 @@ static int __init xpram_setup_blkdev(void)
                        goto out;
                }
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
                blk_queue_make_request(xpram_queues[i], xpram_make_request);
                blk_queue_logical_block_size(xpram_queues[i], 4096);
        }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2c2041ca4b7065402a46820e0e714e6df8f31cba..fe67f5c107a27e79296b8d4bc2b7175a2072f6ed 100644
@@ -2660,8 +2660,10 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 
        rot = get_unaligned_be16(&buffer[4]);
 
-       if (rot == 1)
+       if (rot == 1) {
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+       }
 
  out:
        kfree(buffer);