block: make barrier completion more robust
[firefly-linux-kernel-4.4.55.git] / include/linux/blkdev.h
index 9cc7cc5fdce1a34632cb26825ed5ef0ecde7125e..3c7078e0129d2bab84198627de19164126e08b5d 100644
@@ -449,6 +449,7 @@ struct request_queue
 #define QUEUE_FLAG_FAIL_IO     12      /* fake timeout */
 #define QUEUE_FLAG_STACKABLE   13      /* supports request stacking */
 #define QUEUE_FLAG_NONROT      14      /* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -522,22 +523,32 @@ enum {
         * TAG_FLUSH    : ordering by tag w/ pre and post flushes
         * TAG_FUA      : ordering by tag w/ pre flush and FUA write
         */
-       QUEUE_ORDERED_NONE      = 0x00,
-       QUEUE_ORDERED_DRAIN     = 0x01,
-       QUEUE_ORDERED_TAG       = 0x02,
-
-       QUEUE_ORDERED_PREFLUSH  = 0x10,
-       QUEUE_ORDERED_POSTFLUSH = 0x20,
-       QUEUE_ORDERED_FUA       = 0x40,
-
-       QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-       QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-       QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-       QUEUE_ORDERED_TAG_FUA   = QUEUE_ORDERED_TAG |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+       QUEUE_ORDERED_BY_DRAIN          = 0x01,
+       QUEUE_ORDERED_BY_TAG            = 0x02,
+       QUEUE_ORDERED_DO_PREFLUSH       = 0x10,
+       QUEUE_ORDERED_DO_BAR            = 0x20,
+       QUEUE_ORDERED_DO_POSTFLUSH      = 0x40,
+       QUEUE_ORDERED_DO_FUA            = 0x80,
+
+       QUEUE_ORDERED_NONE              = 0x00,
+
+       QUEUE_ORDERED_DRAIN             = QUEUE_ORDERED_BY_DRAIN |
+                                         QUEUE_ORDERED_DO_BAR,
+       QUEUE_ORDERED_DRAIN_FLUSH       = QUEUE_ORDERED_DRAIN |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_POSTFLUSH,
+       QUEUE_ORDERED_DRAIN_FUA         = QUEUE_ORDERED_DRAIN |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_FUA,
+
+       QUEUE_ORDERED_TAG               = QUEUE_ORDERED_BY_TAG |
+                                         QUEUE_ORDERED_DO_BAR,
+       QUEUE_ORDERED_TAG_FLUSH         = QUEUE_ORDERED_TAG |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_POSTFLUSH,
+       QUEUE_ORDERED_TAG_FUA           = QUEUE_ORDERED_TAG |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_FUA,
 
        /*
         * Ordered operation sequence
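
The ordering modes are now built from two groups of bits: QUEUE_ORDERED_BY_* selects how ordering is enforced (draining the queue or ordered tags) and QUEUE_ORDERED_DO_* selects which barrier actions are performed (pre-flush, the barrier write itself, post-flush, FUA). Drivers keep passing one of the composite QUEUE_ORDERED_* values to blk_queue_ordered(), whose prototype appears further down in this header. A sketch under the assumption of a hypothetical driver whose hardware has a write-back cache and an explicit cache-flush command:

#include <linux/blkdev.h>

/* Hypothetical: turn @rq into the device's cache-flush command. */
static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
	/* fill in a device-specific cache-flush command for @rq here */
}

static int mydrv_setup_ordering(struct request_queue *q)
{
	/*
	 * Drain the queue around barriers and bracket the barrier write
	 * with explicit cache flushes (pre-flush and post-flush).
	 */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				 mydrv_prepare_flush);
}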
@@ -662,6 +673,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
  * default timeout for SG_IO if none specified
  */
 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
+#define BLK_MIN_SG_TIMEOUT     (7 * HZ)
 
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
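
BLK_MIN_SG_TIMEOUT puts a floor (7 * HZ, i.e. seven seconds) under user-supplied SG_IO timeouts so that a tiny value cannot make every passthrough command expire immediately. A sketch of the kind of clamping the ioctl path can apply; the helper name is hypothetical:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/blkdev.h>

/* Hypothetical helper: derive a request timeout from an SG_IO header. */
static unsigned int sgio_timeout(struct request_queue *q, unsigned int timeout_ms)
{
	unsigned int timeout = msecs_to_jiffies(timeout_ms);

	if (!timeout)
		timeout = q->sg_timeout;		/* per-queue override */
	if (!timeout)
		timeout = BLK_DEFAULT_SG_TIMEOUT;	/* 60 seconds */

	/* Never allow less than the 7 second minimum. */
	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
}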
@@ -854,10 +866,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
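
blk_do_ordered() and blk_ordered_complete_seq() now return bool, letting callers see directly whether a request may be dispatched or whether the barrier sequence has finished, rather than inferring it from side effects. A sketch of how a dispatch path might consume the blk_do_ordered() result; the wrapper function here is hypothetical:

#include <linux/blkdev.h>

/* Hypothetical: fetch the next request, honouring barrier ordering. */
static struct request *fetch_request(struct request_queue *q, struct request *rq)
{
	/*
	 * blk_do_ordered() may start a barrier sequence, substitute one of
	 * its pre/post flush requests for @rq, or ask us to stall.  A
	 * false return means nothing can be dispatched right now.
	 */
	if (!blk_do_ordered(q, &rq))
		return NULL;

	return rq;
}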
@@ -918,6 +930,8 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define MAX_SEGMENT_SIZE       65536
 
+#define BLK_SEG_BOUNDARY_MASK  0xFFFFFFFFUL
+
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
 static inline int queue_hardsect_size(struct request_queue *q)
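
BLK_SEG_BOUNDARY_MASK names the default segment-boundary mask (segments may not cross a 4 GiB boundary) instead of leaving it as an open-coded constant; drivers with stricter DMA constraints still override it per queue. A sketch with a hypothetical controller that cannot cross 64 KiB:

#include <linux/blkdev.h>

/* Hypothetical: restrict segments for a controller with a 64 KiB DMA boundary. */
static void mydrv_set_dma_limits(struct request_queue *q)
{
	/*
	 * Queues start out with BLK_SEG_BOUNDARY_MASK; tighten the
	 * boundary for this device's DMA engine.
	 */
	blk_queue_segment_boundary(q, 0xFFFFUL);
}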
@@ -974,7 +988,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))