#include <linux/cpu.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
+#include <linux/scatterlist.h>
/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sg)
+ struct scatterlist *sglist)
{
struct bio_vec *bvec, *bvprv;
struct req_iterator iter;
+ struct scatterlist *sg;
int nsegs, cluster;
nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);

/*
 * for each bio in rq
 */
bvprv = NULL;
+ sg = NULL;
rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
- if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+ if (sg->length + nbytes > q->max_segment_size)
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
	goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
	goto new_segment;
- sg[nsegs - 1].length += nbytes;
+ sg->length += nbytes;
} else {
new_segment:
- memset(&sg[nsegs],0,sizeof(struct scatterlist));
- sg[nsegs].page = bvec->bv_page;
- sg[nsegs].length = nbytes;
- sg[nsegs].offset = bvec->bv_offset;
-
+ if (!sg)
+ sg = sglist;
+ else
+ sg = sg_next(sg);
+
+ memset(sg, 0, sizeof(*sg));
+ sg->page = bvec->bv_page;
+ sg->length = nbytes;
+ sg->offset = bvec->bv_offset;
nsegs++;
}
bvprv = bvec;
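
For context, a minimal sketch (hypothetical caller, not part of this patch; the function and variable names are assumptions) of how a driver would walk the scatterlist that blk_rq_map_sg() now fills via sg_next() rather than flat array indexing:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Hypothetical caller of blk_rq_map_sg(); illustrative only. */
static void example_map_and_walk(struct request_queue *q, struct request *rq,
				 struct scatterlist *sglist)
{
	struct scatterlist *sg = sglist;
	int i, nsegs;

	/* sglist must hold at least rq->nr_phys_segments entries */
	nsegs = blk_rq_map_sg(q, rq, sglist);

	/* walk the (possibly chained) list with sg_next(), not sg[i] */
	for (i = 0; i < nsegs; i++, sg = sg_next(sg))
		pr_debug("seg %d: page %p off %u len %u\n",
			 i, sg->page, sg->offset, sg->length);
}
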
blk_trace_shutdown(q);
+ bdi_destroy(&q->backing_dev_info);
kmem_cache_free(requestq_cachep, q);
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
+ int err;
q = kmem_cache_alloc_node(requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
+ q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+ q->backing_dev_info.unplug_io_data = q;
+ err = bdi_init(&q->backing_dev_info);
+ if (err) {
+ kmem_cache_free(requestq_cachep, q);
+ return NULL;
+ }
+
init_timer(&q->unplug_timer);
kobject_set_name(&q->kobj, "%s", "queue");
q->kobj.ktype = &queue_ktype;
kobject_init(&q->kobj);
- q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
- q->backing_dev_info.unplug_io_data = q;
-
mutex_init(&q->sysfs_lock);
return q;
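
A hedged sketch of the allocation/teardown pairing that the bdi_init()/bdi_destroy() calls above rely on (caller name is illustrative; driver-specific setup and error handling beyond the NULL check are elided):

#include <linux/blkdev.h>

/* Illustrative only: blk_alloc_queue_node() now initializes the
 * backing_dev_info, and the matching bdi_destroy() runs from the queue
 * release path once the last reference is dropped. */
static int example_setup_queue(void)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, -1);	/* -1: no NUMA node preference */
	if (!q)
		return -ENOMEM;

	/* ... driver-specific setup would go here ... */

	blk_put_queue(q);	/* drop the reference; release path calls bdi_destroy() */
	return 0;
}
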
if (unlikely(block_dump)) {
char b[BDEVNAME_SIZE];
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
- current->comm, current->pid,
+ current->comm, task_pid_nr(current),
(rw & WRITE) ? "WRITE" : "READ",
(unsigned long long)bio->bi_sector,
bdevname(bio->bi_bdev,b));
/**
* end_request - end I/O on the current segment of the request
- * @rq: the request being processed
+ * @req: the request being processed
* @uptodate: error value or 0/1 uptodate flag
*
* Description:
max_hw_sectors_kb = q->max_hw_sectors >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
- int ra_kb;
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
/*
 * Take the queue lock to update the max_sectors value
 * synchronously:
 */
spin_lock_irq(q->queue_lock);
- /*
- * Trim readahead window as well, if necessary:
- */
- ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
- if (ra_kb > max_sectors_kb)
- q->backing_dev_info.ra_pages =
- max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
-
q->max_sectors = max_sectors_kb << 1;
spin_unlock_irq(q->queue_lock);
return queue_var_show(max_hw_sectors_kb, (page));
}
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->max_phys_segments, page);
+}
+
+static ssize_t queue_max_segments_store(struct request_queue *q,
+ const char *page, size_t count)
+{
+ unsigned long segments;
+ ssize_t ret = queue_var_store(&segments, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ q->max_phys_segments = segments;
+ spin_unlock_irq(q->queue_lock);
+ return ret;
+}
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
.show = queue_max_hw_sectors_show,
};
+static struct queue_sysfs_entry queue_max_segments_entry = {
+ .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_max_segments_show,
+ .store = queue_max_segments_store,
+};
+
static struct queue_sysfs_entry queue_iosched_entry = {
.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
.show = elv_iosched_show,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
+ &queue_max_segments_entry.attr,
&queue_iosched_entry.attr,
NULL,
};
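
Finally, a small illustrative sketch (assumed driver-side code, not part of this patch) of how q->max_phys_segments, which the new max_segments sysfs attribute reads and writes, bounds the scatterlist a driver needs to allocate:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative only: size the driver's scatterlist to the queue's
 * physical-segment limit so blk_rq_map_sg() can never overrun it. */
static struct scatterlist *example_alloc_sgtable(struct request_queue *q)
{
	return kcalloc(q->max_phys_segments, sizeof(struct scatterlist),
		       GFP_KERNEL);
}
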