1 #include <linux/ceph/ceph_debug.h>
3 #include <linux/module.h>
4 #include <linux/sched.h>
5 #include <linux/slab.h>
6 #include <linux/file.h>
7 #include <linux/namei.h>
8 #include <linux/writeback.h>
10 #include <linux/ceph/libceph.h>
13 * build a vector of user pages
15 struct page **ceph_get_direct_page_vector(const void __user *data,
16 int num_pages, bool write_page)
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
24 return ERR_PTR(-ENOMEM);
26 while (got < num_pages) {
27 rc = get_user_pages_unlocked(current, current->mm,
28 (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
29 num_pages - got, write_page, 0, pages + got);
40 ceph_put_page_vector(pages, got, false);
43 EXPORT_SYMBOL(ceph_get_direct_page_vector);
45 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
49 for (i = 0; i < num_pages; i++) {
51 set_page_dirty_lock(pages[i]);
54 if (is_vmalloc_addr(pages))
59 EXPORT_SYMBOL(ceph_put_page_vector);
/*
 * Free the pages allocated by ceph_alloc_page_vector(), then free the
 * vector itself.
 *
 * @pages:     the page vector to free
 * @num_pages: number of pages in the vector
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);	/* order-0 pages */
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);
72 * allocate a vector new pages
74 struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
79 pages = kmalloc(sizeof(*pages) * num_pages, flags);
81 return ERR_PTR(-ENOMEM);
82 for (i = 0; i < num_pages; i++) {
83 pages[i] = __page_cache_alloc(flags);
84 if (pages[i] == NULL) {
85 ceph_release_page_vector(pages, i);
86 return ERR_PTR(-ENOMEM);
91 EXPORT_SYMBOL(ceph_alloc_page_vector);
94 * copy user data into a page vector
96 int ceph_copy_user_to_page_vector(struct page **pages,
97 const void __user *data,
98 loff_t off, size_t len)
101 int po = off & ~PAGE_CACHE_MASK;
106 l = min_t(int, PAGE_CACHE_SIZE-po, left);
107 bad = copy_from_user(page_address(pages[i]) + po, data, l);
113 if (po == PAGE_CACHE_SIZE) {
120 EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
122 void ceph_copy_to_page_vector(struct page **pages,
124 loff_t off, size_t len)
127 size_t po = off & ~PAGE_CACHE_MASK;
131 size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
133 memcpy(page_address(pages[i]) + po, data, l);
137 if (po == PAGE_CACHE_SIZE) {
143 EXPORT_SYMBOL(ceph_copy_to_page_vector);
145 void ceph_copy_from_page_vector(struct page **pages,
147 loff_t off, size_t len)
150 size_t po = off & ~PAGE_CACHE_MASK;
154 size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
156 memcpy(data, page_address(pages[i]) + po, l);
160 if (po == PAGE_CACHE_SIZE) {
166 EXPORT_SYMBOL(ceph_copy_from_page_vector);
169 * Zero an extent within a page vector. Offset is relative to the
170 * start of the first page.
172 void ceph_zero_page_vector_range(int off, int len, struct page **pages)
174 int i = off >> PAGE_CACHE_SHIFT;
176 off &= ~PAGE_CACHE_MASK;
178 dout("zero_page_vector_page %u~%u\n", off, len);
180 /* leading partial page? */
182 int end = min((int)PAGE_CACHE_SIZE, off + len);
183 dout("zeroing %d %p head from %d\n", i, pages[i],
185 zero_user_segment(pages[i], off, end);
189 while (len >= PAGE_CACHE_SIZE) {
190 dout("zeroing %d %p len=%d\n", i, pages[i], len);
191 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
192 len -= PAGE_CACHE_SIZE;
195 /* trailing partial page? */
197 dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
198 zero_user_segment(pages[i], 0, len);
201 EXPORT_SYMBOL(ceph_zero_page_vector_range);