#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
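
/*
 * Copy "bytes" bytes from "page", starting at "offset", into the user
 * buffers described by the iov_iter, advancing the iterator past the
 * data actually copied.  Returns the number of bytes copied, which may
 * be short if a user page cannot be faulted in.
 */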
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
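
/*
 * Copy "bytes" bytes of user data, spread over one or more iovecs
 * starting "base" bytes into the first one, to the kernel buffer at
 * "vaddr".  Stops at the first fault and returns the number of bytes
 * actually copied.
 */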
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}
/*
 * Copy as much as we can into the page and return the number of bytes
 * successfully copied.  If a fault is encountered, return the number of
 * bytes that were copied before the fault.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
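
/*
 * Advance the iterator by "bytes" bytes, stepping over consumed iovecs
 * (and any zero-length segments) and updating count, iov_offset and
 * nr_segs accordingly.
 */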
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
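
/*
 * Return the "alignment" of the iterator: the bitwise OR of the user
 * addresses and lengths of every segment covered by i->count.  Callers
 * can AND the result with an alignment mask to check whether all
 * segments are suitably aligned, e.g. for direct I/O.
 */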
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	unsigned long res;
	size_t size = i->count;
	size_t n;

	if (!size)
		return 0;

	res = (unsigned long)iov->iov_base + i->iov_offset;
	n = iov->iov_len - i->iov_offset;
	if (n >= size)
		return res | size;
	size -= n;
	res |= n;
	while (size > (++iov)->iov_len) {
		res |= (unsigned long)iov->iov_base | iov->iov_len;
		size -= iov->iov_len;
	}
	res |= (unsigned long)iov->iov_base | size;
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
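
/*
 * Initialize the iterator over the given iovec array.  When the caller
 * is running with a kernel address limit (KERNEL_DS), the iovecs are
 * assumed to refer to kernel memory, so REQ_KERNEL is ORed into the
 * direction flags.
 */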
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= REQ_KERNEL;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
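
/*
 * Pin the user pages backing the current segment, up to "maxsize"
 * bytes.  The page pointers are stored in "pages" and the offset of the
 * data within the first page in "*start".  Returns the number of bytes
 * covered by the pinned pages, or a negative errno on failure.
 */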
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages);
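
/*
 * Same as iov_iter_get_pages(), except that the page pointer array is
 * allocated here (kmalloc, falling back to vmalloc) and handed back via
 * "*pages"; the caller is responsible for freeing it.
 */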
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	void *p;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	if (!p)
		return -ENOMEM;

	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
	if (unlikely(res < 0)) {
		kvfree(p);
		return res;
	}
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
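
/*
 * Return the number of pages spanned by the data remaining in the
 * iterator, capped at "maxpages".
 */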
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t offset = i->iov_offset;
	size_t size = i->count;
	const struct iovec *iov = i->iov;
	int npages = 0;
	int n;

	for (n = 0; size && n < i->nr_segs; n++, iov++) {
		unsigned long addr = (unsigned long)iov->iov_base + offset;
		size_t len = iov->iov_len - offset;

		if (unlikely(!len))	/* empty segment */
			continue;
		if (len > size)
			len = size;
		npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
			  - addr / PAGE_SIZE;
		if (npages >= maxpages)	/* don't bother going further */
			return maxpages;
		size -= len;
		offset = 0;
	}
	return min(npages, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);