/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/pagemap.h>
16 #include <asm/semaphore.h>
25 #include "ops_address.h"
/*
 * NOTE(review): elided listing — leading digits are original line numbers
 * and rows are missing between them; comments cover only visible code.
 */
31 * get_block - Fills in a buffer head with details about a block
33 * @lblock: The block number to look up
34 * @bh_result: The buffer head to return the result in
35 * @create: Non-zero if we may add block to the file
40 static int get_block(struct inode *inode, sector_t lblock,
41 struct buffer_head *bh_result, int create)
/* Recover the GFS2 incore inode hung off the VFS inode. */
43 struct gfs2_inode *ip = get_v2ip(inode);
/* Logical-to-physical lookup; the NULL last argument means no extent
 * length is wanted, i.e. a single-block mapping. */
48 error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
/* Map the buffer head to the disk block (presumably guarded by a
 * dblock != 0 test on an elided line — confirm against full source). */
55 map_bh(bh_result, inode->i_sb, dblock);
/* Flag freshly allocated blocks so the caller zero-fills them
 * (presumably guarded by `new` on an elided line). */
57 set_buffer_new(bh_result);
/*
 * NOTE(review): elided listing — leading digits are original line numbers;
 * interior rows are missing.
 */
63 * get_block_noalloc - Fills in a buffer head with details about a block
65 * @lblock: The block number to look up
66 * @bh_result: The buffer head to return the result in
67 * @create: Non-zero if we may add block to the file
72 static int get_block_noalloc(struct inode *inode, sector_t lblock,
73 struct buffer_head *bh_result, int create)
75 struct gfs2_inode *ip = get_v2ip(inode);
/* Same lookup as get_block(); this variant must never allocate. */
80 error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
85 map_bh(bh_result, inode->i_sb, dblock);
/* No block found while create was requested is a filesystem-consistency
 * bug: gfs2_assert_withdraw() withdraws the fs when !create is false. */
86 else if (gfs2_assert_withdraw(ip->i_sbd, !create))
/*
 * get_blocks - multi-block mapping variant used by direct I/O.
 * NOTE(review): elided listing — leading digits are original line numbers;
 * interior rows (declarations, guards, return) are missing.
 */
92 static int get_blocks(struct inode *inode, sector_t lblock,
93 unsigned long max_blocks, struct buffer_head *bh_result,
96 struct gfs2_inode *ip = get_v2ip(inode);
/* Unlike get_block(), also asks for the extent length (&extlen). */
102 error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
109 map_bh(bh_result, inode->i_sb, dblock);
111 set_buffer_new(bh_result);
/* Clamp the extent to what the caller asked for (the clamping
 * assignment itself sits on an elided line — confirm). */
113 if (extlen > max_blocks)
/* Report the mapped extent size in bytes via b_size. */
115 bh_result->b_size = extlen << inode->i_blkbits;
/*
 * get_blocks_noalloc - multi-block mapping that must not allocate;
 * used for direct-I/O writes inside an existing allocation.
 * NOTE(review): elided listing — interior rows are missing.
 */
120 static int get_blocks_noalloc(struct inode *inode, sector_t lblock,
121 unsigned long max_blocks,
122 struct buffer_head *bh_result, int create)
124 struct gfs2_inode *ip = get_v2ip(inode);
130 error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
135 map_bh(bh_result, inode->i_sb, dblock);
/* Clamp the extent to the caller's limit (assignment elided). */
136 if (extlen > max_blocks)
138 bh_result->b_size = extlen << inode->i_blkbits;
/* Unmapped block while create was set ⇒ on-disk inconsistency:
 * withdraw the filesystem rather than allocate here. */
139 } else if (gfs2_assert_withdraw(ip->i_sbd, !create))
/*
 * NOTE(review): elided listing — interior rows are missing.
 */
146 * gfs2_writepage - Write complete page
147 * @page: Page to write
151 * Use Linux VFS block_write_full_page() to write one page,
152 * using GFS2's get_block_noalloc to find which blocks to write.
155 static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
157 struct gfs2_inode *ip = get_v2ip(page->mapping->host);
158 struct gfs2_sbd *sdp = ip->i_sbd;
/* Statistics counter for address-space-ops calls. */
161 atomic_inc(&sdp->sd_ops_address);
/* Writeback requires the inode glock held exclusively; if not, the
 * assert withdraws the fs and this branch bails out (body elided). */
163 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
/* Inside a transaction we must not write back here — defer the page
 * by redirtying it (get_transaction is presumably a per-task current
 * transaction pointer — confirm against full source). */
167 if (get_transaction) {
168 redirty_page_for_writepage(wbc, page);
/* Ordinary path: write the page via buffer heads, never allocating. */
173 error = block_write_full_page(page, get_block_noalloc, wbc);
/* Drop cached metadata buffers for this inode after writeback. */
175 gfs2_meta_cache_flush(ip);
/*
 * NOTE(review): elided listing — interior rows (error check, kmap,
 * brelse, return) are missing.
 */
181 * stuffed_readpage - Fill in a Linux page with stuffed file data
188 static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
190 struct buffer_head *dibh;
/* Read the on-disk dinode block; stuffed file data lives directly
 * after the dinode header inside that block. */
194 error = gfs2_meta_inode_buffer(ip, &dibh);
/* Copy di_size bytes of stuffed data into the (mapped) page... */
199 memcpy((char *)kaddr,
200 dibh->b_data + sizeof(struct gfs2_dinode),
/* ...and zero-fill the remainder of the page past EOF. */
202 memset((char *)kaddr + ip->i_di.di_size,
204 PAGE_CACHE_SIZE - ip->i_di.di_size);
209 SetPageUptodate(page);
/*
 * zero_readpage - satisfy a readpage entirely with zeroes (page lies
 * beyond the data, e.g. past EOF of a stuffed file).
 * NOTE(review): elided listing — kmap/kunmap, unlock and return rows
 * are missing.
 */
214 static int zero_readpage(struct page *page)
219 memset(kaddr, 0, PAGE_CACHE_SIZE);
222 SetPageUptodate(page);
/*
 * NOTE(review): elided listing — interior rows are missing.
 */
229 * jdata_readpage - readpage that goes through gfs2_jdata_read_mem()
231 * @page: The page to read
236 static int jdata_readpage(struct gfs2_inode *ip, struct page *page)
/* Read one page worth of journaled data at the page's file offset. */
243 ret = gfs2_jdata_read_mem(ip, kaddr,
244 (uint64_t)page->index << PAGE_CACHE_SHIFT,
/* Short read ⇒ zero-fill the tail of the page. */
247 if (ret < PAGE_CACHE_SIZE)
248 memset(kaddr + ret, 0, PAGE_CACHE_SIZE - ret);
249 SetPageUptodate(page);
/*
 * NOTE(review): elided listing — interior rows (else arms, unlock,
 * return) are missing; the if/else pairing below is partly inferred.
 */
261 * gfs2_readpage - readpage with locking
262 * @file: The file to read a page for
263 * @page: The page to read
268 static int gfs2_readpage(struct file *file, struct page *page)
270 struct gfs2_inode *ip = get_v2ip(page->mapping->host);
271 struct gfs2_sbd *sdp = ip->i_sbd;
274 atomic_inc(&sdp->sd_ops_address);
/* The caller must already hold the inode glock; warn-and-bail if not. */
276 if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl))) {
/* Dispatch on the inode's data layout: */
281 if (!gfs2_is_jdata(ip)) {
282 if (gfs2_is_stuffed(ip)) {
/* Stuffed + first page: copy data out of the dinode block. */
284 error = stuffed_readpage(ip, page);
/* Stuffed but past the first page: all zeroes. */
287 error = zero_readpage(page);
/* Regular (unstuffed, non-journaled) data: buffer-head read. */
289 error = block_read_full_page(page, get_block);
/* Journaled data goes through the jdata read path. */
291 error = jdata_readpage(ip, page);
/* A withdrawn/shut-down fs poisons the result (handling elided). */
293 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
/*
 * NOTE(review): elided listing — interior rows are missing; branch
 * structure below is partly inferred from the visible fragments.
 */
300 * gfs2_prepare_write - Prepare to write a page to a file
301 * @file: The file to write to
302 * @page: The page which is to be prepared for writing
303 * @from: From (byte range within page)
304 * @to: To (byte range within page)
309 static int gfs2_prepare_write(struct file *file, struct page *page,
310 unsigned from, unsigned to)
312 struct gfs2_inode *ip = get_v2ip(page->mapping->host);
313 struct gfs2_sbd *sdp = ip->i_sbd;
316 atomic_inc(&sdp->sd_ops_address);
/* Caller must hold the inode glock. */
318 if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
321 if (gfs2_is_stuffed(ip)) {
/* End-of-write offset in the file, derived from the page index. */
323 file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;
/* Write would no longer fit beside the dinode in one fs block:
 * unstuff first, then fall through to the normal prepare path. */
325 if (file_size > sdp->sd_sb.sb_bsize -
326 sizeof(struct gfs2_dinode)) {
327 error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_page,
330 error = block_prepare_write(page, from, to,
/* Still stuffed: make sure the page holds the current stuffed data
 * before it is partially overwritten. */
332 } else if (!PageUptodate(page))
333 error = stuffed_readpage(ip, page);
/* Unstuffed file: ordinary buffer-head prepare, may allocate. */
335 error = block_prepare_write(page, from, to, get_block);
/*
 * NOTE(review): elided listing — interior rows (error checks, kmap,
 * brelse, error-unwind label) are missing.
 */
341 * gfs2_commit_write - Commit write to a file
342 * @file: The file to write to
343 * @page: The page containing the data
344 * @from: From (byte range within page)
345 * @to: To (byte range within page)
350 static int gfs2_commit_write(struct file *file, struct page *page,
351 unsigned from, unsigned to)
353 struct inode *inode = page->mapping->host;
354 struct gfs2_inode *ip = get_v2ip(inode);
355 struct gfs2_sbd *sdp = ip->i_sbd;
358 atomic_inc(&sdp->sd_ops_address);
/* Stuffed path: copy the written page range straight back into the
 * dinode block under the running transaction. */
360 if (gfs2_is_stuffed(ip)) {
361 struct buffer_head *dibh;
365 file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;
367 error = gfs2_meta_inode_buffer(ip, &dibh);
/* Journal the dinode buffer before modifying it. */
371 gfs2_trans_add_bh(ip->i_gl, dibh);
374 memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
375 (char *)kaddr + from,
381 SetPageUptodate(page);
/* Grow i_size if the write extended the file. */
383 if (inode->i_size < file_size)
384 i_size_write(inode, file_size);
/* Unstuffed path: in ordered mode, register the page's buffers with
 * the journal so data hits disk before the commit. */
386 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED)
387 gfs2_page_add_databufs(sdp, page, from, to);
388 error = generic_commit_write(file, page, from, to);
/* Error unwind (label elided): page contents are unreliable. */
396 ClearPageUptodate(page);
/*
 * NOTE(review): elided listing — interior rows (declarations, error
 * check, return) are missing.
 */
402 * gfs2_bmap - Block map function
403 * @mapping: Address space info
404 * @lblock: The block to map
406 * Returns: The disk address for the block or 0 on hole or error
409 static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
411 struct gfs2_inode *ip = get_v2ip(mapping->host);
412 struct gfs2_holder i_gh;
416 atomic_inc(&ip->i_sbd->sd_ops_address);
/* Take the inode glock shared; LM_FLAG_ANY accepts a compatible
 * already-granted state. */
418 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
/* Stuffed files have no distinct data block — bmap reports 0 (dblock
 * presumably pre-initialized to 0 on an elided line — confirm). */
422 if (!gfs2_is_stuffed(ip))
423 dblock = generic_block_bmap(mapping, lblock, get_block);
425 gfs2_glock_dq_uninit(&i_gh);
/*
 * discard_buffer - detach a buffer head from GFS2 journaling state and
 * clear its VM state bits so the page can be invalidated.
 * NOTE(review): elided listing — the log-locked region's body (what is
 * done with `db` between lock and unlock) is missing; the duplicated
 * gfs2_log_unlock rows are presumably two branch exits — confirm.
 */
430 static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
432 struct gfs2_databuf *db;
439 gfs2_log_unlock(sdp);
442 gfs2_log_unlock(sdp);
/* Scrub the buffer's VM state: it must look brand new afterwards. */
445 clear_buffer_dirty(bh);
447 clear_buffer_mapped(bh);
448 clear_buffer_req(bh);
449 clear_buffer_new(bh);
450 clear_buffer_delay(bh);
/*
 * gfs2_invalidatepage - drop (part of) a page's buffers when the page
 * is being invalidated at byte @offset.
 * NOTE(review): elided listing — the do-loop opener, curr_off/bh
 * advancement and the partial-page early-return are missing.
 */
454 static int gfs2_invalidatepage(struct page *page, unsigned long offset)
456 struct gfs2_sbd *sdp = get_v2sdp(page->mapping->host->i_sb);
457 struct buffer_head *head, *bh, *next;
458 unsigned int curr_off = 0;
461 BUG_ON(!PageLocked(page));
/* Nothing to discard if the page never had buffer heads. */
462 if (!page_has_buffers(page))
465 bh = head = page_buffers(page);
/* Walk the circular buffer list attached to the page... */
467 unsigned int next_off = curr_off + bh->b_size;
468 next = bh->b_this_page;
/* ...discarding every buffer that starts at or after @offset. */
470 if (offset <= curr_off)
471 discard_buffer(sdp, bh);
475 } while (bh != head);
/* Whole page invalidated: try to free the buffers entirely. */
478 ret = try_to_release_page(page, 0);
/*
 * gfs2_direct_IO - O_DIRECT entry point; hands off to the generic
 * block-device direct-I/O engine with a GFS2 block mapper.
 * NOTE(review): elided listing — interior rows (error return for the
 * assert branch) are missing.
 */
483 static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
484 loff_t offset, unsigned long nr_segs)
486 struct file *file = iocb->ki_filp;
487 struct inode *inode = file->f_mapping->host;
488 struct gfs2_inode *ip = get_v2ip(inode);
489 struct gfs2_sbd *sdp = ip->i_sbd;
/* Default mapper may allocate blocks. */
490 get_blocks_t *gb = get_blocks;
492 atomic_inc(&sdp->sd_ops_address);
/* Caller must hold the glock, and direct I/O is never valid on a
 * stuffed inode (data lives inside the dinode block). */
494 if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)) ||
495 gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
/* Writes outside a transaction must not allocate: swap in the
 * no-allocation mapper. */
498 if (rw == WRITE && !get_transaction)
499 gb = get_blocks_noalloc;
501 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
502 offset, nr_segs, gb, NULL);
505 struct address_space_operations gfs2_file_aops = {
506 .writepage = gfs2_writepage,
507 .readpage = gfs2_readpage,
508 .sync_page = block_sync_page,
509 .prepare_write = gfs2_prepare_write,
510 .commit_write = gfs2_commit_write,
512 .invalidatepage = gfs2_invalidatepage,
513 .direct_IO = gfs2_direct_IO,