/*
 * balloc.c
 *
 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *         ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1999-2001 Ben Fennema
 * (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *         02/24/99 blf  Created.
 */
#include "udfdecl.h"
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#include "udf_i.h"
#include "udf_sb.h"
#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_next_one_bit(addr, size, offset) \
        ext2_find_next_bit(addr, size, offset)
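
/*
 * Read one block of the partition's space bitmap and cache the buffer_head
 * in bitmap->s_block_bitmap[bitmap_nr].  Returns 0 on success or -EIO if
 * the block could not be read.
 */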
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        struct kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
        if (!bh)
                retval = -EIO;

        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}
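
/*
 * Load the bitmap block for @block_group into the cache if it is not
 * already there.  Returns the slot (group) number on success or a negative
 * errno from read_block_bitmap().
 */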
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups)
                udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
                          nr_groups);

        if (bitmap->s_block_bitmap[block_group])
                return block_group;

        retval = read_block_bitmap(sb, bitmap, block_group, block_group);
        if (retval < 0)
                return retval;
        return block_group;
}
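
/*
 * Wrapper around __load_block_bitmap() that also verifies the cached
 * buffer_head is really present, returning -EIO if it is not.
 */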
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;
        if (!bitmap->s_block_bitmap[slot])
                return -EIO;
        return slot;
}
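
/*
 * Adjust the free space counter for @partition in the Logical Volume
 * Integrity Descriptor by @cnt blocks (a negative count accounts for an
 * allocation) and mark the LVID as updated.
 */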
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;

        if (!sbi->s_lvid_bh)
                return;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
        udf_updated_lvid(sb);
}
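
/*
 * Free @count blocks described by @bloc + @offset in a bitmap managed
 * partition: set the corresponding bits (crossing block group boundaries
 * if needed), release the quota charge and update the free space count.
 */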
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct inode *inode,
                                   struct udf_bitmap *bitmap,
                                   struct kernel_lb_addr *bloc,
                                   uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        struct udf_part_map *partmap;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
                          count, partmap->s_partition_len);
                goto error_return;
        }

        block = bloc->logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        do {
                overflow = 0;
                block_group = block >> (sb->s_blocksize_bits + 3);
                bit = block % (sb->s_blocksize << 3);

                /*
                 * Check to see if we are freeing blocks across a group boundary.
                 */
                if (bit + count > (sb->s_blocksize << 3)) {
                        overflow = bit + count - (sb->s_blocksize << 3);
                        count -= overflow;
                }
                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;

                bh = bitmap->s_block_bitmap[bitmap_nr];
                for (i = 0; i < count; i++) {
                        if (udf_set_bit(bit + i, bh->b_data)) {
                                udf_debug("bit %ld already set\n", bit + i);
                                udf_debug("byte=%2x\n",
                                          ((char *)bh->b_data)[(bit + i) >> 3]);
                        } else {
                                if (inode)
                                        dquot_free_block(inode, 1);
                                udf_add_free_space(sb, sbi->s_partition, 1);
                        }
                }
                mark_buffer_dirty(bh);
                if (overflow) {
                        block += count;
                        count = overflow;
                }
        } while (overflow);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}
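
/*
 * Preallocate up to @block_count blocks starting at @first_block from a
 * bitmap managed partition by clearing the matching bits.  Stops at the
 * first block that is already in use and returns the number of blocks
 * actually allocated.
 */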
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct inode *inode,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group, group_start;
        int nr_groups, bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                nr_groups = udf_compute_nr_groups(sb, partition);
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_test_bit(bit, bh->b_data))
                                goto out;
                        else if (dquot_prealloc_block(inode, 1))
                                goto out;
                        else if (!udf_clear_bit(bit, bh->b_data)) {
                                udf_debug("bit already cleared for block %d\n", bit);
                                dquot_free_block(inode, 1);
                                goto out;
                        }
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}
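
/*
 * Allocate a single block from a bitmap managed partition, trying the
 * @goal block first, then the rest of its block group, then the remaining
 * groups.  Returns the new block number, or 0 with *err set on failure.
 */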
static int udf_bitmap_new_block(struct super_block *sb,
                                struct inode *inode,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0, block, block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        int newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit((char *)bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:
        /*
         * Check quota for allocation of this block.
         */
        if (inode) {
                int ret = dquot_alloc_block(inode, 1);

                if (ret) {
                        mutex_unlock(&sbi->s_alloc_mutex);
                        *err = ret;
                        return 0;
                }
        }

        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        udf_add_free_space(sb, partition, -1);
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}
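
/*
 * Free @count blocks in a partition whose free space is tracked by an
 * unallocated space table: merge the freed range into an adjacent extent
 * when possible, otherwise append a new extent, stealing one block from
 * the freed range for a new allocation extent descriptor if the current
 * one is full.
 */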
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *inode,
                                  struct inode *table,
                                  struct kernel_lb_addr *bloc,
                                  uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *partmap;
        uint32_t start, end;
        uint32_t elen;
        struct kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
                          count, partmap->s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        /* We do this up front - There are some error conditions that
           could occur, but.. oh well */
        if (inode)
                dquot_free_block(inode, count);
        udf_add_free_space(sb, sbi->s_partition, count);

        start = bloc->logicalBlockNum + offset;
        end = bloc->logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                      (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                            (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                            (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;
                struct short_ad *sad = NULL;
                struct long_ad *lad = NULL;
                struct allocExtDesc *aed;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(struct short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(struct long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        unsigned char *sptr, *dptr;
                        int loffset;

                        brelse(oepos.bh);
                        oepos = epos;

                        /* Steal a block from the extent being freed */
                        epos.block.logicalBlockNum = eloc.logicalBlockNum;
                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;

                        epos.bh = udf_tread(sb,
                                        udf_get_lb_pblock(sb, &epos.block, 0));
                        if (!epos.bh) {
                                brelse(oepos.bh);
                                goto error_return;
                        }
                        aed = (struct allocExtDesc *)(epos.bh->b_data);
                        aed->previousAllocExtLocation =
                                cpu_to_le32(oepos.block.logicalBlockNum);
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
                                sptr = iinfo->i_ext.i_data + epos.offset
                                                                - adsize;
                                dptr = epos.bh->b_data +
                                        sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) +
                                                adsize;
                        } else {
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
                                if (oepos.bh) {
                                        sptr = oepos.bh->b_data + epos.offset;
                                        aed = (struct allocExtDesc *)
                                                oepos.bh->b_data;
                                        le32_add_cpu(&aed->lengthAllocDescs,
                                                     adsize);
                                } else {
                                        sptr = iinfo->i_ext.i_data +
                                                        epos.offset;
                                        iinfo->i_lenAlloc += adsize;
                                        mark_inode_dirty(table);
                                }
                                epos.offset = sizeof(struct allocExtDesc);
                        }
                        if (sbi->s_udfrev >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            3, 1, epos.block.logicalBlockNum,
                                            sizeof(struct tag));
                        else
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            2, 1, epos.block.logicalBlockNum,
                                            sizeof(struct tag));

                        switch (iinfo->i_alloc_type) {
                        case ICBTAG_FLAG_AD_SHORT:
                                sad = (struct short_ad *)sptr;
                                sad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                sad->extPosition =
                                        cpu_to_le32(epos.block.logicalBlockNum);
                                break;
                        case ICBTAG_FLAG_AD_LONG:
                                lad = (struct long_ad *)sptr;
                                lad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                lad->extLocation =
                                        cpu_to_lelb(epos.block);
                                break;
                        }
                        if (oepos.bh) {
                                udf_update_tag(oepos.bh->b_data, loffset);
                                mark_buffer_dirty(oepos.bh);
                        } else {
                                mark_inode_dirty(table);
                        }
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen) {
                        udf_write_aext(table, &epos, &eloc, elen, 1);

                        if (!epos.bh) {
                                iinfo->i_lenAlloc += adsize;
                                mark_inode_dirty(table);
                        } else {
                                aed = (struct allocExtDesc *)epos.bh->b_data;
                                le32_add_cpu(&aed->lengthAllocDescs, adsize);
                                udf_update_tag(epos.bh->b_data, epos.offset);
                                mark_buffer_dirty(epos.bh);
                        }
                }
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}
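
/*
 * Preallocate up to @block_count blocks starting at @first_block from a
 * table managed partition.  Only an extent that begins exactly at
 * @first_block is used; that extent is shrunk (or deleted when fully
 * consumed) and the number of blocks obtained is returned.
 */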
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *inode,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        struct kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%d, elen=%d, first_block=%d\n",
                          eloc.logicalBlockNum, elen, first_block);
                ; /* empty loop body */
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (inode && dquot_prealloc_block(inode,
                        alloc_count > block_count ? block_count : alloc_count))
                        alloc_count = 0;
                else if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, &eloc,
                                       (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos, eloc,
                                        (etype << 30) | elen);
        }

        brelse(epos.bh);

        if (alloc_count)
                udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}
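
/*
 * Allocate a single block from a table managed partition, picking the free
 * extent closest to @goal and taking the first block of that extent.
 * Returns the new block number, or 0 with *err set on failure.
 */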
static int udf_table_new_block(struct super_block *sb,
                               struct inode *inode,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        uint32_t newblock = 0, adsize;
        uint32_t elen, goal_elen = 0;
        struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going till we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                   (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                          (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;
        if (inode) {
                *err = dquot_alloc_block(inode, 1);
                if (*err) {
                        brelse(goal_epos.bh);
                        mutex_unlock(&sbi->s_alloc_mutex);
                        return 0;
                }
        }

        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
        brelse(goal_epos.bh);

        udf_add_free_space(sb, partition, -1);

        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}
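
/*
 * Free blocks on any partition type: dispatch to the bitmap or table
 * variant depending on how the partition tracks its unallocated or freed
 * space.
 */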
void udf_free_blocks(struct super_block *sb, struct inode *inode,
                     struct kernel_lb_addr *bloc, uint32_t offset,
                     uint32_t count)
{
        uint16_t partition = bloc->partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
                                      bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
                                      bloc, offset, count);
        }
}
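
/*
 * Preallocate a contiguous run of blocks: dispatch to the bitmap or table
 * variant for the partition and return the number of blocks obtained
 * (0 if none).
 */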
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_uspace.s_bitmap,
                                                  partition, first_block,
                                                  block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_uspace.s_table,
                                                 partition, first_block,
                                                 block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_fspace.s_bitmap,
                                                  partition, first_block,
                                                  block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_fspace.s_table,
                                                 partition, first_block,
                                                 block_count);
        else
                return 0;
}
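
/*
 * Allocate a single block near @goal: dispatch to the bitmap or table
 * variant for the partition and return the block number, with *err set on
 * failure.
 */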
inline int udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_uspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_uspace.s_table,
                                           partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_fspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_fspace.s_table,
                                           partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
}