udf: remove some ugly macros
remove macros:
- UDF_SB_PARTMAPS
- UDF_SB_PARTTYPE
- UDF_SB_PARTROOT
- UDF_SB_PARTLEN
- UDF_SB_PARTVSN
- UDF_SB_PARTNUM
- UDF_SB_TYPESPAR
- UDF_SB_TYPEVIRT
- UDF_SB_PARTFUNC
- UDF_SB_PARTFLAGS
- UDF_SB_VOLIDENT
- UDF_SB_NUMPARTS
- UDF_SB_PARTITION
- UDF_SB_SESSION
- UDF_SB_ANCHOR
- UDF_SB_LASTBLOCK
- UDF_SB_LVIDBH
- UDF_SB_LVID
- UDF_SB_UMASK
- UDF_SB_GID
- UDF_SB_UID
- UDF_SB_RECORDTIME
- UDF_SB_SERIALNUM
- UDF_SB_UDFREV
- UDF_SB_FLAGS
- UDF_SB_VAT
- UDF_UPDATE_UDFREV
- UDF_SB_FREE
and open code them
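
For example, the per-partition length check goes from the macro form to the
equivalent direct access through the UDF private superblock data (sbi here is
UDF_SB(sb)), mirroring the fs/udf/balloc.c hunks below:

        /* before: the value is hidden behind a macro */
        if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
                goal = 0;

        /* after: the same check, open coded via struct udf_sb_info */
        if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;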

convert UDF_SB_LVIDIU macro to udf_sb_lvidiu function
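
The new helper itself is not in the hunks shown below (presumably it lands in
fs/udf/udf_sb.h). Based on the old macro and the udf_sb_lvidiu(sbi) call sites
visible in the ialloc.c hunk, it is expected to look roughly like this sketch;
the actual patch may differ in detail:

        static inline struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
        {
                struct logicalVolIntegrityDesc *lvid =
                        (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                __u32 npart = le32_to_cpu(lvid->numOfPartitions);
                /* the impUse area follows the free-space and size tables,
                 * one __le32 entry each per partition */
                __u32 offset = npart * 2 * sizeof(uint32_t);

                return (struct logicalVolIntegrityDescImpUse *)&lvid->impUse[offset];
        }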

rename some struct udf_sb_info fields:
- s_volident to s_volume_ident
- s_lastblock to s_last_block
- s_lvidbh to s_lvid_bh
- s_recordtime to s_record_time
- s_serialnum to s_serial_number
- s_vat to s_vat_inode
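
The struct definition lives in fs/udf/udf_sb.h, which is not among the hunks
shown below; purely to illustrate the new names, the renamed members
presumably end up reading along these lines (types approximate):

        struct udf_sb_info {
                /* ... other members unchanged ... */
                char                    s_volume_ident[32];     /* was s_volident */
                __u32                   s_last_block;           /* was s_lastblock */
                struct buffer_head      *s_lvid_bh;             /* was s_lvidbh */
                time_t                  s_record_time;          /* was s_recordtime */
                __u16                   s_serial_number;        /* was s_serialnum */
                struct inode            *s_vat_inode;           /* was s_vat */
                /* ... */
        };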

Signed-off-by: Marcin Slusarz <[email protected]>
Cc: Ben Fennema <[email protected]>
Cc: Jan Kara <[email protected]>
Acked-by: Christoph Hellwig <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
mslusarz authored and Linus Torvalds committed Feb 8, 2008
1 parent 3a71fc5 commit 6c79e98
Showing 11 changed files with 509 additions and 442 deletions.
136 changes: 74 additions & 62 deletions fs/udf/balloc.c
@@ -88,7 +88,7 @@ static int read_block_bitmap(struct super_block *sb,
kernel_lb_addr loc;

loc.logicalBlockNum = bitmap->s_extPosition;
loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
if (!bh) {
@@ -155,10 +155,10 @@ static void udf_bitmap_free_blocks(struct super_block *sb,

mutex_lock(&sbi->s_alloc_mutex);
if (bloc.logicalBlockNum < 0 ||
(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
(bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
udf_debug("%d < %d || %d + %d > %d\n",
bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
goto error_return;
}

@@ -188,9 +188,10 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
} else {
if (inode)
DQUOT_FREE_BLOCK(inode, 1);
if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
lvid->freeSpaceTable[sbi->s_partition] =
cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + 1);
}
}
}
@@ -202,8 +203,8 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
}
error_return:
sb->s_dirt = 1;
if (UDF_SB_LVIDBH(sb))
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
if (sbi->s_lvid_bh)
mark_buffer_dirty(sbi->s_lvid_bh);
mutex_unlock(&sbi->s_alloc_mutex);
return;
}
@@ -219,16 +220,18 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
int bit, block, block_group, group_start;
int nr_groups, bitmap_nr;
struct buffer_head *bh;
__u32 part_len;

mutex_lock(&sbi->s_alloc_mutex);
if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
part_len = sbi->s_partmaps[partition].s_partition_len;
if (first_block < 0 || first_block >= part_len)
goto out;

if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
block_count = UDF_SB_PARTLEN(sb, partition) - first_block;
if (first_block + block_count > part_len)
block_count = part_len - first_block;

repeat:
nr_groups = (UDF_SB_PARTLEN(sb, partition) +
nr_groups = (sbi->s_partmaps[partition].s_partition_len +
(sizeof(struct spaceBitmapDesc) << 3) +
(sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
@@ -261,10 +264,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
if (block_count > 0)
goto repeat;
out:
if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[partition] =
cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
lvid->freeSpaceTable[partition] =
cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
mark_buffer_dirty(sbi->s_lvid_bh);
}
sb->s_dirt = 1;
mutex_unlock(&sbi->s_alloc_mutex);
Expand All @@ -287,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
mutex_lock(&sbi->s_alloc_mutex);

repeat:
if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
goal = 0;

nr_groups = bitmap->s_nr_groups;
@@ -389,10 +393,11 @@ static int udf_bitmap_new_block(struct super_block *sb,

mark_buffer_dirty(bh);

if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[partition] =
cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
lvid->freeSpaceTable[partition] =
cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
mark_buffer_dirty(sbi->s_lvid_bh);
}
sb->s_dirt = 1;
mutex_unlock(&sbi->s_alloc_mutex);
@@ -421,21 +426,22 @@ static void udf_table_free_blocks(struct super_block *sb,

mutex_lock(&sbi->s_alloc_mutex);
if (bloc.logicalBlockNum < 0 ||
(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
(bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
udf_debug("%d < %d || %d + %d > %d\n",
bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
goto error_return;
}

/* We do this up front - There are some error conditions that could occur,
but.. oh well */
if (inode)
DQUOT_FREE_BLOCK(inode, count);
if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
lvid->freeSpaceTable[sbi->s_partition] =
cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + count);
mark_buffer_dirty(sbi->s_lvid_bh);
}

start = bloc.logicalBlockNum + offset;
@@ -559,7 +565,7 @@ static void udf_table_free_blocks(struct super_block *sb,
}
epos.offset = sizeof(struct allocExtDesc);
}
if (UDF_SB_UDFREV(sb) >= 0x0200)
if (sbi->s_udfrev >= 0x0200)
udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
epos.block.logicalBlockNum, sizeof(tag));
else
@@ -627,7 +633,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
struct extent_position epos;
int8_t etype = -1;

if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
if (first_block < 0 || first_block >= sbi->s_partmaps[partition].s_partition_len)
return 0;

if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
@@ -670,10 +676,11 @@ static int udf_table_prealloc_blocks(struct super_block *sb,

brelse(epos.bh);

if (alloc_count && UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[partition] =
cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
if (alloc_count && sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
lvid->freeSpaceTable[partition] =
cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
mark_buffer_dirty(sbi->s_lvid_bh);
sb->s_dirt = 1;
}
mutex_unlock(&sbi->s_alloc_mutex);
@@ -703,7 +710,7 @@ static int udf_table_new_block(struct super_block *sb,
return newblock;

mutex_lock(&sbi->s_alloc_mutex);
if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
goal = 0;

/* We search for the closest matching block to goal. If we find an exact hit,
@@ -771,10 +778,11 @@ static int udf_table_new_block(struct super_block *sb,
udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
brelse(goal_epos.bh);

if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[partition] =
cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
lvid->freeSpaceTable[partition] =
cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
mark_buffer_dirty(sbi->s_lvid_bh);
}

sb->s_dirt = 1;
@@ -789,22 +797,23 @@ inline void udf_free_blocks(struct super_block *sb,
uint32_t count)
{
uint16_t partition = bloc.partitionReferenceNum;
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
return udf_bitmap_free_blocks(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
map->s_uspace.s_bitmap,
bloc, offset, count);
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
return udf_table_free_blocks(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
map->s_uspace.s_table,
bloc, offset, count);
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
return udf_bitmap_free_blocks(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
map->s_fspace.s_bitmap,
bloc, offset, count);
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
return udf_table_free_blocks(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
map->s_fspace.s_table,
bloc, offset, count);
} else {
return;
@@ -816,21 +825,23 @@ inline int udf_prealloc_blocks(struct super_block *sb,
uint16_t partition, uint32_t first_block,
uint32_t block_count)
{
if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
return udf_bitmap_prealloc_blocks(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
map->s_uspace.s_bitmap,
partition, first_block, block_count);
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
return udf_table_prealloc_blocks(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
map->s_uspace.s_table,
partition, first_block, block_count);
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
return udf_bitmap_prealloc_blocks(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
map->s_fspace.s_bitmap,
partition, first_block, block_count);
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
return udf_table_prealloc_blocks(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
map->s_fspace.s_table,
partition, first_block, block_count);
} else {
return 0;
@@ -842,23 +853,24 @@ inline int udf_new_block(struct super_block *sb,
uint16_t partition, uint32_t goal, int *err)
{
int ret;
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
ret = udf_bitmap_new_block(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
map->s_uspace.s_bitmap,
partition, goal, err);
return ret;
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
return udf_table_new_block(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
map->s_uspace.s_table,
partition, goal, err);
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
return udf_bitmap_new_block(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
map->s_fspace.s_bitmap,
partition, goal, err);
} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
return udf_table_new_block(sb, inode,
UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
map->s_fspace.s_table,
partition, goal, err);
} else {
*err = -EIO;
2 changes: 1 addition & 1 deletion fs/udf/file.c
@@ -192,7 +192,7 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
switch (cmd) {
case UDF_GETVOLIDENT:
return copy_to_user((char __user *)arg,
UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0;
UDF_SB(inode->i_sb)->s_volume_ident, 32) ? -EFAULT : 0;
case UDF_RELOCATE_BLOCKS:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
33 changes: 19 additions & 14 deletions fs/udf/ialloc.c
@@ -43,15 +43,17 @@ void udf_free_inode(struct inode *inode)
clear_inode(inode);

mutex_lock(&sbi->s_alloc_mutex);
if (sbi->s_lvidbh) {
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDescImpUse *lvidiu =
udf_sb_lvidiu(sbi);
if (S_ISDIR(inode->i_mode))
UDF_SB_LVIDIU(sb)->numDirs =
cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1);
lvidiu->numDirs =
cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1);
else
UDF_SB_LVIDIU(sb)->numFiles =
cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1);
lvidiu->numFiles =
cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1);

mark_buffer_dirty(sbi->s_lvidbh);
mark_buffer_dirty(sbi->s_lvid_bh);
}
mutex_unlock(&sbi->s_alloc_mutex);

@@ -88,21 +90,23 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
}

mutex_lock(&sbi->s_alloc_mutex);
if (UDF_SB_LVIDBH(sb)) {
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sbi);
struct logicalVolHeaderDesc *lvhd;
uint64_t uniqueID;
lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
lvhd = (struct logicalVolHeaderDesc *)(lvid->logicalVolContentsUse);
if (S_ISDIR(mode))
UDF_SB_LVIDIU(sb)->numDirs =
cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1);
lvidiu->numDirs =
cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1);
else
UDF_SB_LVIDIU(sb)->numFiles =
cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1);
lvidiu->numFiles =
cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1);
UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
if (!(++uniqueID & 0x00000000FFFFFFFFUL))
uniqueID += 16;
lvhd->uniqueID = cpu_to_le64(uniqueID);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
mark_buffer_dirty(sbi->s_lvid_bh);
}
inode->i_mode = mode;
inode->i_uid = current->fsuid;
@@ -123,7 +127,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
UDF_I_USE(inode) = 0;
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
UDF_I_EFE(inode) = 1;
UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
} else {
UDF_I_EFE(inode) = 0;