author		David Disseldorp <ddiss@suse.de>	2022-04-13 18:44:19 +0200
committer	Eryu Guan <guaneryu@gmail.com>		2022-04-17 20:08:04 +0800
commit		73aa648ce176f159f41538765be3392d3941d8e8 (patch)
tree		79ef3a52aa89abdad77d44831f229b1d47f21904
parent		c4cefb1c5ecd92f16fb14d82e8b6542e5fdbb535 (diff)
generic/020: move MAX_ATTRS and MAX_ATTRVAL_SIZE logic
No functional change. MAX_ATTRS and MAX_ATTRVAL_SIZE are only used within
generic/020, so move the logic for determining these values over there.

Signed-off-by: David Disseldorp <ddiss@suse.de>
Reviewed-by: Eryu Guan <guaneryu@gmail.com>
Signed-off-by: Eryu Guan <guaneryu@gmail.com>
-rw-r--r--	common/attr		75
-rwxr-xr-x	tests/generic/020	76
2 files changed, 76 insertions, 75 deletions
diff --git a/common/attr b/common/attr
index dae8a1bb08..cce4d1b201 100644
--- a/common/attr
+++ b/common/attr
@@ -264,80 +264,5 @@ _getfattr()
return ${PIPESTATUS[0]}
}
-# set maximum total attr space based on fs type
-case "$FSTYP" in
-xfs|udf|pvfs2|9p|ceph|nfs)
- MAX_ATTRS=1000
- ;;
-ext2|ext3|ext4)
- # For 4k blocksizes, most of the attributes have an attr_name of
- # "attribute_NN" which is 12, and "value_NN" which is 8.
- # But for larger block sizes, we start having extended attributes of the
- # form "attribute_NNN" or "attribute_NNNN", and "value_NNN" and
- # "value_NNNN", which causes the round(len(..), 4) to jump up by 4
- # bytes. So round_up(len(attr_name, 4)) becomes 16 instead of 12, and
- # round_up(len(value, 4)) becomes 12 instead of 8.
- #
- # For 64K blocksize the calculation becomes
- # max_attrs = (block_size - 32) / (16 + 12 + 16)
- # or
- # max_attrs = (block_size - 32) / 44
- #
- # For 4K blocksize:-
- # max_attrs = (block_size - 32) / (16 + 8 + 12)
- # or
- # max_attrs = (block_size - 32) / 36
- #
- # Note (for 4K bs) above are exact calculations for attrs of type
- # attribute_NN with values of type value_NN.
- # With above calculations, for 4k blocksize max_attrs becomes 112.
- # This means we can have few attrs of type attribute_NNN with values of
- # type value_NNN. To avoid/handle this we need to add extra 4 bytes of
- # headroom.
- #
- # So for 4K, the calculations becomes:-
- # max_attrs = (block_size - 32) / (16 + 8 + 12 + 4)
- # or
- # max_attrs = (block_size - 32) / 40
- #
- # Assume max ~1 block of attrs
- BLOCK_SIZE=`_get_block_size $TEST_DIR`
- if [ $BLOCK_SIZE -le 4096 ]; then
- let MAX_ATTRS=$((($BLOCK_SIZE - 32) / (16 + 8 + 12 + 4)))
- else
- let MAX_ATTRS=$((($BLOCK_SIZE - 32) / (16 + 12 + 16 )))
- fi
- ;;
-*)
- # Assume max ~1 block of attrs
- BLOCK_SIZE=`_get_block_size $TEST_DIR`
- # user.attribute_XXX="value.XXX" is about 32 bytes; leave some overhead
- let MAX_ATTRS=$BLOCK_SIZE/40
-esac
-
-export MAX_ATTRS
-
-# Set max attr value size based on fs type
-case "$FSTYP" in
-xfs|udf|btrfs)
- MAX_ATTRVAL_SIZE=64
- ;;
-pvfs2)
- MAX_ATTRVAL_SIZE=8192
- ;;
-9p|ceph|nfs)
- MAX_ATTRVAL_SIZE=65536
- ;;
-bcachefs)
- MAX_ATTRVAL_SIZE=1024
- ;;
-*)
- # Assume max ~1 block of attrs
- BLOCK_SIZE=`_get_block_size $TEST_DIR`
- # leave a little overhead
- let MAX_ATTRVAL_SIZE=$BLOCK_SIZE-256
-esac
-
-export MAX_ATTRVAL_SIZE
# make sure this script returns success
/bin/true
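
The ext2/ext3/ext4 branch above is plain integer arithmetic. As a rough standalone illustration (not part of the patch, evaluated for the two assumed block sizes 4096 and 65536):

# Illustration only (not from the patch): the MAX_ATTRS formulas from the
# ext2/3/4 branch above, evaluated for two assumed block sizes.
for bs in 4096 65536; do
	if [ "$bs" -le 4096 ]; then
		# (block_size - 32) / 40, i.e. 16 + 8 + 12 + 4 bytes per attr
		echo "block size $bs -> MAX_ATTRS $(( (bs - 32) / 40 ))"	# prints 101
	else
		# (block_size - 32) / 44, i.e. 16 + 12 + 16 bytes per attr
		echo "block size $bs -> MAX_ATTRS $(( (bs - 32) / 44 ))"	# prints 1488
	fi
done
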
diff --git a/tests/generic/020 b/tests/generic/020
index 29ef853c87..c2c285f60d 100755
--- a/tests/generic/020
+++ b/tests/generic/020
@@ -51,6 +51,82 @@ _attr_list()
fi
}
+# set maximum total attr space based on fs type
+case "$FSTYP" in
+xfs|udf|pvfs2|9p|ceph|nfs)
+ MAX_ATTRS=1000
+ ;;
+ext2|ext3|ext4)
+ # For 4k blocksizes, most of the attributes have an attr_name of
+ # "attribute_NN" which is 12, and "value_NN" which is 8.
+ # But for larger block sizes, we start having extended attributes of the
+ # form "attribute_NNN" or "attribute_NNNN", and "value_NNN" and
+ # "value_NNNN", which causes the round(len(..), 4) to jump up by 4
+ # bytes. So round_up(len(attr_name, 4)) becomes 16 instead of 12, and
+ # round_up(len(value, 4)) becomes 12 instead of 8.
+ #
+ # For 64K blocksize the calculation becomes
+ # max_attrs = (block_size - 32) / (16 + 12 + 16)
+ # or
+ # max_attrs = (block_size - 32) / 44
+ #
+ # For 4K blocksize:-
+ # max_attrs = (block_size - 32) / (16 + 8 + 12)
+ # or
+ # max_attrs = (block_size - 32) / 36
+ #
+ # Note (for 4K bs) above are exact calculations for attrs of type
+ # attribute_NN with values of type value_NN.
+ # With above calculations, for 4k blocksize max_attrs becomes 112.
+ # This means we can have few attrs of type attribute_NNN with values of
+ # type value_NNN. To avoid/handle this we need to add extra 4 bytes of
+ # headroom.
+ #
+ # So for 4K, the calculations becomes:-
+ # max_attrs = (block_size - 32) / (16 + 8 + 12 + 4)
+ # or
+ # max_attrs = (block_size - 32) / 40
+ #
+ # Assume max ~1 block of attrs
+ BLOCK_SIZE=`_get_block_size $TEST_DIR`
+ if [ $BLOCK_SIZE -le 4096 ]; then
+ let MAX_ATTRS=$((($BLOCK_SIZE - 32) / (16 + 8 + 12 + 4)))
+ else
+ let MAX_ATTRS=$((($BLOCK_SIZE - 32) / (16 + 12 + 16 )))
+ fi
+ ;;
+*)
+ # Assume max ~1 block of attrs
+ BLOCK_SIZE=`_get_block_size $TEST_DIR`
+ # user.attribute_XXX="value.XXX" is about 32 bytes; leave some overhead
+ let MAX_ATTRS=$BLOCK_SIZE/40
+esac
+
+export MAX_ATTRS
+
+# Set max attr value size based on fs type
+case "$FSTYP" in
+xfs|udf|btrfs)
+ MAX_ATTRVAL_SIZE=64
+ ;;
+pvfs2)
+ MAX_ATTRVAL_SIZE=8192
+ ;;
+9p|ceph|nfs)
+ MAX_ATTRVAL_SIZE=65536
+ ;;
+bcachefs)
+ MAX_ATTRVAL_SIZE=1024
+ ;;
+*)
+ # Assume max ~1 block of attrs
+ BLOCK_SIZE=`_get_block_size $TEST_DIR`
+ # leave a little overhead
+ let MAX_ATTRVAL_SIZE=$BLOCK_SIZE-256
+esac
+
+export MAX_ATTRVAL_SIZE
+
# real QA test starts here
_supported_fs generic
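
Once exported, the two limits are consumed by the body of generic/020, which lies outside this hunk. A minimal hypothetical sketch of how they might be exercised, assuming the setfattr/getfattr tools and a scratch file under $TEST_DIR (names are illustrative, not from the test):

# Hypothetical usage sketch only -- not taken from generic/020 itself.
testfile=$TEST_DIR/attr_limit_demo.$$		# assumed scratch file name
touch "$testfile"

# create MAX_ATTRS small attributes
for i in $(seq 1 "$MAX_ATTRS"); do
	setfattr -n "user.attribute_$i" -v "value_$i" "$testfile" || break
done

# store one value of the maximum supported size
bigval=$(perl -e "print 'v' x $MAX_ATTRVAL_SIZE")
setfattr -n user.bigattr -v "$bigval" "$testfile"

# list everything back; expect MAX_ATTRS + 1 user.* attributes
getfattr -d "$testfile" 2>/dev/null | grep -c '^user\.'
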