xfs_db: improve number extraction in getbitval
author		Darrick J. Wong <djwong@kernel.org>
		Mon, 15 Apr 2024 23:07:45 +0000 (16:07 -0700)
committer	Darrick J. Wong <djwong@kernel.org>
		Wed, 17 Apr 2024 21:06:27 +0000 (14:06 -0700)
For some reason, getbitval insists upon collecting a u64 directly from a
pointer only if that pointer is aligned to a 16-byte boundary.  If it is
not, the function resorts to scraping the bits out individually.  I don't
know of any platform where we require 16-byte alignment for an 8-byte
access, or why we'd care now that we have things like get_unaligned_beXX.
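
Not part of this patch, but for context: an unaligned-access helper of the
get_unaligned_beXX sort is usually just a memcpy into a local variable
followed by a byte swap, which the compiler collapses into a plain load on
architectures that tolerate unaligned accesses.  A minimal sketch, assuming
glibc's be64toh from <endian.h>; the helper name is made up for illustration:

	#include <endian.h>
	#include <stdint.h>
	#include <string.h>

	/*
	 * Read a big-endian 64-bit value from a pointer with no alignment
	 * guarantee; memcpy sidesteps any alignment requirement on p.
	 */
	static inline uint64_t example_get_unaligned_be64(const void *p)
	{
		uint64_t	v;

		memcpy(&v, p, sizeof(v));
		return be64toh(v);
	}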

Rework this function to detect naturally aligned accesses and use the
regular beXX_to_cpu functions, or byte-aligned accesses and use the
get_unaligned_beXX functions.  Only fall back to the bit-scraping
algorithm for the really weird cases.
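
A minimal sketch of that dispatch for the 64-bit case, reusing the
be64_to_cpu and get_unaligned_be64 helpers already available to db/bit.c;
the wrapper name is illustrative and not code from this patch:

	/* Pick the cheapest safe accessor for a 64-bit big-endian field. */
	static inline uint64_t read_be64_field(const void *p)
	{
		if (((uintptr_t)p & (sizeof(uint64_t) - 1)) == 0)
			return be64_to_cpu(*(const __be64 *)p);	/* naturally aligned */
		return get_unaligned_be64(p);	/* merely byte aligned */
	}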

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
db/bit.c

index c9bfd2eb025f3c2ac3b7fc199e5d4895d2c55a35..1b9ca054f3b1e6357c6e1e19211c3c14157aae3d 100644 (file)
--- a/db/bit.c
+++ b/db/bit.c
@@ -55,39 +55,34 @@ getbitval(
        char            *p;
        int64_t         rval;
        int             signext;
-       int             z1, z2, z3, z4;
 
        ASSERT(nbits<=64);
 
        p = (char *)obj + byteize(bitoff);
        bit = bitoffs(bitoff);
        signext = (flags & BVSIGNED) != 0;
-       z4 = ((intptr_t)p & 0xf) == 0 && bit == 0;
-       if (nbits == 64 && z4)
-               return be64_to_cpu(*(__be64 *)p);
-       z3 = ((intptr_t)p & 0x7) == 0 && bit == 0;
-       if (nbits == 32 && z3) {
+
+       if (bit != 0)
+               goto scrape_bits;
+
+       switch (nbits) {
+       case 64:
+               return get_unaligned_be64(p);
+       case 32:
                if (signext)
-                       return (__s32)be32_to_cpu(*(__be32 *)p);
-               else
-                       return (__u32)be32_to_cpu(*(__be32 *)p);
-       }
-       z2 = ((intptr_t)p & 0x3) == 0 && bit == 0;
-       if (nbits == 16 && z2) {
+                       return (__s32)get_unaligned_be32(p);
+               return (__u32)get_unaligned_be32(p);
+       case 16:
                if (signext)
-                       return (__s16)be16_to_cpu(*(__be16 *)p);
-               else
-                       return (__u16)be16_to_cpu(*(__be16 *)p);
-       }
-       z1 = ((intptr_t)p & 0x1) == 0 && bit == 0;
-       if (nbits == 8 && z1) {
+                       return (__s16)get_unaligned_be16(p);
+               return (__u16)get_unaligned_be16(p);
+       case 8:
                if (signext)
                        return *(__s8 *)p;
-               else
-                       return *(__u8 *)p;
+               return *(__u8 *)p;
        }
 
-
+scrape_bits:
        for (i = 0, rval = 0LL; i < nbits; i++) {
                if (getbit_l(p, bit + i)) {
                        /* If the last bit is on and we care about sign