Btrfs: unaligned access fixes
Btrfs set/get macros lose type information needed to avoid unaligned accesses on sparc64. Here is a patch for the kernel bits which fixes most of the unaligned accesses on sparc64. btrfs_name_hash is modified to return the hash value instead of storing the result via a (potentially unaligned) pointer. Signed-off-by: Chris Mason <chris.mason@oracle.com>
This commit is contained in:
parent
39b5637f6f
commit
df68b8a7ad
@ -495,22 +495,17 @@ void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
|
||||
/*
 * Generate typed accessors for fields of the extent-buffer header.
 *
 * The page is mapped as a pointer to the real struct type (rather than
 * a raw char * plus offsetof), so the compiler retains the member's
 * type/alignment information and emits correctly aligned loads/stores
 * on strict-alignment architectures such as sparc64.
 *
 * btrfs_##name(eb)          - read member, converting from little-endian.
 * btrfs_set_##name(eb, val) - write member, converting to little-endian.
 */
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits)		\
static inline u##bits btrfs_##name(struct extent_buffer *eb)		\
{									\
	/* header always lives at the start of the first page */	\
	type *p = kmap_atomic(eb->first_page, KM_USER0);		\
	u##bits res = le##bits##_to_cpu(p->member);			\
	kunmap_atomic(p, KM_USER0);					\
	return res;							\
}									\
static inline void btrfs_set_##name(struct extent_buffer *eb,		\
				    u##bits val)			\
{									\
	type *p = kmap_atomic(eb->first_page, KM_USER0);		\
	p->member = cpu_to_le##bits(val);				\
	kunmap_atomic(p, KM_USER0);					\
}
|
||||
|
||||
#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
|
||||
|
@ -71,8 +71,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
|
||||
|
||||
key.objectid = dir;
|
||||
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
|
||||
ret = btrfs_name_hash(name, name_len, &key.offset);
|
||||
BUG_ON(ret);
|
||||
key.offset = btrfs_name_hash(name, name_len);
|
||||
path = btrfs_alloc_path();
|
||||
if (!path)
|
||||
return -ENOMEM;
|
||||
@ -125,8 +124,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
|
||||
|
||||
key.objectid = dir;
|
||||
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
|
||||
ret = btrfs_name_hash(name, name_len, &key.offset);
|
||||
BUG_ON(ret);
|
||||
key.offset = btrfs_name_hash(name, name_len);
|
||||
path = btrfs_alloc_path();
|
||||
data_size = sizeof(*dir_item) + name_len;
|
||||
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
|
||||
@ -199,8 +197,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
|
||||
key.objectid = dir;
|
||||
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
|
||||
|
||||
ret = btrfs_name_hash(name, name_len, &key.offset);
|
||||
BUG_ON(ret);
|
||||
key.offset = btrfs_name_hash(name, name_len);
|
||||
|
||||
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
|
||||
if (ret < 0)
|
||||
@ -261,8 +258,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
|
||||
|
||||
key.objectid = dir;
|
||||
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
|
||||
ret = btrfs_name_hash(name, name_len, &key.offset);
|
||||
BUG_ON(ret);
|
||||
key.offset = btrfs_name_hash(name, name_len);
|
||||
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
|
||||
if (ret < 0)
|
||||
return ERR_PTR(ret);
|
||||
|
@ -76,19 +76,18 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
|
||||
*buf++ = pad;
|
||||
}
|
||||
|
||||
int btrfs_name_hash(const char *name, int len, u64 *hash_result)
|
||||
u64 btrfs_name_hash(const char *name, int len)
|
||||
{
|
||||
__u32 hash;
|
||||
__u32 minor_hash = 0;
|
||||
const char *p;
|
||||
__u32 in[8], buf[2];
|
||||
u64 hash_result;
|
||||
|
||||
if (len == 1 && *name == '.') {
|
||||
*hash_result = 1;
|
||||
return 0;
|
||||
return 1;
|
||||
} else if (len == 2 && name[0] == '.' && name[1] == '.') {
|
||||
*hash_result = 2;
|
||||
return 0;
|
||||
return 2;
|
||||
}
|
||||
|
||||
/* Initialize the default seed for the hash checksum functions */
|
||||
@ -106,8 +105,8 @@ int btrfs_name_hash(const char *name, int len, u64 *hash_result)
|
||||
}
|
||||
hash = buf[0];
|
||||
minor_hash = buf[1];
|
||||
*hash_result = buf[0];
|
||||
*hash_result <<= 32;
|
||||
*hash_result |= buf[1];
|
||||
return 0;
|
||||
hash_result = buf[0];
|
||||
hash_result <<= 32;
|
||||
hash_result |= buf[1];
|
||||
return hash_result;
|
||||
}
|
||||
|
@ -18,5 +18,5 @@
|
||||
|
||||
#ifndef __HASH__
|
||||
#define __HASH__
|
||||
int btrfs_name_hash(const char *name, int len, u64 *hash_result);
|
||||
u64 btrfs_name_hash(const char *name, int len);
|
||||
#endif
|
||||
|
@ -21,16 +21,15 @@
|
||||
u##bits btrfs_##name(struct extent_buffer *eb, \
|
||||
type *s) \
|
||||
{ \
|
||||
unsigned long offset = (unsigned long)s + \
|
||||
offsetof(type, member); \
|
||||
__le##bits *tmp; \
|
||||
unsigned long part_offset = (unsigned long)s; \
|
||||
unsigned long offset = part_offset + offsetof(type, member); \
|
||||
type *p; \
|
||||
/* ugly, but we want the fast path here */ \
|
||||
if (eb->map_token && offset >= eb->map_start && \
|
||||
offset + sizeof(((type *)0)->member) <= eb->map_start + \
|
||||
eb->map_len) { \
|
||||
tmp = (__le##bits *)(eb->kaddr + offset - \
|
||||
eb->map_start); \
|
||||
return le##bits##_to_cpu(*tmp); \
|
||||
p = (type *)(eb->kaddr + part_offset - eb->map_start); \
|
||||
return le##bits##_to_cpu(p->member); \
|
||||
} \
|
||||
{ \
|
||||
int err; \
|
||||
@ -48,8 +47,8 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
|
||||
read_eb_member(eb, s, type, member, &res); \
|
||||
return le##bits##_to_cpu(res); \
|
||||
} \
|
||||
tmp = (__le##bits *)(kaddr + offset - map_start); \
|
||||
res = le##bits##_to_cpu(*tmp); \
|
||||
p = (type *)(kaddr + part_offset - map_start); \
|
||||
res = le##bits##_to_cpu(p->member); \
|
||||
if (unmap_on_exit) \
|
||||
unmap_extent_buffer(eb, map_token, KM_USER1); \
|
||||
return res; \
|
||||
@ -58,16 +57,15 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
|
||||
void btrfs_set_##name(struct extent_buffer *eb, \
|
||||
type *s, u##bits val) \
|
||||
{ \
|
||||
unsigned long offset = (unsigned long)s + \
|
||||
offsetof(type, member); \
|
||||
__le##bits *tmp; \
|
||||
unsigned long part_offset = (unsigned long)s; \
|
||||
unsigned long offset = part_offset + offsetof(type, member); \
|
||||
type *p; \
|
||||
/* ugly, but we want the fast path here */ \
|
||||
if (eb->map_token && offset >= eb->map_start && \
|
||||
offset + sizeof(((type *)0)->member) <= eb->map_start + \
|
||||
eb->map_len) { \
|
||||
tmp = (__le##bits *)(eb->kaddr + offset - \
|
||||
eb->map_start); \
|
||||
*tmp = cpu_to_le##bits(val); \
|
||||
p = (type *)(eb->kaddr + part_offset - eb->map_start); \
|
||||
p->member = cpu_to_le##bits(val); \
|
||||
return; \
|
||||
} \
|
||||
{ \
|
||||
@ -86,8 +84,8 @@ void btrfs_set_##name(struct extent_buffer *eb, \
|
||||
write_eb_member(eb, s, type, member, &val); \
|
||||
return; \
|
||||
} \
|
||||
tmp = (__le##bits *)(kaddr + offset - map_start); \
|
||||
*tmp = cpu_to_le##bits(val); \
|
||||
p = (type *)(kaddr + part_offset - map_start); \
|
||||
p->member = cpu_to_le##bits(val); \
|
||||
if (unmap_on_exit) \
|
||||
unmap_extent_buffer(eb, map_token, KM_USER1); \
|
||||
} \
|
||||
|
Loading…
Reference in New Issue
Block a user