Squashfs: fix block size use in LZO decompressor

Sizing the buffer using the block size alone is incorrect, leading
to a potential buffer over-run on 4K block size file systems
(because the metadata block size is always 8K).  Srclength is
set to the maximum expected size of the decompressed block: it is
either block_size or 8K, depending on whether a data or a metadata
block is being decompressed.

Signed-off-by: Phillip Lougher <phillip@lougher.demon.co.uk>
Phillip Lougher 2010-08-05 04:51:50 +01:00
parent 79cb8ced7e
commit f3065f60dd
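
To make the sizing concrete, here is a small stand-alone sketch (user-space
C, not kernel code) of the arithmetic the patch corrects.  The value of
SQUASHFS_METADATA_SIZE and the 4K block size come from the commit message;
everything else is illustrative.

#include <stdio.h>

#define SQUASHFS_METADATA_SIZE	8192	/* metadata blocks are always 8K */

int main(void)
{
	int block_size = 4096;		/* a 4K block size file system */
	int old_size = block_size;	/* pre-patch: sized by block size alone */
	int new_size = block_size > SQUASHFS_METADATA_SIZE ?
			block_size : SQUASHFS_METADATA_SIZE;	/* post-patch */

	printf("old buffer: %d bytes, new buffer: %d bytes\n",
	       old_size, new_size);
	/* an 8K metadata block would overrun the 4096-byte pre-patch buffer */
	return 0;
}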

@@ -40,13 +40,15 @@ struct squashfs_lzo {
 static void *lzo_init(struct squashfs_sb_info *msblk)
 {
+	int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
 	struct squashfs_lzo *stream = kzalloc(sizeof(*stream), GFP_KERNEL);
 	if (stream == NULL)
 		goto failed;
-	stream->input = vmalloc(msblk->block_size);
+	stream->input = vmalloc(block_size);
 	if (stream->input == NULL)
 		goto failed;
-	stream->output = vmalloc(msblk->block_size);
+	stream->output = vmalloc(block_size);
 	if (stream->output == NULL)
 		goto failed2;
@@ -80,7 +82,7 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer,
 	struct squashfs_lzo *stream = msblk->stream;
 	void *buff = stream->input;
 	int avail, i, bytes = length, res;
-	size_t out_len = msblk->block_size;
+	size_t out_len = srclength;
 	mutex_lock(&msblk->read_data_mutex);
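
For context, a hedged sketch of how the corrected out_len bounds the
decompression step further down lzo_uncompress(); the lzo1x_decompress_safe()
call and error handling shown here approximate the surrounding kernel code and
are not part of this diff.

	/* out_len starts as srclength, the largest valid decompressed size
	 * for this block (block_size for data, 8K for metadata) */
	res = lzo1x_decompress_safe(stream->input, (size_t)length,
				    stream->output, &out_len);
	if (res != LZO_E_OK)
		goto failed;	/* corrupt or oversized block */
	/* on success, out_len holds the actual decompressed size and can
	 * never exceed srclength, so the output buffer is not over-run */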