[ Original patch from Minchan Kim <minchan@kernel.org> ]
Commit ba6b17d68c ("zram: fix umount-reset_store-mount race condition")
introduced bdev->bd_mutex to protect against a race between mount and
reset. At that time we did not have the dynamic zram add/remove
feature, so that was fine.

However, once dynamic device creation and removal are introduced,
bd_mutex becomes a problem:
	CPU 0
	echo 1 > /sys/block/zram<id>/reset
	  -> kernfs->s_active(A)
	    -> zram: reset_store -> bd_mutex(B)

	CPU 1
	echo <id> > /sys/class/zram/zram-remove
	  -> zram: zram_remove -> bd_mutex(B)
	    -> sysfs_remove_group
	      -> kernfs->s_active(A)

In other words, AB -> BA lock ordering: a deadlock.
The reason we hold bd_mutex in zram_remove is to prevent any incoming
open of /dev/zram[0-9]; otherwise we could remove a zram device that
others already have opened. But that is exactly what causes the
deadlock above.

To fix the problem, this patch overrides the block device's open
callback and returns -EBUSY while zram has claimed the device for
reset. Any incoming open then fails, so we no longer need to hold
bd_mutex in zram_remove.

This patch is preparation for the zram add/remove feature.
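
The core of the change, sketched below for reference; this is a
paraphrase rather than the verbatim hunks from zram_drv.c, and it
assumes ->open is still invoked with bdev->bd_mutex held:

	static int zram_open(struct block_device *bdev, fmode_t mode)
	{
		int ret = 0;
		struct zram *zram;

		WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

		zram = bdev->bd_disk->private_data;
		/* zram was claimed to reset, so the open request fails */
		if (zram->claim)
			ret = -EBUSY;

		return ret;
	}

	/*
	 * reset_store(), roughly: claim the device under bd_mutex, then
	 * reset without holding bd_mutex across the kernfs removal paths.
	 */
	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active or already claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		return -EBUSY;
	}
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	zram_reset_device(zram);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);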
[sergey.senozhatsky@gmail.com: simplify reset_store()]
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#ifndef _ZRAM_DRV_H_
#define _ZRAM_DRV_H_

#include <linux/spinlock.h>
#include <linux/zsmalloc.h>

#include "zcomp.h"

/*-- Configurable parameters */

/*
 * Pages that compress to size greater than this are stored
 * uncompressed in memory.
 */
static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;

/*
 * NOTE: max_zpage_size must be less than or equal to
 * ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
 * always return failure.
 */

/*-- End of configurable params */

#define SECTOR_SHIFT		9
#define SECTORS_PER_PAGE_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE	(1 << SECTORS_PER_PAGE_SHIFT)
#define ZRAM_LOGICAL_BLOCK_SHIFT 12
#define ZRAM_LOGICAL_BLOCK_SIZE	(1 << ZRAM_LOGICAL_BLOCK_SHIFT)
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK	\
	(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))


/*
 * The lower ZRAM_FLAG_SHIFT bits of table.value is for
 * object size (excluding header), the higher bits is for
 * zram_pageflags.
 *
 * zram is mainly used for memory efficiency so we want to keep memory
 * footprint small so we can squeeze size and flags into a field.
 */
#define ZRAM_FLAG_SHIFT 24
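
/*
 * Illustration only, not part of the original header: with the layout
 * described above, size and flags share table.value like this (the
 * real accessors live in zram_drv.c; these expressions are a sketch):
 *
 *	size    = value & ((1UL << ZRAM_FLAG_SHIFT) - 1);
 *	is_zero = value & (1UL << ZRAM_ZERO);
 */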

/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
	/* Page consists entirely of zeros */
	ZRAM_ZERO = ZRAM_FLAG_SHIFT,
	ZRAM_ACCESS,	/* page is now accessed */

	__NR_ZRAM_PAGEFLAGS,
};

/*-- Data structures */

/* Allocated for each disk page */
struct zram_table_entry {
	unsigned long handle;
	unsigned long value;
};

struct zram_stats {
	atomic64_t compr_data_size;	/* compressed size of pages stored */
	atomic64_t num_reads;		/* failed + successful */
	atomic64_t num_writes;		/* --do-- */
	atomic64_t num_migrated;	/* no. of migrated objects */
	atomic64_t failed_reads;	/* can happen when memory is too low */
	atomic64_t failed_writes;	/* can happen when memory is too low */
	atomic64_t invalid_io;		/* non-page-aligned I/O requests */
	atomic64_t notify_free;		/* no. of swap slot free notifications */
	atomic64_t zero_pages;		/* no. of zero filled pages */
	atomic64_t pages_stored;	/* no. of pages currently stored */
	atomic_long_t max_used_pages;	/* maximum no. of pages stored */
};

struct zram_meta {
	struct zram_table_entry *table;
	struct zs_pool *mem_pool;
};

struct zram {
	struct zram_meta *meta;
	struct zcomp *comp;
	struct gendisk *disk;
	/* Prevent concurrent execution of device init */
	struct rw_semaphore init_lock;
	/*
	 * the number of pages zram can consume for storing compressed data
	 */
	unsigned long limit_pages;
	int max_comp_streams;

	struct zram_stats stats;
	atomic_t refcount; /* refcount for zram_meta */
	/* wait until all in-flight I/O on all CPUs is done */
	wait_queue_head_t io_done;
	/*
	 * This is the limit on amount of *uncompressed* worth of data
	 * we can store in a disk.
	 */
	u64 disksize;	/* bytes */
	char compressor[10];
	/*
	 * zram is claimed, so any open request will fail
	 */
	bool claim; /* Protected by bdev->bd_mutex */
};

#endif