This pull request contains updates for both UBI and UBIFS:
- A new interface for UBI to deal better with read disturb
- Reject unsupported ioctl flags in UBIFS (xfstests found it)

-----BEGIN PGP SIGNATURE-----
iQJKBAABCAA0FiEEdgfidid8lnn52cLTZvlZhesYu8EFAlyHyMUWHHJpY2hhcmRA
c2lnbWEtc3Rhci5hdAAKCRBm+VmF6xi7wf9+EACFjPJaTJeLPHQofH3+u9O8gPzh
ptQFzkEcRrr7Y7WjXnYGhjw83Nx4o5iM17gfqq7zYfuCMxVbC8zm0WZ9Ujj3p7xV
p3IJ0bu/9sdIgdo+X9P8XJugAlWit1cW4mI8ZIAl2/CmYBzho8Zo55BNngNQ5G+Y
o3JujvP7TAHm9gbqIUMrGpweBHKX0GoooYZBTPdkLyKnFT0yxzOc/jdVILspIxi5
GtDl4738xV7Ts3Fwson1BVqDdwqLvd2j+LBWeRTSYXKyQLIizxRHtk1EZHZtBDZk
hWS/IW6HOzJJ5EQHn1EFAyQEGhfm4Yty+X0/BaPn8wvGE3Oud7bd9zgUCoBrhhTv
ztLPXY1U1LV8aTCmww6IOXwFj+6BGpj5fIu7my14aqGPKVV5M2kkf+prnLimb9QN
C3WxUz1Spz6CwrexoncvGm9ujoQbmwYLtKVNjRFIJ267OelaVD8icuAp1pZLSDom
1B6l39UQctrMiNqxuzJL+eq2raVZnnSQTlDqbUjFnUuU3LccRRNYgzhT1O6Ph50U
xqSO2k7Pf41/zZXhdB009HLecVL4gsZOunhGOE7Vv4kr7hin0AfrnoegdL37YG8W
GF6BNBgeegOxYDyvbOIWOxDqwyBWY2TPLJJ1IUE6j0lU6P1293IlsYHyTXUIK6bM
CQinKMNAXICWvXG0YQ==
=DlM9
-----END PGP SIGNATURE-----

Merge tag 'upstream-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs

Pull UBI and UBIFS updates from Richard Weinberger:

- A new interface for UBI to deal better with read disturb
- Reject unsupported ioctl flags in UBIFS (xfstests found it)

* tag 'upstream-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs:
  ubi: wl: Silence uninitialized variable warning
  ubifs: Reject unsupported ioctl flags explicitly
  ubi: Expose the bitrot interface
  ubi: Introduce in_pq()
commit a840b56ba3
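To show how the new bitrot interface is meant to be driven from userspace, here is a minimal sketch (not part of the patch set): it asks UBI to check one physical eraseblock (PEB) for bitflips via the new UBI_IOCRPEB ioctl and interprets the result according to the return codes documented at ubi_bitflip_check() below. The device node /dev/ubi0 and PEB number 42 are arbitrary assumptions, and the updated <mtd/ubi-user.h> header must be available.

/*
 * Minimal sketch: check one PEB on /dev/ubi0 for bitflips.
 * Assumes the updated <mtd/ubi-user.h>; typically needs root
 * privileges (the UBI character device ioctls are privileged).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

int main(void)
{
        int32_t pnum = 42;      /* hypothetical PEB number to inspect */
        int fd = open("/dev/ubi0", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/ubi0");
                return 1;
        }

        if (ioctl(fd, UBI_IOCRPEB, &pnum) == 0)
                printf("PEB %d: no bitflips detected\n", pnum);
        else if (errno == EUCLEAN)
                printf("PEB %d: bitflips found, scrubbing scheduled\n", pnum);
        else
                printf("PEB %d: check failed: %s\n", pnum, strerror(errno));

        close(fd);
        return 0;
}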
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -974,6 +974,36 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
                 break;
         }
 
+        /* Check a specific PEB for bitflips and scrub it if needed */
+        case UBI_IOCRPEB:
+        {
+                int pnum;
+
+                err = get_user(pnum, (__user int32_t *)argp);
+                if (err) {
+                        err = -EFAULT;
+                        break;
+                }
+
+                err = ubi_bitflip_check(ubi, pnum, 0);
+                break;
+        }
+
+        /* Force scrubbing for a specific PEB */
+        case UBI_IOCSPEB:
+        {
+                int pnum;
+
+                err = get_user(pnum, (__user int32_t *)argp);
+                if (err) {
+                        err = -EFAULT;
+                        break;
+                }
+
+                err = ubi_bitflip_check(ubi, pnum, 1);
+                break;
+        }
+
         default:
                 err = -ENOTTY;
                 break;
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -929,6 +929,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
 int ubi_is_erase_work(struct ubi_work *wrk);
 void ubi_refill_pools(struct ubi_device *ubi);
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
 
 /* io.c */
 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -277,6 +277,27 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
         return 0;
 }
 
+/**
+ * in_pq - check if a wear-leveling entry is present in the protection queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns non-zero if @e is in the protection queue and zero
+ * if it is not.
+ */
+static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+        struct ubi_wl_entry *p;
+        int i;
+
+        for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+                list_for_each_entry(p, &ubi->pq[i], u.list)
+                        if (p == e)
+                                return 1;
+
+        return 0;
+}
+
 /**
  * prot_queue_add - add physical eraseblock to the protection queue.
  * @ubi: UBI device description object
@@ -1419,6 +1440,150 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
         return err;
 }
 
+static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+        if (in_wl_tree(e, &ubi->scrub))
+                return false;
+        else if (in_wl_tree(e, &ubi->erroneous))
+                return false;
+        else if (ubi->move_from == e)
+                return false;
+        else if (ubi->move_to == e)
+                return false;
+
+        return true;
+}
+
+/**
+ * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ * @force: don't read the block, assume bitflips happened and take action.
+ *
+ * This function reads the given eraseblock and checks if bitflips occurred.
+ * In case of bitflips, the eraseblock is scheduled for scrubbing.
+ * If scrubbing is forced with @force, the eraseblock is not read,
+ * but scheduled for scrubbing right away.
+ *
+ * Returns:
+ * %EINVAL, PEB is out of range
+ * %ENOENT, PEB is no longer used by UBI
+ * %EBUSY, PEB cannot be checked now or a check is currently running on it
+ * %EAGAIN, bit flips happened but scrubbing is currently not possible
+ * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
+ * %0, no bit flips detected
+ */
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
+{
+        int err;
+        struct ubi_wl_entry *e;
+
+        if (pnum < 0 || pnum >= ubi->peb_count) {
+                err = -EINVAL;
+                goto out;
+        }
+
+        /*
+         * Pause all parallel work, otherwise it can happen that the
+         * erase worker frees a wl entry under us.
+         */
+        down_write(&ubi->work_sem);
+
+        /*
+         * Make sure that the wl entry does not change state while
+         * inspecting it.
+         */
+        spin_lock(&ubi->wl_lock);
+        e = ubi->lookuptbl[pnum];
+        if (!e) {
+                spin_unlock(&ubi->wl_lock);
+                err = -ENOENT;
+                goto out_resume;
+        }
+
+        /*
+         * Does it make sense to check this PEB?
+         */
+        if (!scrub_possible(ubi, e)) {
+                spin_unlock(&ubi->wl_lock);
+                err = -EBUSY;
+                goto out_resume;
+        }
+        spin_unlock(&ubi->wl_lock);
+
+        if (!force) {
+                mutex_lock(&ubi->buf_mutex);
+                err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+                mutex_unlock(&ubi->buf_mutex);
+        }
+
+        if (force || err == UBI_IO_BITFLIPS) {
+                /*
+                 * Okay, bit flip happened, let's figure out what we can do.
+                 */
+                spin_lock(&ubi->wl_lock);
+
+                /*
+                 * Recheck. We released wl_lock, UBI might have killed the
+                 * wl entry under us.
+                 */
+                e = ubi->lookuptbl[pnum];
+                if (!e) {
+                        spin_unlock(&ubi->wl_lock);
+                        err = -ENOENT;
+                        goto out_resume;
+                }
+
+                /*
+                 * Need to re-check state
+                 */
+                if (!scrub_possible(ubi, e)) {
+                        spin_unlock(&ubi->wl_lock);
+                        err = -EBUSY;
+                        goto out_resume;
+                }
+
+                if (in_pq(ubi, e)) {
+                        prot_queue_del(ubi, e->pnum);
+                        wl_tree_add(e, &ubi->scrub);
+                        spin_unlock(&ubi->wl_lock);
+
+                        err = ensure_wear_leveling(ubi, 1);
+                } else if (in_wl_tree(e, &ubi->used)) {
+                        rb_erase(&e->u.rb, &ubi->used);
+                        wl_tree_add(e, &ubi->scrub);
+                        spin_unlock(&ubi->wl_lock);
+
+                        err = ensure_wear_leveling(ubi, 1);
+                } else if (in_wl_tree(e, &ubi->free)) {
+                        rb_erase(&e->u.rb, &ubi->free);
+                        ubi->free_count--;
+                        spin_unlock(&ubi->wl_lock);
+
+                        /*
+                         * This PEB is empty, we can schedule it for
+                         * erasure right away. No wear leveling needed.
+                         */
+                        err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
+                                             force ? 0 : 1, true);
+                } else {
+                        spin_unlock(&ubi->wl_lock);
+                        err = -EAGAIN;
+                }
+
+                if (!err && !force)
+                        err = -EUCLEAN;
+        } else {
+                err = 0;
+        }
+
+out_resume:
+        up_write(&ubi->work_sem);
+out:
+
+        return err;
+}
+
 /**
  * tree_destroy - destroy an RB-tree.
  * @ubi: UBI device description object
@@ -1848,16 +2013,11 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
 static int self_check_in_pq(const struct ubi_device *ubi,
                             struct ubi_wl_entry *e)
 {
-        struct ubi_wl_entry *p;
-        int i;
-
         if (!ubi_dbg_chk_gen(ubi))
                 return 0;
 
-        for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
-                list_for_each_entry(p, &ubi->pq[i], u.list)
-                        if (p == e)
-                                return 0;
+        if (in_pq(ubi, e))
+                return 0;
 
         ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
                 e->pnum, e->ec);
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -28,6 +28,11 @@
 #include <linux/mount.h>
 #include "ubifs.h"
 
+/* Need to be kept consistent with checked flags in ioctl2ubifs() */
+#define UBIFS_SUPPORTED_IOCTL_FLAGS \
+        (FS_COMPR_FL | FS_SYNC_FL | FS_APPEND_FL | \
+         FS_IMMUTABLE_FL | FS_DIRSYNC_FL)
+
 /**
  * ubifs_set_inode_flags - set VFS inode flags.
  * @inode: VFS inode to set flags for
@@ -169,6 +174,9 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                 if (get_user(flags, (int __user *) arg))
                         return -EFAULT;
 
+                if (flags & ~UBIFS_SUPPORTED_IOCTL_FLAGS)
+                        return -EOPNOTSUPP;
+
                 if (!S_ISDIR(inode->i_mode))
                         flags &= ~FS_DIRSYNC_FL;
 
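From userspace the effect of this check is simple: FS_IOC_SETFLAGS with a flag UBIFS does not implement now fails with EOPNOTSUPP instead of being silently ignored. A short sketch, assuming a file on a UBIFS mount at the hypothetical path /mnt/ubifs/file; FS_NOATIME_FL is just one example of a flag outside UBIFS_SUPPORTED_IOCTL_FLAGS.

/*
 * Sketch: exercise the new flag validation in ubifs_ioctl().
 * The path below is a placeholder for any file on a UBIFS mount.
 */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
        int flags;
        int fd = open("/mnt/ubifs/file", O_RDONLY);

        if (fd < 0)
                return 1;

        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
                flags |= FS_NOATIME_FL; /* not in UBIFS_SUPPORTED_IOCTL_FLAGS */
                if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0 && errno == EOPNOTSUPP)
                        printf("unsupported flag rejected, as expected\n");
        }

        close(fd);
        return 0;
}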
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -171,6 +171,11 @@
 /* Re-name volumes */
 #define UBI_IOCRNVOL _IOW(UBI_IOC_MAGIC, 3, struct ubi_rnvol_req)
 
+/* Read the specified PEB and scrub it if there are bitflips */
+#define UBI_IOCRPEB _IOW(UBI_IOC_MAGIC, 4, __s32)
+/* Force scrubbing on the specified PEB */
+#define UBI_IOCSPEB _IOW(UBI_IOC_MAGIC, 5, __s32)
+
 /* ioctl commands of the UBI control character device */
 
 #define UBI_CTRL_IOC_MAGIC 'o'
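The force path, UBI_IOCSPEB, skips the read in ubi_bitflip_check() and schedules the PEB for scrubbing (or erasure, if it is free) unconditionally. A matching sketch, with the same hypothetical device node and PEB number as in the earlier example:

/* Sketch: force scrubbing of one PEB without checking it first. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Returns 0 on success, -1 with errno set on failure. */
static int force_scrub_peb(const char *ubi_dev, int32_t pnum)
{
        int ret;
        int fd = open(ubi_dev, O_RDONLY);

        if (fd < 0)
                return -1;

        /* Maps to ubi_bitflip_check(ubi, pnum, 1) in the kernel. */
        ret = ioctl(fd, UBI_IOCSPEB, &pnum);
        close(fd);
        return ret;
}

int main(void)
{
        if (force_scrub_peb("/dev/ubi0", 42) < 0) {
                perror("UBI_IOCSPEB");
                return 1;
        }
        return 0;
}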