Fixes for pstore for 3.11 merge window

Merge tag 'please-pull-pstore' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull pstore update from Tony Luck:
 "Fixes for pstore for 3.11 merge window"

* tag 'please-pull-pstore' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  efivars: If pstore_register fails, free unneeded pstore buffer
  acpi: Eliminate console msg if pstore.backend excludes ERST
  pstore: Return unique error if backend registration excluded by kernel param
  pstore: Fail to unlink if a driver has not defined pstore_erase
  pstore/ram: remove the power of buffer size limitation
  pstore/ram: avoid atomic accesses for ioremapped regions
  efi, pstore: Cocci spatch "memdup.spatch"
commit 04bbc8e1f6
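Two of the fixes above (the efivars one and the ERST one) converge on the same caller-side pattern: allocate a staging buffer, hand it to pstore_register(), and if registration fails, unhook and free the buffer instead of leaking it. The standalone sketch below shows that shape in plain userspace C (not kernel code); the trimmed-down struct pstore_info and the always-failing fake_pstore_register() are illustrative stand-ins, not the real API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Trimmed-down stand-in for struct pstore_info: only the buffer fields. */
struct pstore_info {
    char  *buf;
    size_t bufsize;
};

/* Stand-in for pstore_register(); here it always fails for the demo. */
static int fake_pstore_register(struct pstore_info *psi)
{
    (void)psi;
    return -EBUSY;
}

int main(void)
{
    struct pstore_info info = { 0 };
    char *buf = malloc(1024);

    if (!buf)
        return 1;

    info.buf = buf;
    info.bufsize = 1024;

    if (fake_pstore_register(&info)) {
        /* Registration failed: unhook and free the buffer, don't leak it. */
        fprintf(stderr, "could not register with persistent store\n");
        info.buf = NULL;
        info.bufsize = 0;
        free(buf);
    }
    return 0;
}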
@@ -1180,20 +1180,28 @@ static int __init erst_init(void)
     if (!erst_erange.vaddr)
         goto err_release_erange;
 
+    pr_info(ERST_PFX
+    "Error Record Serialization Table (ERST) support is initialized.\n");
+
     buf = kmalloc(erst_erange.size, GFP_KERNEL);
     spin_lock_init(&erst_info.buf_lock);
     if (buf) {
         erst_info.buf = buf + sizeof(struct cper_pstore_record);
         erst_info.bufsize = erst_erange.size -
                 sizeof(struct cper_pstore_record);
-        if (pstore_register(&erst_info)) {
-            pr_info(ERST_PFX "Could not register with persistent store\n");
+        rc = pstore_register(&erst_info);
+        if (rc) {
+            if (rc != -EPERM)
+                pr_info(ERST_PFX
+                "Could not register with persistent store\n");
             erst_info.buf = NULL;
             erst_info.bufsize = 0;
             kfree(buf);
         }
-    }
-
-    pr_info(ERST_PFX
-    "Error Record Serialization Table (ERST) support is initialized.\n");
+    } else
+        pr_err(ERST_PFX
+        "Failed to allocate %lld bytes for persistent store error log\n",
+        erst_erange.size);
 
     return 0;
@@ -79,10 +79,9 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
                &entry->var.DataSize, entry->var.Data);
     size = entry->var.DataSize;
 
-    *cb_data->buf = kmalloc(size, GFP_KERNEL);
+    *cb_data->buf = kmemdup(entry->var.Data, size, GFP_KERNEL);
     if (*cb_data->buf == NULL)
         return -ENOMEM;
-    memcpy(*cb_data->buf, entry->var.Data, size);
     return size;
 }
 
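This hunk is the Coccinelle memdup.spatch transformation: kmemdup(src, len, gfp) is equivalent to kmalloc(len, gfp) followed by memcpy(dst, src, len), including the NULL-on-failure behaviour, so the explicit copy can be dropped. A userspace sketch of the same equivalence, using a hypothetical memdup() helper built on malloc()/memcpy():

#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): allocate size bytes and copy src into them. */
static void *memdup(const void *src, size_t size)
{
    void *dst = malloc(size);

    if (dst)
        memcpy(dst, src, size);
    return dst;
}

/* Before the spatch: allocate, check, then copy by hand. */
static char *copy_record_before(const char *data, size_t size)
{
    char *buf = malloc(size);

    if (buf == NULL)
        return NULL;
    memcpy(buf, data, size);
    return buf;
}

/* After the spatch: one call, identical failure semantics. */
static char *copy_record_after(const char *data, size_t size)
{
    return memdup(data, size);
}

int main(void)
{
    const char rec[] = "oops record";
    char *a = copy_record_before(rec, sizeof(rec));
    char *b = copy_record_after(rec, sizeof(rec));

    free(a);
    free(b);
    return 0;
}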
@@ -236,7 +235,11 @@ static __init int efivars_pstore_init(void)
     efi_pstore_info.bufsize = 1024;
     spin_lock_init(&efi_pstore_info.buf_lock);
 
-    pstore_register(&efi_pstore_info);
+    if (pstore_register(&efi_pstore_info)) {
+        kfree(efi_pstore_info.buf);
+        efi_pstore_info.buf = NULL;
+        efi_pstore_info.bufsize = 0;
+    }
 
     return 0;
 }
@@ -178,6 +178,8 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
     if (p->psi->erase)
         p->psi->erase(p->type, p->id, p->count,
                   dentry->d_inode->i_ctime, p->psi);
+    else
+        return -EPERM;
 
     return simple_unlink(dir, dentry);
 }
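Rationale for the unlink change: if a backend has no erase callback, deleting the pstore file used to succeed in the filesystem while the underlying record survived and reappeared on the next mount; returning -EPERM keeps the visible state honest. A standalone sketch of that guard, with a hypothetical record_store type standing in for the pstore backend:

#include <errno.h>
#include <stddef.h>

/* Hypothetical backend: erase is optional, as psi->erase is in pstore. */
struct record_store {
    int (*erase)(unsigned long long id);
};

/* Only allow deletion when the backend can actually erase the record,
 * so the visible state matches the persistent state. */
static int store_unlink(struct record_store *store, unsigned long long id)
{
    if (!store->erase)
        return -EPERM;
    return store->erase(id);
}

static int erase_noop(unsigned long long id)
{
    (void)id;
    return 0;
}

int main(void)
{
    struct record_store with_erase = { .erase = erase_noop };
    struct record_store without_erase = { .erase = NULL };

    return store_unlink(&with_erase, 1) == 0 &&
           store_unlink(&without_erase, 1) == -EPERM ? 0 : 1;
}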
@@ -239,17 +239,15 @@ int pstore_register(struct pstore_info *psi)
 {
     struct module *owner = psi->owner;
 
+    if (backend && strcmp(backend, psi->name))
+        return -EPERM;
+
     spin_lock(&pstore_lock);
     if (psinfo) {
         spin_unlock(&pstore_lock);
         return -EBUSY;
     }
 
-    if (backend && strcmp(backend, psi->name)) {
-        spin_unlock(&pstore_lock);
-        return -EINVAL;
-    }
-
     if (!psi->write)
         psi->write = pstore_write_compat;
     psinfo = psi;
@@ -274,6 +272,9 @@ int pstore_register(struct pstore_info *psi)
         add_timer(&pstore_timer);
     }
 
+    pr_info("pstore: Registered %s as persistent store backend\n",
+        psi->name);
+
     return 0;
 }
 EXPORT_SYMBOL_GPL(pstore_register);
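Taken together, these two hunks give pstore_register() a small error-code contract: -EPERM means the backend was deliberately excluded by the pstore.backend= kernel parameter, -EBUSY means another backend already registered (the old -EINVAL for exclusion is gone), and the exclusion check is now done up front, before pstore_lock is taken. A sketch of how a caller can use that distinction; register_backend() and register_my_backend() here are illustrative stand-ins, not kernel functions:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-in that follows the new return-value contract. */
static int register_backend(const char *name)
{
    (void)name;
    return -EPERM;  /* pretend this backend was excluded by pstore.backend= */
}

static void register_my_backend(const char *name)
{
    int rc = register_backend(name);

    switch (rc) {
    case 0:
        printf("%s: registered\n", name);
        break;
    case -EPERM:
        /* Deliberately excluded by configuration: stay silent. */
        break;
    case -EBUSY:
        fprintf(stderr, "%s: another backend already registered\n", name);
        break;
    default:
        fprintf(stderr, "%s: registration failed: %d\n", name, rc);
        break;
    }
}

int main(void)
{
    register_my_backend("erst");
    return 0;
}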
@@ -399,8 +399,6 @@ static int ramoops_probe(struct platform_device *pdev)
         goto fail_out;
     }
 
-    if (!is_power_of_2(pdata->mem_size))
-        pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
     if (!is_power_of_2(pdata->record_size))
         pdata->record_size = rounddown_pow_of_two(pdata->record_size);
     if (!is_power_of_2(pdata->console_size))
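For context, rounddown_pow_of_two(n) yields the largest power of two that is less than or equal to n, so a 96 KiB region used to be silently trimmed to 64 KiB; after this hunk the overall mem_size is used as given and only the per-record sizes are still rounded. A quick userspace illustration of that rounding (a plain loop, not the kernel's implementation):

#include <stdio.h>

/* Largest power of two that is <= n (for n > 0); plain loop, not the kernel macro. */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
    unsigned long p = 1;

    while (p * 2 <= n && p * 2 > p) /* second test guards against overflow */
        p *= 2;
    return p;
}

int main(void)
{
    unsigned long mem_size = 96 * 1024;

    /* Previously the whole region was trimmed like this: 96 KiB -> 64 KiB. */
    printf("%lu -> %lu\n", mem_size, rounddown_pow_of_two(mem_size));
    return 0;
}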
@@ -46,7 +46,7 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
 }
 
 /* increase and wrap the start pointer, returning the old value */
-static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
     int old;
     int new;
@@ -62,7 +62,7 @@ static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 }
 
 /* increase the size counter until it hits the max size */
-static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
     size_t old;
     size_t new;
@@ -78,6 +78,53 @@ static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
     } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
 }
 
+static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+/* increase and wrap the start pointer, returning the old value */
+static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+    int old;
+    int new;
+    unsigned long flags;
+
+    raw_spin_lock_irqsave(&buffer_lock, flags);
+
+    old = atomic_read(&prz->buffer->start);
+    new = old + a;
+    while (unlikely(new > prz->buffer_size))
+        new -= prz->buffer_size;
+    atomic_set(&prz->buffer->start, new);
+
+    raw_spin_unlock_irqrestore(&buffer_lock, flags);
+
+    return old;
+}
+
+/* increase the size counter until it hits the max size */
+static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+    size_t old;
+    size_t new;
+    unsigned long flags;
+
+    raw_spin_lock_irqsave(&buffer_lock, flags);
+
+    old = atomic_read(&prz->buffer->size);
+    if (old == prz->buffer_size)
+        goto exit;
+
+    new = old + a;
+    if (new > prz->buffer_size)
+        new = prz->buffer_size;
+    atomic_set(&prz->buffer->size, new);
+
+exit:
+    raw_spin_unlock_irqrestore(&buffer_lock, flags);
+}
+
+static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
     uint8_t *data, size_t len, uint8_t *ecc)
 {
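The motivation for the second set of helpers: on some platforms, exclusive/atomic read-modify-write operations are not reliable (or simply hang) when they target ioremapped memory, so the buffer counters need an ordinary spinlock-protected update there, while the lock-free cmpxchg versions remain the default for RAM that supports atomics; the next hunk flips the function pointers when the region is ioremapped. The following standalone sketch mirrors that dispatch pattern in userspace C, with C11 atomics and a pthread mutex as stand-ins for the kernel primitives:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the zone: one size counter with a fixed capacity. */
struct zone {
    atomic_size_t size;
    size_t cap;
};

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lock-free variant: cmpxchg loop, fine when the memory supports atomics. */
static void size_add_atomic(struct zone *z, size_t a)
{
    size_t old, new;

    do {
        old = atomic_load(&z->size);
        new = old + a;
        if (new > z->cap)
            new = z->cap;
    } while (!atomic_compare_exchange_weak(&z->size, &old, new));
}

/* Locked variant: plain read/modify/write under a mutex. */
static void size_add_locked(struct zone *z, size_t a)
{
    size_t new;

    pthread_mutex_lock(&zone_lock);
    new = atomic_load(&z->size) + a;
    if (new > z->cap)
        new = z->cap;
    atomic_store(&z->size, new);
    pthread_mutex_unlock(&zone_lock);
}

/* Default to the lock-free version; flip the pointer for "ioremapped" zones. */
static void (*size_add)(struct zone *, size_t) = size_add_atomic;

int main(void)
{
    struct zone z = { .size = 0, .cap = 100 };
    bool region_is_ioremapped = true;   /* pretend, for the sketch */

    if (region_is_ioremapped)
        size_add = size_add_locked;

    size_add(&z, 42);
    size_add(&z, 80);   /* clamps at cap */
    printf("size = %zu\n", (size_t)atomic_load(&z.size));
    return 0;
}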
@@ -372,6 +419,9 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size)
         return NULL;
     }
 
+    buffer_start_add = buffer_start_add_locked;
+    buffer_size_add = buffer_size_add_locked;
+
     return ioremap(start, size);
 }
 