Merge branch 'pm-sleep'
* pm-sleep:
  freezer: Fix typo in freezable_schedule_timeout() comment
  PM / s2idle: Clear the events_check_enabled flag
  PM / sleep: Remove pm_complete_with_resume_check()
  PM: ARM: locomo: Drop suspend and resume bus type callbacks
  PM: Use a more common logging style
  PM: Document rules on using pm_runtime_resume() in system suspend callbacks
commit 05d658b5b5

Documentation/driver-api/pm/devices.rst

@@ -328,7 +328,10 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``.
 After the ``->prepare`` callback method returns, no new children may be
 registered below the device. The method may also prepare the device or
 driver in some way for the upcoming system power transition, but it
-should not put the device into a low-power state.
+should not put the device into a low-power state. Moreover, if the
+device supports runtime power management, the ``->prepare`` callback
+method must not update its state in case it is necessary to resume it
+from runtime suspend later on.

 For devices supporting runtime power management, the return value of the
 prepare callback can be used to indicate to the PM core that it may

@@ -356,6 +359,16 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``.
 the appropriate low-power state, depending on the bus type the device is
 on, and they may enable wakeup events.

+However, for devices supporting runtime power management, the
+``->suspend`` methods provided by subsystems (bus types and PM domains
+in particular) must follow an additional rule regarding what can be done
+to the devices before their drivers' ``->suspend`` methods are called.
+Namely, they can only resume the devices from runtime suspend by
+calling :c:func:`pm_runtime_resume` for them, if that is necessary, and
+they must not update the state of the devices in any other way at that
+time (in case the drivers need to resume the devices from runtime
+suspend in their ``->suspend`` methods).
+
 3. For a number of devices it is convenient to split suspend into the
 "quiesce device" and "save device state" phases, in which cases
 ``suspend_late`` is meant to do the latter. It is always executed after

@@ -729,6 +742,16 @@ state temporarily, for example so that its system wakeup capability can be
 disabled. This all depends on the hardware and the design of the subsystem and
 device driver in question.

+If it is necessary to resume a device from runtime suspend during a system-wide
+transition into a sleep state, that can be done by calling
+:c:func:`pm_runtime_resume` for it from the ``->suspend`` callback (or its
+couterpart for transitions related to hibernation) of either the device's driver
+or a subsystem responsible for it (for example, a bus type or a PM domain).
+That is guaranteed to work by the requirement that subsystems must not change
+the state of devices (possibly except for resuming them from runtime suspend)
+from their ``->prepare`` and ``->suspend`` callbacks (or equivalent) *before*
+invoking device drivers' ``->suspend`` callbacks (or equivalent).
+
 During system-wide resume from a sleep state it's easiest to put devices into
 the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`.
 Refer to that document for more information regarding this particular issue as
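
For a driver, the guarantee documented above means its ``->suspend`` callback can safely bring the device back to full power itself before touching the hardware. A minimal sketch of that pattern; the ``foo`` driver and its helpers are invented for illustration and are not part of this commit:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical helpers standing in for real register-save/quiesce code. */
    static void foo_save_registers(struct device *dev) { }
    static void foo_quiesce(struct device *dev) { }

    static int foo_suspend(struct device *dev)
    {
        int ret;

        /*
         * The hardware must be accessible in order to save its state, so
         * resume it from runtime suspend first.  The rule above guarantees
         * that no subsystem callback has changed the device's power state
         * before this callback runs.
         */
        ret = pm_runtime_resume(dev);
        if (ret < 0)
            return ret;

        foo_save_registers(dev);
        foo_quiesce(dev);
        return 0;
    }

The callback would be wired into the driver's ``struct dev_pm_ops`` (for instance via SET_SYSTEM_SLEEP_PM_OPS()) in the usual way.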

arch/arm/common/locomo.c

@@ -826,28 +826,6 @@ static int locomo_match(struct device *_dev, struct device_driver *_drv)
 	return dev->devid == drv->devid;
 }
 
-static int locomo_bus_suspend(struct device *dev, pm_message_t state)
-{
-	struct locomo_dev *ldev = LOCOMO_DEV(dev);
-	struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
-	int ret = 0;
-
-	if (drv && drv->suspend)
-		ret = drv->suspend(ldev, state);
-	return ret;
-}
-
-static int locomo_bus_resume(struct device *dev)
-{
-	struct locomo_dev *ldev = LOCOMO_DEV(dev);
-	struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
-	int ret = 0;
-
-	if (drv && drv->resume)
-		ret = drv->resume(ldev);
-	return ret;
-}
-
 static int locomo_bus_probe(struct device *dev)
 {
 	struct locomo_dev *ldev = LOCOMO_DEV(dev);

@@ -875,8 +853,6 @@ struct bus_type locomo_bus_type = {
 	.match		= locomo_match,
 	.probe		= locomo_bus_probe,
 	.remove		= locomo_bus_remove,
-	.suspend	= locomo_bus_suspend,
-	.resume		= locomo_bus_resume,
 };
 
 int locomo_driver_register(struct locomo_driver *driver)

arch/arm/include/asm/hardware/locomo.h

@@ -189,8 +189,6 @@ struct locomo_driver {
 	unsigned int devid;
 	int (*probe)(struct locomo_dev *);
 	int (*remove)(struct locomo_dev *);
-	int (*suspend)(struct locomo_dev *, pm_message_t);
-	int (*resume)(struct locomo_dev *);
 };
 
 #define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
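
With the legacy bus-type ``suspend``/``resume`` methods gone, a locomo driver that did need power-management handling would rely on the generic dev_pm_ops mechanism through the embedded ``struct device_driver``; a hypothetical sketch (the ``bar`` driver and callbacks are invented for illustration, not part of this commit):

    #include <linux/pm.h>

    static int bar_locomo_suspend(struct device *dev)
    {
        /* Quiesce the (hypothetical) device here. */
        return 0;
    }

    static int bar_locomo_resume(struct device *dev)
    {
        /* Re-initialise the (hypothetical) device here. */
        return 0;
    }

    static SIMPLE_DEV_PM_OPS(bar_locomo_pm_ops, bar_locomo_suspend, bar_locomo_resume);

The driver would then point its ``drv.pm`` field at ``bar_locomo_pm_ops`` before calling locomo_driver_register().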

drivers/base/power/generic_ops.c

@@ -9,7 +9,6 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/export.h>
-#include <linux/suspend.h>
 
 #ifdef CONFIG_PM
 /**

@@ -298,26 +297,4 @@ void pm_generic_complete(struct device *dev)
 	if (drv && drv->pm && drv->pm->complete)
 		drv->pm->complete(dev);
 }
-
-/**
- * pm_complete_with_resume_check - Complete a device power transition.
- * @dev: Device to handle.
- *
- * Complete a device power transition during a system-wide power transition and
- * optionally schedule a runtime resume of the device if the system resume in
- * progress has been initated by the platform firmware and the device had its
- * power.direct_complete flag set.
- */
-void pm_complete_with_resume_check(struct device *dev)
-{
-	pm_generic_complete(dev);
-	/*
-	 * If the device had been runtime-suspended before the system went into
-	 * the sleep state it is going out of and it has never been resumed till
-	 * now, resume it in case the firmware powered it up.
-	 */
-	if (dev->power.direct_complete && pm_resume_via_firmware())
-		pm_request_resume(dev);
-}
-EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
 #endif /* CONFIG_PM_SLEEP */

include/linux/freezer.h

@@ -182,7 +182,7 @@ static inline void freezable_schedule_unsafe(void)
 }
 
 /*
- * Like freezable_schedule_timeout(), but should not block the freezer.  Do not
+ * Like schedule_timeout(), but should not block the freezer.  Do not
  * call this with locks held.
  */
 static inline long freezable_schedule_timeout(long timeout)

include/linux/pm.h

@@ -736,7 +736,6 @@ extern int pm_generic_poweroff_noirq(struct device *dev);
 extern int pm_generic_poweroff_late(struct device *dev);
 extern int pm_generic_poweroff(struct device *dev);
 extern void pm_generic_complete(struct device *dev);
-extern void pm_complete_with_resume_check(struct device *dev);
 
 #else /* !CONFIG_PM_SLEEP */
 

kernel/power/qos.c

@@ -701,8 +701,8 @@ static int __init pm_qos_power_init(void)
 	for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
 		ret = register_pm_qos_misc(pm_qos_array[i], d);
 		if (ret < 0) {
-			printk(KERN_ERR "pm_qos_param: %s setup failed\n",
-			       pm_qos_array[i]->name);
+			pr_err("%s: %s setup failed\n",
+			       __func__, pm_qos_array[i]->name);
 			return ret;
 		}
 	}

kernel/power/snapshot.c

@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) "PM: " fmt
+
 #include <linux/version.h>
 #include <linux/module.h>
 #include <linux/mm.h>
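
The printk()-to-pr_*() conversions in the hunks below rely on this pr_fmt() definition: every pr_err()/pr_info()/pr_debug() call in the file picks up the "PM: " prefix automatically, which is why the hand-written prefixes disappear from the individual format strings. Roughly, the mechanism is as follows (a simplified sketch of the real printk.h macros):

    /* printk.h default, used unless a file defines pr_fmt() before its includes: */
    #define pr_fmt(fmt) fmt

    /* pr_err() and friends prepend a loglevel and run the format through pr_fmt(): */
    #define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

    /*
     * With '#define pr_fmt(fmt) "PM: " fmt' in effect,
     *
     *     pr_err("Not enough free memory\n");
     *
     * expands to
     *
     *     printk(KERN_ERR "PM: Not enough free memory\n");
     */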

@@ -967,7 +969,7 @@ void __init __register_nosave_region(unsigned long start_pfn,
 	region->end_pfn = end_pfn;
 	list_add_tail(&region->list, &nosave_regions);
 Report:
-	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
+	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
 }

@@ -1039,7 +1041,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
 	list_for_each_entry(region, &nosave_regions, list) {
 		unsigned long pfn;
 
-		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
+		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

@@ -1095,7 +1097,7 @@ int create_basic_memory_bitmaps(void)
 	free_pages_map = bm2;
 	mark_nosave_pages(forbidden_pages_map);
 
-	pr_debug("PM: Basic memory bitmaps created\n");
+	pr_debug("Basic memory bitmaps created\n");
 
 	return 0;
 

@@ -1131,7 +1133,7 @@ void free_basic_memory_bitmaps(void)
 	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
 	kfree(bm2);
 
-	pr_debug("PM: Basic memory bitmaps freed\n");
+	pr_debug("Basic memory bitmaps freed\n");
 }
 
 void clear_free_pages(void)

@@ -1152,7 +1154,7 @@ void clear_free_pages(void)
 		pfn = memory_bm_next_pfn(bm);
 	}
 	memory_bm_position_reset(bm);
-	pr_info("PM: free pages cleared after restore\n");
+	pr_info("free pages cleared after restore\n");
 #endif /* PAGE_POISONING_ZERO */
 }
 

@@ -1690,7 +1692,7 @@ int hibernate_preallocate_memory(void)
 	ktime_t start, stop;
 	int error;
 
-	printk(KERN_INFO "PM: Preallocating image memory... ");
+	pr_info("Preallocating image memory... ");
 	start = ktime_get();
 
 	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);

@@ -1821,13 +1823,13 @@ int hibernate_preallocate_memory(void)
 
  out:
 	stop = ktime_get();
-	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
+	pr_cont("done (allocated %lu pages)\n", pages);
 	swsusp_show_speed(start, stop, pages, "Allocated");
 
 	return 0;
 
  err_out:
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 	swsusp_free();
 	return -ENOMEM;
 }

@@ -1867,8 +1869,8 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
 		free += zone_page_state(zone, NR_FREE_PAGES);
 
 	nr_pages += count_pages_for_highmem(nr_highmem);
-	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
-		 nr_pages, PAGES_FOR_IO, free);
+	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
+		 nr_pages, PAGES_FOR_IO, free);
 
 	return free > nr_pages + PAGES_FOR_IO;
 }

@@ -1961,20 +1963,20 @@ asmlinkage __visible int swsusp_save(void)
 {
 	unsigned int nr_pages, nr_highmem;
 
-	printk(KERN_INFO "PM: Creating hibernation image:\n");
+	pr_info("Creating hibernation image:\n");
 
 	drain_local_pages(NULL);
 	nr_pages = count_data_pages();
 	nr_highmem = count_highmem_pages();
-	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
+	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
 
 	if (!enough_free_mem(nr_pages, nr_highmem)) {
-		printk(KERN_ERR "PM: Not enough free memory\n");
+		pr_err("Not enough free memory\n");
 		return -ENOMEM;
 	}
 
 	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
-		printk(KERN_ERR "PM: Memory allocation failed\n");
+		pr_err("Memory allocation failed\n");
 		return -ENOMEM;
 	}
 

@@ -1995,8 +1997,7 @@ asmlinkage __visible int swsusp_save(void)
 	nr_copy_pages = nr_pages;
 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
 
-	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
-		nr_pages);
+	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
 
 	return 0;
 }

@@ -2170,7 +2171,7 @@ static int check_header(struct swsusp_info *info)
 	if (!reason && info->num_physpages != get_num_physpages())
 		reason = "memory size";
 	if (reason) {
-		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
+		pr_err("Image mismatch: %s\n", reason);
 		return -EPERM;
 	}
 	return 0;

kernel/power/suspend.c

@@ -437,7 +437,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 			error = suspend_ops->enter(state);
 			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
-			events_check_enabled = false;
 		} else if (*wakeup) {
 			error = -EBUSY;
 		}

@@ -582,6 +581,7 @@ static int enter_state(suspend_state_t state)
 	pm_restore_gfp_mask();
 
  Finish:
+	events_check_enabled = false;
 	pm_pr_dbg("Finishing wakeup.\n");
 	suspend_finish();
  Unlock:

kernel/power/swap.c

@@ -12,6 +12,8 @@
  *
  */
 
+#define pr_fmt(fmt) "PM: " fmt
+
 #include <linux/module.h>
 #include <linux/file.h>
 #include <linux/delay.h>

@@ -241,9 +243,9 @@ static void hib_end_io(struct bio *bio)
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
 	if (bio->bi_status) {
-		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
-				MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
-				(unsigned long long)bio->bi_iter.bi_sector);
+		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
+			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
+			 (unsigned long long)bio->bi_iter.bi_sector);
 	}
 
 	if (bio_data_dir(bio) == WRITE)

@@ -273,8 +275,8 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 	bio_set_op_attrs(bio, op, op_flags);
 
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
-		printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
-			(unsigned long long)bio->bi_iter.bi_sector);
+		pr_err("Adding page to bio failed at %llu\n",
+		       (unsigned long long)bio->bi_iter.bi_sector);
 		bio_put(bio);
 		return -EFAULT;
 	}

@@ -319,7 +321,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
 	} else {
-		printk(KERN_ERR "PM: Swap header not found!\n");
+		pr_err("Swap header not found!\n");
 		error = -ENODEV;
 	}
 	return error;

@@ -413,8 +415,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
 	ret = swsusp_swap_check();
 	if (ret) {
 		if (ret != -ENOSPC)
-			printk(KERN_ERR "PM: Cannot find swap device, try "
-					"swapon -a.\n");
+			pr_err("Cannot find swap device, try swapon -a\n");
 		return ret;
 	}
 	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);

@@ -491,9 +492,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
 {
 	if (!error) {
 		flush_swap_writer(handle);
-		printk(KERN_INFO "PM: S");
+		pr_info("S");
 		error = mark_swapfiles(handle, flags);
-		printk("|\n");
+		pr_cont("|\n");
 	}
 
 	if (error)

@@ -542,7 +543,7 @@ static int save_image(struct swap_map_handle *handle,
 
 	hib_init_batch(&hb);
 
-	printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
+	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
 	m = nr_to_write / 10;
 	if (!m)

@@ -557,8 +558,8 @@ static int save_image(struct swap_map_handle *handle,
 		if (ret)
 			break;
 		if (!(nr_pages % m))
-			printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
-			       nr_pages / m * 10);
+			pr_info("Image saving progress: %3d%%\n",
+				nr_pages / m * 10);
 		nr_pages++;
 	}
 	err2 = hib_wait_io(&hb);

@@ -566,7 +567,7 @@ static int save_image(struct swap_map_handle *handle,
 	if (!ret)
 		ret = err2;
 	if (!ret)
-		printk(KERN_INFO "PM: Image saving done.\n");
+		pr_info("Image saving done\n");
 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 	return ret;
 }

@@ -692,14 +693,14 @@ static int save_image_lzo(struct swap_map_handle *handle,
 
 	page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 	if (!page) {
-		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+		pr_err("Failed to allocate LZO page\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}
 
 	data = vmalloc(sizeof(*data) * nr_threads);
 	if (!data) {
-		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
+		pr_err("Failed to allocate LZO data\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}

@@ -708,7 +709,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 
 	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
 	if (!crc) {
-		printk(KERN_ERR "PM: Failed to allocate crc\n");
+		pr_err("Failed to allocate crc\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}

@@ -726,8 +727,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
					    "image_compress/%u", thr);
 		if (IS_ERR(data[thr].thr)) {
 			data[thr].thr = NULL;
-			printk(KERN_ERR
-			       "PM: Cannot start compression threads\n");
+			pr_err("Cannot start compression threads\n");
 			ret = -ENOMEM;
 			goto out_clean;
 		}

@@ -749,7 +749,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
 	if (IS_ERR(crc->thr)) {
 		crc->thr = NULL;
-		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
+		pr_err("Cannot start CRC32 thread\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}

@@ -760,10 +760,9 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	 */
 	handle->reqd_free_pages = reqd_free_pages();
 
-	printk(KERN_INFO
-		"PM: Using %u thread(s) for compression.\n"
-		"PM: Compressing and saving image data (%u pages)...\n",
-		nr_threads, nr_to_write);
+	pr_info("Using %u thread(s) for compression\n", nr_threads);
+	pr_info("Compressing and saving image data (%u pages)...\n",
+		nr_to_write);
 	m = nr_to_write / 10;
 	if (!m)
 		m = 1;

@@ -783,10 +782,8 @@ static int save_image_lzo(struct swap_map_handle *handle,
				       data_of(*snapshot), PAGE_SIZE);
 
				if (!(nr_pages % m))
-					printk(KERN_INFO
-					       "PM: Image saving progress: "
-					       "%3d%%\n",
-					       nr_pages / m * 10);
+					pr_info("Image saving progress: %3d%%\n",
+						nr_pages / m * 10);
				nr_pages++;
 			}
 			if (!off)

@@ -813,15 +810,14 @@ static int save_image_lzo(struct swap_map_handle *handle,
 			ret = data[thr].ret;
 
 			if (ret < 0) {
-				printk(KERN_ERR "PM: LZO compression failed\n");
+				pr_err("LZO compression failed\n");
 				goto out_finish;
 			}
 
 			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(data[thr].unc_len))) {
-				printk(KERN_ERR
-				       "PM: Invalid LZO compressed length\n");
+				pr_err("Invalid LZO compressed length\n");
 				ret = -1;
 				goto out_finish;
 			}

@@ -857,7 +853,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	if (!ret)
 		ret = err2;
 	if (!ret)
-		printk(KERN_INFO "PM: Image saving done.\n");
+		pr_info("Image saving done\n");
 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 out_clean:
 	if (crc) {

@@ -888,7 +884,7 @@ static int enough_swap(unsigned int nr_pages, unsigned int flags)
 	unsigned int free_swap = count_swap_pages(root_swap, 1);
 	unsigned int required;
 
-	pr_debug("PM: Free swap pages: %u\n", free_swap);
+	pr_debug("Free swap pages: %u\n", free_swap);
 
 	required = PAGES_FOR_IO + nr_pages;
 	return free_swap > required;

@@ -915,12 +911,12 @@ int swsusp_write(unsigned int flags)
 	pages = snapshot_get_image_size();
 	error = get_swap_writer(&handle);
 	if (error) {
-		printk(KERN_ERR "PM: Cannot get swap writer\n");
+		pr_err("Cannot get swap writer\n");
 		return error;
 	}
 	if (flags & SF_NOCOMPRESS_MODE) {
 		if (!enough_swap(pages, flags)) {
-			printk(KERN_ERR "PM: Not enough free swap\n");
+			pr_err("Not enough free swap\n");
 			error = -ENOSPC;
 			goto out_finish;
 		}

@@ -1068,8 +1064,7 @@ static int load_image(struct swap_map_handle *handle,
 	hib_init_batch(&hb);
 
 	clean_pages_on_read = true;
-	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
-		nr_to_read);
+	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
 	m = nr_to_read / 10;
 	if (!m)
 		m = 1;

@@ -1087,8 +1082,8 @@ static int load_image(struct swap_map_handle *handle,
 		if (ret)
 			break;
 		if (!(nr_pages % m))
-			printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
-			       nr_pages / m * 10);
+			pr_info("Image loading progress: %3d%%\n",
+				nr_pages / m * 10);
 		nr_pages++;
 	}
 	err2 = hib_wait_io(&hb);

@@ -1096,7 +1091,7 @@ static int load_image(struct swap_map_handle *handle,
 	if (!ret)
 		ret = err2;
 	if (!ret) {
-		printk(KERN_INFO "PM: Image loading done.\n");
+		pr_info("Image loading done\n");
 		snapshot_write_finalize(snapshot);
 		if (!snapshot_image_loaded(snapshot))
 			ret = -ENODATA;

@@ -1190,14 +1185,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
 
 	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
 	if (!page) {
-		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+		pr_err("Failed to allocate LZO page\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}
 
 	data = vmalloc(sizeof(*data) * nr_threads);
 	if (!data) {
-		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
+		pr_err("Failed to allocate LZO data\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}

@@ -1206,7 +1201,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 
 	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
 	if (!crc) {
-		printk(KERN_ERR "PM: Failed to allocate crc\n");
+		pr_err("Failed to allocate crc\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}

@@ -1226,8 +1221,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
					    "image_decompress/%u", thr);
 		if (IS_ERR(data[thr].thr)) {
 			data[thr].thr = NULL;
-			printk(KERN_ERR
-			       "PM: Cannot start decompression threads\n");
+			pr_err("Cannot start decompression threads\n");
 			ret = -ENOMEM;
 			goto out_clean;
 		}

@@ -1249,7 +1243,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
 	if (IS_ERR(crc->thr)) {
 		crc->thr = NULL;
-		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
+		pr_err("Cannot start CRC32 thread\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}

@@ -1274,8 +1268,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 		if (!page[i]) {
 			if (i < LZO_CMP_PAGES) {
 				ring_size = i;
-				printk(KERN_ERR
-				       "PM: Failed to allocate LZO pages\n");
+				pr_err("Failed to allocate LZO pages\n");
 				ret = -ENOMEM;
 				goto out_clean;
 			} else {

@@ -1285,10 +1278,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	}
 	want = ring_size = i;
 
-	printk(KERN_INFO
-		"PM: Using %u thread(s) for decompression.\n"
-		"PM: Loading and decompressing image data (%u pages)...\n",
-		nr_threads, nr_to_read);
+	pr_info("Using %u thread(s) for decompression\n", nr_threads);
+	pr_info("Loading and decompressing image data (%u pages)...\n",
+		nr_to_read);
 	m = nr_to_read / 10;
 	if (!m)
 		m = 1;

@@ -1348,8 +1340,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(LZO_UNC_SIZE))) {
-				printk(KERN_ERR
-				       "PM: Invalid LZO compressed length\n");
+				pr_err("Invalid LZO compressed length\n");
 				ret = -1;
 				goto out_finish;
 			}

@@ -1400,16 +1391,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
 			ret = data[thr].ret;
 
 			if (ret < 0) {
-				printk(KERN_ERR
-				       "PM: LZO decompression failed\n");
+				pr_err("LZO decompression failed\n");
 				goto out_finish;
 			}
 
 			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > LZO_UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
-				printk(KERN_ERR
-				       "PM: Invalid LZO uncompressed length\n");
+				pr_err("Invalid LZO uncompressed length\n");
 				ret = -1;
 				goto out_finish;
 			}

@@ -1420,10 +1409,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
				       data[thr].unc + off, PAGE_SIZE);
 
				if (!(nr_pages % m))
-					printk(KERN_INFO
-					       "PM: Image loading progress: "
-					       "%3d%%\n",
-					       nr_pages / m * 10);
+					pr_info("Image loading progress: %3d%%\n",
+						nr_pages / m * 10);
				nr_pages++;
 
				ret = snapshot_write_next(snapshot);

@@ -1448,15 +1435,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	}
 	stop = ktime_get();
 	if (!ret) {
-		printk(KERN_INFO "PM: Image loading done.\n");
+		pr_info("Image loading done\n");
 		snapshot_write_finalize(snapshot);
 		if (!snapshot_image_loaded(snapshot))
 			ret = -ENODATA;
 		if (!ret) {
 			if (swsusp_header->flags & SF_CRC32_MODE) {
				if(handle->crc32 != swsusp_header->crc32) {
-					printk(KERN_ERR
-					       "PM: Invalid image CRC32!\n");
+					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
 			}

@@ -1513,9 +1499,9 @@ int swsusp_read(unsigned int *flags_p)
 	swap_reader_finish(&handle);
 end:
 	if (!error)
-		pr_debug("PM: Image successfully loaded\n");
+		pr_debug("Image successfully loaded\n");
 	else
-		pr_debug("PM: Error %d resuming\n", error);
+		pr_debug("Error %d resuming\n", error);
 	return error;
 }
 

@@ -1552,13 +1538,13 @@ int swsusp_check(void)
 		if (error)
 			blkdev_put(hib_resume_bdev, FMODE_READ);
 		else
-			pr_debug("PM: Image signature found, resuming\n");
+			pr_debug("Image signature found, resuming\n");
 	} else {
 		error = PTR_ERR(hib_resume_bdev);
 	}
 
 	if (error)
-		pr_debug("PM: Image not found (code %d)\n", error);
+		pr_debug("Image not found (code %d)\n", error);
 
 	return error;
 }

@@ -1570,7 +1556,7 @@ int swsusp_check(void)
 void swsusp_close(fmode_t mode)
 {
 	if (IS_ERR(hib_resume_bdev)) {
-		pr_debug("PM: Image device not initialised\n");
+		pr_debug("Image device not initialised\n");
 		return;
 	}
 

@@ -1594,7 +1580,7 @@ int swsusp_unmark(void)
				      swsusp_resume_block,
				      swsusp_header, NULL);
 	} else {
-		printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
+		pr_err("Cannot find swsusp signature!\n");
 		error = -ENODEV;
 	}
 