libnvdimm, pmem: direct map legacy pmem by default
The expectation is that the legacy / non-standard pmem discovery method (e820 type-12) will only ever be used to describe small quantities of persistent memory. Larger capacities will be described via the ACPI NFIT. When "allocate struct page from pmem" support is added this default policy can be overridden by assigning a legacy pmem namespace to a pfn device; however, this would only be necessary if a platform used the legacy mechanism to define a very large range. Cc: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
parent
32ab0a3f51
commit
004f1afbe1
|
@ -49,6 +49,7 @@ static int e820_pmem_probe(struct platform_device *pdev)
|
|||
ndr_desc.res = p;
|
||||
ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
|
||||
ndr_desc.numa_node = NUMA_NO_NODE;
|
||||
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
|
||||
if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
|
||||
goto err;
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pmem.h>
|
||||
#include <linux/nd.h>
|
||||
#include "nd-core.h"
|
||||
#include "nd.h"
|
||||
|
@ -76,11 +77,32 @@ static bool is_namespace_io(struct device *dev)
|
|||
return dev ? dev->type == &namespace_io_device_type : false;
|
||||
}
|
||||
|
||||
bool pmem_should_map_pages(struct device *dev)
|
||||
{
|
||||
struct nd_region *nd_region = to_nd_region(dev->parent);
|
||||
|
||||
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
|
||||
return false;
|
||||
|
||||
if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
|
||||
return false;
|
||||
|
||||
if (is_nd_pfn(dev) || is_nd_btt(dev))
|
||||
return false;
|
||||
|
||||
#ifdef ARCH_MEMREMAP_PMEM
|
||||
return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
EXPORT_SYMBOL(pmem_should_map_pages);
|
||||
|
||||
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
|
||||
char *name)
|
||||
{
|
||||
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
|
||||
const char *suffix = "";
|
||||
const char *suffix = NULL;
|
||||
|
||||
if (ndns->claim) {
|
||||
if (is_nd_btt(ndns->claim))
|
||||
|
@ -93,13 +115,16 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
|
|||
dev_name(ndns->claim));
|
||||
}
|
||||
|
||||
if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev))
|
||||
sprintf(name, "pmem%d%s", nd_region->id, suffix);
|
||||
else if (is_namespace_blk(&ndns->dev)) {
|
||||
if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
|
||||
if (!suffix && pmem_should_map_pages(&ndns->dev))
|
||||
suffix = "m";
|
||||
sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
|
||||
} else if (is_namespace_blk(&ndns->dev)) {
|
||||
struct nd_namespace_blk *nsblk;
|
||||
|
||||
nsblk = to_nd_namespace_blk(&ndns->dev);
|
||||
sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix);
|
||||
sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
|
||||
suffix ? suffix : "");
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -100,6 +100,7 @@ struct nd_region {
|
|||
struct ida ns_ida;
|
||||
struct ida btt_ida;
|
||||
struct ida pfn_ida;
|
||||
unsigned long flags;
|
||||
struct device *ns_seed;
|
||||
struct device *btt_seed;
|
||||
struct device *pfn_seed;
|
||||
|
@ -276,4 +277,5 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
|
|||
void nd_iostat_end(struct bio *bio, unsigned long start);
|
||||
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
|
||||
const u8 *nd_dev_to_uuid(struct device *dev);
|
||||
bool pmem_should_map_pages(struct device *dev);
|
||||
#endif /* __ND_H__ */
|
||||
|
|
|
@ -148,9 +148,18 @@ static struct pmem_device *pmem_alloc(struct device *dev,
|
|||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
pmem->virt_addr = memremap_pmem(dev, pmem->phys_addr, pmem->size);
|
||||
if (!pmem->virt_addr)
|
||||
return ERR_PTR(-ENXIO);
|
||||
if (pmem_should_map_pages(dev)) {
|
||||
void *addr = devm_memremap_pages(dev, res);
|
||||
|
||||
if (IS_ERR(addr))
|
||||
return addr;
|
||||
pmem->virt_addr = (void __pmem *) addr;
|
||||
} else {
|
||||
pmem->virt_addr = memremap_pmem(dev, pmem->phys_addr,
|
||||
pmem->size);
|
||||
if (!pmem->virt_addr)
|
||||
return ERR_PTR(-ENXIO);
|
||||
}
|
||||
|
||||
return pmem;
|
||||
}
|
||||
|
|
|
@ -758,6 +758,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
|
|||
nd_region->provider_data = ndr_desc->provider_data;
|
||||
nd_region->nd_set = ndr_desc->nd_set;
|
||||
nd_region->num_lanes = ndr_desc->num_lanes;
|
||||
nd_region->flags = ndr_desc->flags;
|
||||
nd_region->ro = ro;
|
||||
nd_region->numa_node = ndr_desc->numa_node;
|
||||
ida_init(&nd_region->ns_ida);
|
||||
|
|
|
@ -31,6 +31,9 @@ enum {
|
|||
ND_CMD_ARS_STATUS_MAX = SZ_4K,
|
||||
ND_MAX_MAPPINGS = 32,
|
||||
|
||||
/* region flag indicating to direct-map persistent memory by default */
|
||||
ND_REGION_PAGEMAP = 0,
|
||||
|
||||
/* mark newly adjusted resources as requiring a label update */
|
||||
DPA_RESOURCE_ADJUSTED = 1 << 0,
|
||||
};
|
||||
|
@ -91,6 +94,7 @@ struct nd_region_desc {
|
|||
void *provider_data;
|
||||
int num_lanes;
|
||||
int numa_node;
|
||||
unsigned long flags;
|
||||
};
|
||||
|
||||
struct nvdimm_bus;
|
||||
|
|
Loading…
Reference in New Issue
Block a user