Microblaze patches for 5.4-rc1

- Clean up reset gpio handler
- Defconfig updates
- Add support for 8 byte get_user()
- Switch to generic dma code

In merge please fix dma_atomic_pool_init, reported also by:
https://lkml.org/lkml/2019/9/2/393 or
https://lore.kernel.org/linux-next/20190902214011.2a5400c9@canb.auug.org.au/

-----BEGIN PGP SIGNATURE-----

iF0EABECAB0WIQQbPNTMvXmYlBPRwx7KSWXLKUoMIQUCXYnguwAKCRDKSWXLKUoM
IaF8AKCawdYH+58xRg7riR7Evbv2kM0ghwCfXosnu6Ncv07UEY9Tv5zx/qibafk=
=yI3X
-----END PGP SIGNATURE-----

Merge tag 'microblaze-v5.4-rc1' of git://git.monstr.eu/linux-2.6-microblaze

Pull Microblaze updates from Michal Simek:

 - clean up reset gpio handler
 - defconfig updates
 - add support for 8 byte get_user()
 - switch to generic dma code

* tag 'microblaze-v5.4-rc1' of git://git.monstr.eu/linux-2.6-microblaze:
  microblaze: Switch to standard restart handler
  microblaze: defconfig synchronization
  microblaze: Enable Xilinx AXI emac driver by default
  arch/microblaze: support get_user() of size 8 bytes
  microblaze: remove ioremap_fullcache
  microblaze: use the generic dma coherent remap allocator
  microblaze/nommu: use the generic uncached segment support
commit 5184d44960
@@ -5,15 +5,18 @@ config MICROBLAZE
    select ARCH_NO_SWAP
    select ARCH_HAS_BINFMT_FLAT if !MMU
    select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
    select ARCH_HAS_DMA_PREP_COHERENT
    select ARCH_HAS_GCOV_PROFILE_ALL
    select ARCH_HAS_SYNC_DMA_FOR_CPU
    select ARCH_HAS_SYNC_DMA_FOR_DEVICE
    select ARCH_HAS_UNCACHED_SEGMENT if !MMU
    select ARCH_MIGHT_HAVE_PC_PARPORT
    select ARCH_WANT_IPC_PARSE_VERSION
    select BUILDTIME_EXTABLE_SORT
    select TIMER_OF
    select CLONE_BACKWARDS3
    select COMMON_CLK
    select DMA_DIRECT_REMAP if MMU
    select GENERIC_ATOMIC64
    select GENERIC_CLOCKEVENTS
    select GENERIC_CPU_DEVICES
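The Kconfig hunk above (this looks like arch/microblaze/Kconfig) opts the architecture into the generic DMA remap allocator and the generic uncached segment for !MMU via DMA_DIRECT_REMAP, ARCH_HAS_DMA_PREP_COHERENT and ARCH_HAS_UNCACHED_SEGMENT. Drivers are unaffected: coherent buffers are still obtained through the regular DMA API. A minimal sketch, assuming a hypothetical platform driver with a valid struct device:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper: allocate and release one coherent DMA buffer.
 * On microblaze this now ends up in the generic remap / uncached-segment
 * code selected by the Kconfig hunk above.
 */
static void *example_alloc_coherent(struct device *dev, size_t size,
                                    dma_addr_t *handle)
{
    /* Returns a CPU pointer to uncached memory and fills *handle with
     * the bus address the device should use. */
    return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void example_free_coherent(struct device *dev, size_t size,
                                  void *cpu_addr, dma_addr_t handle)
{
    dma_free_coherent(dev, size, cpu_addr, handle);
}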
@@ -18,7 +18,6 @@ / {
    #address-cells = <1>;
    #size-cells = <1>;
    compatible = "xlnx,microblaze";
    hard-reset-gpios = <&LEDs_8Bit 2 1>;
    model = "testing";
    DDR2_SDRAM: memory@90000000 {
        device_type = "memory";

@@ -281,6 +280,21 @@ green {
            gpios = <&LEDs_8Bit 7 1>;
        };
    } ;

    gpio-restart {
        compatible = "gpio-restart";
        /*
         * FIXME: is this active low or active high?
         * the current flag (1) indicates active low.
         * delay measures are templates, should be adjusted
         * to datasheet or trial-and-error with real hardware.
         */
        gpios = <&LEDs_8Bit 2 1>;
        active-delay = <100>;
        inactive-delay = <10>;
        wait-delay = <100>;
    };

    RS232_Uart_1: serial@84000000 {
        clock-frequency = <125000000>;
        compatible = "xlnx,xps-uartlite-1.00.a";
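The device-tree hunk above (apparently the reference system.dts) drops the custom hard-reset-gpios property and instead describes the reset line with a standard gpio-restart node, which the CONFIG_POWER_RESET_GPIO_RESTART driver picks up. That driver plugs into the kernel's restart notifier chain; a hedged sketch of that generic mechanism, with illustrative names only, looks roughly like this:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/*
 * Illustrative restart handler: anything registered here is called by
 * do_kernel_restart() when the machine reboots. The gpio-restart driver
 * registers a handler like this and toggles the GPIO named in the DT node.
 */
static int example_restart_handler(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
    /* toggle the board's reset hardware here */
    return NOTIFY_DONE;
}

static struct notifier_block example_restart_nb = {
    .notifier_call = example_restart_handler,
    .priority      = 128,   /* typical default priority for board handlers */
};

static int __init example_restart_init(void)
{
    return register_restart_handler(&example_restart_nb);
}
device_initcall(example_restart_init);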
@@ -5,15 +5,10 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_BASE_FULL is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_EFI_PARTITION is not set
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1

@@ -25,14 +20,19 @@ CONFIG_MMU=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE_FORCE=y
CONFIG_HIGHMEM=y
CONFIG_PCI=y
CONFIG_PCI_XILINX=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_EFI_PARTITION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
CONFIG_BRIDGE=m
CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y

@@ -41,6 +41,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
CONFIG_XILINX_EMACLITE=y
CONFIG_XILINX_AXI_EMAC=y
CONFIG_XILINX_LL_TEMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set

@@ -59,6 +60,8 @@ CONFIG_SPI_XILINX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_XILINX=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_GPIO_RESTART=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_XILINX_WATCHDOG=y

@@ -74,8 +77,8 @@ CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_CIFS=y
CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
CONFIG_ENCRYPTED_KEYS=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SLAB=y
CONFIG_DETECT_HUNG_TASK=y

@@ -83,6 +86,3 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_KGDB=y
CONFIG_KGDB_TESTS=y
CONFIG_KGDB_KDB=y
CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=y
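Both defconfig sections (this one appears to be the MMU defconfig, the next the no-MMU one) keep CONFIG_IKCONFIG_PROC=y, so the resulting kernel exposes its configuration at /proc/config.gz. A small user-space sketch for checking on the target which Xilinx options ended up enabled, assuming zlib is available; the file name and filter are the only inputs:

/* Hypothetical check: print CONFIG_XILINX_* lines from /proc/config.gz.
 * Build with: cc config_check.c -lz */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
    gzFile cfg = gzopen("/proc/config.gz", "rb");
    char line[512];

    if (!cfg) {
        fprintf(stderr, "cannot open /proc/config.gz (is CONFIG_IKCONFIG_PROC set?)\n");
        return 1;
    }
    while (gzgets(cfg, line, sizeof(line)))
        if (strstr(line, "CONFIG_XILINX"))
            fputs(line, stdout);
    gzclose(cfg);
    return 0;
}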
@@ -7,15 +7,10 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_BASE_FULL is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_EFI_PARTITION is not set
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1

@@ -25,13 +20,18 @@ CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
CONFIG_HZ_100=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE_FORCE=y
CONFIG_PCI=y
CONFIG_PCI_XILINX=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_EFI_PARTITION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y

@@ -62,6 +62,8 @@ CONFIG_SPI_XILINX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_XILINX=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_GPIO_RESTART=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_XILINX_WATCHDOG=y

@@ -75,11 +77,6 @@ CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SLAB=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=y
CONFIG_CRYPTO_ECB=y

@@ -87,3 +84,7 @@ CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SLAB=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_SPINLOCK=y
@@ -40,7 +40,6 @@ extern void iounmap(volatile void __iomem *addr);

extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_fullcache(addr, size) ioremap((addr), (size))
#define ioremap_wc(addr, size) ioremap((addr), (size))
#define ioremap_wt(addr, size) ioremap((addr), (size))
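The io.h hunk above removes the unused ioremap_fullcache() alias; drivers simply use ioremap() plus the regular MMIO accessors. A minimal sketch, with a made-up register offset and function name:

#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_REG_CTRL 0x0   /* illustrative register offset */

/* Map a device's register window, poke one register, and unmap again. */
static int example_mmio_poke(phys_addr_t base, unsigned long size)
{
    void __iomem *regs = ioremap(base, size);

    if (!regs)
        return -ENOMEM;

    writel(0x1, regs + EXAMPLE_REG_CTRL);   /* e.g. set an enable bit */
    (void)readl(regs + EXAMPLE_REG_CTRL);   /* read back to post the write */

    iounmap(regs);
    return 0;
}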
@@ -163,44 +163,15 @@ extern long __user_bad(void);
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr) \
    __get_user_check((x), (ptr), sizeof(*(ptr)))

#define __get_user_check(x, ptr, size) \
({ \
    unsigned long __gu_val = 0; \
    const typeof(*(ptr)) __user *__gu_addr = (ptr); \
    int __gu_err = 0; \
    \
    if (access_ok(__gu_addr, size)) { \
        switch (size) { \
        case 1: \
            __get_user_asm("lbu", __gu_addr, __gu_val, \
                           __gu_err); \
            break; \
        case 2: \
            __get_user_asm("lhu", __gu_addr, __gu_val, \
                           __gu_err); \
            break; \
        case 4: \
            __get_user_asm("lw", __gu_addr, __gu_val, \
                           __gu_err); \
            break; \
        default: \
            __gu_err = __user_bad(); \
            break; \
        } \
    } else { \
        __gu_err = -EFAULT; \
    } \
    x = (__force typeof(*(ptr)))__gu_val; \
    __gu_err; \
#define get_user(x, ptr) ({ \
    const typeof(*(ptr)) __user *__gu_ptr = (ptr); \
    access_ok(__gu_ptr, sizeof(*__gu_ptr)) ? \
        __get_user(x, __gu_ptr) : -EFAULT; \
})

#define __get_user(x, ptr) \
({ \
    unsigned long __gu_val = 0; \
    /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
    long __gu_err; \
    switch (sizeof(*(ptr))) { \
    case 1: \

@@ -212,6 +183,11 @@ extern long __user_bad(void);
    case 4: \
        __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
        break; \
    case 8: \
        __gu_err = __copy_from_user(&__gu_val, ptr, 8); \
        if (__gu_err) \
            __gu_err = -EFAULT; \
        break; \
    default: \
        /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
    } \
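With the case 8 branch added to __get_user() in the hunk above (this is the uaccess get_user() path), kernel code can fetch a 64-bit value from user space in one call instead of open-coding copy_from_user(). A short hedged sketch of a caller, with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Hypothetical ioctl-style helper: read a u64 argument from user space.
 * get_user() returns 0 on success or -EFAULT, and with this series it
 * also handles sizeof(*ptr) == 8 on microblaze.
 */
static int example_read_user_u64(u64 __user *uptr, u64 *out)
{
    u64 val;

    if (get_user(val, uptr))
        return -EFAULT;

    *out = val;
    return 0;
}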
@@ -8,83 +8,9 @@
 */

#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of_platform.h>

/* Trigger specific functions */
#ifdef CONFIG_GPIOLIB

#include <linux/of_gpio.h>

static int handle; /* reset pin handle */
static unsigned int reset_val;

static int of_platform_reset_gpio_probe(void)
{
    int ret;
    handle = of_get_named_gpio(of_find_node_by_path("/"),
                               "hard-reset-gpios", 0);

    if (!gpio_is_valid(handle)) {
        pr_info("Skipping unavailable RESET gpio %d (%s)\n",
                handle, "reset");
        return -ENODEV;
    }

    ret = gpio_request(handle, "reset");
    if (ret < 0) {
        pr_info("GPIO pin is already allocated\n");
        return ret;
    }

    /* get current setup value */
    reset_val = gpio_get_value(handle);
    /* FIXME maybe worth to perform any action */
    pr_debug("Reset: Gpio output state: 0x%x\n", reset_val);

    /* Setup GPIO as output */
    ret = gpio_direction_output(handle, 0);
    if (ret < 0)
        goto err;

    /* Setup output direction */
    gpio_set_value(handle, 0);

    pr_info("RESET: Registered gpio device: %d, current val: %d\n",
            handle, reset_val);
    return 0;
err:
    gpio_free(handle);
    return ret;
}
device_initcall(of_platform_reset_gpio_probe);


static void gpio_system_reset(void)
{
    if (gpio_is_valid(handle))
        gpio_set_value(handle, 1 - reset_val);
    else
        pr_notice("Reset GPIO unavailable - halting!\n");
}
#else
static void gpio_system_reset(void)
{
    pr_notice("No reset GPIO present - halting!\n");
}

void of_platform_reset_gpio_probe(void)
{
    return;
}
#endif

void machine_restart(char *cmd)
{
    pr_notice("Machine restart...\n");
    gpio_system_reset();
    while (1)
        ;
}
#include <linux/reboot.h>

void machine_shutdown(void)
{

@@ -106,3 +32,12 @@ void machine_power_off(void)
    while (1)
        ;
}

void machine_restart(char *cmd)
{
    do_kernel_restart(cmd);
    /* Give the restart hook 1 s to take us down */
    mdelay(1000);
    pr_emerg("Reboot failed -- System halted\n");
    while (1);
}
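The removed probe above was built on the legacy integer-GPIO API (of_get_named_gpio(), gpio_request(), gpio_set_value()); the new machine_restart() just delegates to do_kernel_restart(), so the board-specific toggling now lives in the generic gpio-restart driver. For comparison only, a hedged sketch of how a driver would drive such a reset line today with the descriptor-based GPIO API (illustrative names, not part of this series):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/*
 * Hypothetical platform-driver fragment: request the reset line described
 * by a "reset-gpios" DT property and pulse it. Polarity (active low/high)
 * comes from the device-tree flags, unlike the legacy code above.
 */
static int example_pulse_reset(struct device *dev)
{
    struct gpio_desc *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

    if (IS_ERR(reset))
        return PTR_ERR(reset);

    gpiod_set_value(reset, 1);  /* assert reset */
    mdelay(100);                /* hold it briefly */
    gpiod_set_value(reset, 0);  /* release */
    return 0;
}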
@@ -4,217 +4,56 @@
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 */

#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/dma-noncoherent.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't relate on ram size */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region. So, memory accessed
 * in this mirror region will not be cached. It's alloced from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        gfp_t gfp, unsigned long attrs)
void arch_dma_prep_coherent(struct page *page, size_t size)
{
    unsigned long order, vaddr;
    void *ret;
    unsigned int i, err = 0;
    struct page *page, *end;
    phys_addr_t paddr = page_to_phys(page);

#ifdef CONFIG_MMU
    phys_addr_t pa;
    struct vm_struct *area;
    unsigned long va;
#endif

    if (in_interrupt())
        BUG();

    /* Only allocate page size areas. */
    size = PAGE_ALIGN(size);
    order = get_order(size);

    vaddr = __get_free_pages(gfp | __GFP_ZERO, order);
    if (!vaddr)
        return NULL;

    /*
     * we need to ensure that there are no cachelines in use,
     * or worse dirty in this area.
     */
    flush_dcache_range(virt_to_phys((void *)vaddr),
                       virt_to_phys((void *)vaddr) + size);
    flush_dcache_range(paddr, paddr + size);
}

#ifndef CONFIG_MMU
    ret = (void *)vaddr;
    /*
     * Here's the magic! Note if the uncached shadow is not implemented,
     * it's up to the calling code to also test that condition and make
     * other arranegments, such as manually flushing the cache and so on.
     */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
    ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
    if ((unsigned int)ret > cpuinfo.dcache_base &&
        (unsigned int)ret < cpuinfo.dcache_high)
/*
 * Consistent memory allocators. Used for DMA devices that want to share
 * uncached memory with the processor core. My crufty no-MMU approach is
 * simple. In the HW platform we can optionally mirror the DDR up above the
 * processor cacheable region. So, memory accessed in this mirror region will
 * not be cached. It's alloced from the same pool as normal memory, but the
 * handle we return is shifted up into the uncached region. This will no doubt
 * cause big problems if memory allocated here is not also freed properly. -- JW
 *
 * I have to use dcache values because I can't relate on ram size:
 */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#else
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */

void *uncached_kernel_address(void *ptr)
{
    unsigned long addr = (unsigned long)ptr;

    addr |= UNCACHED_SHADOW_MASK;
    if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
        pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

    /* dma_handle is same as physical (shadowed) address */
    *dma_handle = (dma_addr_t)ret;
#else
    /* Allocate some common virtual space to map the new pages. */
    area = get_vm_area(size, VM_ALLOC);
    if (!area) {
        free_pages(vaddr, order);
        return NULL;
    }
    va = (unsigned long) area->addr;
    ret = (void *)va;

    /* This gives us the real physical address of the first page. */
    *dma_handle = pa = __virt_to_phys(vaddr);
#endif

    /*
     * free wasted pages. We skip the first page since we know
     * that it will have count = 1 and won't require freeing.
     * We also mark the pages in use as reserved so that
     * remap_page_range works.
     */
    page = virt_to_page(vaddr);
    end = page + (1 << order);

    split_page(page, order);

    for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
        /* MS: This is the whole magic - use cache inhibit pages */
        err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

        SetPageReserved(page);
        page++;
    }

    /* Free the otherwise unused pages. */
    while (page < end) {
        __free_page(page);
        page++;
    }

    if (err) {
        free_pages(vaddr, order);
        return NULL;
    }

    return ret;
    return (void *)addr;
}

#ifdef CONFIG_MMU
static pte_t *consistent_virt_to_pte(void *vaddr)
void *cached_kernel_address(void *ptr)
{
    unsigned long addr = (unsigned long)vaddr;
    unsigned long addr = (unsigned long)ptr;

    return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
}

long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
        dma_addr_t dma_addr)
{
    pte_t *ptep = consistent_virt_to_pte(vaddr);

    if (pte_none(*ptep) || !pte_present(*ptep))
        return 0;

    return pte_pfn(*ptep);
}
#endif

/*
 * free page(s) as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_addr, unsigned long attrs)
{
    struct page *page;

    if (in_interrupt())
        BUG();

    size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
    /* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
    vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
    page = virt_to_page(vaddr);

    do {
        __free_reserved_page(page);
        page++;
    } while (size -= PAGE_SIZE);
#else
    do {
        pte_t *ptep = consistent_virt_to_pte(vaddr);
        unsigned long pfn;

        if (!pte_none(*ptep) && pte_present(*ptep)) {
            pfn = pte_pfn(*ptep);
            pte_clear(&init_mm, (unsigned int)vaddr, ptep);
            if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                __free_reserved_page(page);
            }
        }
        vaddr += PAGE_SIZE;
    } while (size -= PAGE_SIZE);

    /* flush tlb */
    flush_tlb_all();
#endif
    return (void *)(addr & ~UNCACHED_SHADOW_MASK);
}
#endif /* CONFIG_MMU */
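The hunk above replaces the hand-rolled arch_dma_alloc()/arch_dma_free() with the small hooks the generic code needs: arch_dma_prep_coherent() flushes the cache, and uncached_kernel_address()/cached_kernel_address() translate between the cached mapping and the uncached DDR shadow on !MMU systems. The address translation is just an OR/AND with the shadow mask; a standalone sketch of that arithmetic, using an assumed mask value purely for illustration:

/* Minimal user-space-style sketch of the uncached-shadow mapping above.
 * The mask value is illustrative; on hardware it is derived from the
 * dcache range (cpuinfo.dcache_high - cpuinfo.dcache_base + 1). */
#include <stdint.h>
#include <stdio.h>

#define UNCACHED_SHADOW_MASK 0x80000000u    /* assumed example value */

static uintptr_t uncached_alias(uintptr_t cached)
{
    return cached | UNCACHED_SHADOW_MASK;   /* what uncached_kernel_address() does */
}

static uintptr_t cached_alias(uintptr_t uncached)
{
    return uncached & ~(uintptr_t)UNCACHED_SHADOW_MASK; /* cached_kernel_address() */
}

int main(void)
{
    uintptr_t buf = 0x10000000u;    /* pretend cached kernel address */

    printf("cached   %#lx\n", (unsigned long)buf);
    printf("uncached %#lx\n", (unsigned long)uncached_alias(buf));
    printf("back     %#lx\n", (unsigned long)cached_alias(uncached_alias(buf)));
    return 0;
}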