Merge branch 'nfs-rdma'

Trond Myklebust 2016-07-24 17:09:02 -04:00
commit 1592c4d62a
205 changed files with 2422 additions and 1703 deletions


@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
MPX-instrumented.
3) The kernel detects that the CPU has MPX, allows the new prctl() to
succeed, and notes the location of the bounds directory. Userspace is
expected to keep the bounds directory at that locationWe note it
expected to keep the bounds directory at that location. We note it
instead of reading it each time because the 'xsave' operation needed
to access the bounds directory register is an expensive operation.
4) If the application needs to spill bounds out of the 4 registers, it
@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
We need to decode MPX instructions to get violation address and
set this address into extended struct siginfo.
The _sigfault feild of struct siginfo is extended as follow:
The _sigfault field of struct siginfo is extended as follow:
87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
88 struct {
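For orientation, the extended layout being described is roughly the sketch below. This is a reconstruction based on the 4.7-era uapi siginfo definition rather than text carried in this commit, so field names beyond the two quoted lines should be treated as assumed:
	/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
	struct {
		void __user *_addr;	/* faulting insn/memory ref. */
		short _addr_lsb;	/* LSB of the reported address */
		struct {		/* used when si_code == SEGV_BNDERR */
			void __user *_lower;	/* MPX lower bound */
			void __user *_upper;	/* MPX upper bound */
		} _addr_bnd;
	} _sigfault;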
@ -240,5 +240,5 @@ them at the same bounds table.
This is allowed architecturally. See more information "Intel(R) Architecture
Instruction Set Extensions Programming Reference" (9.3.4).
However, if users did this, the kernel might be fooled in to unmaping an
However, if users did this, the kernel might be fooled in to unmapping an
in-use bounds table since it does not recognize sharing.


@ -5,7 +5,7 @@ memory, it has two choices:
from areas other than the one we are trying to flush will be
destroyed and must be refilled later, at some cost.
2. Use the invlpg instruction to invalidate a single page at a
time. This could potentialy cost many more instructions, but
time. This could potentially cost many more instructions, but
it is a much more precise operation, causing no collateral
damage to other TLB entries.
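To make the trade-off between the two options concrete, here is a minimal sketch in the spirit of the x86 flush_tlb_mm_range() heuristic. It is illustrative only, not part of the file being patched, and the page-count ceiling of 33 is an assumed default:
	static void flush_range_sketch(unsigned long start, unsigned long end)
	{
		unsigned long addr;

		/* Many pages: one full flush is cheaper, despite the collateral damage. */
		if (((end - start) >> PAGE_SHIFT) > 33) {
			local_flush_tlb();
			return;
		}

		/* Few pages: invlpg each one, leaving unrelated TLB entries intact. */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);
	}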
@ -19,7 +19,7 @@ Which method to do depends on a few things:
work.
3. The size of the TLB. The larger the TLB, the more collateral
damage we do with a full flush. So, the larger the TLB, the
more attrative an individual flush looks. Data and
more attractive an individual flush looks. Data and
instructions have separate TLBs, as do different page sizes.
4. The microarchitecture. The TLB has become a multi-level
cache on modern CPUs, and the global flushes have become more


@ -36,7 +36,7 @@ between all CPUs.
check_interval
How often to poll for corrected machine check errors, in seconds
(Note output is hexademical). Default 5 minutes. When the poller
(Note output is hexadecimal). Default 5 minutes. When the poller
finds MCEs it triggers an exponential speedup (poll more often) on
the polling interval. When the poller stops finding MCEs, it
triggers an exponential backoff (poll less often) on the polling
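As a small usage sketch, the value can be read back from sysfs; the machinecheck0 path is assumed from the sysfs layout this file documents and is not shown in the hunk above:
	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/sys/devices/system/machinecheck/machinecheck0/check_interval", "r");

		if (!f)
			return 1;
		if (!fgets(buf, sizeof(buf), f)) {
			fclose(f);
			return 1;
		}
		printf("check_interval: %s", buf);	/* polling interval, in seconds */
		fclose(f);
		return 0;
	}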


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*


@ -80,12 +80,14 @@
#define APM_CPU_PART_POTENZA 0x000
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
#define BRCM_CPU_PART_VULCAN 0x516
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#ifndef __ASSEMBLY__


@ -117,6 +117,8 @@ struct pt_regs {
};
u64 orig_x0;
u64 syscallno;
u64 orig_addr_limit;
u64 unused; // maintain 16 byte alignment
};
#define arch_has_single_step() (1)


@ -60,6 +60,7 @@ int main(void)
DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));


@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_RANGE(MIDR_THUNDERX, 0x00,
(1 << MIDR_VARIANT_SHIFT) | 1),
},
{
/* Cavium ThunderX, T81 pass 1.0 */
.desc = "Cavium erratum 27456",
.capability = ARM64_WORKAROUND_CAVIUM_27456,
MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
},
#endif
{
}


@ -28,6 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
@ -97,7 +98,14 @@
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
.endif
get_thread_info tsk
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
ldr x20, [tsk, #TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
mov x20, #TASK_SIZE_64
str x20, [tsk, #TI_ADDR_LIMIT]
ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
.endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
@ -128,6 +136,14 @@
.endm
.macro kernel_exit, el
.if \el != 0
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
str x20, [tsk, #TI_ADDR_LIMIT]
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
@ -406,7 +422,6 @@ el1_irq:
bl trace_hardirqs_off
#endif
get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT


@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
}
if (permission_fault(esr) && (addr < USER_DS)) {
if (get_fs() == KERNEL_DS)
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
if (!search_exception_tables(regs->pc))


@ -24,7 +24,7 @@ struct mm_struct;
struct vm_area_struct;
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
_CACHE_CACHABLE_NONCOHERENT)
_page_cachable_default)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
(pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
(pgprot_val(newprot) & ~_PAGE_CHG_MASK);
return pmd;
}


@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
#define KERN_VIRT_SIZE __kernel_virt_size
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */
#include <asm/book3s/64/hash.h>


@ -647,7 +647,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
pci_unlock_rescan_remove();
}
} else if (frozen_bus) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
}
/*


@ -47,7 +47,6 @@ static int __init pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware\n");
pci_io_base = ISA_IO_BASE;
/* For now, override phys_mem_access_prot. If we need it,g
* later, we may move that initialization to each ppc_md
*/


@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.regs = regs - 1;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Clear any transactional state, we're exec()ing. The cause is
* not important as there will never be a recheckpoint so it's not
* user visible.
*/
if (MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(0);
#endif
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;


@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)
/* We need to setup MSR for VSX register save instructions. Here we
* also clear the MSR RI since when we do the treclaim, we won't have a
* valid kernel pointer for a while. We clear RI here as it avoids
* adding another mtmsr closer to the treclaim. This makes the region
* maked as non-recoverable wider than it needs to be but it saves on
* inserting another mtmsrd later.
*/
/* We need to setup MSR for VSX register save instructions. */
mfmsr r14
mr r15, r14
ori r15, r15, MSR_FP
li r16, MSR_RI
li r16, 0
ori r16, r16, MSR_EE /* IRQs hard off */
andc r15, r15, r16
oris r15, r15, MSR_VEC@h
@ -176,7 +170,17 @@ dont_backup_fp:
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
/* The moment we treclaim, ALL of our GPRs will switch
/* Clear MSR RI since we are about to change r1, EE is already off. */
li r4, 0
mtmsrd r4, 1
/*
* BE CAREFUL HERE:
* At this point we can't take an SLB miss since we have MSR_RI
* off. Load only to/from the stack/paca which are in SLB bolted regions
* until we turn MSR RI back on.
*
* The moment we treclaim, ALL of our GPRs will switch
* to user register state. (FPRs, CCR etc. also!)
* Use an sprg and a tm_scratch in the PACA to shuffle.
*/
@ -197,6 +201,11 @@ dont_backup_fp:
/* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
/* Reset MSR RI so we can take SLB faults again */
li r11, MSR_RI
mtmsrd r11, 1
mfspr r11, SPRN_PPR
HMT_MEDIUM
@ -397,11 +406,6 @@ restore_gprs:
ld r5, THREAD_TM_DSCR(r3)
ld r6, THREAD_TM_PPR(r3)
/* Clear the MSR RI since we are about to change R1. EE is already off
*/
li r4, 0
mtmsrd r4, 1
REST_GPR(0, r7) /* GPR0 */
REST_2GPRS(2, r7) /* GPR2-3 */
REST_GPR(4, r7) /* GPR4 */
@ -439,10 +443,33 @@ restore_gprs:
ld r6, _CCR(r7)
mtcr r6
REST_GPR(1, r7) /* GPR1 */
REST_GPR(5, r7) /* GPR5-7 */
REST_GPR(6, r7)
ld r7, GPR7(r7)
/*
* Store r1 and r5 on the stack so that we can access them
* after we clear MSR RI.
*/
REST_GPR(5, r7)
std r5, -8(r1)
ld r5, GPR1(r7)
std r5, -16(r1)
REST_GPR(7, r7)
/* Clear MSR RI since we are about to change r1. EE is already off */
li r5, 0
mtmsrd r5, 1
/*
* BE CAREFUL HERE:
* At this point we can't take an SLB miss since we have MSR_RI
* off. Load only to/from the stack/paca which are in SLB bolted regions
* until we turn MSR RI back on.
*/
ld r5, -8(r1)
ld r1, -16(r1)
/* Commit register state as checkpointed state: */
TRECHKPT


@ -922,6 +922,10 @@ void __init hash__early_init_mmu(void)
vmemmap = (struct page *)H_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
#ifdef CONFIG_PCI
pci_io_base = ISA_IO_BASE;
#endif
/* Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before SLB initialization as this is
* currently where the page size encoding is obtained.


@ -328,6 +328,11 @@ void __init radix__early_init_mmu(void)
__vmalloc_end = RADIX_VMALLOC_END;
vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
#ifdef CONFIG_PCI
pci_io_base = ISA_IO_BASE;
#endif
/*
* For now radix also use the same frag size
*/


@ -2319,7 +2319,7 @@ void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stack_frame frame;
const void __user *fp;
const unsigned long __user *fp;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
return;
fp = (void __user *)regs->bp;
fp = (unsigned long __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
pagefault_disable();
while (entry->nr < entry->max_stack) {
unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;
if (!access_ok(VERIFY_READ, fp, 16))
if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
if (bytes != 0)
break;
bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
if (bytes != 0)
break;


@ -1,8 +1,8 @@
obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
intel-rapl-objs := rapl.o
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
intel-rapl-perf-objs := rapl.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o


@ -115,6 +115,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
/*
* When HT is off these events can only run on the bottom 4 counters
* When HT is on, they are impacted by the HT bug and require EXCL access
*/
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@ -139,6 +143,10 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
/*
* When HT is off these events can only run on the bottom 4 counters
* When HT is on, they are impacted by the HT bug and require EXCL access
*/
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@ -182,6 +190,16 @@ struct event_constraint intel_skl_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
/*
* when HT is off, these can only run on the bottom 4 counters
*/
INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
EVENT_CONSTRAINT_END
};
@ -250,6 +268,10 @@ static struct event_constraint intel_hsw_event_constraints[] = {
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
/*
* When HT is off these events can only run on the bottom 4 counters
* When HT is on, they are impacted by the HT bug and require EXCL access
*/
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@ -264,6 +286,13 @@ struct event_constraint intel_bdw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
/*
* when HT is off, these can only run on the bottom 4 counters
*/
INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
EVENT_CONSTRAINT_END
};


@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
i++;
if (i == 0)
return 0;
if (!i)
return -ENODEV;
nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)


@ -396,6 +396,7 @@ int __init pci_acpi_init(void)
return -ENODEV;
printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
acpi_irq_penalty_init();
pcibios_enable_irq = acpi_pci_irq_enable;
pcibios_disable_irq = acpi_pci_irq_disable;
x86_init.pci.init_irq = x86_init_noop;


@ -19,6 +19,7 @@
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);
@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
* kernel's text (this value is passed in the image header).
*/
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;
/*
* Value of the cr3 register from before the hibernation (this value is passed
@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
pgd_t *temp_level4_pgt __visible;
void *relocated_restore_code __visible;
unsigned long relocated_restore_code __visible;
static int set_up_temporary_text_mapping(void)
{
pmd_t *pmd;
pud_t *pud;
/*
* The new mapping only has to cover the page containing the image
* kernel's entry point (jump_address_phys), because the switch over to
* it is carried out by relocated code running from a page allocated
* specifically for this purpose and covered by the identity mapping, so
* the temporary kernel text mapping is only needed for the final jump.
* Moreover, in that mapping the virtual address of the image kernel's
* entry point must be the same as its virtual address in the image
* kernel (restore_jump_address), so the image kernel's
* restore_registers() code doesn't find itself in a different area of
* the virtual address space after switching over to the original page
* tables used by the image kernel.
*/
pud = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;
set_pmd(pmd + pmd_index(restore_jump_address),
__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
set_pud(pud + pud_index(restore_jump_address),
__pud(__pa(pmd) | _KERNPG_TABLE));
set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
__pgd(__pa(pud) | _KERNPG_TABLE));
return 0;
}
static void *alloc_pgt_page(void *context)
{
@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
if (!temp_level4_pgt)
return -ENOMEM;
/* It is safe to reuse the original kernel mapping */
set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
init_level4_pgt[pgd_index(__START_KERNEL_map)]);
/* Prepare a temporary mapping for the kernel text */
result = set_up_temporary_text_mapping();
if (result)
return result;
/* Set up the direct mapping from scratch */
for (i = 0; i < nr_pfn_mapped; i++) {
@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
return 0;
}
static int relocate_restore_code(void)
{
pgd_t *pgd;
pud_t *pud;
relocated_restore_code = get_safe_page(GFP_ATOMIC);
if (!relocated_restore_code)
return -ENOMEM;
memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
/* Make the page containing the relocated code executable */
pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
pud = pud_offset(pgd, relocated_restore_code);
if (pud_large(*pud)) {
set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
} else {
pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
if (pmd_large(*pmd)) {
set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
} else {
pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
}
}
__flush_tlb_all();
return 0;
}
int swsusp_arch_resume(void)
{
int error;
/* We have got enough memory and from now on we cannot recover */
if ((error = set_up_temporary_mappings()))
error = set_up_temporary_mappings();
if (error)
return error;
relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
if (!relocated_restore_code)
return -ENOMEM;
memcpy(relocated_restore_code, &core_restore_code,
&restore_registers - &core_restore_code);
error = relocate_restore_code();
if (error)
return error;
restore_image();
return 0;
@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
struct restore_data_record {
unsigned long jump_address;
unsigned long jump_address_phys;
unsigned long cr3;
unsigned long magic;
};
#define RESTORE_MAGIC 0x0123456789ABCDEFUL
#define RESTORE_MAGIC 0x123456789ABCDEF0UL
/**
* arch_hibernation_header_save - populate the architecture specific part
@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
if (max_size < sizeof(struct restore_data_record))
return -EOVERFLOW;
rdr->jump_address = restore_jump_address;
rdr->jump_address = (unsigned long)&restore_registers;
rdr->jump_address_phys = __pa_symbol(&restore_registers);
rdr->cr3 = restore_cr3;
rdr->magic = RESTORE_MAGIC;
return 0;
@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
struct restore_data_record *rdr = addr;
restore_jump_address = rdr->jump_address;
jump_address_phys = rdr->jump_address_phys;
restore_cr3 = rdr->cr3;
return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}


@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
pushfq
popq pt_regs_flags(%rax)
/* save the address of restore_registers */
movq $restore_registers, %rax
movq %rax, restore_jump_address(%rip)
/* save cr3 */
movq %cr3, %rax
movq %rax, restore_cr3(%rip)
@ -57,31 +54,34 @@ ENTRY(swsusp_arch_suspend)
ENDPROC(swsusp_arch_suspend)
ENTRY(restore_image)
/* switch to temporary page tables */
movq $__PAGE_OFFSET, %rdx
movq temp_level4_pgt(%rip), %rax
subq %rdx, %rax
movq %rax, %cr3
/* Flush TLB */
movq mmu_cr4_features(%rip), %rax
movq %rax, %rdx
andq $~(X86_CR4_PGE), %rdx
movq %rdx, %cr4; # turn off PGE
movq %cr3, %rcx; # flush TLB
movq %rcx, %cr3;
movq %rax, %cr4; # turn PGE back on
/* prepare to jump to the image kernel */
movq restore_jump_address(%rip), %rax
movq restore_cr3(%rip), %rbx
movq restore_jump_address(%rip), %r8
movq restore_cr3(%rip), %r9
/* prepare to switch to temporary page tables */
movq temp_level4_pgt(%rip), %rax
movq mmu_cr4_features(%rip), %rbx
/* prepare to copy image data to their original locations */
movq restore_pblist(%rip), %rdx
/* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
jmpq *%rcx
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
/* switch to temporary page tables */
movq $__PAGE_OFFSET, %rcx
subq %rcx, %rax
movq %rax, %cr3
/* flush TLB */
movq %rbx, %rcx
andq $~(X86_CR4_PGE), %rcx
movq %rcx, %cr4; # turn off PGE
movq %cr3, %rcx; # flush TLB
movq %rcx, %cr3;
movq %rbx, %cr4; # turn PGE back on
.Lloop:
testq %rdx, %rdx
jz .Ldone
@ -96,24 +96,17 @@ ENTRY(core_restore_code)
/* progress to the next pbe */
movq pbe_next(%rdx), %rdx
jmp .Lloop
.Ldone:
/* jump to the restore_registers address from the image header */
jmpq *%rax
/*
* NOTE: This assumes that the boot kernel's text mapping covers the
* image kernel's page containing restore_registers and the address of
* this page is the same as in the image kernel's text mapping (it
* should always be true, because the text mapping is linear, starting
* from 0, and is supposed to cover the entire kernel text for every
* kernel).
*
* code below belongs to the image kernel
*/
jmpq *%r8
/* code below belongs to the image kernel */
.align PAGE_SIZE
ENTRY(restore_registers)
FRAME_BEGIN
/* go back to the original page tables */
movq %rbx, %cr3
movq %r9, %cr3
/* Flush TLB, including "global" things (vmalloc) */
movq mmu_cr4_features(%rip), %rax


@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
task_unlock(p);
out:
return ret;
}


@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
return ret;
}
@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
return n;
}


@ -46,6 +46,7 @@
#include "acnamesp.h"
#include "acdispat.h"
#include "actables.h"
#include "acinterp.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsload")
@ -78,6 +79,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
ACPI_FUNCTION_TRACE(ns_load_table);
acpi_ex_enter_interpreter();
/*
* Parse the table and load the namespace with all named
* objects found within. Control methods are NOT parsed
@ -89,7 +92,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
*/
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
goto unlock_interp;
}
/* If table already loaded into namespace, just return */
@ -130,6 +133,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
unlock:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
unlock_interp:
(void)acpi_ex_exit_interpreter();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);


@ -47,7 +47,6 @@
#include "acparser.h"
#include "acdispat.h"
#include "actables.h"
#include "acinterp.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsparse")
@ -171,8 +170,6 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
ACPI_FUNCTION_TRACE(ns_parse_table);
acpi_ex_enter_interpreter();
/*
* AML Parse, pass 1
*
@ -188,7 +185,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
table_index, start_node);
if (ACPI_FAILURE(status)) {
goto error_exit;
return_ACPI_STATUS(status);
}
/*
@ -204,10 +201,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
table_index, start_node);
if (ACPI_FAILURE(status)) {
goto error_exit;
return_ACPI_STATUS(status);
}
error_exit:
acpi_ex_exit_interpreter();
return_ACPI_STATUS(status);
}


@ -928,7 +928,7 @@ static ssize_t format_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);
@ -961,8 +961,8 @@ static ssize_t format1_show(struct device *dev,
continue;
if (nfit_dcr->dcr->code == dcr->code)
continue;
rc = sprintf(buf, "%#x\n",
be16_to_cpu(nfit_dcr->dcr->code));
rc = sprintf(buf, "0x%04x\n",
le16_to_cpu(nfit_dcr->dcr->code));
break;
}
if (rc != ENXIO)
@ -1131,11 +1131,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
/*
* Until standardization materializes we need to consider up to 3
* different command sets. Note, that checking for function0 (bit0)
* tells us if any commands are reachable through this uuid.
* different command sets. Note, that checking for zero functions
* tells us if any commands might be reachable through this uuid.
*/
for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0))
break;
/* limit the supported commands to those that are publicly documented */


@ -53,12 +53,12 @@ enum nfit_uuids {
};
/*
* Region format interface codes are stored as an array of bytes in the
* NFIT DIMM Control Region structure
* Region format interface codes are stored with the interface as the
* LSB and the function as the MSB.
*/
#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */
#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */
#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */
#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
enum {
NFIT_BLK_READ_FLUSH = 1,


@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
{
struct acpi_pci_link *link;
int penalty = 0;
int i;
list_for_each_entry(link, &acpi_link_list, list) {
/*
@ -478,18 +479,14 @@ static int acpi_irq_pci_sharing_penalty(int irq)
*/
if (link->irq.active && link->irq.active == irq)
penalty += PIRQ_PENALTY_PCI_USING;
else {
int i;
/*
* If a link is inactive, penalize the IRQs it
* might use, but not as severely.
*/
for (i = 0; i < link->irq.possible_count; i++)
if (link->irq.possible[i] == irq)
penalty += PIRQ_PENALTY_PCI_POSSIBLE /
link->irq.possible_count;
}
/*
* penalize the IRQs PCI might use, but not as severely.
*/
for (i = 0; i < link->irq.possible_count; i++)
if (link->irq.possible[i] == irq)
penalty += PIRQ_PENALTY_PCI_POSSIBLE /
link->irq.possible_count;
}
return penalty;
@ -499,9 +496,6 @@ static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
if (irq < ACPI_MAX_ISA_IRQS)
penalty += acpi_isa_irq_penalty[irq];
/*
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
* with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
@ -516,10 +510,49 @@ static int acpi_irq_get_penalty(int irq)
penalty += PIRQ_PENALTY_PCI_USING;
}
if (irq < ACPI_MAX_ISA_IRQS)
return penalty + acpi_isa_irq_penalty[irq];
penalty += acpi_irq_pci_sharing_penalty(irq);
return penalty;
}
int __init acpi_irq_penalty_init(void)
{
struct acpi_pci_link *link;
int i;
/*
* Update penalties to facilitate IRQ balancing.
*/
list_for_each_entry(link, &acpi_link_list, list) {
/*
* reflect the possible and active irqs in the penalty table --
* useful for breaking ties.
*/
if (link->irq.possible_count) {
int penalty =
PIRQ_PENALTY_PCI_POSSIBLE /
link->irq.possible_count;
for (i = 0; i < link->irq.possible_count; i++) {
if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
acpi_isa_irq_penalty[link->irq.
possible[i]] +=
penalty;
}
} else if (link->irq.active &&
(link->irq.active < ACPI_MAX_ISA_IRQS)) {
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_POSSIBLE;
}
}
return 0;
}
static int acpi_irq_balance = -1; /* 0: static, 1: balance */
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
@ -839,7 +872,7 @@ void acpi_penalize_isa_irq(int irq, int active)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING;
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
bool acpi_isa_irq_available(int irq)


@ -680,9 +680,6 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
u64 mask = 0;
union acpi_object *obj;
if (funcs == 0)
return false;
obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
if (!obj)
return false;
@ -695,6 +692,9 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
ACPI_FREE(obj);
if (funcs == 0)
return true;
/*
* Bit 0 indicates whether there's support for any functions other than
* function 0 for the specified UUID and revision.


@ -207,6 +207,9 @@ struct blkfront_info
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
/* Save uncomplete reqs and bios for migration. */
struct list_head requests;
struct bio_list bio_list;
};
static unsigned int nr_minors;
@ -2002,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
{
unsigned int i, r_index;
struct request *req, *n;
struct blk_shadow *copy;
int rc;
struct bio *bio, *cloned_bio;
struct bio_list bio_list, merge_bio;
unsigned int segs, offset;
int pending, size;
struct split_bio *split_bio;
struct list_head requests;
blkfront_gather_backend_features(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs);
bio_list_init(&bio_list);
INIT_LIST_HEAD(&requests);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
struct blkfront_ring_info *rinfo;
rinfo = &info->rinfo[r_index];
/* Stage 1: Make a safe copy of the shadow state. */
copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
if (!copy)
return -ENOMEM;
/* Stage 2: Set up free list. */
memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
for (i = 0; i < BLK_RING_SIZE(info); i++)
rinfo->shadow[i].req.u.rw.id = i+1;
rinfo->shadow_free = rinfo->ring.req_prod_pvt;
rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
rc = blkfront_setup_indirect(rinfo);
if (rc) {
kfree(copy);
if (rc)
return rc;
}
for (i = 0; i < BLK_RING_SIZE(info); i++) {
/* Not in use? */
if (!copy[i].request)
continue;
/*
* Get the bios in the request so we can re-queue them.
*/
if (copy[i].request->cmd_flags &
(REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
/*
* Flush operations don't contain bios, so
* we need to requeue the whole request
*/
list_add(&copy[i].request->queuelist, &requests);
continue;
}
merge_bio.head = copy[i].request->bio;
merge_bio.tail = copy[i].request->biotail;
bio_list_merge(&bio_list, &merge_bio);
copy[i].request->bio = NULL;
blk_end_request_all(copy[i].request, 0);
}
kfree(copy);
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
@ -2079,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
kick_pending_request_queues(rinfo);
}
list_for_each_entry_safe(req, n, &requests, queuelist) {
list_for_each_entry_safe(req, n, &info->requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
@ -2087,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
}
blk_mq_kick_requeue_list(info->rq);
while ((bio = bio_list_pop(&bio_list)) != NULL) {
while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
if (bio_segments(bio) > segs) {
/*
@ -2133,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
unsigned int i, j;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
bio_list_init(&info->bio_list);
INIT_LIST_HEAD(&info->requests);
for (i = 0; i < info->nr_rings; i++) {
struct blkfront_ring_info *rinfo = &info->rinfo[i];
struct bio_list merge_bio;
struct blk_shadow *shadow = rinfo->shadow;
for (j = 0; j < BLK_RING_SIZE(info); j++) {
/* Not in use? */
if (!shadow[j].request)
continue;
/*
* Get the bios in the request so we can re-queue them.
*/
if (shadow[j].request->cmd_flags &
(REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
/*
* Flush operations don't contain bios, so
* we need to requeue the whole request
*/
list_add(&shadow[j].request->queuelist, &info->requests);
continue;
}
merge_bio.head = shadow[j].request->bio;
merge_bio.tail = shadow[j].request->biotail;
bio_list_merge(&info->bio_list, &merge_bio);
shadow[j].request->bio = NULL;
blk_mq_end_request(shadow[j].request, 0);
}
}
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = negotiate_mq(info);


@ -144,9 +144,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
return -ENOMEM;
regmap = syscon_node_to_regmap(of_get_parent(np));
if (!regmap) {
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "failed to have parent regmap\n");
return -EINVAL;
return PTR_ERR(regmap);
}
for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {


@ -321,9 +321,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
}
cclk = clk_register(NULL, &cpuclk->hw);
if (IS_ERR(clk)) {
if (IS_ERR(cclk)) {
pr_err("%s: could not register cpuclk %s\n", __func__, name);
ret = PTR_ERR(clk);
ret = PTR_ERR(cclk);
goto free_rate_table;
}


@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
#define ROCKCHIP_MMC_DEGREE_MASK 0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
#define PSECS_PER_SEC 1000000000000LL
@ -154,6 +152,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
init.flags = 0;
init.num_parents = num_parents;
init.parent_names = parent_names;
init.ops = &rockchip_mmc_clk_ops;
@ -162,15 +161,6 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->reg = reg;
mmc_clock->shift = shift;
/*
* Assert init_state to soft reset the CLKGEN
* for mmc tuning phase and degree
*/
if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
ROCKCHIP_MMC_INIT_STATE_RESET,
mmc_clock->shift), mmc_clock->reg);
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
kfree(mmc_clock);


@ -832,9 +832,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(13), 1, GFLAGS),
/* perihp */
GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(5), 0, GFLAGS),
GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(5), 1, GFLAGS),
COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
@ -1466,6 +1466,8 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
static const char *const rk3399_cru_critical_clocks[] __initconst = {
"aclk_cci_pre",
"aclk_gic",
"aclk_gic_noc",
"pclk_perilp0",
"pclk_perilp0",
"hclk_perilp0",
@ -1508,6 +1510,7 @@ static void __init rk3399_clk_init(struct device_node *np)
ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
return;
}
@ -1553,6 +1556,7 @@ static void __init rk3399_pmu_clk_init(struct device_node *np)
ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip pmu clk init failed\n", __func__);
iounmap(reg_base);
return;
}


@ -79,15 +79,16 @@ static const struct of_device_id machines[] __initconst = {
static int __init cpufreq_dt_platdev_init(void)
{
struct device_node *np = of_find_node_by_path("/");
const struct of_device_id *match;
if (!np)
return -ENODEV;
if (!of_match_node(machines, np))
match = of_match_node(machines, np);
of_node_put(np);
if (!match)
return -ENODEV;
of_node_put(of_root);
return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
NULL, 0));
}


@ -2261,6 +2261,10 @@ int cpufreq_update_policy(unsigned int cpu)
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
if (cpufreq_suspended) {
ret = -EAGAIN;
goto unlock;
}
new_policy.cur = cpufreq_update_current_freq(policy);
if (WARN_ON(!new_policy.cur)) {
ret = -EIO;


@ -1400,6 +1400,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
if (cpu->update_util_set)
return;
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
@ -1440,8 +1443,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
if (!policy->cpuinfo.max_freq)
return -ENODEV;
intel_pstate_clear_update_util_hook(policy->cpu);
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);


@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
struct cpuidle_state *target_state = &drv->states[index];
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
u64 time_start, time_end;
ktime_t time_start, time_end;
s64 diff;
/*
@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
sched_idle_set_state(target_state);
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = local_clock();
time_start = ns_to_ktime(local_clock());
stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
start_critical_timings();
time_end = local_clock();
time_end = ns_to_ktime(local_clock());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
/*
* local_clock() returns the time in nanosecond, let's shift
* by 10 (divide by 1024) to have microsecond based time.
*/
diff = (time_end - time_start) >> 10;
diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;


@ -49,7 +49,7 @@ config GPIO_DEVRES
config OF_GPIO
def_bool y
depends on OF || COMPILE_TEST
depends on OF
config GPIO_ACPI
def_bool y


@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
return gpio % 8;
}
static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned short offset, bit;
u8 reg_val;
@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
return reg_val;
}
static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
int val)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned short offset, bit;
u8 reg_val;
@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
sch_gpio_reg_set(gc, gpio_num, GIO, 1);
sch_gpio_reg_set(sch, gpio_num, GIO, 1);
spin_unlock(&sch->lock);
return 0;
}
static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
{
return sch_gpio_reg_get(gc, gpio_num, GLV);
struct sch_gpio *sch = gpiochip_get_data(gc);
return sch_gpio_reg_get(sch, gpio_num, GLV);
}
static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
sch_gpio_reg_set(gc, gpio_num, GLV, val);
sch_gpio_reg_set(sch, gpio_num, GLV, val);
spin_unlock(&sch->lock);
}
@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
sch_gpio_reg_set(gc, gpio_num, GIO, 0);
sch_gpio_reg_set(sch, gpio_num, GIO, 0);
spin_unlock(&sch->lock);
/*
@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
* GPIO7 is configured by the CMC as SLPIOVR
* Enable GPIO[9:8] core powered gpios explicitly
*/
sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
sch_gpio_reg_set(sch, 8, GEN, 1);
sch_gpio_reg_set(sch, 9, GEN, 1);
/*
* SUS_GPIO[2:0] enabled by default
* Enable SUS_GPIO3 resume powered gpio explicitly
*/
sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
sch_gpio_reg_set(sch, 13, GEN, 1);
break;
case PCI_DEVICE_ID_INTEL_ITC_LPC:


@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (!desc && gpio_is_valid(gpio))
return -EPROBE_DEFER;
err = gpiod_request(desc, label);
if (err)
return err;
if (flags & GPIOF_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (flags & GPIOF_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
err = gpiod_request(desc, label);
if (err)
return err;
if (flags & GPIOF_DIR_IN)
err = gpiod_direction_input(desc);
else


@ -1352,14 +1352,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
spin_lock_irqsave(&gpio_lock, flags);
}
done:
if (status < 0) {
/* Clear flags that might have been set by the caller before
* requesting the GPIO.
*/
clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
}
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
@ -2587,28 +2579,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
}
EXPORT_SYMBOL_GPL(gpiod_get_optional);
/**
* gpiod_parse_flags - helper function to parse GPIO lookup flags
* @desc: gpio to be setup
* @lflags: gpio_lookup_flags - returned from of_find_gpio() or
* of_get_gpio_hog()
*
* Set the GPIO descriptor flags based on the given GPIO lookup flags.
*/
static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
{
if (lflags & GPIO_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
if (lflags & GPIO_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
}
/**
* gpiod_configure_flags - helper function to configure a given GPIO
* @desc: gpio whose value will be assigned
* @con_id: function within the GPIO consumer
* @lflags: gpio_lookup_flags - returned from of_find_gpio() or
* of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
@ -2616,10 +2593,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
* occurred while trying to acquire the GPIO.
*/
static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
enum gpiod_flags dflags)
unsigned long lflags, enum gpiod_flags dflags)
{
int status;
if (lflags & GPIO_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
if (lflags & GPIO_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
pr_debug("no flags found for %s\n", con_id);
@ -2686,13 +2670,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
gpiod_parse_flags(desc, lookupflags);
status = gpiod_request(desc, con_id);
if (status < 0)
return ERR_PTR(status);
status = gpiod_configure_flags(desc, con_id, flags);
status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (status < 0) {
dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
@ -2748,6 +2730,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
if (IS_ERR(desc))
return desc;
ret = gpiod_request(desc, NULL);
if (ret)
return ERR_PTR(ret);
if (active_low)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@ -2758,10 +2744,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
}
ret = gpiod_request(desc, NULL);
if (ret)
return ERR_PTR(ret);
return desc;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@ -2814,8 +2796,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
chip = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);
gpiod_parse_flags(desc, lflags);
local_desc = gpiochip_request_own_desc(chip, hwnum, name);
if (IS_ERR(local_desc)) {
status = PTR_ERR(local_desc);
@ -2824,7 +2804,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
return status;
}
status = gpiod_configure_flags(desc, name, dflags);
status = gpiod_configure_flags(desc, name, lflags, dflags);
if (status < 0) {
pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
name, chip->label, hwnum, status);


@ -1106,6 +1106,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
if (fences == 0 && handles == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
/* just work around for uvd clock remain high even
* when uvd dpm disabled on Polaris10 */
if (adev->asic_type == CHIP_POLARIS10)
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
}


@ -47,6 +47,8 @@
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#define GFX8_NUM_GFX_RINGS 1
#define GFX8_NUM_COMPUTE_RINGS 8
@ -693,6 +695,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
polaris10_golden_common_all,
(const u32)ARRAY_SIZE(polaris10_golden_common_all));
WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,


@ -732,7 +732,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
table->Smio[level] |=
data->mvdd_voltage_table.entries[level].smio_low;
}
table->SmioMask2 = data->vddci_voltage_table.mask_low;
table->SmioMask2 = data->mvdd_voltage_table.mask_low;
table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
}
@ -1422,22 +1422,19 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
if (!data->sclk_dpm_key_disabled) {
/* Get MinVoltage and Frequency from DPM0,
* already converted to SMC_UL */
sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
result = polaris10_get_dependency_volt_by_clk(hwmgr,
table_info->vdd_dep_on_sclk,
table->ACPILevel.SclkFrequency,
&table->ACPILevel.MinVoltage, &mvdd);
PP_ASSERT_WITH_CODE((0 == result),
"Cannot find ACPI VDDC voltage value "
"in Clock Dependency Table", );
} else {
sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
table->ACPILevel.MinVoltage =
data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
}
/* Get MinVoltage and Frequency from DPM0,
* already converted to SMC_UL */
sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
result = polaris10_get_dependency_volt_by_clk(hwmgr,
table_info->vdd_dep_on_sclk,
sclk_frequency,
&table->ACPILevel.MinVoltage, &mvdd);
PP_ASSERT_WITH_CODE((0 == result),
"Cannot find ACPI VDDC voltage value "
"in Clock Dependency Table",
);
result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
@ -1462,24 +1459,18 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
if (!data->mclk_dpm_key_disabled) {
/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
table->MemoryACPILevel.MclkFrequency =
data->dpm_table.mclk_table.dpm_levels[0].value;
result = polaris10_get_dependency_volt_by_clk(hwmgr,
table_info->vdd_dep_on_mclk,
table->MemoryACPILevel.MclkFrequency,
&table->MemoryACPILevel.MinVoltage, &mvdd);
PP_ASSERT_WITH_CODE((0 == result),
"Cannot find ACPI VDDCI voltage value "
"in Clock Dependency Table",
);
} else {
table->MemoryACPILevel.MclkFrequency =
data->vbios_boot_state.mclk_bootup_value;
table->MemoryACPILevel.MinVoltage =
data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
}
/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
table->MemoryACPILevel.MclkFrequency =
data->dpm_table.mclk_table.dpm_levels[0].value;
result = polaris10_get_dependency_volt_by_clk(hwmgr,
table_info->vdd_dep_on_mclk,
table->MemoryACPILevel.MclkFrequency,
&table->MemoryACPILevel.MinVoltage, &mvdd);
PP_ASSERT_WITH_CODE((0 == result),
"Cannot find ACPI VDDCI voltage value "
"in Clock Dependency Table",
);
us_mvdd = 0;
if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
@ -1524,6 +1515,7 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint32_t vddci;
table->VceLevelCount = (uint8_t)(mm_table->count);
table->VceBootLevel = 0;
@ -1533,9 +1525,18 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
table->VceLevel[count].MinVoltage = 0;
table->VceLevel[count].MinVoltage |=
(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
else
vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->VceLevel[count].MinVoltage |=
((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
VOLTAGE_SCALE) << VDDCI_SHIFT;
(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/*retrieve divider value for VBIOS */
@ -1564,6 +1565,7 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint32_t vddci;
table->SamuBootLevel = 0;
table->SamuLevelCount = (uint8_t)(mm_table->count);
@ -1574,8 +1576,16 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
else
vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/* retrieve divider value for VBIOS */
@ -1658,6 +1668,7 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint32_t vddci;
table->UvdLevelCount = (uint8_t)(mm_table->count);
table->UvdBootLevel = 0;
@ -1668,8 +1679,16 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
else
vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/* retrieve divider value for VBIOS */
@ -1690,8 +1709,8 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
}
return result;
}
@ -1791,20 +1810,26 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
for (i = 0; i < sclk_table->count; i++) {
data->smc_state_table.Sclk_CKS_masterEn0_7 |=
sclk_table->entries[i].cks_enable << i;
volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk/100 * 136418 /1000) / \
(sclk_table->entries[i].clk/100 * 1132925 /10000 - 242418)/100);
volt_with_cks = (uint32_t)((ro * 1000 -2396351 - sclk_table->entries[i].clk/100 * 329021/1000) / \
(sclk_table->entries[i].clk/10000 * 649434 /1000 - 18005)/10);
if (hwmgr->chip_id == CHIP_POLARIS10) {
volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
} else {
volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
}
if (volt_without_cks >= volt_with_cks)
volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
}
data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
stretch_amount2 = 0;
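The voltage-offset rounding in the clock-stretcher loop above changes from "floor-divide, then add one" to a genuine round-up division: adding 624 before dividing by 625 only bumps non-exact quotients, so exact multiples of 625 no longer gain an extra step. A minimal standalone sketch of the two formulas, with made-up delta values (illustration only, not part of the driver):

#include <stdio.h>
#include <stdint.h>

static uint32_t offset_old(uint32_t delta)	/* floor division plus one */
{
	return delta * 100 / 625 + 1;
}

static uint32_t offset_new(uint32_t delta)	/* round-up (ceiling) division */
{
	return (delta * 100 + 624) / 625;
}

int main(void)
{
	printf("%u %u\n", offset_old(25), offset_new(25));	/* 2500 is an exact multiple: 5 vs 4 */
	printf("%u %u\n", offset_old(26), offset_new(26));	/* 2600 is not: 5 vs 5 */
	return 0;
}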
@ -2487,6 +2512,8 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
tmp_result = polaris10_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable SCLK control!", result = tmp_result);
@ -2655,7 +2682,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint16_t vv_id;
uint16_t vddc = 0;
uint32_t vddc = 0;
uint16_t i, j;
uint32_t sclk = 0;
struct phm_ppt_v1_information *table_info =
@ -2686,8 +2713,9 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
continue);
/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
/* need to make sure vddc is less than 2v or else, it could burn the ASIC.
* real voltage level in unit of 0.01mv */
PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
"Invalid VDDC value", result = -EINVAL;);
/* the voltage should not be zero nor equal to leakage ID */
@ -2913,6 +2941,31 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
return 0;
}
int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
table_info->vdd_dep_on_mclk;
struct phm_ppt_v1_voltage_lookup_table *lookup_table =
table_info->vddc_lookup_table;
uint32_t i;
if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
return 0;
for (i = 0; i < lookup_table->count; i++) {
if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
return 0;
}
}
}
return 0;
}
int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@ -2990,6 +3043,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
polaris10_set_features_platform_caps(hwmgr);
polaris10_patch_voltage_workaround(hwmgr);
polaris10_init_dpm_defaults(hwmgr);
/* Get leakage voltage based on leakage ID. */
@ -4359,6 +4413,15 @@ static int polaris10_notify_link_speed_change_after_state_change(
return 0;
}
static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}
static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
int tmp_result, result = 0;
@ -4407,6 +4470,11 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
"Failed to program memory timing parameters!",
result = tmp_result);
tmp_result = polaris10_notify_smc_display(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to notify smc display settings!",
result = tmp_result);
tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to unfreeze SCLK MCLK DPM!",
@ -4441,6 +4509,7 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
@ -4460,8 +4529,6 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
polaris10_notify_smc_display_change(hwmgr, false);
else
polaris10_notify_smc_display_change(hwmgr, true);
return 0;
}
@ -4502,6 +4569,8 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
frame_time_in_us = 1000000 / refresh_rate;
pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
data->frame_time_x2 = frame_time_in_us * 2 / 100;
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
@ -4510,8 +4579,6 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0);
return 0;
}
@ -4623,7 +4690,7 @@ int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
return 0;
}
data->need_long_memory_training = true;
data->need_long_memory_training = false;
/*
* PPMCME_FirmwareDescriptorEntry *pfd = NULL;


@ -315,6 +315,7 @@ struct polaris10_hwmgr {
uint32_t avfs_vdroop_override_setting;
bool apply_avfs_cks_off_voltage;
uint32_t frame_time_x2;
};
/* To convert to Q8.8 format for firmware */


@ -1256,7 +1256,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
}
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
int result;
@ -1274,7 +1274,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
*voltage = get_voltage_info_param_space.usVoltageLevel;
*voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
return result;
}


@ -305,7 +305,7 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t
extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
uint8_t level);
extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);


@ -1302,7 +1302,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
table->Smio[count] |=
data->mvdd_voltage_table.entries[count].smio_low;
}
table->SmioMask2 = data->vddci_voltage_table.mask_low;
table->SmioMask2 = data->mvdd_voltage_table.mask_low;
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
}


@ -302,7 +302,7 @@ static int init_dpm_2_parameters(
(((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
if (0 != powerplay_table->usPPMTableOffset) {
if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnablePlatformPowerManagement);
}


@ -411,6 +411,8 @@ struct phm_cac_tdp_table {
uint8_t ucVr_I2C_Line;
uint8_t ucPlx_I2C_address;
uint8_t ucPlx_I2C_Line;
uint32_t usBoostPowerLimit;
uint8_t ucCKS_LDO_REFSEL;
};
struct phm_ppm_table {


@ -392,6 +392,8 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)


@ -270,7 +270,8 @@ struct SMU74_Discrete_DpmTable {
uint8_t BootPhases;
uint8_t VRHotLevel;
uint8_t Reserved1[3];
uint8_t LdoRefSel;
uint8_t Reserved1[2];
uint16_t FanStartTemperature;
uint16_t FanStopTemperature;
uint16_t MaxVoltage;


@ -2365,16 +2365,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
task = get_pid_task(file->pid, PIDTYPE_PID);
if (!task) {
ret = -ESRCH;
goto out_put;
goto out_unlock;
}
seq_printf(m, "\nproc: %s\n", task->comm);
put_task_struct(task);
idr_for_each(&file_priv->context_idr, per_file_ctx,
(void *)(unsigned long)m);
}
out_unlock:
mutex_unlock(&dev->filelist_mutex);
out_put:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);


@ -8447,16 +8447,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
DRM_ERROR("FDI mPHY reset assert timeout\n");
tmp = I915_READ(SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
@ -9440,8 +9440,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
if (wait_for_us(I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
val = I915_READ(LCPLL_CTL);
@ -9514,8 +9514,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
if (wait_for_us((I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
}


@ -663,7 +663,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
else
done = wait_for_atomic(C, 10) == 0;
done = wait_for(C, 10) == 0;
if (!done)
DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
has_aux_irq);
@ -4899,13 +4899,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
intel_dp = enc_to_intel_dp(encoder);
pps_lock(intel_dp);
/*


@ -1377,8 +1377,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
POSTING_READ(BXT_PORT_PLL_ENABLE(port));
if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_LOCK), 200))
if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
200))
DRM_ERROR("PLL %d not locked\n", port);
/*


@ -40,7 +40,8 @@ static int
gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
const u32 soff = gf119_sor_soff(outp);
nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}


@ -65,6 +65,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
sun4i_tcon_disable(drv->tcon);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
}
static void sun4i_crtc_enable(struct drm_crtc *crtc)


@ -92,7 +92,7 @@ static struct drm_driver sun4i_drv_driver = {
/* Frame Buffer Operations */
/* VBlank Operations */
.get_vblank_counter = drm_vblank_count,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = sun4i_drv_enable_vblank,
.disable_vblank = sun4i_drv_disable_vblank,
};
@ -310,6 +310,7 @@ static int sun4i_drv_probe(struct platform_device *pdev)
count += sun4i_drv_add_endpoints(&pdev->dev, &match,
pipeline);
of_node_put(pipeline);
DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
count, i);


@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
mutex_lock(&st->buf_lock);
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret)
if (ret < 0)
goto error_ret;
st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret)
if (ret < 0)
goto error_ret;
*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
ret = IIO_VAL_INT_PLUS_MICRO;


@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
st->reg = devm_regulator_get(&spi->dev, "vref");
if (!IS_ERR_OR_NULL(st->reg)) {
st->reg = devm_regulator_get_optional(&spi->dev, "vref");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
return ret;
@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
st->vref_mv = ret / 1000;
} else {
/* Any other error indicates that the regulator does exist */
if (PTR_ERR(st->reg) != -ENODEV)
return PTR_ERR(st->reg);
/* Use internal reference */
st->vref_mv = 2500;
}


@ -56,6 +56,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
int i;
acpi_status status;
union acpi_object *cpm;
int ret;
status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer);
if (ACPI_FAILURE(status))
@ -82,10 +83,10 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
}
}
}
ret = cpm->package.count;
kfree(buffer.pointer);
return cpm->package.count;
return ret;
}
static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)


@ -1107,13 +1107,13 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
devid = e->devid;
DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
hid, uid,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
devid = e->devid;
flags = e->flags;
ret = add_acpi_hid_device(hid, uid, &devid, false);
@ -1568,13 +1568,23 @@ static int __init amd_iommu_init_pci(void)
break;
}
/*
* Order is important here to make sure any unity map requirements are
* fulfilled. The unity mappings are created and written to the device
* table during the amd_iommu_init_api() call.
*
* After that we call init_device_table_dma() to make sure any
* uninitialized DTE will block DMA, and in the end we flush the caches
* of all IOMMUs to make sure the changes to the device table are
* active.
*/
ret = amd_iommu_init_api();
init_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
ret = amd_iommu_init_api();
if (!ret)
print_iommu_info();


@ -4602,13 +4602,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
struct dmar_domain *domain;
u16 did;
int did;
if (!iommu)
continue;
for (did = 0; did < 0xffff; did++) {
domain = get_iommu_domain(iommu, did);
for (did = 0; did < cap_ndoms(iommu->cap); did++) {
domain = get_iommu_domain(iommu, (u16)did);
if (!domain)
continue;


@ -420,8 +420,10 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
/* Try replenishing IOVAs by flushing rcache. */
flushed_rcache = true;
preempt_disable();
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
preempt_enable();
goto retry;
}
@ -749,7 +751,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
bool can_insert = false;
unsigned long flags;
cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
spin_lock_irqsave(&cpu_rcache->lock, flags);
if (!iova_magazine_full(cpu_rcache->loaded)) {
@ -779,6 +781,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
iova_magazine_push(cpu_rcache->loaded, iova_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
put_cpu_ptr(rcache->cpu_rcaches);
if (mag_to_free) {
iova_magazine_free_pfns(mag_to_free, iovad);
@ -812,7 +815,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
bool has_pfn = false;
unsigned long flags;
cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
spin_lock_irqsave(&cpu_rcache->lock, flags);
if (!iova_magazine_empty(cpu_rcache->loaded)) {
@ -834,6 +837,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
put_cpu_ptr(rcache->cpu_rcaches);
return iova_pfn;
}
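The iova hunks above replace this_cpu_ptr() with the get_cpu_ptr()/put_cpu_ptr() pair so that preemption stays disabled while the per-CPU rcache magazine is manipulated. A minimal sketch of that pattern using only the generic percpu helpers (the demo_* names are hypothetical, not from the patch):

#include <linux/percpu.h>

struct demo_cache {
	int count;
};

static DEFINE_PER_CPU(struct demo_cache, demo_cache);

static void demo_update(void)
{
	/* get_cpu_ptr() disables preemption, so the task cannot migrate to
	 * another CPU while it touches this CPU's instance of the cache... */
	struct demo_cache *c = get_cpu_ptr(&demo_cache);

	c->count++;

	/* ...and put_cpu_ptr() re-enables preemption once the access is done. */
	put_cpu_ptr(&demo_cache);
}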


@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
gic_map_to_vpe(intr, vpe);
gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
@ -959,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
switch (bus_token) {
case DOMAIN_BUS_IPI:
is_ipi = d->bus_token == bus_token;
return to_of_node(d->fwnode) == node && is_ipi;
return (!node || to_of_node(d->fwnode) == node) && is_ipi;
break;
default:
return 0;


@ -101,11 +101,14 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
MULTICAST_LACPDU_ADDR;
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@ -1739,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
aggregator->is_individual = false;
aggregator->actor_admin_aggregator_key = 0;
aggregator->actor_oper_aggregator_key = 0;
aggregator->partner_system = null_mac_addr;
eth_zero_addr(aggregator->partner_system.mac_addr_value);
aggregator->partner_system_priority = 0;
aggregator->partner_oper_aggregator_key = 0;
aggregator->receive_state = 0;
@ -1761,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
if (aggregator) {
ad_clear_agg(aggregator);
aggregator->aggregator_mac_address = null_mac_addr;
eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
aggregator->aggregator_identifier = 0;
aggregator->slave = NULL;
}


@ -42,13 +42,10 @@
#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
#endif
static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;


@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {


@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
data[i] = *(u32 *)p;
data[i] = *(unsigned long *)p;
}
}


@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x0E
#define T4FW_VERSION_MICRO 0x04
#define T4FW_VERSION_MINOR 0x0F
#define T4FW_VERSION_MICRO 0x25
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x0E
#define T5FW_VERSION_MICRO 0x04
#define T5FW_VERSION_MINOR 0x0F
#define T5FW_VERSION_MICRO 0x25
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x0E
#define T6FW_VERSION_MICRO 0x04
#define T6FW_VERSION_MINOR 0x0F
#define T6FW_VERSION_MICRO 0x25
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00


@ -154,16 +154,6 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
writel(val, hw->hw_addr + reg);
}
static bool e1000e_vlan_used(struct e1000_adapter *adapter)
{
u16 vid;
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
return true;
return false;
}
/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
@ -3453,8 +3443,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
ew32(RCTL, rctl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
e1000e_vlan_used(adapter))
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
e1000e_vlan_strip_enable(adapter);
else
e1000e_vlan_strip_disable(adapter);
@ -6926,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_RXFCS;
/* Since there is no support for separate Rx/Tx vlan accel
* enable/disable make sure Tx flag is always in same state as Rx.
*/
if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_CTAG_TX;
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}


@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = -IXGBE_ERR_MBX;
s32 ret_val = IXGBE_ERR_MBX;
if (!mbx->ops.read)
goto out;
@ -111,7 +111,7 @@ static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = -IXGBE_ERR_MBX;
s32 ret_val = IXGBE_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)


@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
return 0;
err_free_irq:
unregister_cpu_notifier(&pp->cpu_notifier);
on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);


@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_2ERR_QP:
case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
case MLX5_CMD_OP_2ERR_QP:
case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_INIT2INIT_QP:
@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
pr_debug("\n");
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
return be16_to_cpu(hdr->opcode);
}
static void cb_timeout_handler(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work,
work);
struct mlx5_cmd_work_ent *ent = container_of(dwork,
struct mlx5_cmd_work_ent,
cb_timeout_work);
struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
cmd);
ent->ret = -ETIMEDOUT;
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
}
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
return be16_to_cpu(hdr->opcode);
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
if (cmd->mode == CMD_MODE_POLLING) {
wait_for_completion(&ent->done);
err = ent->ret;
} else {
if (!wait_for_completion_timeout(&ent->done, timeout))
err = -ETIMEDOUT;
else
err = 0;
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
ent->ret = -ETIMEDOUT;
mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
err = ent->ret;
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (!callback)
init_completion(&ent->done);
INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
INIT_WORK(&ent->work, cmd_work_handler);
if (page_queue) {
cmd_work_handler(&ent->work);
@ -770,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
}
if (!callback) {
err = wait_func(dev, ent);
if (err == -ETIMEDOUT)
goto out;
if (callback)
goto out;
ds = ent->ts2 - ent->ts1;
op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
if (op < ARRAY_SIZE(cmd->stats)) {
stats = &cmd->stats[op];
spin_lock_irq(&stats->lock);
stats->sum += ds;
++stats->n;
spin_unlock_irq(&stats->lock);
}
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
mlx5_command_str(op), ds);
*status = ent->status;
free_cmd(ent);
err = wait_func(dev, ent);
if (err == -ETIMEDOUT)
goto out_free;
ds = ent->ts2 - ent->ts1;
op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
if (op < ARRAY_SIZE(cmd->stats)) {
stats = &cmd->stats[op];
spin_lock_irq(&stats->lock);
stats->sum += ds;
++stats->n;
spin_unlock_irq(&stats->lock);
}
return err;
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
mlx5_command_str(op), ds);
*status = ent->status;
out_free:
free_cmd(ent);
@ -1181,41 +1205,30 @@ static int create_debugfs_files(struct mlx5_core_dev *dev)
return err;
}
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;
for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);
down(&cmd->pages_sem);
flush_workqueue(cmd->wq);
cmd->mode = CMD_MODE_EVENTS;
cmd->mode = mode;
up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
}
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;
for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);
down(&cmd->pages_sem);
flush_workqueue(cmd->wq);
cmd->mode = CMD_MODE_POLLING;
up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
struct semaphore *sem;
ent = cmd->ent_arr[i];
if (ent->callback)
cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
sem = &cmd->pages_sem;
else
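Taken together, the mlx5 command-interface hunks above add a watchdog for commands issued with a completion callback: a delayed work item is armed when the command is posted and cancelled from the completion handler. A minimal sketch of that arm/cancel pattern with the standard workqueue API (the demo_* names and the timeout value are illustrative, not from the patch):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_cmd {
	struct delayed_work cb_timeout_work;
};

static void demo_cb_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_cmd *cmd = container_of(dwork, struct demo_cmd, cb_timeout_work);

	/* the command took too long: report it and complete it as timed out */
	(void)cmd;
}

static void demo_post_cmd(struct demo_cmd *cmd)
{
	INIT_DELAYED_WORK(&cmd->cb_timeout_work, demo_cb_timeout);
	/* arm the watchdog when the command is handed to the firmware... */
	schedule_delayed_work(&cmd->cb_timeout_work, msecs_to_jiffies(60 * 1000));
}

static void demo_cmd_completed(struct demo_cmd *cmd)
{
	/* ...and disarm it if the completion arrives first. */
	cancel_delayed_work(&cmd->cb_timeout_work);
}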


@ -145,7 +145,6 @@ struct mlx5e_umr_wqe {
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
struct mlx5e_params {
@ -191,6 +190,7 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
MLX5E_RQ_STATE_FLUSH_TIMEOUT,
};
struct mlx5e_cq {
@ -220,6 +220,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
u16 ix);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
struct mlx5e_dma_info {
struct page *page;
dma_addr_t addr;
@ -241,6 +243,7 @@ struct mlx5e_rq {
struct mlx5e_cq cq;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
@ -305,6 +308,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_BF_ENABLE,
MLX5E_SQ_STATE_TX_TIMEOUT,
};
struct mlx5e_ico_wqe_info {
@ -538,6 +542,7 @@ struct mlx5e_priv {
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
struct mlx5_core_dev *mdev;
@ -589,12 +594,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,


@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
if (!ets->tc_tx_bw[i])
return -EINVAL;
bw_sum += ets->tc_tx_bw[i];
}
}
if (bw_sum != 0 && bw_sum != 100)


@ -39,6 +39,13 @@
#include "eswitch.h"
#include "vxlan.h"
enum {
MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
};
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@ -74,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
if (port_state == VPORT_STATE_UP)
if (port_state == VPORT_STATE_UP) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
else
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
}
}
static void mlx5e_update_carrier_work(struct work_struct *work)
@ -91,6 +101,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
tx_timeout_work);
int err;
rtnl_lock();
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
mlx5e_close_locked(priv->netdev);
err = mlx5e_open_locked(priv->netdev);
if (err)
netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
err);
unlock:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
}
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
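The tx_timeout_work handler above, together with the .ndo_tx_timeout hook added later in this file, follows the usual recovery split: the watchdog callback only records the stall and schedules a work item, and the work item performs the heavy reset in process context under the proper locks. A minimal sketch of that split (the demo_* names are hypothetical, and dev_close()/dev_open() stand in for the driver's own close/open helpers):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct net_device *netdev;
	struct work_struct tx_timeout_work;	/* INIT_WORK() is done at probe time */
};

static void demo_tx_timeout_work(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv, tx_timeout_work);

	rtnl_lock();
	/* restart the queues by bouncing the interface */
	dev_close(priv->netdev);
	dev_open(priv->netdev);
	rtnl_unlock();
}

/* .ndo_tx_timeout runs from the netdev watchdog and must not sleep,
 * so it only defers the actual recovery to the worker. */
static void demo_tx_timeout(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	schedule_work(&priv->tx_timeout_work);
}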
@ -305,6 +335,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@ -320,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
rq->wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
@ -525,17 +557,25 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
int tout = 0;
int err;
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
while (!mlx5_wq_ll_is_empty(&rq->wq))
msleep(20);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
mlx5e_disable_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}
@ -782,6 +822,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
int tout = 0;
int err;
if (sq->txq) {
clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
/* prevent netif_tx_wake_queue */
@ -792,15 +835,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_ERR);
if (err)
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
while (sq->cc != sq->pc) /* wait till sq is empty */
msleep(20);
/* wait till sq is empty, unless a TX timeout occurred on this SQ */
while (sq->cc != sq->pc &&
!test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);
mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}
@ -1658,8 +1710,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_num_tc(netdev, ntc);
/* Map netdev TCs to offset 0
* We have our own UP to TXQ mapping for QoS
*/
for (tc = 0; tc < ntc; tc++)
netdev_set_tc_queue(netdev, tc, nch, tc * nch);
netdev_set_tc_queue(netdev, tc, nch, 0);
}
int mlx5e_open_locked(struct net_device *netdev)
@ -2590,6 +2645,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
static void mlx5e_tx_timeout(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
bool sched_work = false;
int i;
netdev_err(dev, "TX timeout detected\n");
for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}
if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
schedule_work(&priv->tx_timeout_work);
}
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@ -2607,6 +2685,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@ -2636,6 +2715,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_tx_timeout = mlx5e_tx_timeout,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@ -2838,6 +2918,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

View File

@ -212,6 +212,20 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
return -ENOMEM;
}
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct sk_buff *skb = rq->skb[ix];
if (skb) {
rq->skb[ix] = NULL;
dma_unmap_single(rq->pdev,
*((dma_addr_t *)skb->cb),
rq->wqe_sz,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
}
static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
return 0;
}
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
wi->free_wqe(rq, wi);
}
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_ix_be;
u16 wqe_ix;
while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
}
#define RQ_CANNOT_POST(rq) \
(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
return 0;
if (cq->decmprs_left)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);


@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
int up = 0;
if (!netdev_get_num_tc(dev))
return channel_ix;
if (skb_vlan_tag_present(skb))
up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
*/
if (channel_ix >= priv->params.num_channels)
channel_ix = reciprocal_scale(channel_ix,
priv->params.num_channels);
return priv->channeltc_to_txq_map[channel_ix][up];
}
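The fallback handling above relies on reciprocal_scale() to fold an out-of-range queue index back into [0, num_channels). The helper is roughly a multiply-and-shift that maps a 32-bit value into [0, n) without a modulo; a small sketch of that mapping (illustrative reimplementation, not the kernel source):

#include <stdio.h>
#include <stdint.h>

static uint32_t scale_into_range(uint32_t val, uint32_t n)
{
	/* distributes the full 32-bit input range across n buckets */
	return (uint32_t)(((uint64_t)val * n) >> 32);
}

int main(void)
{
	printf("%u\n", scale_into_range(0x40000000u, 4));	/* 1: a quarter of the range */
	printf("%u\n", scale_into_range(0xC0000000u, 4));	/* 3: three quarters of the range */
	return 0;
}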
@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
#define MLX5E_MIN_INLINE ETH_HLEN
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
if (bf) {
u16 ihs = skb_headlen(skb);
@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return skb_headlen(skb);
}
return MLX5E_MIN_INLINE;
return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
u16 ci;
int i;
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
skb = sq->skb[ci];
wi = &sq->wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
continue;
}
for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
mlx5e_dma_get(sq, sq->dma_fifo_cc++);
mlx5e_tx_dma_unmap(sq->pdev, dma);
}
dev_kfree_skb_any(skb);
sq->cc += wi->num_wqebbs;
}
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
return false;
npkts = 0;
nbytes = 0;


@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return;
goto unlock;
mlx5_core_err(dev, "start\n");
if (pci_channel_offline(dev->pdev) || in_fatal(dev))
if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
trigger_cmd_completions(dev);
}
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");
unlock:
mutex_unlock(&dev->intf_state_mutex);
}
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
u32 count;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}


@ -1422,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
mlx5_pci_err_detected(dev->pdev, 0);
}
/* wait for the device to show vital signs. For now we check
* that we can read the device ID and that the health buffer
* shows a non zero value which is different than 0xffffffff
/* wait for the device to show vital signs by waiting
* for the health counter to start counting.
*/
static void wait_vital(struct pci_dev *pdev)
static int wait_vital(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_health *health = &dev->priv.health;
const int niter = 100;
u32 last_count = 0;
u32 count;
u16 did;
int i;
/* Wait for firmware to be ready after reset */
msleep(1000);
for (i = 0; i < niter; i++) {
if (pci_read_config_word(pdev, 2, &did)) {
dev_warn(&pdev->dev, "failed reading config word\n");
break;
}
if (did == pdev->device) {
dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
break;
}
msleep(50);
}
if (i == niter)
dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
break;
if (last_count && last_count != count) {
dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
return 0;
}
last_count = count;
}
msleep(50);
}
if (i == niter)
dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
return -ETIMEDOUT;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
@ -1473,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
pci_save_state(pdev);
wait_vital(pdev);
err = wait_vital(pdev);
if (err) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
return;
}
err = mlx5_load_one(dev, priv);
if (err)


@ -345,7 +345,6 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
func_id, npages, err);
goto out_4k;
}
dev->priv.fw_pages += npages;
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
@ -373,6 +372,33 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
return err;
}
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct mlx5_manage_pages_inbox *in, int in_size,
struct mlx5_manage_pages_outbox *out, int out_size)
{
struct fw_page *fwp;
struct rb_node *p;
u32 npages;
u32 i = 0;
if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
(u32 *)out, out_size);
npages = be32_to_cpu(in->num_entries);
p = rb_first(&dev->priv.page_root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
out->pas[i] = cpu_to_be64(fwp->addr);
p = rb_next(p);
i++;
}
out->num_entries = cpu_to_be32(i);
return 0;
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free;
}
dev->priv.fw_pages -= npages;
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
err = -EINVAL;
goto out_free;
}
if (nclaimed)
*nclaimed = num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
if (nclaimed)
*nclaimed = num_claimed;
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
free_4k(dev, fwp->addr);
nclaimed = 1;
} else {
err = reclaim_pages(dev, fwp->func_id,
optimal_reclaimed_pages(),
&nclaimed);
}
err = reclaim_pages(dev, fwp->func_id,
optimal_reclaimed_pages(),
&nclaimed);
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
}
} while (p);
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
WARN(dev->priv.vfs_pages,
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);
return 0;
}


@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *nic_vport_context;
u8 *guid;
void *in;
int err;
@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
node_guid);
MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
err = mlx5_modify_nic_vport_context(mdev, in, inlen);


@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
if ((intflags & EIR_TXIF) != 0) {
if (((intflags & EIR_TXIF) != 0) &&
((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
locked_reg_bfclr(priv, EIR, EIR_TXERIF);
locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
BUG_ON(!priv->tx_skb);
if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);


@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
/* Ensure writes are complete before HW fetches Tx descriptors */
wmb();
qlcnic_update_cmd_producer(tx_ring);
return NETDEV_TX_OK;


@ -2804,7 +2804,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
if (status & CORE_IRQ_MTL_RX_OVERFLOW)
if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
priv->rx_tail_addr,
STMMAC_CHAN0);

View File

@ -1072,12 +1072,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
struct geneve_dev *geneve = netdev_priv(dev);
/* The max_mtu calculation does not take account of GENEVE
* options, to avoid excluding potentially valid
* configurations.
*/
int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
- dev->hard_header_len;
int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
if (geneve->remote.sa.sa_family == AF_INET6)
max_mtu -= sizeof(struct ipv6hdr);
else
max_mtu -= sizeof(struct iphdr);
if (new_mtu < 68)
return -EINVAL;
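As a rough worked example of the revised bound, assuming an IP_MAX_MTU of 65535, a 16-byte GENEVE base header (8-byte GENEVE header plus 8-byte UDP header), a 14-byte Ethernet hard_header_len, and 20/40-byte IPv4/IPv6 headers (all of these sizes are assumptions for illustration, not taken from the patch):

#include <stdio.h>

int main(void)
{
	const int ip_max_mtu = 65535, geneve_base_hlen = 16, hard_header_len = 14;
	int max_mtu_v4 = ip_max_mtu - geneve_base_hlen - hard_header_len - 20;	/* 65485 */
	int max_mtu_v6 = ip_max_mtu - geneve_base_hlen - hard_header_len - 40;	/* 65465 */

	printf("%d %d\n", max_mtu_v4, max_mtu_v6);
	return 0;
}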


@ -2640,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.OutPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
skb->dev = macsec->real_dev;
len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);


@ -57,6 +57,7 @@
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
int ret;
u16 val, delay;
int ret, val;
u16 delay;
if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
}
if (phy_interface_is_rgmii(phydev)) {
ret = phy_write(phydev, MII_DP83867_PHYCTRL,
(dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
val = phy_read(phydev, MII_DP83867_PHYCTRL);
if (val < 0)
return val;
val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
}


@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
if (cdc_ncm_init(dev))
goto error2;
/* Some firmwares need a pause here or they will silently fail
* to set up the interface properly. This value was decided
* empirically on a Sierra Wireless MC7455 running 02.08.02.00
* firmware.
*/
usleep_range(10000, 20000);
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {


@ -31,7 +31,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
#define NET_VERSION "4"
#define NET_VERSION "5"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@ -624,6 +624,7 @@ struct r8152 {
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
int intr_interval;
@ -2408,9 +2409,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
if (enable) {
u32 ocp_data;
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
__rtl_set_wol(tp, WAKE_ANY);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@ -2421,7 +2419,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
u32 ocp_data;
__rtl_set_wol(tp, tp->saved_wolopts);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
ocp_data &= ~LINK_OFF_WAKE_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
}
}
static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
{
rtl_runtime_suspend_enable(tp, enable);
if (enable) {
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
} else {
r8153_u2p3en(tp, true);
r8153_u1u2en(tp, true);
}
@ -3512,7 +3531,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
rtl_runtime_suspend_enable(tp, true);
tp->rtl_ops.autosuspend_en(tp, true);
} else {
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
@ -3538,7 +3557,7 @@ static int rtl8152_resume(struct usb_interface *intf)
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_runtime_suspend_enable(tp, false);
tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
@ -3557,7 +3576,7 @@ static int rtl8152_resume(struct usb_interface *intf)
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
if (tp->netdev->flags & IFF_UP)
rtl_runtime_suspend_enable(tp, false);
tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
@ -4137,6 +4156,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
ops->autosuspend_en = rtl_runtime_suspend_enable;
break;
case RTL_VER_03:
@ -4152,6 +4172,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
ops->autosuspend_en = rtl8153_runtime_enable;
break;
default:

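The r8152 change routes runtime-suspend setup through a per-chip function pointer instead of calling rtl_runtime_suspend_enable() directly from the suspend/resume paths. A stand-alone sketch of that dispatch pattern (all names here are stand-ins, not the driver's real symbols):

#include <stdbool.h>

struct example_dev;

struct example_ops {
	/* chip-specific hook, mirrors rtl_ops.autosuspend_en above */
	void (*autosuspend_en)(struct example_dev *tp, bool enable);
};

struct example_dev {
	struct example_ops rtl_ops;
	int version;
};

static void example_8152_autosuspend(struct example_dev *tp, bool enable)
{
	/* generic wake/suspend configuration only */
}

static void example_8153_autosuspend(struct example_dev *tp, bool enable)
{
	/* same generic path plus U1/U2 and U2P3 power-state toggling */
}

static void example_ops_init(struct example_dev *tp)
{
	if (tp->version >= 3)
		tp->rtl_ops.autosuspend_en = example_8153_autosuspend;
	else
		tp->rtl_ops.autosuspend_en = example_8152_autosuspend;
}

/* suspend/resume paths then call tp->rtl_ops.autosuspend_en(tp, enable)
 * without caring which chip variant is attached */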
View File

@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
dev->rx_urb_size = dev->hard_mtu;
if (dev->rx_urb_size > old_rx_urb_size)
if (dev->rx_urb_size > old_rx_urb_size) {
usbnet_pause_rx(dev);
usbnet_unlink_rx_urbs(dev);
usbnet_resume_rx(dev);
}
}
/* max qlen depend on hard_mtu and rx_urb_size */
@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
!timer_pending (&dev->delay) &&
!test_bit (EVENT_RX_HALT, &dev->flags)) {
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_PAUSED, &dev->flags) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
if (temp < RX_QLEN(dev)) {

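The two usbnet hunks work together: growing rx_urb_size now pauses RX, unlinks the URBs that were allocated with the old size, and resumes, while usbnet_bh() declines to refill the queue as long as EVENT_RX_PAUSED is set. A minimal sketch of that sequence, wrapped in a hypothetical helper:

#include <linux/usb/usbnet.h>

/* Hypothetical wrapper: resize the RX URBs without letting the
 * tasklet refill the queue mid-resize. usbnet_pause_rx() sets
 * EVENT_RX_PAUSED, which the usbnet_bh() check above honours. */
static void usbnet_example_resize_rx(struct usbnet *dev, size_t new_size)
{
	dev->rx_urb_size = new_size;

	usbnet_pause_rx(dev);		/* stop refilling the RX queue */
	usbnet_unlink_rx_urbs(dev);	/* drop URBs sized for the old MTU */
	usbnet_resume_rx(dev);		/* clear the flag, restart RX */
}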
View File

@ -344,6 +344,8 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
unsigned long align;
enum nd_pfn_mode mode;
struct nd_namespace_io *nsio;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
struct nd_namespace_common *ndns = nd_pfn->ndns;
@ -386,22 +388,50 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -ENXIO;
}
align = le32_to_cpu(pfn_sb->align);
offset = le64_to_cpu(pfn_sb->dataoff);
if (align == 0)
align = 1UL << ilog2(offset);
mode = le32_to_cpu(pfn_sb->mode);
if (!nd_pfn->uuid) {
/* from probe we allocate */
/*
* When probing a namespace via nd_pfn_probe() the uuid
* is NULL (see: nd_pfn_devinit()); we initialize settings
* from the pfn_sb
*/
nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
if (!nd_pfn->uuid)
return -ENOMEM;
nd_pfn->align = align;
nd_pfn->mode = mode;
} else {
/* from init we validate */
/*
* When probing a pfn / dax instance we validate the
* live settings against the pfn_sb
*/
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
return -ENODEV;
/*
* If the uuid validates but other settings mismatch,
* return EINVAL because userspace has managed to change
* the configuration without specifying new
* identification.
*/
if (nd_pfn->align != align || nd_pfn->mode != mode) {
dev_err(&nd_pfn->dev,
"init failed, settings mismatch\n");
dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
nd_pfn->align, align, nd_pfn->mode,
mode);
return -EINVAL;
}
}
if (nd_pfn->align == 0)
nd_pfn->align = le32_to_cpu(pfn_sb->align);
if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
if (align > nvdimm_namespace_capacity(ndns)) {
dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
nd_pfn->align, nvdimm_namespace_capacity(ndns));
align, nvdimm_namespace_capacity(ndns));
return -EINVAL;
}
@ -411,7 +441,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
* namespace has changed since the pfn superblock was
* established.
*/
offset = le64_to_cpu(pfn_sb->dataoff);
nsio = to_nd_namespace_io(&ndns->dev);
if (offset >= resource_size(&nsio->res)) {
dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
@ -419,10 +448,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -EBUSY;
}
if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align))
if ((align && !IS_ALIGNED(offset, align))
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
offset);
dev_err(&nd_pfn->dev,
"bad offset: %#llx dax disabled align: %#lx\n",
offset, align);
return -ENXIO;
}
@ -502,7 +532,6 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
res->start += start_pad;
res->end -= end_trunc;
nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
if (nd_pfn->mode == PFN_MODE_RAM) {
if (offset < SZ_8K)
return ERR_PTR(-EINVAL);

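The reworked validation reads align, offset, and mode out of pfn_sb once, compares them against the live device settings, and rejects a data offset that is not a multiple of both the recorded alignment and PAGE_SIZE. The alignment test relies on the usual power-of-two IS_ALIGNED() arithmetic; a self-contained illustration (the macro name is a stand-in for the kernel's IS_ALIGNED):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's IS_ALIGNED() for power-of-two 'a' */
#define EXAMPLE_IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t offset = 0x200000;	/* pfn_sb->dataoff, 2 MiB */
	uint64_t align  = 0x200000;	/* pfn_sb->align, 2 MiB */
	uint64_t page   = 0x1000;	/* PAGE_SIZE, 4 KiB */

	/* both checks must pass, mirroring the condition above */
	printf("align ok: %d, page ok: %d\n",
	       EXAMPLE_IS_ALIGNED(offset, align),
	       EXAMPLE_IS_ALIGNED(offset, page));
	return 0;
}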
View File

@ -109,8 +109,8 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
}
usb2->phy = devm_phy_create(dev, NULL, &ops);
if (IS_ERR(dev))
return PTR_ERR(dev);
if (IS_ERR(usb2->phy))
return PTR_ERR(usb2->phy);
phy_set_drvdata(usb2->phy, usb2);
platform_set_drvdata(pdev, usb2);

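The phy-bcm-ns-usb2 fix applies IS_ERR()/PTR_ERR() to the pointer that devm_phy_create() actually returned rather than to the parent device. A minimal sketch of the corrected pattern, using an invented probe-helper name:

#include <linux/err.h>
#include <linux/phy/phy.h>

/* Hypothetical helper: create a PHY and propagate the ERR_PTR-encoded
 * errno from the object that can actually fail. */
static int example_create_phy(struct device *dev, const struct phy_ops *ops,
			      struct phy **out)
{
	struct phy *phy = devm_phy_create(dev, NULL, ops);

	if (IS_ERR(phy))		/* check the phy, not dev */
		return PTR_ERR(phy);

	*out = phy;
	return 0;
}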
Some files were not shown because too many files have changed in this diff.