nfsd: merge stable fix into main nfsd branch

J. Bruce Fields 2017-02-20 12:24:00 -05:00
commit 60709c093e
183 changed files with 1692 additions and 1239 deletions


@@ -10195,7 +10195,6 @@ F: drivers/media/tuners/qt1010*
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
L: ath9k-devel@lists.ath9k.org
W: http://wireless.kernel.org/en/users/Drivers/ath9k
S: Supported
F: drivers/net/wireless/ath/ath9k/
@@ -13066,7 +13065,7 @@ F: drivers/input/serio/userio.c
F: include/uapi/linux/userio.h
VIRTIO CONSOLE DRIVER
M: Amit Shah <amit.shah@redhat.com>
M: Amit Shah <amit@kernel.org>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/char/virtio_console.c


@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
KBUILD_ARFLAGS := $(call ar-option,D)
# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif


@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
cbz w6, .Lcbcencloop
ld1 {v0.16b}, [x5] /* get iv */
enc_prepare w3, x2, x5
enc_prepare w3, x2, x6
.Lcbcencloop:
ld1 {v1.16b}, [x1], #16 /* get next pt block */
eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
encrypt_block v0, w3, x2, x5, w6
encrypt_block v0, w3, x2, x6, w7
st1 {v0.16b}, [x0], #16
subs w4, w4, #1
bne .Lcbcencloop
st1 {v0.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_encrypt)
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
cbz w6, .LcbcdecloopNx
ld1 {v7.16b}, [x5] /* get iv */
dec_prepare w3, x2, x5
dec_prepare w3, x2, x6
.LcbcdecloopNx:
#if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
.Lcbcdecloop:
ld1 {v1.16b}, [x1], #16 /* get next ct block */
mov v0.16b, v1.16b /* ...and copy to v0 */
decrypt_block v0, w3, x2, x5, w6
decrypt_block v0, w3, x2, x6, w7
eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
mov v7.16b, v1.16b /* ct is next iv */
st1 {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
bne .Lcbcdecloop
.Lcbcdecout:
FRAME_POP
st1 {v7.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_decrypt)
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
AES_ENTRY(aes_ctr_encrypt)
FRAME_PUSH
cbnz w6, .Lctrfirst /* 1st time around? */
umov x5, v4.d[1] /* keep swabbed ctr in reg */
rev x5, x5
#if INTERLEAVE >= 2
cmn w5, w4 /* 32 bit overflow? */
bcs .Lctrinc
add x5, x5, #1 /* increment BE ctr */
b .LctrincNx
#else
b .Lctrinc
#endif
.Lctrfirst:
cbz w6, .Lctrnotfirst /* 1st time around? */
enc_prepare w3, x2, x6
ld1 {v4.16b}, [x5]
umov x5, v4.d[1] /* keep swabbed ctr in reg */
rev x5, x5
.Lctrnotfirst:
umov x8, v4.d[1] /* keep swabbed ctr in reg */
rev x8, x8
#if INTERLEAVE >= 2
cmn w5, w4 /* 32 bit overflow? */
cmn w8, w4 /* 32 bit overflow? */
bcs .Lctrloop
.LctrloopNx:
subs w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
#if INTERLEAVE == 2
mov v0.8b, v4.8b
mov v1.8b, v4.8b
rev x7, x5
add x5, x5, #1
rev x7, x8
add x8, x8, #1
ins v0.d[1], x7
rev x7, x5
add x5, x5, #1
rev x7, x8
add x8, x8, #1
ins v1.d[1], x7
ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
st1 {v0.16b-v1.16b}, [x0], #32
#else
ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
dup v7.4s, w5
dup v7.4s, w8
mov v0.16b, v4.16b
add v7.4s, v7.4s, v8.4s
mov v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
eor v2.16b, v7.16b, v2.16b
eor v3.16b, v5.16b, v3.16b
st1 {v0.16b-v3.16b}, [x0], #64
add x5, x5, #INTERLEAVE
add x8, x8, #INTERLEAVE
#endif
cbz w4, .LctroutNx
.LctrincNx:
rev x7, x5
rev x7, x8
ins v4.d[1], x7
cbz w4, .Lctrout
b .LctrloopNx
.LctroutNx:
sub x5, x5, #1
rev x7, x5
ins v4.d[1], x7
b .Lctrout
.Lctr1x:
adds w4, w4, #INTERLEAVE
beq .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
.Lctrloop:
mov v0.16b, v4.16b
encrypt_block v0, w3, x2, x6, w7
adds x8, x8, #1 /* increment BE ctr */
rev x7, x8
ins v4.d[1], x7
bcs .Lctrcarry /* overflow? */
.Lctrcarrydone:
subs w4, w4, #1
bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
ld1 {v3.16b}, [x1], #16
eor v3.16b, v0.16b, v3.16b
st1 {v3.16b}, [x0], #16
beq .Lctrout
.Lctrinc:
adds x5, x5, #1 /* increment BE ctr */
rev x7, x5
ins v4.d[1], x7
bcc .Lctrloop /* no overflow? */
bne .Lctrloop
.Lctrout:
st1 {v4.16b}, [x5] /* return next CTR value */
FRAME_POP
ret
.Lctrhalfblock:
ld1 {v3.8b}, [x1]
eor v3.8b, v0.8b, v3.8b
st1 {v3.8b}, [x0]
FRAME_POP
ret
.Lctrcarry:
umov x7, v4.d[0] /* load upper word of ctr */
rev x7, x7 /* ... to handle the carry */
add x7, x7, #1
rev x7, x7
ins v4.d[0], x7
b .Lctrloop
.Lctrhalfblock:
ld1 {v3.8b}, [x1]
eor v3.8b, v0.8b, v3.8b
st1 {v3.8b}, [x0]
.Lctrout:
FRAME_POP
ret
b .Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
.ltorg

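The reworked CTR path keeps the byte-swapped counter in x8, bumps it with adds, and takes the .Lctrcarry slow path only when the low 64 bits wrap. A minimal C sketch of the same 128-bit big-endian increment (illustrative only, not kernel code; ctr128_inc_be is a hypothetical name, and a little-endian host is assumed, as on arm64):

#include <stdint.h>
#include <string.h>

/* Increment a 128-bit big-endian counter, propagating the carry from the
 * low 64 bits into the high 64 bits -- the same logic the adds/.Lctrcarry
 * pair implements in registers. */
static void ctr128_inc_be(uint8_t ctr[16])
{
        uint64_t hi, lo;

        memcpy(&hi, ctr, 8);
        memcpy(&lo, ctr + 8, 8);
        hi = __builtin_bswap64(hi);     /* stored big-endian; host assumed LE */
        lo = __builtin_bswap64(lo);

        if (++lo == 0)                  /* low word wrapped: carry out */
                hi++;

        hi = __builtin_bswap64(hi);
        lo = __builtin_bswap64(lo);
        memcpy(ctr, &hi, 8);
        memcpy(ctr + 8, &lo, 8);
}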

@@ -164,7 +164,6 @@ config PPC
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_KERNEL_GZIP
select HAVE_CC_STACKPROTECTOR
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
bool "Build a relocatable kernel"
depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
select NONSTATIC_KERNEL
select MODULE_REL_CRCS if MODVERSIONS
help
This builds a kernel image that is capable of running at the
location the kernel is loaded at. For ppc32, there is no


@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
{
int i;
#ifndef __clang__ /* clang can't cope with this */
BUILD_BUG_ON(!__builtin_constant_p(feature));
#endif
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
if (!static_key_initialized) {


@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
{
int i;
#ifndef __clang__ /* clang can't cope with this */
BUILD_BUG_ON(!__builtin_constant_p(feature));
#endif
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
if (!static_key_initialized) {


@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
}
#endif
#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
#define ARCH_RELOCATES_KCRCTAB
#define reloc_start PHYSICAL_START
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */


@@ -1,40 +0,0 @@
/*
* GCC stack protector support.
*
* Stack protector works by putting predefined pattern at the start of
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function. The pattern is called stack canary
* and gcc expects it to be defined by a global variable called
* "__stack_chk_guard" on PPC. This unfortunately means that on SMP
* we cannot have a different canary value per task.
*/
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H
#include <linux/random.h>
#include <linux/version.h>
#include <asm/reg.h>
extern unsigned long __stack_chk_guard;
/*
* Initialize the stackprotector canary value.
*
* NOTE: this must only be called from functions that never return,
* and it must always be inlined.
*/
static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary;
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= mftb();
canary ^= LINUX_VERSION_CODE;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
}
#endif /* _ASM_STACKPROTECTOR_H */


@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
# -fstack-protector triggers protection checks in this code,
# but it is being used too early to link to meaningful stack_chk logic.
CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)


@@ -91,9 +91,6 @@ int main(void)
DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
#endif
#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
#endif
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE


@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
bool *clear_sw_state = flag;
bool clear_sw_state = *(bool *)flag;
int i, rc = 1;
for (i = 0; rc && i < 3; i++)


@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
lwz r0,TSK_STACK_CANARY(r2)
lis r4,__stack_chk_guard@ha
stw r0,__stack_chk_guard@l(r4)
#endif
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */


@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
for (end = (void *)vers + size; vers < end; vers++)
if (vers->name[0] == '.') {
memmove(vers->name, vers->name+1, strlen(vers->name));
#ifdef ARCH_RELOCATES_KCRCTAB
/* The TOC symbol has no CRC computed. To avoid CRC
* check failing, we must force it to the expected
* value (see CRC check in module.c).
*/
if (!strcmp(vers->name, "TOC."))
vers->crc = -(unsigned long)reloc_start;
#endif
}
}


@@ -64,12 +64,6 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)


@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
if (!PHANDLE_VALID(cpu_pkg))
return;
prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
prom.cpu = be32_to_cpu(rval);


@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
if (!pmdp)
return -ENOMEM;
if (map_page_size == PMD_SIZE) {
ptep = (pte_t *)pudp;
ptep = pmdp_ptep(pmdp);
goto set_the_pte;
}
ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
}
pmdp = pmd_offset(pudp, ea);
if (map_page_size == PMD_SIZE) {
ptep = (pte_t *)pudp;
ptep = pmdp_ptep(pmdp);
goto set_the_pte;
}
if (!pmd_present(*pmdp)) {


@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
static inline void tsb_context_switch(struct mm_struct *mm)
{
__tsb_context_switch(__pa(mm->pgd),
&mm->context.tsb_block[0],
&mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
(mm->context.tsb_block[1].tsb ?
&mm->context.tsb_block[1] :
(mm->context.tsb_block[MM_TSB_HUGE].tsb ?
&mm->context.tsb_block[MM_TSB_HUGE] :
NULL)
#else
NULL
#endif
, __pa(&mm->context.tsb_descr[0]));
, __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
}
void tsb_grow(struct mm_struct *mm,


@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
unsigned long order = get_order(size);
unsigned long p;
p = __get_free_pages(GFP_KERNEL, order);
p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();


@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
"Linux powering off";
static const char rebooting_msg[32] __attribute__((aligned(32))) =
"Linux rebooting";
static const char panicing_msg[32] __attribute__((aligned(32))) =
"Linux panicing";
static const char panicking_msg[32] __attribute__((aligned(32))) =
"Linux panicking";
static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
{
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
return NOTIFY_DONE;
}


@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
atomic_inc(&sun4v_resum_oflow_cnt);
}
/* Given a set of registers, get the virtual address that was being accessed
* by the faulting instruction at tpc.
*/
static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
{
unsigned int insn;
if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
return compute_effective_address(regs, insn,
(insn >> 25) & 0x1f);
}
return 0;
}
/* Attempt to handle non-resumable errors generated from userspace.
* Returns true if the signal was handled, false otherwise.
*/
bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
struct sun4v_error_entry *ent) {
unsigned int attrs = ent->err_attrs;
if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
unsigned long addr = ent->err_raddr;
siginfo_t info;
if (addr == ~(u64)0) {
/* This seems highly unlikely to ever occur */
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
} else {
unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
PAGE_SIZE);
/* Break the unfortunate news. */
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
addr);
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
page_cnt);
while (page_cnt-- > 0) {
if (pfn_valid(addr >> PAGE_SHIFT))
get_page(pfn_to_page(addr >> PAGE_SHIFT));
addr += PAGE_SIZE;
}
}
info.si_signo = SIGKILL;
info.si_errno = 0;
info.si_trapno = 0;
force_sig_info(info.si_signo, &info, current);
return true;
}
if (attrs & SUN4V_ERR_ATTRS_PIO) {
siginfo_t info;
info.si_signo = SIGBUS;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)sun4v_get_vaddr(regs);
force_sig_info(info.si_signo, &info, current);
return true;
}
/* Default to doing nothing */
return false;
}
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
* Log the event, clear the first word of the entry, and die.
*/
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
put_cpu();
if (!(regs->tstate & TSTATE_PRIV) &&
sun4v_nonresum_error_user_handled(regs, &local_copy)) {
/* DON'T PANIC: This userspace error was handled. */
return;
}
#ifdef CONFIG_PCI
/* Check for the special PCI poke sequence. */
if (pci_poke_in_progress && pci_poke_cpu == cpu) {

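sun4v_get_vaddr() decodes the instruction at %tpc: bits 29:25 of a SPARC-V9 instruction word hold the destination-register field that compute_effective_address() needs. A standalone sketch of that decode and of the page-count rounding used by the memory-error path (hypothetical helper names, not kernel code):

#include <stdint.h>

#define SPARC_PAGE_SIZE 8192UL  /* sparc64 base page size */

/* rd field: bits 29:25 of the 32-bit instruction word. */
static unsigned int insn_rd(uint32_t insn)
{
        return (insn >> 25) & 0x1f;
}

/* DIV_ROUND_UP(err_size, PAGE_SIZE): whole pages covering the reported
 * error range, with any partial page rounded up. */
static unsigned long error_page_count(unsigned long err_size)
{
        return (err_size + SPARC_PAGE_SIZE - 1) / SPARC_PAGE_SIZE;
}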

@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
return rapl_pmus->pmus[topology_logical_package_id(cpu)];
unsigned int pkgid = topology_logical_package_id(cpu);
/*
* The unsigned check also catches the '-1' return value for non
* existent mappings in the topology map.
*/
return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}
static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
/* must be done before validate_group */
pmu = cpu_to_rapl_pmu(event->cpu);
if (!pmu)
return -EINVAL;
event->cpu = pmu->cpu;
event->pmu_private = pmu;
event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
if (!pmu) {
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
if (!pmu)
return -ENOMEM;
raw_spin_lock_init(&pmu->lock);
INIT_LIST_HEAD(&pmu->active_list);
pmu->pmu = &rapl_pmus->pmu;
pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
rapl_hrtimer_init(pmu);
rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
}
/*
* Check if there is an online cpu in the package which collects rapl
* events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
return 0;
}
static int rapl_cpu_prepare(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
if (pmu)
return 0;
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
if (!pmu)
return -ENOMEM;
raw_spin_lock_init(&pmu->lock);
INIT_LIST_HEAD(&pmu->active_list);
pmu->pmu = &rapl_pmus->pmu;
pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
pmu->cpu = -1;
rapl_hrtimer_init(pmu);
rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
return 0;
}
static int rapl_check_hw_unit(bool apply_quirk)
{
u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
/*
* Install callbacks. Core will call them for each online cpu.
*/
ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
rapl_cpu_prepare, NULL);
if (ret)
goto out;
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
"perf/x86/rapl:online",
rapl_cpu_online, rapl_cpu_offline);
if (ret)
goto out1;
goto out;
ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
if (ret)
goto out2;
goto out1;
rapl_advertise();
return 0;
out2:
cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out1:
cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
pr_warn("Initialization failed (%d), disabled\n", ret);
cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
perf_pmu_unregister(&rapl_pmus->pmu);
cleanup_rapl_pmus();
}

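cpu_to_rapl_pmu() here, and uncore_pmu_to_box() in the next file, lean on one idiom: topology_logical_package_id() returns -1 when a CPU has no mapping, and storing that into an unsigned variable folds the error check and the bounds check into a single comparison. A self-contained sketch of the idiom (hypothetical names):

#include <stdio.h>

#define MAX_PKG 8U

/* An id of -1 converts to UINT_MAX, so one "pkgid < MAX_PKG" test
 * rejects both out-of-range packages and the -1 "no mapping" sentinel. */
static int pkg_valid(int id)
{
        unsigned int pkgid = (unsigned int)id;

        return pkgid < MAX_PKG;
}

int main(void)
{
        printf("%d %d %d\n", pkg_valid(0), pkg_valid(7), pkg_valid(-1));
        /* prints: 1 1 0 */
        return 0;
}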

@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
return pmu->boxes[topology_logical_package_id(cpu)];
unsigned int pkgid = topology_logical_package_id(cpu);
/*
* The unsigned check also catches the '-1' return value for non
* existent mappings in the topology map.
*/
return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
pmu->registered = false;
}
static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
{
struct intel_uncore_pmu *pmu = type->pmus;
struct intel_uncore_box *box;
int i, pkg;
if (pmu) {
pkg = topology_physical_package_id(cpu);
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (box)
uncore_box_exit(box);
}
}
}
static void uncore_exit_boxes(void *dummy)
{
struct intel_uncore_type **types;
for (types = uncore_msr_uncores; *types; types++)
__uncore_exit_boxes(*types++, smp_processor_id());
}
static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
}
}
static int uncore_cpu_dying(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg;
pkg = topology_logical_package_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (box && atomic_dec_return(&box->refcnt) == 0)
uncore_box_exit(box);
}
}
return 0;
}
static int first_init;
static int uncore_cpu_starting(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg, ncpus = 1;
if (first_init) {
/*
* On init we get the number of online cpus in the package
* and set refcount for all of them.
*/
ncpus = cpumask_weight(topology_core_cpumask(cpu));
}
pkg = topology_logical_package_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (!box)
continue;
/* The first cpu on a package activates the box */
if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
uncore_box_init(box);
}
}
return 0;
}
static int uncore_cpu_prepare(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg;
pkg = topology_logical_package_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
if (pmu->boxes[pkg])
continue;
/* First cpu of a package allocates the box */
box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box)
return -ENOMEM;
box->pmu = pmu;
box->pkgid = pkg;
pmu->boxes[pkg] = box;
}
}
return 0;
}
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
int new_cpu)
{
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
static int uncore_event_cpu_offline(unsigned int cpu)
{
int target;
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg, target;
/* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
return 0;
goto unref;
/* Find a new cpu to collect uncore events */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
uncore_change_context(uncore_msr_uncores, cpu, target);
uncore_change_context(uncore_pci_uncores, cpu, target);
unref:
/* Clear the references */
pkg = topology_logical_package_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (box && atomic_dec_return(&box->refcnt) == 0)
uncore_box_exit(box);
}
}
return 0;
}
static int allocate_boxes(struct intel_uncore_type **types,
unsigned int pkg, unsigned int cpu)
{
struct intel_uncore_box *box, *tmp;
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu;
LIST_HEAD(allocated);
int i;
/* Try to allocate all required boxes */
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
if (pmu->boxes[pkg])
continue;
box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box)
goto cleanup;
box->pmu = pmu;
box->pkgid = pkg;
list_add(&box->active_list, &allocated);
}
}
/* Install them in the pmus */
list_for_each_entry_safe(box, tmp, &allocated, active_list) {
list_del_init(&box->active_list);
box->pmu->boxes[pkg] = box;
}
return 0;
cleanup:
list_for_each_entry_safe(box, tmp, &allocated, active_list) {
list_del_init(&box->active_list);
kfree(box);
}
return -ENOMEM;
}
static int uncore_event_cpu_online(unsigned int cpu)
{
int target;
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, ret, pkg, target;
pkg = topology_logical_package_id(cpu);
ret = allocate_boxes(types, pkg, cpu);
if (ret)
return ret;
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (box && atomic_inc_return(&box->refcnt) == 1)
uncore_box_init(box);
}
}
/*
* Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
if (cret && pret)
return -ENODEV;
/*
* Install callbacks. Core will call them for each online cpu.
*
* The first online cpu of each package allocates and takes
* the refcounts for all other online cpus in that package.
* If msrs are not enabled no allocation is required and
* uncore_cpu_prepare() is not called for each online cpu.
*/
if (!cret) {
ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
"perf/x86/intel/uncore:prepare",
uncore_cpu_prepare, NULL);
if (ret)
goto err;
} else {
cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
"perf/x86/intel/uncore:prepare",
uncore_cpu_prepare, NULL);
}
first_init = 1;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
"perf/x86/uncore:starting",
uncore_cpu_starting, uncore_cpu_dying);
first_init = 0;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
"perf/x86/uncore:online",
uncore_event_cpu_online, uncore_event_cpu_offline);
/* Install hotplug callbacks to setup the targets for each package */
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
"perf/x86/intel/uncore:online",
uncore_event_cpu_online,
uncore_event_cpu_offline);
if (ret)
goto err;
return 0;
err:
/* Undo box->init_box() */
on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
static void __exit intel_uncore_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
}


@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
extern bool initrd_gone;
#else
static inline int __init microcode_init(void) { return 0; };
static inline void __init load_ucode_bsp(void) { }


@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
if (idx != -1 && irq_trigger(idx))
unmask_ioapic_irq(irq_get_chip_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
if (timer_irq_works()) {
if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@
* legacy devices should be connected to IO APIC #0
*/
replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
legacy_pic->unmask(0);
if (timer_irq_works()) {


@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
static void __restart_timer(struct timer_list *t, unsigned long interval)
static void __start_timer(struct timer_list *t, unsigned long interval)
{
unsigned long when = jiffies + interval;
unsigned long flags;
local_irq_save(flags);
if (timer_pending(t)) {
if (time_before(when, t->expires))
mod_timer(t, when);
} else {
t->expires = round_jiffies(when);
add_timer_on(t, smp_processor_id());
}
if (!timer_pending(t) || time_before(when, t->expires))
mod_timer(t, round_jiffies(when));
local_irq_restore(flags);
}
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
done:
__this_cpu_write(mce_next_interval, iv);
__restart_timer(t, iv);
__start_timer(t, iv);
}
/*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
__start_timer(t, interval);
if (interval < iv)
__this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
}
}
static void mce_start_timer(unsigned int cpu, struct timer_list *t)
static void mce_start_timer(struct timer_list *t)
{
unsigned long iv = check_interval * HZ;
if (mca_cfg.ignore_ce || !iv)
return;
per_cpu(mce_next_interval, cpu) = iv;
t->expires = round_jiffies(jiffies + iv);
add_timer_on(t, cpu);
this_cpu_write(mce_next_interval, iv);
__start_timer(t, iv);
}
static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
unsigned int cpu = smp_processor_id();
setup_pinned_timer(t, mce_timer_fn, cpu);
mce_start_timer(cpu, t);
mce_start_timer(t);
}
/* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
static int mce_cpu_online(unsigned int cpu)
{
struct timer_list *t = &per_cpu(mce_timer, cpu);
struct timer_list *t = this_cpu_ptr(&mce_timer);
int ret;
mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
return ret;
}
mce_reenable_cpu();
mce_start_timer(cpu, t);
mce_start_timer(t);
return 0;
}
static int mce_cpu_pre_down(unsigned int cpu)
{
struct timer_list *t = &per_cpu(mce_timer, cpu);
struct timer_list *t = this_cpu_ptr(&mce_timer);
mce_disable_cpu();
del_timer_sync(t);


@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
reget:
if (!get_builtin_microcode(&cp, family)) {
#ifdef CONFIG_BLK_DEV_INITRD
cp = find_cpio_data(ucode_path, (void *)initrd_start,
initrd_end - initrd_start, NULL);
if (!initrd_gone)
cp = find_cpio_data(ucode_path, (void *)initrd_start,
initrd_end - initrd_start, NULL);
#endif
if (!(cp.data && cp.size)) {
/*


@@ -46,6 +46,8 @@
static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr = true;
bool initrd_gone;
LIST_HEAD(microcode_cache);
/*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
static int __init save_microcode_in_initrd(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
int ret = -EINVAL;
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
if (c->x86 >= 6)
return save_microcode_in_initrd_intel();
ret = save_microcode_in_initrd_intel();
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
return save_microcode_in_initrd_amd(c->x86);
ret = save_microcode_in_initrd_amd(c->x86);
break;
default:
break;
}
return -EINVAL;
initrd_gone = true;
return ret;
}
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
* has the virtual address of the beginning of the initrd. It also
* possibly relocates the ramdisk. In either case, initrd_start contains
* the updated address so use that instead.
*
* initrd_gone is for the hotplug case where we've thrown out initrd
* already.
*/
if (!use_pa && initrd_start)
start = initrd_start;
if (!use_pa) {
if (initrd_gone)
return (struct cpio_data){ NULL, 0, "" };
if (initrd_start)
start = initrd_start;
}
return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */


@@ -41,7 +41,7 @@
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
/* Current microcode patch used in early patching */
/* Current microcode patch used in early patching on the APs. */
struct microcode_intel *intel_ucode_patch;
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
struct ucode_cpu_info uci;
struct cpio_data cp;
/*
* AP loading didn't find any microcode patch, no need to save anything.
*/
if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
return 0;
if (!load_builtin_intel_microcode(&cp))
cp = find_microcode_in_initrd(ucode_path, false);
@@ -628,7 +622,6 @@
return 0;
}
/*
* @res_patch, output: a pointer to the patch we found.
*/


@@ -9,6 +9,7 @@
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/fpu/xstate.h>
#include <asm/traps.h>
#include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
* it will #GP. Make sure it is replaced after the memset().
*/
if (static_cpu_has(X86_FEATURE_XSAVES))
state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
xfeatures_mask;
if (static_cpu_has(X86_FEATURE_FXSR))
fpstate_init_fxstate(&state->fxsave);


@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
} else {
struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
disable_irq(hdev->irq);
irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));


@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
memcpy(dest, xsave, XSAVE_HDR_OFFSET);
/* Set XSTATE_BV */
xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
/*


@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
efi_scratch.use_pgd = true;
/*
* Certain firmware versions are way too sentimental and still believe
* they are exclusive and unquestionable owners of the first physical page,
* even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
* (but then write-access it later during SetVirtualAddressMap()).
*
* Create a 1:1 mapping for this page, to avoid triple faults during early
* boot with such firmware. We are free to hand this page to the BIOS,
* as trim_bios_range() will reserve the first page and isolate it away
* from memory allocators anyway.
*/
if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
pr_err("Failed to create 1:1 mapping for the first page!\n");
return 1;
}
/*
* When making calls to the firmware everything needs to be 1:1
* mapped and addressable with 32-bit pointers. Map the kernel


@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
void cpu_reset(void)
{
#if XCHAL_HAVE_PTP_MMU
#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
local_irq_disable();
/*
* We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must


@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
struct crypto_larval *larval;
int err;
alg->cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_check_alg(alg);
if (err)
return err;


@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
if (qc->err_mask & ~AC_ERR_OTHER)
qc->err_mask &= ~AC_ERR_OTHER;
} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
qc->result_tf.command |= ATA_SENSE;
}
/* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
/*
* Device times out with higher max sects.
* These devices time out with higher max sects.
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
{ "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */


@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!hpriv->base)
return -ENOMEM;
hpriv->base -= SATAHC0_REG_BASE;
hpriv->clk = clk_get(&pdev->dev, NULL);


@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
struct firmware_buf *buf = fw_priv->buf;
__fw_load_abort(buf);
/* avoid user action after loading abort */
fw_priv->buf = NULL;
}
static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
mutex_lock(&fw_lock);
fw_buf = fw_priv->buf;
if (!fw_buf)
if (fw_state_is_aborted(&fw_buf->fw_st))
goto out;
switch (loading) {


@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
{
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn, end_pfn;
unsigned long valid_start, valid_end, valid_pages;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct page *first_page;
struct zone *zone;
int zone_shift = 0;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
end_pfn = start_pfn + nr_pages;
first_page = pfn_to_page(start_pfn);
/* A block that contains more than one zone can not be offlined. */
if (!test_pages_in_a_zone(start_pfn, end_pfn))
if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
return sprintf(buf, "none\n");
zone = page_zone(first_page);
zone = page_zone(pfn_to_page(valid_start));
valid_pages = valid_end - valid_start;
/* MMOP_ONLINE_KEEP */
sprintf(buf, "%s", zone->name);
/* MMOP_ONLINE_KERNEL */
zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
}
/* MMOP_ONLINE_MOVABLE */
zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);


@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
#ifdef CONFIG_BCMA_DRIVER_MIPS
void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_b.c */
int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);


@@ -15,8 +15,6 @@
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
u32 mask, u32 value)
{
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_early_init(cc);
if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
bcma_chipco_serial_init(cc);
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
bcma_core_chipcommon_flash_detect(cc);
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
return res;
}
static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
#ifdef CONFIG_BCMA_DRIVER_MIPS
void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
{
#if IS_BUILTIN(CONFIG_BCM47XX)
unsigned int irq;
u32 baud_base;
u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
ports[i].baud_base = baud_base;
ports[i].reg_shift = 0;
}
#endif /* CONFIG_BCM47XX */
}
#endif /* CONFIG_BCMA_DRIVER_MIPS */


@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus = mcore->core->bus;
if (mcore->early_setup_done)
return;
bcma_chipco_serial_init(&bus->drv_cc);
bcma_core_mips_nvram_init(mcore);
mcore->early_setup_done = true;


@@ -153,6 +153,8 @@ struct cppi41_dd {
/* context for suspend/resume */
unsigned int dma_tdfdq;
bool is_suspended;
};
#define FIST_COMPLETION_QUEUE 93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
BUG_ON(desc_num >= ALLOC_DECS_NUM);
c = cdd->chan_busy[desc_num];
cdd->chan_busy[desc_num] = NULL;
/* Usecount for chan_busy[], paired with push_desc_queue() */
pm_runtime_put(cdd->ddev.dev);
return c;
}
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
while (val) {
u32 desc, len;
int error;
error = pm_runtime_get(cdd->ddev.dev);
if (error < 0)
dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
__func__, error);
/*
* This should never trigger, see the comments in
* push_desc_queue()
*/
WARN_ON(cdd->is_suspended);
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -343,9 +349,6 @@
c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
dmaengine_desc_get_callback_invoke(&c->txd, NULL);
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
}
}
return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
*/
__iowmb();
/*
* DMA transfers can take at least 200ms to complete with USB mass
* storage connected. To prevent autosuspend timeouts, we must use
* pm_runtime_get/put() when chan_busy[] is modified. This will get
* cleared in desc_to_chan() or cppi41_stop_chan() depending on the
* outcome of the transfer.
*/
pm_runtime_get(cdd->ddev.dev);
desc_phys = lower_32_bits(c->desc_phys);
desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@
cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}
static void pending_desc(struct cppi41_channel *c)
/*
* Caller must hold cdd->lock to prevent push_desc_queue()
* getting called out of order. We have both cppi41_dma_issue_pending()
* and cppi41_runtime_resume() call this function.
*/
static void cppi41_run_queue(struct cppi41_dd *cdd)
{
struct cppi41_dd *cdd = c->cdd;
unsigned long flags;
struct cppi41_channel *c, *_c;
spin_lock_irqsave(&cdd->lock, flags);
list_add_tail(&c->node, &cdd->pending);
spin_unlock_irqrestore(&cdd->lock, flags);
list_for_each_entry_safe(c, _c, &cdd->pending, node) {
push_desc_queue(c);
list_del(&c->node);
}
}
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
struct cppi41_dd *cdd = c->cdd;
unsigned long flags;
int error;
error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
return;
}
if (likely(pm_runtime_active(cdd->ddev.dev)))
push_desc_queue(c);
else
pending_desc(c);
spin_lock_irqsave(&cdd->lock, flags);
list_add_tail(&c->node, &cdd->pending);
if (!cdd->is_suspended)
cppi41_run_queue(cdd);
spin_unlock_irqrestore(&cdd->lock, flags);
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
WARN_ON(!cdd->chan_busy[desc_num]);
cdd->chan_busy[desc_num] = NULL;
/* Usecount for chan_busy[], paired with push_desc_queue() */
pm_runtime_put(cdd->ddev.dev);
return 0;
}
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&cdd->lock, flags);
cdd->is_suspended = true;
WARN_ON(!list_empty(&cdd->pending));
spin_unlock_irqrestore(&cdd->lock, flags);
return 0;
}
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
static int __maybe_unused cppi41_runtime_resume(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
struct cppi41_channel *c, *_c;
unsigned long flags;
spin_lock_irqsave(&cdd->lock, flags);
list_for_each_entry_safe(c, _c, &cdd->pending, node) {
push_desc_queue(c);
list_del(&c->node);
}
cdd->is_suspended = false;
cppi41_run_queue(cdd);
spin_unlock_irqrestore(&cdd->lock, flags);
return 0;

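The cppi41 change pairs a pm_runtime_get() in push_desc_queue() with a pm_runtime_put() wherever a descriptor leaves chan_busy[] (desc_to_chan() or cppi41_stop_chan()), so the device cannot autosuspend while a transfer is in flight. A stripped-down sketch of that pairing (hypothetical names; the real driver uses the runtime-PM API):

#include <stddef.h>

static int usecount;                    /* stand-in for the runtime-PM usage count */
static void runtime_get(void) { usecount++; }
static void runtime_put(void) { usecount--; }

#define NDESC 32
static void *chan_busy[NDESC];

/* Queueing a descriptor takes a usage reference... */
static void push_desc(int slot, void *chan)
{
        runtime_get();                  /* paired with the put in complete_desc() */
        chan_busy[slot] = chan;
}

/* ...and completion (or teardown) drops it, so the device stays active
 * exactly as long as chan_busy[] has entries. */
static void *complete_desc(int slot)
{
        void *chan = chan_busy[slot];

        chan_busy[slot] = NULL;
        runtime_put();
        return chan;
}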

@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
struct pl330_thread *thrd = NULL;
unsigned long flags;
int chans, i;
if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
chans = pl330->pcfg.num_chan;
spin_lock_irqsave(&pl330->lock, flags);
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@
thrd = NULL;
}
spin_unlock_irqrestore(&pl330->lock, flags);
return thrd;
}
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
static void pl330_release_channel(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330;
unsigned long flags;
if (!thrd || thrd->free)
return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
pl330 = thrd->dmac;
spin_lock_irqsave(&pl330->lock, flags);
_free_event(thrd, thrd->ev);
thrd->free = true;
spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
@@ -2122,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
spin_lock_irqsave(&pch->lock, flags);
spin_lock_irqsave(&pl330->lock, flags);
dma_cookie_init(chan);
pch->cyclic = false;
pch->thread = pl330_request_channel(pl330);
if (!pch->thread) {
spin_unlock_irqrestore(&pch->lock, flags);
spin_unlock_irqrestore(&pl330->lock, flags);
return -ENOMEM;
}
tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
spin_unlock_irqrestore(&pch->lock, flags);
spin_unlock_irqrestore(&pl330->lock, flags);
return 1;
}
@@ -2238,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan)
static void pl330_free_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
tasklet_kill(&pch->task);
pm_runtime_get_sync(pch->dmac->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
spin_lock_irqsave(&pl330->lock, flags);
pl330_release_channel(pch->thread);
pch->thread = NULL;
@@ -2251,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
if (pch->cyclic)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
spin_unlock_irqrestore(&pl330->lock, flags);
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}


@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
struct exit_boot_struct {
efi_memory_desc_t *runtime_map;
int *runtime_entry_count;
void *new_fdt_addr;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
p->runtime_map, p->runtime_entry_count);
return EFI_SUCCESS;
return update_fdt_memmap(p->new_fdt_addr, map);
}
/*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
priv.runtime_map = runtime_map;
priv.runtime_entry_count = &runtime_entry_count;
priv.new_fdt_addr = (void *)*new_fdt_addr;
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
status = update_fdt_memmap((void *)*new_fdt_addr, &map);
if (status != EFI_SUCCESS) {
/*
* The kernel won't get far without the memory map, but
* may still be able to print something meaningful so
* return success here.
*/
return EFI_SUCCESS;
}
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,


@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
if (adev->mode_info.num_crtc)
amdgpu_display_set_vga_render_state(adev, false);
gmc_v6_0_mc_stop(adev, &save);
if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v6_0_mc_resume(adev, &save);
amdgpu_display_set_vga_render_state(adev, false);
}
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)


@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_pending_vblank_event *event = crtc_state->event;
/*
* TEST_ONLY and PAGE_FLIP_EVENT are mutually
* exclusive, if they weren't, this code should be
* called on success for TEST_ONLY too.
* Free the allocated event. drm_atomic_helper_setup_commit
* can allocate an event too, so only free it if it's ours
* to prevent a double free in drm_atomic_state_clear.
*/
if (crtc_state->event)
drm_event_cancel_free(dev, &crtc_state->event->base);
if (event && (event->base.fence || event->base.file_priv)) {
drm_event_cancel_free(dev, &event->base);
crtc_state->event = NULL;
}
}
if (!fence_state)


@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
funcs = plane->helper_private;
if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
continue;
if (funcs->prepare_fb) {
ret = funcs->prepare_fb(plane, plane_state);
if (ret)
@@ -1685,9 +1682,6 @@
if (j >= i)
continue;
if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
continue;
funcs = plane->helper_private;
if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
for_each_plane_in_state(old_state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
continue;
funcs = plane->helper_private;
if (funcs->cleanup_fb)


@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->funcs->atomic_destroy_state(connector,
connector->state);
mutex_destroy(&connector->mutex);
memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
*/
int drm_connector_register(struct drm_connector *connector)
{
int ret;
int ret = 0;
if (connector->registered)
if (!connector->dev->registered)
return 0;
mutex_lock(&connector->mutex);
if (connector->registered)
goto unlock;
ret = drm_sysfs_connector_add(connector);
if (ret)
return ret;
goto unlock;
ret = drm_debugfs_connector_add(connector);
if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
connector->registered = true;
return 0;
goto unlock;
err_debugfs:
drm_debugfs_connector_remove(connector);
err_sysfs:
drm_sysfs_connector_remove(connector);
unlock:
mutex_unlock(&connector->mutex);
return ret;
}
EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
*/
void drm_connector_unregister(struct drm_connector *connector)
{
if (!connector->registered)
mutex_lock(&connector->mutex);
if (!connector->registered) {
mutex_unlock(&connector->mutex);
return;
}
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
drm_debugfs_connector_remove(connector);
connector->registered = false;
mutex_unlock(&connector->mutex);
}
EXPORT_SYMBOL(drm_connector_unregister);

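drm_connector_register() and drm_connector_unregister() now serialize on connector->mutex and re-check connector->registered under the lock, making both calls idempotent against concurrent callers. A generic sketch of that guarded-registration pattern (hypothetical type, pthreads standing in for the kernel mutex API):

#include <pthread.h>
#include <stdbool.h>

struct obj {
        pthread_mutex_t lock;
        bool registered;
};

static int obj_register(struct obj *o)
{
        int ret = 0;

        pthread_mutex_lock(&o->lock);
        if (o->registered)              /* already registered: no-op */
                goto unlock;
        /* ... perform the registration steps here; on failure set ret
         * and fall through so the lock is always released ... */
        o->registered = true;
unlock:
        pthread_mutex_unlock(&o->lock);
        return ret;
}

static void obj_unregister(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (o->registered) {
                /* ... tear down in reverse order ... */
                o->registered = false;
        }
        pthread_mutex_unlock(&o->lock);
}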

@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
dev->registered = true;
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
drm_lastclose(dev);
dev->registered = false;
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);


@@ -1012,6 +1012,8 @@ struct intel_fbc {
struct work_struct underrun_work;
struct intel_fbc_state_cache {
struct i915_vma *vma;
struct {
unsigned int mode_flags;
uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ } plane.
} plane;
struct {
u64 ilk_ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
int fence_reg;
unsigned int tiling_mode;
} fb;
} state_cache;
struct intel_fbc_reg_params {
struct i915_vma *vma;
struct {
enum pipe pipe;
enum plane plane;
@@ -1041,10 +1042,8 @@ } crtc.
} crtc;
struct {
u64 ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
int fence_reg;
} fb;
int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
}
static inline unsigned long
i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
}
/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);


@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->vma = NULL;
return state;
}
@@ -100,6 +102,24 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct i915_vma *vma;
vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
/*
* FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
* We currently don't clear all planes during driver unload, so we have
* to be able to unpin vma here for now.
*
* Normally this can only happen during unload when kmscon is disabled
* and userspace doesn't attempt to set a framebuffer at all.
*/
if (vma) {
mutex_lock(&plane->dev->struct_mutex);
intel_unpin_fb_vma(vma);
mutex_unlock(&plane->dev->struct_mutex);
}
drm_atomic_helper_plane_destroy_state(plane, state);
}


@@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
i915_vma_pin_fence(vma);
}
i915_vma_get(vma);
err:
intel_runtime_pm_put(dev_priv);
return vma;
}
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
void intel_unpin_fb_vma(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
struct i915_vma *vma;
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
intel_fill_fb_ggtt_view(&view, fb, rotation);
vma = i915_gem_object_to_ggtt(obj, &view);
lockdep_assert_held(&vma->vm->dev->struct_mutex);
if (WARN_ON_ONCE(!vma))
return;
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
i915_vma_put(vma);
}
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
@@ -2775,20 +2769,20 @@
* an fb with another CRTC instead
*/
for_each_crtc(dev, c) {
i = to_intel_crtc(c);
struct intel_plane_state *state;
if (c == &intel_crtc->base)
continue;
if (!i->active)
if (!to_intel_crtc(c)->active)
continue;
fb = c->primary->fb;
if (!fb)
state = to_intel_plane_state(c->primary->state);
if (!state->vma)
continue;
obj = intel_fb_obj(fb);
if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = c->primary->fb;
drm_framebuffer_reference(fb);
goto valid_fb;
}
@@ -2809,6 +2803,19 @@
return;
valid_fb:
mutex_lock(&dev->struct_mutex);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(intel_state->vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
intel_crtc->pipe, PTR_ERR(intel_state->vma));
intel_state->vma = NULL;
drm_framebuffer_unreference(fb);
return;
}
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = fb->width << 16;
@@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else {
I915_WRITE(DSPADDR(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
}
POSTING_READ(reg);
@@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
}
}
u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
struct i915_vma *vma;
intel_fill_fb_ggtt_view(&view, fb, rotation);
vma = i915_gem_object_to_ggtt(obj, &view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
view.type))
return -1;
return i915_ggtt_offset(vma);
}
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
}
I915_WRITE(PLANE_SURF(pipe, 0),
intel_fb_gtt_offset(fb, rotation) + surf_addr);
intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
}
@@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
flush_work(&work->mmio_work);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
intel_unpin_fb_vma(work->old_vma);
i915_gem_object_put(work->pending_flip_obj);
mutex_unlock(&dev->struct_mutex);
@@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
}
work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
work->gtt_offset += intel_crtc->dspaddr_offset;
work->old_vma = to_intel_plane_state(primary->state)->vma;
to_intel_plane_state(primary->state)->vma = vma;
work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
work->rotation = crtc->primary->state->rotation;
/*
@@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_request:
i915_add_request_no_flush(request);
cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
to_intel_plane_state(primary->state)->vma = work->old_vma;
intel_unpin_fb_vma(vma);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
unlock:
@@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
DRM_DEBUG_KMS("failed to pin object\n");
return PTR_ERR(vma);
}
to_intel_plane_state(new_state)->vma = vma;
}
return 0;
@@ -14812,19 +14807,12 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct intel_plane_state *old_intel_state;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
struct i915_vma *vma;
old_intel_state = to_intel_plane_state(old_state);
if (!obj && !old_obj)
return;
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev_priv)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
/* Should only be called after a successful intel_prepare_plane_fb()! */
vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
if (vma)
intel_unpin_fb_vma(vma);
}
int
@@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
addr = i915_gem_object_ggtt_offset(obj, NULL);
addr = intel_plane_ggtt_offset(state);
else
addr = obj->phys_handle->busaddr;
@@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev)
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
intel_init_gt_powersave(dev_priv);
intel_modeset_init_hw(dev);
intel_setup_overlay(dev_priv);
/*
* Make sure any fbs we allocated at startup are properly
* pinned & fenced. When we do the allocation it's too early
* for this.
*/
for_each_crtc(dev, c) {
struct i915_vma *vma;
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
mutex_lock(&dev->struct_mutex);
vma = intel_pin_and_fence_fb_obj(c->primary->fb,
c->primary->state->rotation);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
c->primary->fb = NULL;
c->primary->crtc = c->primary->state->crtc = NULL;
update_state_fb(c->primary);
c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
}
}
}
int intel_connector_register(struct drm_connector *connector)

View File

@@ -377,6 +377,7 @@ struct intel_atomic_state {
struct intel_plane_state {
struct drm_plane_state base;
struct drm_rect clip;
struct i915_vma *vma;
struct {
u32 offset;
@@ -1046,6 +1047,7 @@ struct intel_flip_work {
struct work_struct mmio_work;
struct drm_crtc *crtc;
struct i915_vma *old_vma;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
void intel_unpin_fb_vma(struct i915_vma *vma);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
return i915_ggtt_offset(state->vma);
}
u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
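
The net effect of the i915 hunks above is that the pinned VMA travels inside the plane state, and every register write derives the surface address from it through the new inline helper instead of re-looking up the object. A minimal standalone C sketch of that pattern, using simplified stand-in types rather than the real i915 structures:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct vma { uint32_t ggtt_offset; };       /* stand-in for struct i915_vma */
struct plane_state { struct vma *vma; };    /* stand-in for intel_plane_state */

/* Mirrors intel_plane_ggtt_offset(): read the offset from the cached,
 * already-pinned vma instead of re-resolving object -> GGTT vma. */
static uint32_t plane_ggtt_offset(const struct plane_state *state)
{
	assert(state->vma);   /* prepare_plane_fb() must have pinned it */
	return state->vma->ggtt_offset;
}

int main(void)
{
	struct vma v = { .ggtt_offset = 0x100000 };
	struct plane_state st = { .vma = &v };   /* cached at pin time */

	printf("surface base = 0x%x\n", plane_ggtt_offset(&st));
	return 0;
}
```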

View File

@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= params->fb.fence_reg;
fbc_ctl |= params->vma->fence->id;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
if (params->vma->fence) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
if (params->vma->fence) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
dpfc_ctl |= params->fb.fence_reg;
dpfc_ctl |= params->vma->fence->id;
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
SNB_CPU_FENCE_ENABLE |
params->vma->fence->id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
}
@@ -269,7 +270,8 @@
}
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
I915_WRITE(ILK_FBC_RT_BASE,
i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
if (params->vma->fence) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
SNB_CPU_FENCE_ENABLE |
params->vma->fence->id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
} else {
I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
/* XXX replace me when we have VMA tracking for intel_plane_state */
static int get_fence_id(struct drm_framebuffer *fb)
{
struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
}
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj;
cache->vma = NULL;
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
if (!cache->plane.visible)
return;
obj = intel_fb_obj(fb);
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need. */
if (IS_GEN(dev_priv, 5, 6))
cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
cache->fb.fence_reg = get_fence_id(fb);
cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
cache->vma = plane_state->vma;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
if (!cache->plane.visible) {
if (!cache->vma) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
}
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* so have no fence associated with it) due to aperture constraints
* at the time of pinning.
*/
if (cache->fb.tiling_mode != I915_TILING_X ||
cache->fb.fence_reg == I915_FENCE_REG_NONE) {
if (!cache->vma->fence) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
* zero. */
memset(params, 0, sizeof(*params));
params->vma = cache->vma;
params->crtc.pipe = crtc->pipe;
params->crtc.plane = crtc->plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
params->fb.pixel_format = cache->fb.pixel_format;
params->fb.stride = cache->fb.stride;
params->fb.fence_reg = cache->fb.fence_reg;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,

View File

@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
intel_unpin_fb_vma(vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->fb) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
intel_unpin_fb_vma(ifbdev->vma);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
drm_framebuffer_remove(&ifbdev->fb->base);

View File

@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane),
intel_fb_gtt_offset(fb, rotation) + surf_addr);
intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane),
intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}

View File

@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t mpllP;
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
mpllP = (mpllP >> 8) & 0xf;
if (!mpllP)
mpllP = 4;
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t clock;
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
return clock;
return clock / 1000;
}
ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);

View File

@@ -99,6 +99,7 @@ struct nv84_fence_priv {
struct nouveau_bo *bo;
struct nouveau_bo *bo_gart;
u32 *suspend;
struct mutex mutex;
};
int nv84_fence_context_new(struct nouveau_channel *);

View File

@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
}
/* nouveau_led.c */
#if IS_ENABLED(CONFIG_LEDS_CLASS)
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
int nouveau_led_init(struct drm_device *dev);
void nouveau_led_suspend(struct drm_device *dev);
void nouveau_led_resume(struct drm_device *dev);

View File

@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
/* block access to objects not created via this interface */
owner = argv->v0.owner;
if (argv->v0.object == 0ULL)
if (argv->v0.object == 0ULL &&
argv->v0.type != NVIF_IOCTL_V0_DEL)
argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
else
argv->v0.owner = NVDRM_OBJECT_USIF;

View File

@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (crtc->state->event)
drm_crtc_vblank_get(crtc);
}
/* Update plane(s). */
for_each_plane_in_state(state, plane, plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
drm_crtc_vblank_put(crtc);
}
}

View File

@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
struct nv84_fence_chan *fctx = chan->fence;
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
mutex_lock(&priv->mutex);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
nouveau_bo_vma_del(priv->bo, &fctx->vma);
mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync32 = nv84_fence_sync32;
fctx->base.sequence = nv84_fence_read(chan);
mutex_lock(&priv->mutex);
ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
mutex_unlock(&priv->mutex);
if (ret)
nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
mutex_init(&priv->mutex);
/* Use VRAM if there is any ; otherwise fallback to system memory */
domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
/*

View File

@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
);
}
for (i = 0; i < size; i++)
nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
nvkm_wr32(device, 0x61c440 + soff, (i << 8));
nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);

View File

@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
case 0x94:
case 0x96:
case 0x98:
case 0xaa:
case 0xac:
return true;
default:
break;

View File

@@ -97,9 +97,10 @@
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
* 2.47.0 - Add UVD_NO_OP register support
* 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
* 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 48
#define KMS_DRIVER_MINOR 49
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);

View File

@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = (u64)man->size << PAGE_SHIFT;
args->vram_size = (u64)man->size << PAGE_SHIFT;
args->vram_visible = rdev->mc.visible_vram_size;
args->vram_visible -= rdev->vram_pin_size;
args->gart_size = rdev->mc.gtt_size;
args->gart_size -= rdev->gart_pin_size;

View File

@@ -168,7 +168,7 @@ struct cp2112_device {
atomic_t xfer_avail;
struct gpio_chip gc;
u8 *in_out_buffer;
spinlock_t lock;
struct mutex lock;
struct gpio_desc *desc[8];
bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
ret = 0;
exit:
spin_unlock_irqrestore(&dev->lock, flags);
return ret <= 0 ? ret : -EIO;
mutex_unlock(&dev->lock);
return ret < 0 ? ret : -EIO;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
mutex_lock(&dev->lock);
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock);
}
static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
ret = buf[1];
exit:
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock);
return ret;
}
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
goto fail;
}
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock);
/*
* Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
return 0;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock);
return ret < 0 ? ret : -EIO;
}
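
The cp2112 hunks above swap a spinlock for a mutex because hid_hw_raw_request() can sleep, which is not allowed inside a spinlocked, IRQs-off section. A small userspace model of the corrected locking, with a POSIX mutex standing in for the kernel one:

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int raw_request(void)   /* stand-in for hid_hw_raw_request() */
{
	usleep(1000);          /* may sleep -> must not run under a spinlock */
	return 0;
}

static int gpio_get(void)
{
	int ret;

	pthread_mutex_lock(&lock);     /* was: spin_lock_irqsave() */
	ret = raw_request();
	pthread_mutex_unlock(&lock);   /* was: spin_unlock_irqrestore() */
	return ret;
}

int main(void)
{
	printf("ret=%d\n", gpio_get());
	return 0;
}
```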
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!dev->in_out_buffer)
return -ENOMEM;
spin_lock_init(&dev->lock);
mutex_init(&dev->lock);
ret = hid_parse(hdev);
if (ret) {

View File

@@ -76,6 +76,9 @@
#define USB_VENDOR_ID_ALPS_JP 0x044E
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
#define USB_VENDOR_ID_AMI 0x046b
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
#define USB_VENDOR_ID_ANTON 0x1130
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101

View File

@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
.driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
.driver_data = LG_FF2 },
.driver_data = LG_NOGET | LG_FF2 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
.driver_data = LG_FF3 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),

View File

@@ -57,6 +57,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },

View File

@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
wacom->id[0] = STYLUS_DEVICE_ID;
}
pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
if (features->pressure_max > 255)
pressure = (pressure << 1) | ((data[4] >> 6) & 1);
pressure += (features->pressure_max + 1) / 2;
if (prox) {
pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
if (features->pressure_max > 255)
pressure = (pressure << 1) | ((data[4] >> 6) & 1);
pressure += (features->pressure_max + 1) / 2;
input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
input_report_abs(input, ABS_PRESSURE, pressure);
input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
input_report_abs(input, ABS_PRESSURE, pressure);
input_report_key(input, BTN_TOUCH, data[4] & 0x08);
input_report_key(input, BTN_STYLUS, data[4] & 0x10);
/* Only allow the stylus2 button to be reported for the pen tool. */
input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
input_report_key(input, BTN_TOUCH, data[4] & 0x08);
input_report_key(input, BTN_STYLUS, data[4] & 0x10);
/* Only allow the stylus2 button to be reported for the pen tool. */
input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
}
if (!prox)
wacom->id[0] = 0;

View File

@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
return ret;
}
init_cached_read_index(channel);
next_read_location = hv_get_next_read_location(inring_info);
next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
sizeof(desc),

View File

@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
static int palmas_gpadc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
static int palmas_gpadc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
int ret;

View File

@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
static int __maybe_unused afe4403_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
struct afe4403_data *afe = iio_priv(indio_dev);
int ret;
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
static int __maybe_unused afe4403_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
struct afe4403_data *afe = iio_priv(indio_dev);
int ret;

View File

@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
static int __maybe_unused afe4404_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct afe4404_data *afe = iio_priv(indio_dev);
int ret;
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
static int __maybe_unused afe4404_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct afe4404_data *afe = iio_priv(indio_dev);
int ret;

View File

@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
mutex_lock(&data->lock);
while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
ret = max30100_read_measurement(data);
if (ret)
break;

View File

@@ -71,7 +71,8 @@
* a) select an implementation using busy loop polling on those systems
* b) use the checksum to do some probabilistic decoding
*/
#define DHT11_START_TRANSMISSION 18 /* ms */
#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
#define DHT11_MIN_TIMERES 34000 /* ns */
#define DHT11_THRESHOLD 49000 /* ns */
#define DHT11_AMBIG_LOW 23000 /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
ret = gpio_direction_output(dht11->gpio, 0);
if (ret)
goto err;
msleep(DHT11_START_TRANSMISSION);
usleep_range(DHT11_START_TRANSMISSION_MIN,
DHT11_START_TRANSMISSION_MAX);
ret = gpio_direction_input(dht11->gpio);
if (ret)
goto err;

View File

@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
data->enabled = true;
if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
retval = disable_irq_wake(irq);
if (!retval)
if (retval)
dev_warn(&rmi_dev->dev,
"Failed to disable irq for wake: %d\n",
retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
disable_irq(irq);
if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
retval = enable_irq_wake(irq);
if (!retval)
if (retval)
dev_warn(&rmi_dev->dev,
"Failed to enable irq for wake: %d\n",
retval);

View File

@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
wm->battery_dev->dev.platform_data = pdata->batt_pdata;
wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;

View File

@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
if (intmask & SDHCI_INT_RETUNE)
mmc_retune_needed(host->mmc);
if (intmask & SDHCI_INT_CARD_INT) {
if ((intmask & SDHCI_INT_CARD_INT) &&
(host->ier & SDHCI_INT_CARD_INT)) {
sdhci_enable_sdio_irq_nolock(host, false);
host->thread_isr |= SDHCI_INT_CARD_INT;
result = IRQ_WAKE_THREAD;

View File

@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(np->pci_dev,
np->rx_info[i].mapping)) {
dev_kfree_skb(skb);
np->rx_info[i].skb = NULL;
break;
}
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
unsigned int entry;
unsigned int prev_tx;
u32 status;
int i;
int i, j;
/*
* be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
prev_tx = np->cur_tx;
entry = np->cur_tx % TX_RING_SIZE;
for (i = 0; i < skb_num_frags(skb); i++) {
int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
if (pci_dma_mapping_error(np->pci_dev,
np->tx_info[entry].mapping)) {
dev->stats.tx_dropped++;
goto err_out;
}
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
return NETDEV_TX_OK;
}
err_out:
entry = prev_tx % TX_RING_SIZE;
np->tx_info[entry].skb = NULL;
if (i > 0) {
pci_unmap_single(np->pci_dev,
np->tx_info[entry].mapping,
skb_first_frag_len(skb),
PCI_DMA_TODEVICE);
np->tx_info[entry].mapping = 0;
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
for (j = 1; j < i; j++) {
pci_unmap_single(np->pci_dev,
np->tx_info[entry].mapping,
skb_frag_size(
&skb_shinfo(skb)->frags[j-1]),
PCI_DMA_TODEVICE);
entry++;
}
}
dev_kfree_skb_any(skb);
np->cur_tx = prev_tx;
return NETDEV_TX_OK;
}
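
The err_out path added to start_tx() above unwinds a partially mapped packet: every fragment mapped before the failure is unmapped again, the skb is dropped, and the producer index is restored. A simplified standalone model of that unwind shape (the real code additionally distinguishes the first fragment's length from the others):

```c
#include <stdio.h>

static int map_frag(int i)     { return i != 2; }   /* pretend frag 2 fails */
static void unmap_frag(int i)  { printf("unmap frag %d\n", i); }

static int start_tx(int nfrags, unsigned int *cur_tx)
{
	unsigned int prev_tx = *cur_tx;   /* remember producer index */
	int i;

	for (i = 0; i < nfrags; i++) {
		if (!map_frag(i))
			goto err_out;
		(*cur_tx)++;
	}
	return 0;

err_out:
	while (--i >= 0)          /* release everything mapped so far */
		unmap_frag(i);
	*cur_tx = prev_tx;        /* ring state as if nothing happened */
	return -1;                /* caller frees the skb, counts the drop */
}

int main(void)
{
	unsigned int cur_tx = 5;

	start_tx(4, &cur_tx);
	printf("cur_tx=%u (restored)\n", cur_tx);
	return 0;
}
```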
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(np->pci_dev,
np->rx_info[entry].mapping)) {
dev_kfree_skb(skb);
np->rx_info[entry].skb = NULL;
break;
}
np->rx_ring[entry].rxaddr =
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
}

View File

@@ -43,13 +43,13 @@
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
* (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
* (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
*/
#define MACB_HALT_TIMEOUT 1230
/* DMA buffer descriptor might be a different size
* depending on hardware configuration.
*/
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (bp->hw_dma_cap == HW_DMA_CAP_64B)
return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
#endif
return sizeof(struct macb_dma_desc);
}
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* DMA buffer descriptor is 4 words long (instead of 2 words)
* for 64b GEM.
*/
if (bp->hw_dma_cap == HW_DMA_CAP_64B)
idx <<= 1;
#endif
return idx;
}
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
}
#endif
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
unsigned int index)
{
return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
index = macb_tx_ring_wrap(queue->bp, index);
index = macb_adj_dma_desc_idx(queue->bp, index);
return &queue->tx_ring[index];
}
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
dma_addr_t offset;
offset = macb_tx_ring_wrap(queue->bp, index) *
sizeof(struct macb_dma_desc);
macb_dma_desc_get_size(queue->bp);
return queue->tx_ring_dma + offset;
}
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
index = macb_rx_ring_wrap(bp, index);
index = macb_adj_dma_desc_idx(bp, index);
return &bp->rx_ring[index];
}
static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
}
static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
desc->addr = (u32)addr;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
desc->addrh = (u32)(addr >> 32);
struct macb_dma_desc_64 *desc_64;
if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr);
}
#endif
desc->addr = lower_32_bits(addr);
}
static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
struct macb_dma_desc_64 *desc_64;
if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
addr = ((u64)(desc_64->addrh) << 32);
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
return addr;
}
static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(desc, 0);
macb_set_addr(bp, desc, 0);
desc->ctrl = MACB_BIT(TX_USED);
/* Make descriptor updates visible to hardware */
wmb();
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
if (bp->hw_dma_cap == HW_DMA_CAP_64B)
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
struct macb_dma_desc *desc;
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
bp->rx_ring_size) > 0) {
@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
rmb();
bp->rx_prepared_head++;
desc = macb_rx_desc(bp, entry);
if (!bp->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
macb_set_addr(&(bp->rx_ring[entry]), paddr);
bp->rx_ring[entry].ctrl = 0;
macb_set_addr(bp, desc, paddr);
desc->ctrl = 0;
/* properly align Ethernet header */
skb_reserve(skb, NET_IP_ALIGN);
} else {
bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
bp->rx_ring[entry].ctrl = 0;
desc->addr &= ~MACB_BIT(RX_USED);
desc->ctrl = 0;
}
}
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
bool rxused;
entry = macb_rx_ring_wrap(bp, bp->rx_tail);
desc = &bp->rx_ring[entry];
desc = macb_rx_desc(bp, entry);
/* Make hw descriptor updates visible to CPU */
rmb();
rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
addr |= ((u64)(desc->addrh) << 32);
#endif
addr = macb_get_addr(bp, desc);
ctrl = desc->ctrl;
if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
static inline void macb_init_rx_ring(struct macb *bp)
{
dma_addr_t addr;
struct macb_dma_desc *desc = NULL;
int i;
addr = bp->rx_buffers_dma;
for (i = 0; i < bp->rx_ring_size; i++) {
bp->rx_ring[i].addr = addr;
bp->rx_ring[i].ctrl = 0;
desc = macb_rx_desc(bp, i);
macb_set_addr(bp, desc, addr);
desc->ctrl = 0;
addr += bp->rx_buffer_size;
}
bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
desc->addr |= MACB_BIT(RX_WRAP);
bp->rx_tail = 0;
}
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
for (tail = bp->rx_tail; budget > 0; tail++) {
struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
u32 addr, ctrl;
u32 ctrl;
/* Make hw descriptor updates visible to CPU */
rmb();
addr = desc->addr;
ctrl = desc->ctrl;
if (!(addr & MACB_BIT(RX_USED)))
if (!(desc->addr & MACB_BIT(RX_USED)))
break;
if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
i = tx_head;
entry = macb_tx_ring_wrap(bp, i);
ctrl = MACB_BIT(TX_USED);
desc = &queue->tx_ring[entry];
desc = macb_tx_desc(queue, entry);
desc->ctrl = ctrl;
if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
i--;
entry = macb_tx_ring_wrap(bp, i);
tx_skb = &queue->tx_skb[entry];
desc = &queue->tx_ring[entry];
desc = macb_tx_desc(queue, entry);
ctrl = (u32)tx_skb->size;
if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(MSS_MFS, mss_mfs);
/* Set TX buffer descriptor */
macb_set_addr(desc, tx_skb->mapping);
macb_set_addr(bp, desc, tx_skb->mapping);
/* desc->addr must be visible to hardware before clearing
* 'TX_USED' bit in desc->ctrl.
*/
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
if (!skb)
continue;
desc = &bp->rx_ring[i];
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
addr |= ((u64)(desc->addrh) << 32);
#endif
desc = macb_rx_desc(bp, i);
addr = macb_get_addr(bp, desc);
dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ static int macb_alloc_consistent(struct macb *bp)
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
struct macb_dma_desc *desc = NULL;
unsigned int q;
int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
for (i = 0; i < bp->tx_ring_size; i++) {
queue->tx_ring[i].addr = 0;
queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
desc = macb_tx_desc(queue, i);
macb_set_addr(bp, desc, 0);
desc->ctrl = MACB_BIT(TX_USED);
}
queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
desc->ctrl |= MACB_BIT(TX_WRAP);
queue->tx_head = 0;
queue->tx_tail = 0;
}
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
static void macb_init_rings(struct macb *bp)
{
int i;
struct macb_dma_desc *desc = NULL;
macb_init_rx_ring(bp);
for (i = 0; i < bp->tx_ring_size; i++) {
bp->queues[0].tx_ring[i].addr = 0;
bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
desc = macb_tx_desc(&bp->queues[0], i);
macb_set_addr(bp, desc, 0);
desc->ctrl = MACB_BIT(TX_USED);
}
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
desc->ctrl |= MACB_BIT(TX_WRAP);
}
static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
dmacfg &= ~GEM_BIT(TXCOEN);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
dmacfg |= GEM_BIT(ADDR64);
if (bp->hw_dma_cap == HW_DMA_CAP_64B)
dmacfg |= GEM_BIT(ADDR64);
#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
if (bp->hw_dma_cap == HW_DMA_CAP_64B)
macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
if (bp->hw_dma_cap == HW_DMA_CAP_64B)
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
/* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
queue->TBQPH = GEM_TBQPH(hw_q -1);
if (bp->hw_dma_cap == HW_DMA_CAP_64B)
queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
} else {
/* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
queue->TBQPH = MACB_TBQPH;
if (bp->hw_dma_cap == HW_DMA_CAP_64B)
queue->TBQPH = MACB_TBQPH;
#endif
}
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
static int at91ether_start(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
struct macb_dma_desc *desc;
dma_addr_t addr;
u32 ctl;
int i;
lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
sizeof(struct macb_dma_desc)),
macb_dma_desc_get_size(lp)),
&lp->rx_ring_dma, GFP_KERNEL);
if (!lp->rx_ring)
return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
if (!lp->rx_buffers) {
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
sizeof(struct macb_dma_desc),
macb_dma_desc_get_size(lp),
lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
addr = lp->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
lp->rx_ring[i].addr = addr;
lp->rx_ring[i].ctrl = 0;
desc = macb_rx_desc(lp, i);
macb_set_addr(lp, desc, addr);
desc->ctrl = 0;
addr += AT91ETHER_MAX_RBUFF_SZ;
}
/* Set the Wrap bit on the last descriptor */
lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
desc->addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
sizeof(struct macb_dma_desc),
macb_dma_desc_get_size(lp),
lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void at91ether_rx(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
struct macb_dma_desc *desc;
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
desc = macb_rx_desc(lp, lp->rx_tail);
while (desc->addr & MACB_BIT(RX_USED)) {
p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
lp->stats.rx_dropped++;
}
if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
lp->stats.multicast++;
/* reset ownership bit */
lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
desc->addr &= ~MACB_BIT(RX_USED);
/* wrap after last buffer */
if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
lp->rx_tail = 0;
else
lp->rx_tail++;
desc = macb_rx_desc(lp, lp->rx_tail);
}
}
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap = HW_DMA_CAP_64B;
} else
bp->hw_dma_cap = HW_DMA_CAP_32B;
#endif
spin_lock_init(&bp->lock);

View File

@@ -385,6 +385,8 @@
/* Bitfields in DCFG6. */
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
#define GEM_DAW64_OFFSET 23
#define GEM_DAW64_SIZE 1
/* Constants for CLK */
#define MACB_CLK_DIV8 0
@@ -487,12 +489,20 @@
struct macb_dma_desc {
u32 addr;
u32 ctrl;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
u32 addrh;
u32 resvd;
#endif
};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
enum macb_hw_dma_cap {
HW_DMA_CAP_32B,
HW_DMA_CAP_64B,
};
struct macb_dma_desc_64 {
u32 addrh;
u32 resvd;
};
#endif
/* DMA descriptor bitfields */
#define MACB_RX_USED_OFFSET 0
#define MACB_RX_USED_SIZE 1
@@ -874,6 +884,10 @@ struct macb {
unsigned int jumbo_max_len;
u32 wol;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
enum macb_hw_dma_cap hw_dma_cap;
#endif
};
static inline bool macb_is_gem(struct macb *bp)
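
The macb changes above make the descriptor size a runtime property: on 64-bit capable GEM hardware each ring slot carries the 2-word base descriptor plus a 2-word address extension, so software indices are doubled before dereferencing. A userspace model of that layout, with simplified stand-ins for the macb structures:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct desc    { uint32_t addr, ctrl; };    /* base 2-word descriptor */
struct desc_64 { uint32_t addrh, resvd; };  /* extension on 64-bit hw */

static unsigned int adj_idx(int hw_64b, unsigned int idx)
{
	return hw_64b ? idx << 1 : idx;   /* 4-word slots when extended */
}

static void set_addr(int hw_64b, struct desc *ring, unsigned int idx,
		     uint64_t dma)
{
	struct desc *d = &ring[adj_idx(hw_64b, idx)];

	if (hw_64b) {
		struct desc_64 *ext = (struct desc_64 *)(d + 1);

		ext->addrh = (uint32_t)(dma >> 32);   /* upper_32_bits() */
	}
	d->addr = (uint32_t)dma;                      /* lower_32_bits() */
}

int main(void)
{
	struct desc ring[8];

	memset(ring, 0, sizeof(ring));
	set_addr(1, ring, 1, 0x1234567890ull);   /* slot 1 -> ring[2..3] */
	printf("lo=0x%x hi=0x%x\n",
	       ring[2].addr, ((struct desc_64 *)&ring[3])->addrh);
	return 0;
}
```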

View File

@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
int speed = 2;
if (!xcv) {
dev_err(&xcv->pdev->dev,
"XCV init not done, probe may have failed\n");
pr_err("XCV init not done, probe may have failed\n");
return;
}

View File

@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM;
goto err;
}
done:
/* Remember currently programmed MAC */
ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
ether_addr_copy(netdev->dev_addr, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
{
/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
check_privilege(adapter, BE_PRIV_FILTMGMT))
check_privilege(adapter, BE_PRIV_FILTMGMT)) {
be_dev_mac_del(adapter, adapter->pmac_id[0]);
eth_zero_addr(adapter->dev_mac);
}
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;
/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
check_privilege(adapter, BE_PRIV_FILTMGMT)) {
/* Normally this condition is true, as the ->dev_mac is zeroed.
* But on BE3 VFs the initial MAC is pre-programmed by PF and
* subsequent be_dev_mac_add() can fail (after fresh boot)
*/
if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
int old_pmac_id = -1;
/* Remember old programmed MAC if any - can happen on BE3 VF */
if (!is_zero_ether_addr(adapter->dev_mac))
old_pmac_id = adapter->pmac_id[0];
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
/* Delete the old programmed MAC as we successfully programmed
* a new MAC
*/
if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
be_dev_mac_del(adapter, old_pmac_id);
ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
}
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
/* Initial MAC for BE3 VFs is already programmed by PF */
if (BEx_chip(adapter) && be_virtfn(adapter))
memcpy(adapter->dev_mac, mac, ETH_ALEN);
}
return 0;
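
The be2net hunks above keep a shadow copy of the MAC currently programmed in hardware (dev_mac) and only touch the filter table when the requested address differs, deleting the old entry only after the new one was added. A standalone sketch of that bookkeeping, with stand-in types rather than the real be2net structures:

```c
#include <stdio.h>
#include <string.h>

/* Stand-ins for the adapter state; not the real be2net structures. */
struct adapter {
	unsigned char dev_mac[6];   /* MAC currently programmed in hw */
	int pmac_id;                /* hw handle for that MAC, -1 if none */
};

static int hw_mac_add(struct adapter *a, const unsigned char *mac)
{
	static int next_id = 1;

	a->pmac_id = next_id++;
	memcpy(a->dev_mac, mac, 6);
	return 0;
}

/* Mirrors be_enable_if_filters(): skip programming when the MAC is
 * already in hw, and delete the old entry only after the add succeeded. */
static int program_mac(struct adapter *a, const unsigned char *mac)
{
	unsigned char zero[6] = { 0 };
	int old_id = -1;

	if (!memcmp(a->dev_mac, mac, 6))
		return 0;                    /* already programmed */
	if (memcmp(a->dev_mac, zero, 6))
		old_id = a->pmac_id;         /* remember old entry */
	if (hw_mac_add(a, mac))
		return -1;
	if (old_id >= 0 && old_id != a->pmac_id)
		printf("delete old pmac id %d\n", old_id);
	return 0;
}

int main(void)
{
	struct adapter a = { .pmac_id = -1 };
	unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	program_mac(&a, mac);
	program_mac(&a, mac);   /* second call is a no-op */
	return 0;
}
```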

View File

@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
if (!rxb->page)
continue;
dma_unmap_single(rx_queue->dev, rxb->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_page(rx_queue->dev, rxb->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
rxb->page = NULL;

View File

@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
return -ETIMEDOUT;
}
static int mlx4_comm_internal_err(u32 slave_read)
int mlx4_comm_internal_err(u32 slave_read)
{
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;

View File

@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
return;
mlx4_stop_catas_poll(dev);
if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
mlx4_is_slave(dev)) {
/* In mlx4_remove_one on a VF */
u32 slave_read =
swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
if (mlx4_comm_internal_err(slave_read)) {
mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
__func__);
mlx4_enter_error_state(dev->persist);
}
}
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)

View File

@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
int mlx4_comm_internal_err(u32 slave_read);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);

View File

@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (cmd->cmdif_rev > CMD_IF_REV) {
dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
CMD_IF_REV, cmd->cmdif_rev);
err = -ENOTSUPP;
err = -EOPNOTSUPP;
goto err_free_page;
}

View File

@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
enum mlx5e_traffic_types tt);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
return -ENOTSUPP;
return -EOPNOTSUPP;
}
static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
return -ENOTSUPP;
return -EOPNOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);

View File

@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -ENOTSUPP;
return -EOPNOTSUPP;
ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
int err;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -ENOTSUPP;
return -EOPNOTSUPP;
err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
struct ieee_ets ets;
struct ieee_pfc pfc;
int err = -ENOTSUPP;
int err = -EOPNOTSUPP;
int i;
if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
if (!MLX5_CAP_GEN(priv->mdev, ets)) {
netdev_err(netdev, "%s, ets is not supported\n", __func__);
return;
}
if (priority >= CEE_DCBX_MAX_PRIO) {
netdev_err(netdev,
"%s, priority is out of range\n", __func__);

View File

@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -ENOTSUPP;
return -EOPNOTSUPP;
coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
int i;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
return -ENOTSUPP;
return -EOPNOTSUPP;
mutex_lock(&priv->state_lock);
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
int i;
struct mlx5_core_dev *mdev = priv->mdev;
int ctxlen = MLX5_ST_SZ_BYTES(tirc);
int tt;
MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
mlx5e_build_tir_ctx_hash(tirc, priv);
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
memset(tirc, 0, ctxlen);
mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
}
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
bool hash_changed = false;
void *in;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
}
if (key)
if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
hfunc != priv->params.rss_hfunc) {
priv->params.rss_hfunc = hfunc;
hash_changed = true;
}
if (key) {
memcpy(priv->params.toeplitz_hash_key, key,
sizeof(priv->params.toeplitz_hash_key));
hash_changed = hash_changed ||
priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
}
if (hfunc != ETH_RSS_HASH_NO_CHANGE)
priv->params.rss_hfunc = hfunc;
mlx5e_modify_tirs_hash(priv, in, inlen);
if (hash_changed)
mlx5e_modify_tirs_hash(priv, in, inlen);
mutex_unlock(&priv->state_lock);
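
The set_rxfh change above avoids rewriting the hardware TIR hash contexts unless something that affects them actually changed: the hash function itself, or the Toeplitz key while Toeplitz is the active function. The decision logic, modeled standalone (the enum values are stand-ins for the real ethtool constants):

```c
#include <stdbool.h>
#include <stdio.h>

enum { HASH_NO_CHANGE = -1, HASH_XOR = 0, HASH_TOP = 1 };

/* Returns true when the TIR hash contexts must be reprogrammed;
 * updates *hfunc the way mlx5e_set_rxfh() updates priv->params. */
static bool update_hash(int *hfunc, int new_hfunc, bool new_key)
{
	bool changed = false;

	if (new_hfunc != HASH_NO_CHANGE && new_hfunc != *hfunc) {
		*hfunc = new_hfunc;
		changed = true;
	}
	if (new_key)   /* a new key only matters for Toeplitz */
		changed = changed || *hfunc == HASH_TOP;
	return changed;
}

int main(void)
{
	int hfunc = HASH_TOP;

	printf("%d\n", update_hash(&hfunc, HASH_NO_CHANGE, true));  /* 1 */
	hfunc = HASH_XOR;
	printf("%d\n", update_hash(&hfunc, HASH_NO_CHANGE, true));  /* 0 */
	return 0;
}
```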
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
u32 mlx5_wol_mode;
if (!wol_supported)
return -ENOTSUPP;
return -EOPNOTSUPP;
if (wol->wolopts & ~wol_supported)
return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
return -ENOTSUPP;
return -EOPNOTSUPP;
if (!rx_mode_changed)
return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
bool reset;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -ENOTSUPP;
return -EOPNOTSUPP;
if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");

View File

@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL);
if (!priv->fs.ns)
return -EINVAL;
return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {

View File

@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_ETHTOOL);
if (!ns)
return ERR_PTR(-ENOTSUPP);
return ERR_PTR(-EOPNOTSUPP);
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),

View File

@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
enum mlx5e_traffic_types tt)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_L4_SPORT |\
MLX5_HASH_FIELD_SEL_L4_DPORT)
#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
MLX5_SET(tirc, tirc, rx_hash_fn,
mlx5e_rx_hash_fn(priv->params.rss_hfunc));
if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, priv->params.toeplitz_hash_key, len);
}
switch (tt) {
case MLX5E_TT_IPV4_TCP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV6_TCP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV4_UDP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV6_UDP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV4_IPSEC_AH:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV6_IPSEC_AH:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV4_IPSEC_ESP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV6_IPSEC_ESP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV4:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
case MLX5E_TT_IPV6:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
default:
WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
}
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
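The two hunks above move the per-traffic-type RX hash configuration out of mlx5e_build_indir_tir_ctx() and into the new mlx5e_build_indir_tir_ctx_hash(), so callers (for example the ethtool RSS paths) can rebuild the hash fields for a given traffic type tt without rebuilding the whole TIR context. The selected_fields masks are plain bitwise ORs of single-field flags; a sketch with illustrative bit values (the real MLX5_HASH_FIELD_SEL_* constants come from the mlx5 headers):

/* Bit values are illustrative, not taken from the mlx5 headers. */
#define EX_SEL_SRC_IP		(1 << 0)
#define EX_SEL_DST_IP		(1 << 1)
#define EX_SEL_L4_SPORT		(1 << 2)
#define EX_SEL_L4_DPORT		(1 << 3)
#define EX_SEL_IPSEC_SPI	(1 << 4)

#define EX_HASH_IP		(EX_SEL_SRC_IP | EX_SEL_DST_IP)	/* L3 only */
#define EX_HASH_IP_L4PORTS	(EX_HASH_IP | EX_SEL_L4_SPORT | EX_SEL_L4_DPORT)
#define EX_HASH_IP_IPSEC_SPI	(EX_HASH_IP | EX_SEL_IPSEC_SPI)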
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
enum mlx5e_traffic_types tt)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_L4_SPORT |\
MLX5_HASH_FIELD_SEL_L4_DPORT)
#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
mlx5e_build_tir_ctx_hash(tirc, priv);
switch (tt) {
case MLX5E_TT_IPV4_TCP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV6_TCP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV4_UDP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV6_UDP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV4_IPSEC_AH:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV6_IPSEC_AH:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV4_IPSEC_ESP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV6_IPSEC_ESP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV4:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
case MLX5E_TT_IPV6:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
default:
WARN_ONCE(true,
"mlx5e_build_indir_tir_ctx: bad traffic type!\n");
}
mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
!MLX5_CAP_GEN(mdev, nic_flow_table) ||
!MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
< 3) {
mlx5_core_warn(mdev,
"Not creating net device, some required device capabilities are missing\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");


@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
__be32 *saddr,
int *out_ttl)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct rtable *rt;
struct neighbour *n = NULL;
int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
#else
return -EOPNOTSUPP;
#endif
- if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
- pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
- ip_rt_put(rt);
- return -EOPNOTSUPP;
- }
+ /* if the egress device isn't on the same HW e-switch, we use the uplink */
+ if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+ *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ else
+ *out_dev = rt->dst.dev;
ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
*out_n = n;
*saddr = fl4->saddr;
*out_ttl = ttl;
- *out_dev = rt->dst.dev;
return 0;
}
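The behavioral change in this file: rather than refusing the encap offload when the routed egress device is not on the same HW e-switch, the lookup now substitutes the e-switch uplink netdev. A condensed sketch of the selection (a hypothetical helper; error handling and the neighbour lookup from the hunk are elided):

/* Hypothetical helper condensing the *out_dev selection above. */
static struct net_device *example_pick_out_dev(struct mlx5e_priv *priv,
					       struct mlx5_eswitch *esw,
					       struct rtable *rt)
{
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		return mlx5_eswitch_get_uplink_netdev(esw); /* foreign e-switch: go via uplink */
	return rt->dst.dev;	/* same e-switch: offload to the egress dev directly */
}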


@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))


@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
return 0;
out_notsupp:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
+ err = -EOPNOTSUPP;
goto ns_err;
}
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
if (!ns) {
esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
int vport;
int err;
+ /* disable PF RoCE so missed packets don't go through RoCE steering */
+ mlx5_dev_list_lock();
+ mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
err = esw_create_offloads_fdb_table(esw, nvports);
if (err)
- return err;
+ goto create_fdb_err;
err = esw_create_offloads_table(esw);
if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
goto err_reps;
}
- /* disable PF RoCE so missed packets don't go through RoCE steering */
- mlx5_dev_list_lock();
- mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
return 0;
err_reps:
@@ -717,6 +718,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
create_ft_err:
esw_destroy_offloads_fdb_table(esw);
+ create_fdb_err:
+ /* enable back PF RoCE */
+ mlx5_dev_list_lock();
+ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
return err;
}
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
- /* enable back PF RoCE */
- mlx5_dev_list_lock();
- mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
}
+ /* enable back PF RoCE */
+ mlx5_dev_list_lock();
+ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
return err;
}
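Taken together, the esw_offloads_init()/esw_offloads_stop() hunks move the PF RoCE disable ahead of FDB table creation, closing the window in which missed packets could still hit RoCE steering, and add the matching re-enable to the error-unwind path and to the end of stop(). A minimal sketch of the resulting ordering, with hypothetical disable_pf_roce()/enable_pf_roce() wrappers standing in for the mlx5_dev_list_lock()-protected blocks:

static int example_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	disable_pf_roce(esw);	/* hypothetical wrapper; now the very first step */

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	return 0;

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);
create_fdb_err:
	enable_pf_roce(esw);	/* undo the first step on any failure */
	return err;
}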


@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
opmod = 1;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
