Updated from Linux LTS 3.10.25 to 3.10.26

Nathan 2025-04-09 20:15:34 -05:00
parent 92cb237c3b
commit c205d496ee
118 changed files with 902 additions and 407 deletions

View File

@@ -1468,6 +1468,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			* dump_id: dump IDENTIFY data.
+			* atapi_dmadir: Enable ATAPI DMADIR bridge support
+			* disable: Disable this device.
 			If there are multiple matching configurations changing
 			the same attribute, the last one is used.
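Both new options plug into the existing libata.force grammar described above. As an illustration only (the port/device IDs here are hypothetical, not taken from this commit), a boot line combining them could look like:

    libata.force=1:atapi_dmadir,2.00:disable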

View File

@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 25
+SUBLEVEL = 26
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish

View File

@@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void)
     return val;
 }
-static inline u64 arch_counter_get_cntpct(void)
-{
-    u64 cval;
-
-    isb();
-    asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
-    return cval;
-}
-
 static inline u64 arch_counter_get_cntvct(void)
 {
     u64 cval;

View File

@@ -153,6 +153,8 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
 mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
 orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
 mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
+mov r7, #0
+mcrr p15, 4, r7, r7, c14 @ CNTVOFF
 1:
 #endif

View File

@@ -503,6 +503,10 @@ vcpu .req r0 @ vcpu pointer always in r0
 add r5, vcpu, r4
 strd r2, r3, [r5]
+@ Ensure host CNTVCT == CNTPCT
+mov r2, #0
+mcrr p15, 4, r2, r2, c14 @ CNTVOFF
+
 1:
 #endif
 @ Allow physical timer/counter access for the host

View File

@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
 /* gpmc */
 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-    { .irq = 20 },
+    { .irq = 20 + OMAP_INTC_START, },
     { .irq = -1 }
 };
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
 };
 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-    { .irq = 52 },
+    { .irq = 52 + OMAP_INTC_START, },
     { .irq = -1 }
 };

View File

@@ -2152,7 +2152,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 };
 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-    { .irq = 20 },
+    { .irq = 20 + OMAP_INTC_START, },
     { .irq = -1 }
 };
@@ -2986,7 +2986,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-    { .irq = 24 },
+    { .irq = 24 + OMAP_INTC_START, },
     { .irq = -1 }
 };
@@ -3028,7 +3028,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-    { .irq = 28 },
+    { .irq = 28 + OMAP_INTC_START, },
     { .irq = -1 }
 };

View File

@@ -6,6 +6,8 @@
 /dts-v1/;
+/memreserve/ 0x80000000 0x00010000;
+
 / {
     model = "Foundation-v8A";
     compatible = "arm,foundation-aarch64", "arm,vexpress";

View File

@@ -110,16 +110,6 @@ static inline void __cpuinit arch_counter_set_user_access(void)
     asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
 }
-static inline u64 arch_counter_get_cntpct(void)
-{
-    u64 cval;
-
-    isb();
-    asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
-    return cval;
-}
-
 static inline u64 arch_counter_get_cntvct(void)
 {
     u64 cval;

View File

@@ -184,7 +184,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define pgprot_noncached(prot) \
     __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define pgprot_writecombine(prot) \
-    __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+    __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
 #define pgprot_dmacoherent(prot) \
     __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
 #define __HAVE_PHYS_MEM_ACCESS_PROT

View File

@@ -59,9 +59,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
     unsigned int tmp;
     asm volatile(
-    "   ldaxr   %w0, %1\n"
+    "2: ldaxr   %w0, %1\n"
     "   cbnz    %w0, 1f\n"
     "   stxr    %w0, %w2, %1\n"
+    "   cbnz    %w0, 2b\n"
     "1:\n"
     : "=&r" (tmp), "+Q" (lock->lock)
     : "r" (1)

View File

@@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
                                          unsigned int i, unsigned int n,
                                          unsigned long *args)
 {
+    if (n == 0)
+        return;
+
     if (i + n > SYSCALL_MAX_ARGS) {
         unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
         unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
                                          unsigned int i, unsigned int n,
                                          const unsigned long *args)
 {
+    if (n == 0)
+        return;
+
     if (i + n > SYSCALL_MAX_ARGS) {
         pr_warning("%s called with max args %d, handling only %d\n",
                    __func__, i + n, SYSCALL_MAX_ARGS);

View File

@@ -24,10 +24,10 @@
 #include <linux/compiler.h>
 #ifndef CONFIG_ARM64_64K_PAGES
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE_ORDER 2
 #endif
-#define THREAD_SIZE 8192
+#define THREAD_SIZE 16384
 #define THREAD_START_SP (THREAD_SIZE - 16)
 #ifndef __ASSEMBLY__
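For reference, the two new constants are consistent: with the 4 KiB pages this #ifndef branch covers, THREAD_SIZE_ORDER = 2 gives PAGE_SIZE << 2 = 16384 bytes, exactly the new THREAD_SIZE. The get_thread_info change further down in this commit replaces a hard-coded 8 KiB mask with #~(THREAD_SIZE - 1), so the stack mask now tracks this definition automatically.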

View File

@@ -21,6 +21,7 @@
 #define BOOT_CPU_MODE_EL2 (0x0e12b007)
 #ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 phys_addr_t __hyp_get_vectors(void);
+static inline void sync_boot_mode(void)
+{
+    /*
+     * As secondaries write to __boot_cpu_mode with caches disabled, we
+     * must flush the corresponding cache entries to ensure the visibility
+     * of their writes.
+     */
+    __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
+}
+
 /* Reports the availability of HYP mode */
 static inline bool is_hyp_mode_available(void)
 {
+    sync_boot_mode();
     return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
             __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
 }
@@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available(void)
 /* Check if the bootloader has booted CPUs in different modes */
 static inline bool is_hyp_mode_mismatched(void)
 {
+    sync_boot_mode();
     return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }

View File

@@ -121,7 +121,7 @@
 .macro get_thread_info, rd
 mov \rd, sp
-and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack
+and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
 .endm
 /*

View File

@@ -79,8 +79,10 @@ void fpsimd_thread_switch(struct task_struct *next)
 void fpsimd_flush_thread(void)
 {
+    preempt_disable();
     memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
     fpsimd_load_state(&current->thread.fpsimd_state);
+    preempt_enable();
 }
 /*

View File

@@ -236,31 +236,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
     int err, len, type, disabled = !ctrl.enabled;
-    if (disabled) {
-        len = 0;
-        type = HW_BREAKPOINT_EMPTY;
-    } else {
-        err = arch_bp_generic_fields(ctrl, &len, &type);
-        if (err)
-            return err;
-
-        switch (note_type) {
-        case NT_ARM_HW_BREAK:
-            if ((type & HW_BREAKPOINT_X) != type)
-                return -EINVAL;
-            break;
-        case NT_ARM_HW_WATCH:
-            if ((type & HW_BREAKPOINT_RW) != type)
-                return -EINVAL;
-            break;
-        default:
-            return -EINVAL;
-        }
-    }
+    attr->disabled = disabled;
+    if (disabled)
+        return 0;
+
+    err = arch_bp_generic_fields(ctrl, &len, &type);
+    if (err)
+        return err;
+
+    switch (note_type) {
+    case NT_ARM_HW_BREAK:
+        if ((type & HW_BREAKPOINT_X) != type)
+            return -EINVAL;
+        break;
+    case NT_ARM_HW_WATCH:
+        if ((type & HW_BREAKPOINT_RW) != type)
+            return -EINVAL;
+        break;
+    default:
+        return -EINVAL;
+    }
     attr->bp_len = len;
     attr->bp_type = type;
-    attr->disabled = disabled;
     return 0;
 }

View File

@@ -199,13 +199,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
     raw_spin_lock(&boot_lock);
     raw_spin_unlock(&boot_lock);
-    /*
-     * Enable local interrupts.
-     */
-    notify_cpu_starting(cpu);
-    local_irq_enable();
-    local_fiq_enable();
-
     /*
      * OK, now it's safe to let the boot CPU continue. Wait for
      * the CPU migration code to notice that the CPU is online
@@ -214,6 +207,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
     set_cpu_online(cpu, true);
     complete(&cpu_running);
+    /*
+     * Enable GIC and timers.
+     */
+    notify_cpu_starting(cpu);
+    local_irq_enable();
+    local_fiq_enable();
+
     /*
      * OK, it's off to the idle thread for us
      */

View File

@@ -77,14 +77,12 @@ void __flush_dcache_page(struct page *page)
 void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
-    unsigned long pfn;
-    struct page *page;
-
-    pfn = pte_pfn(pte);
-    if (!pfn_valid(pfn))
-        return;
-
-    page = pfn_to_page(pfn);
+    struct page *page = pte_page(pte);
+
+    /* no flushing needed for anonymous pages */
+    if (!page_mapping(page))
+        return;
+
     if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
         __flush_dcache_page(page);
         __flush_icache_all();
@@ -94,28 +92,14 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 }
 /*
- * Ensure cache coherency between kernel mapping and userspace mapping of this
- * page.
+ * This function is called when a page has been modified by the kernel. Mark
+ * it as dirty for later flushing when mapped in user space (if executable,
+ * see __sync_icache_dcache).
  */
 void flush_dcache_page(struct page *page)
 {
-    struct address_space *mapping;
-
-    /*
-     * The zero page is never written to, so never has any dirty cache
-     * lines, and therefore never needs to be flushed.
-     */
-    if (page == ZERO_PAGE(0))
-        return;
-
-    mapping = page_mapping(page);
-    if (mapping && mapping_mapped(mapping)) {
-        __flush_dcache_page(page);
-        __flush_icache_all();
-        set_bit(PG_dcache_clean, &page->flags);
-    } else {
+    if (test_bit(PG_dcache_clean, &page->flags))
         clear_bit(PG_dcache_clean, &page->flags);
-    }
 }
 EXPORT_SYMBOL(flush_dcache_page);

View File

@@ -339,7 +339,6 @@ void __init paging_init(void)
     bootmem_init();
     empty_zero_page = virt_to_page(zero_page);
-    __flush_dcache_page(empty_zero_page);
     /*
      * TTBR0 is only used for the identity mapping at this stage. Make it

View File

@@ -95,10 +95,6 @@ ENTRY(cpu_do_switch_mm)
     ret
 ENDPROC(cpu_do_switch_mm)
-cpu_name:
-    .ascii "AArch64 Processor"
-    .align
-
     .section ".text.init", #alloc, #execinstr
 /*

View File

@@ -264,7 +264,7 @@ do_kvm_##n: \
     subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
     beq- 1f; \
     ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1:  cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+1:  cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
     blt+ cr1,3f; /* abort if it is */ \
     li r1,(n); /* will be reloaded later */ \
     sth r1,PACA_TRAP_SAVE(r13); \

View File

@@ -467,6 +467,7 @@ _STATIC(__after_prom_start)
     mtctr r8
     bctr
+.balign 8
 p_end: .llong _end - _stext
 4: /* Now copy the rest of the kernel up to _end */

View File

@@ -473,11 +473,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
         slb_v = vcpu->kvm->arch.vrma_slb_v;
     }
+    preempt_disable();
     /* Find the HPTE in the hash table */
     index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
                      HPTE_V_VALID | HPTE_V_ABSENT);
-    if (index < 0)
+    if (index < 0) {
+        preempt_enable();
         return -ENOENT;
+    }
     hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
     v = hptep[0] & ~HPTE_V_HVLOCK;
     gr = kvm->arch.revmap[index].guest_rpte;
@@ -485,6 +488,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
     /* Unlock the HPTE */
     asm volatile("lwsync" : : : "memory");
     hptep[0] = v;
+    preempt_enable();
     gpte->eaddr = eaddr;
     gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

View File

@@ -724,6 +724,10 @@ static int slb_base_page_shift[4] = {
     20, /* 1M, unsupported */
 };
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                   unsigned long valid)
 {

View File

@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_FLATMEM
+/* need in pfn_valid macro */
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+#endif
 #define DECLARE_EXPORT(name) \
     extern void name(void);EXPORT_SYMBOL(name)

View File

@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
     checksum.o strlen.o div64.o div64-generic.o
 # Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
     ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
     udiv_qrnnd.o

View File

@@ -616,7 +616,7 @@ static inline unsigned long pte_present(pte_t pte)
 }
 #define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
     return pte_val(a) & _PAGE_VALID;
 }
@@ -806,7 +806,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
      * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
      * and SUN4V pte layout, so this inline test is fine.
      */
-    if (likely(mm != &init_mm) && pte_accessible(orig))
+    if (likely(mm != &init_mm) && pte_accessible(mm, orig))
         tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }

View File

@@ -415,9 +415,16 @@ static inline int pte_present(pte_t a)
 }
 #define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
-    return pte_flags(a) & _PAGE_PRESENT;
+    if (pte_flags(a) & _PAGE_PRESENT)
+        return true;
+
+    if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+            mm_tlb_flush_pending(mm))
+        return true;
+
+    return false;
 }
 static inline int pte_hidden(pte_t pte)

View File

@@ -238,8 +238,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock) cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
 #endif /* _ASM_X86_SPINLOCK_H */

View File

@@ -387,7 +387,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
         set_cpu_cap(c, X86_FEATURE_PEBS);
     }
-    if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+    if (c->x86 == 6 && cpu_has_clflush &&
+        (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
         set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
 #ifdef CONFIG_X86_64

View File

@@ -1364,6 +1364,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
         return;
     }
+    if (!kvm_vcpu_is_bsp(apic->vcpu))
+        value &= ~MSR_IA32_APICBASE_BSP;
+    vcpu->arch.apic_base = value;
+
     /* update jump label if enable bit changes */
     if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
         if (value & MSR_IA32_APICBASE_ENABLE)
@@ -1373,10 +1377,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
         recalculate_apic_map(vcpu->kvm);
     }
-    if (!kvm_vcpu_is_bsp(apic->vcpu))
-        value &= ~MSR_IA32_APICBASE_BSP;
-
-    vcpu->arch.apic_base = value;
     if ((old_value ^ value) & X2APIC_ENABLE) {
         if (value & X2APIC_ENABLE) {
             u32 id = kvm_apic_id(apic);

View File

@@ -2402,7 +2402,7 @@ int ata_dev_configure(struct ata_device *dev)
         cdb_intr_string = ", CDB intr";
     }
-    if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
+    if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
         dev->flags |= ATA_DFLAG_DMADIR;
         dma_dir_string = ", DMADIR";
     }
@@ -4135,6 +4135,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
     { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
                         ATA_HORKAGE_FIRMWARE_WARN },
+    /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+    { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
     /* Blacklist entries taken from Silicon Image 3124/3132
        Windows driver .inf file - also several Linux problem reports */
     { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -6533,6 +6536,8 @@ static int __init ata_parse_force_one(char **cur,
     { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
     { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
     { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
+    { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
+    { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
     };
     char *start = *cur, *p = *cur;
     char *id, *val, *endp;

View File

@@ -3864,6 +3864,27 @@ void ata_scsi_hotplug(struct work_struct *work)
         return;
     }
+    /*
+     * XXX - UGLY HACK
+     *
+     * The block layer suspend/resume path is fundamentally broken due
+     * to freezable kthreads and workqueue and may deadlock if a block
+     * device gets removed while resume is in progress.  I don't know
+     * what the solution is short of removing freezable kthreads and
+     * workqueues altogether.
+     *
+     * The following is an ugly hack to avoid kicking off device
+     * removal while freezer is active.  This is a joke but does avoid
+     * this particular deadlock scenario.
+     *
+     * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+     * http://marc.info/?l=linux-kernel&m=138695698516487
+     */
+#ifdef CONFIG_FREEZER
+    while (pm_freezing)
+        msleep(10);
+#endif
+
     DPRINTK("ENTER\n");
     mutex_lock(&ap->scsi_scan_mutex);

View File

@@ -937,12 +937,14 @@ static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                         u64 snap_id)
 {
     u32 which;
+    const char *snap_name;
     which = rbd_dev_snap_index(rbd_dev, snap_id);
     if (which == BAD_SNAP_INDEX)
-        return NULL;
-    return _rbd_dev_v1_snap_name(rbd_dev, which);
+        return ERR_PTR(-ENOENT);
+
+    snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
+    return snap_name ? snap_name : ERR_PTR(-ENOMEM);
 }
 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
@@ -1126,6 +1128,7 @@ static void zero_bio_chain(struct bio *chain, int start_ofs)
             buf = bvec_kmap_irq(bv, &flags);
             memset(buf + remainder, 0,
                    bv->bv_len - remainder);
+            flush_dcache_page(bv->bv_page);
             bvec_kunmap_irq(buf, &flags);
         }
         pos += bv->bv_len;
@@ -1158,6 +1161,7 @@ static void zero_pages(struct page **pages, u64 offset, u64 end)
         local_irq_save(flags);
         kaddr = kmap_atomic(*page);
         memset(kaddr + page_offset, 0, length);
+        flush_dcache_page(*page);
         kunmap_atomic(kaddr);
         local_irq_restore(flags);
@@ -2171,9 +2175,9 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
     struct rbd_obj_request *obj_request = NULL;
     struct rbd_obj_request *next_obj_request;
     bool write_request = img_request_write_test(img_request);
-    struct bio *bio_list;
+    struct bio *bio_list = 0;
     unsigned int bio_offset = 0;
-    struct page **pages;
+    struct page **pages = 0;
     u64 img_offset;
     u64 resid;
     u16 opcode;
@@ -2211,6 +2215,11 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
         rbd_segment_name_free(object_name);
         if (!obj_request)
             goto out_unwind;
+        /*
+         * set obj_request->img_request before creating the
+         * osd_request so that it gets the right snapc
+         */
+        rbd_img_obj_request_add(img_request, obj_request);
         if (type == OBJ_REQUEST_BIO) {
             unsigned int clone_size;
@@ -2252,11 +2261,6 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
                     obj_request->pages, length,
                     offset & ~PAGE_MASK, false, false);
-        /*
-         * set obj_request->img_request before formatting
-         * the osd_request so that it gets the right snapc
-         */
-        rbd_img_obj_request_add(img_request, obj_request);
         if (write_request)
             rbd_osd_req_format_write(obj_request);
         else
@@ -2817,7 +2821,7 @@ out_err:
     obj_request_done_set(obj_request);
 }
-static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
+static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
 {
     struct rbd_obj_request *obj_request;
     struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
@@ -2832,16 +2836,17 @@ static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
     obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
     if (!obj_request->osd_req)
         goto out;
-    obj_request->callback = rbd_obj_request_put;
     osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
                     notify_id, 0, 0);
     rbd_osd_req_format_read(obj_request);
     ret = rbd_obj_request_submit(osdc, obj_request);
-out:
     if (ret)
-        rbd_obj_request_put(obj_request);
+        goto out;
+    ret = rbd_obj_request_wait(obj_request);
+out:
+    rbd_obj_request_put(obj_request);
     return ret;
 }
@@ -2861,7 +2866,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
     if (ret)
         rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
-    rbd_obj_notify_ack(rbd_dev, notify_id);
+    rbd_obj_notify_ack_sync(rbd_dev, notify_id);
 }
 /*
@@ -3333,6 +3338,31 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
         clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 }
+static void rbd_dev_update_size(struct rbd_device *rbd_dev)
+{
+    sector_t size;
+    bool removing;
+
+    /*
+     * Don't hold the lock while doing disk operations,
+     * or lock ordering will conflict with the bdev mutex via:
+     * rbd_add() -> blkdev_get() -> rbd_open()
+     */
+    spin_lock_irq(&rbd_dev->lock);
+    removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
+    spin_unlock_irq(&rbd_dev->lock);
+
+    /*
+     * If the device is being removed, rbd_dev->disk has
+     * been destroyed, so don't try to update its size
+     */
+    if (!removing) {
+        size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+        dout("setting size to %llu sectors", (unsigned long long)size);
+        set_capacity(rbd_dev->disk, size);
+        revalidate_disk(rbd_dev->disk);
+    }
+}
+
 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 {
     u64 mapping_size;
@@ -3351,12 +3381,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
     rbd_exists_validate(rbd_dev);
     mutex_unlock(&ctl_mutex);
     if (mapping_size != rbd_dev->mapping.size) {
-        sector_t size;
-
-        size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
-        dout("setting size to %llu sectors", (unsigned long long)size);
-        set_capacity(rbd_dev->disk, size);
-        revalidate_disk(rbd_dev->disk);
+        rbd_dev_update_size(rbd_dev);
     }
     return ret;
@@ -3710,12 +3735,14 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
     if (ret < sizeof (size_buf))
         return -ERANGE;
-    if (order)
+    if (order) {
         *order = size_buf.order;
+        dout("  order %u", (unsigned int)*order);
+    }
     *snap_size = le64_to_cpu(size_buf.size);
-    dout("  snap_id 0x%016llx order = %u, snap_size = %llu\n",
-        (unsigned long long)snap_id, (unsigned int)*order,
+    dout("  snap_id 0x%016llx snap_size = %llu\n",
+        (unsigned long long)snap_id,
         (unsigned long long)*snap_size);
     return 0;
@@ -4030,8 +4057,13 @@ static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
         snap_id = snapc->snaps[which];
         snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
-        if (IS_ERR(snap_name))
-            break;
+        if (IS_ERR(snap_name)) {
+            /* ignore no-longer existing snapshots */
+            if (PTR_ERR(snap_name) == -ENOENT)
+                continue;
+            else
+                break;
+        }
         found = !strcmp(name, snap_name);
         kfree(snap_name);
     }
@@ -4110,8 +4142,8 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
     /* Look up the snapshot name, and make a copy */
     snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
-    if (!snap_name) {
-        ret = -ENOMEM;
+    if (IS_ERR(snap_name)) {
+        ret = PTR_ERR(snap_name);
         goto out_err;
     }
@@ -5059,23 +5091,6 @@ err_out_module:
     return (ssize_t)rc;
 }
-static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
-{
-    struct list_head *tmp;
-    struct rbd_device *rbd_dev;
-
-    spin_lock(&rbd_dev_list_lock);
-    list_for_each(tmp, &rbd_dev_list) {
-        rbd_dev = list_entry(tmp, struct rbd_device, node);
-        if (rbd_dev->dev_id == dev_id) {
-            spin_unlock(&rbd_dev_list_lock);
-            return rbd_dev;
-        }
-    }
-    spin_unlock(&rbd_dev_list_lock);
-    return NULL;
-}
-
 static void rbd_dev_device_release(struct device *dev)
 {
     struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
@@ -5120,8 +5135,10 @@ static ssize_t rbd_remove(struct bus_type *bus,
                           size_t count)
 {
     struct rbd_device *rbd_dev = NULL;
-    int target_id;
+    struct list_head *tmp;
+    int dev_id;
     unsigned long ul;
+    bool already = false;
     int ret;
     ret = strict_strtoul(buf, 10, &ul);
@@ -5129,30 +5146,51 @@ static ssize_t rbd_remove(struct bus_type *bus,
         return ret;
     /* convert to int; abort if we lost anything in the conversion */
-    target_id = (int) ul;
-    if (target_id != ul)
+    dev_id = (int)ul;
+    if (dev_id != ul)
         return -EINVAL;
     mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-    rbd_dev = __rbd_get_dev(target_id);
-    if (!rbd_dev) {
-        ret = -ENOENT;
-        goto done;
+    ret = -ENOENT;
+    spin_lock(&rbd_dev_list_lock);
+    list_for_each(tmp, &rbd_dev_list) {
+        rbd_dev = list_entry(tmp, struct rbd_device, node);
+        if (rbd_dev->dev_id == dev_id) {
+            ret = 0;
+            break;
+        }
     }
-    spin_lock_irq(&rbd_dev->lock);
-    if (rbd_dev->open_count)
-        ret = -EBUSY;
-    else
-        set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
-    spin_unlock_irq(&rbd_dev->lock);
-    if (ret < 0)
+    if (!ret) {
+        spin_lock_irq(&rbd_dev->lock);
+        if (rbd_dev->open_count)
+            ret = -EBUSY;
+        else
+            already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
+                            &rbd_dev->flags);
+        spin_unlock_irq(&rbd_dev->lock);
+    }
+    spin_unlock(&rbd_dev_list_lock);
+    if (ret < 0 || already)
         goto done;
-    rbd_bus_del_dev(rbd_dev);
     ret = rbd_dev_header_watch_sync(rbd_dev, false);
     if (ret)
         rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
+
+    /*
+     * flush remaining watch callbacks - these must be complete
+     * before the osd_client is shutdown
+     */
+    dout("%s: flushing notifies", __func__);
+    ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
+
+    /*
+     * Don't free anything from rbd_dev->disk until after all
+     * notifies are completely processed. Otherwise
+     * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
+     * in a potential use after free of rbd_dev->disk or rbd_dev.
+     */
+    rbd_bus_del_dev(rbd_dev);
     rbd_dev_image_release(rbd_dev);
     module_put(THIS_MODULE);
     ret = count;

View File

@@ -186,27 +186,19 @@ u32 arch_timer_get_rate(void)
     return arch_timer_rate;
 }
-/*
- * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
- * call it before it has been initialised. Rather than incur a performance
- * penalty checking for initialisation, provide a default implementation that
- * won't lead to time appearing to jump backwards.
- */
-static u64 arch_timer_read_zero(void)
+u64 arch_timer_read_counter(void)
 {
-    return 0;
+    return arch_counter_get_cntvct();
 }
-u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
-
 static cycle_t arch_counter_read(struct clocksource *cs)
 {
-    return arch_timer_read_counter();
+    return arch_counter_get_cntvct();
 }
 static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
 {
-    return arch_timer_read_counter();
+    return arch_counter_get_cntvct();
 }
 static struct clocksource clocksource_counter = {
@@ -287,7 +279,7 @@ static int __init arch_timer_register(void)
     cyclecounter.mult = clocksource_counter.mult;
     cyclecounter.shift = clocksource_counter.shift;
     timecounter_init(&timecounter, &cyclecounter,
-             arch_counter_get_cntpct());
+             arch_counter_get_cntvct());
     if (arch_timer_use_virtual) {
         ppi = arch_timer_ppi[VIRT_PPI];
@@ -376,11 +368,6 @@ static void __init arch_timer_init(struct device_node *np)
         }
     }
-    if (arch_timer_use_virtual)
-        arch_timer_read_counter = arch_counter_get_cntvct;
-    else
-        arch_timer_read_counter = arch_counter_get_cntpct;
-
     arch_timer_register();
     arch_timer_arch_init();
 }

View File

@@ -77,7 +77,7 @@ static void __iomem *sched_io_base;
 static u32 read_sched_clock(void)
 {
-    return __raw_readl(sched_io_base);
+    return ~__raw_readl(sched_io_base);
 }
 static const struct of_device_id sptimer_ids[] __initconst = {

View File

@@ -755,6 +755,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
     cpu = all_cpu_data[cpunum];
     intel_pstate_get_cpu_pstates(cpu);
+    if (!cpu->pstate.current_pstate) {
+        all_cpu_data[cpunum] = NULL;
+        kfree(cpu);
+        return -ENODATA;
+    }
     cpu->cpu = cpunum;

View File

@@ -333,6 +333,7 @@ config NET_DMA
     bool "Network: TCP receive copy offload"
     depends on DMA_ENGINE && NET
     default (INTEL_IOATDMA || FSL_DMA)
+    depends on BROKEN
     help
       This enables the use of DMA engines in the network stack to
       offload receive copy-to-user operations, freeing CPU cycles.

View File

@@ -1626,7 +1626,6 @@ static struct scsi_host_template scsi_driver_template = {
     .cmd_per_lun = 1,
     .can_queue = 1,
     .sdev_attrs = sbp2_scsi_sysfs_attrs,
-    .no_write_same = 1,
 };
 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");

View File

@@ -248,7 +248,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
     spin_lock_irqsave(&tlmm_lock, irq_flags);
     writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
-    clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+    clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
     __clear_bit(gpio, msm_gpio.enabled_irqs);
     spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
@@ -260,7 +260,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
     spin_lock_irqsave(&tlmm_lock, irq_flags);
     __set_bit(gpio, msm_gpio.enabled_irqs);
-    set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+    set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
     writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
     spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }

View File

@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
     if (offset < TWL4030_GPIO_MAX)
         ret = twl4030_set_gpio_direction(offset, 1);
     else
-        ret = -EINVAL;
+        ret = -EINVAL; /* LED outputs can't be set as input */
     if (!ret)
         priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
 static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
 {
     struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
-    int ret = -EINVAL;
+    int ret = 0;
     mutex_lock(&priv->mutex);
-    if (offset < TWL4030_GPIO_MAX)
+    if (offset < TWL4030_GPIO_MAX) {
         ret = twl4030_set_gpio_direction(offset, 0);
+        if (ret) {
+            mutex_unlock(&priv->mutex);
+            return ret;
+        }
+    }
+
+    /*
+     * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
+     */
     priv->direction |= BIT(offset);
     mutex_unlock(&priv->mutex);

View File

@@ -68,10 +68,12 @@
 #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
 /* Force reduced-blanking timings for detailed modes */
 #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC (1 << 8)
 /* The panel supports, but does not include a lower clocked mode for lvds */
-#define EDID_QUIRK_ADD_DOWNCLOCK_MODE (1 << 8)
+#define EDID_QUIRK_ADD_DOWNCLOCK_MODE (1 << 9)
 /* The panel can reduce consumption with shorter blanking intervals */
-#define EDID_QUIRK_SHORT_BLANKING (1 << 9)
+#define EDID_QUIRK_SHORT_BLANKING (1 << 10)
 struct detailed_mode_closure {
@@ -134,6 +136,9 @@ static struct edid_quirk {
     /* Medion MD 30217 PG */
     { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+    /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+    { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+
     /* Samsung TFT-LCD LTN121AT10-301 */
     { "SEC", 0x3142, EDID_QUIRK_ADD_DOWNCLOCK_MODE },
     /* Acer B116XW03 */
@@ -3412,6 +3417,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
     drm_add_display_info(edid, &connector->display_info);
+    if (quirks & EDID_QUIRK_FORCE_8BPC)
+        connector->display_info.bpc = 8;
+
     return num_modes;
 }
 EXPORT_SYMBOL(drm_add_edid_modes);

View File

@@ -84,6 +84,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
     drm_i915_private_t *dev_priv = dev->dev_private;
     struct drm_i915_master_private *master_priv;
+    /*
+     * The dri breadcrumb update races against the drm master disappearing.
+     * Instead of trying to fix this (this is by far not the only ums issue)
+     * just don't do the update in kms mode.
+     */
+    if (drm_core_check_feature(dev, DRIVER_MODESET))
+        return;
+
     if (dev->primary->master) {
         master_priv = dev->primary->master->driver_priv;
         if (master_priv->sarea_priv)
@@ -1885,8 +1893,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+    mutex_lock(&dev->struct_mutex);
     i915_gem_context_close(dev, file_priv);
     i915_gem_release(dev, file_priv);
+    mutex_unlock(&dev->struct_mutex);
 }
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)

View File

@@ -335,10 +335,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
     struct drm_i915_file_private *file_priv = file->driver_priv;
-    mutex_lock(&dev->struct_mutex);
     idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
     idr_destroy(&file_priv->context_idr);
-    mutex_unlock(&dev->struct_mutex);
 }
 static struct i915_hw_context *

View File

@@ -11027,7 +11027,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
     intel_setup_overlay(dev);
+    drm_modeset_lock_all(dev);
     intel_modeset_setup_hw_state(dev, false);
+    drm_modeset_unlock_all(dev);
 }
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -11107,14 +11109,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
+    unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
     u16 gmch_ctrl;
-    pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+    pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
     if (state)
         gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
     else
         gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-    pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+    pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
     return 0;
 }

View File

@@ -1176,7 +1176,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
         if ((rdev->family == CHIP_TAHITI) ||
             (rdev->family == CHIP_PITCAIRN))
             fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
-        else if (rdev->family == CHIP_VERDE)
+        else if ((rdev->family == CHIP_VERDE) ||
+                 (rdev->family == CHIP_OLAND) ||
+                 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
             fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
         switch (radeon_crtc->crtc_id) {

View File

@@ -753,6 +753,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
         (rdev->pdev->device == 0x999C)) {
         rdev->config.cayman.max_simds_per_se = 6;
         rdev->config.cayman.max_backends_per_se = 2;
+        rdev->config.cayman.max_hw_contexts = 8;
+        rdev->config.cayman.sx_max_export_size = 256;
+        rdev->config.cayman.sx_max_export_pos_size = 64;
+        rdev->config.cayman.sx_max_export_smx_size = 192;
     } else if ((rdev->pdev->device == 0x9903) ||
         (rdev->pdev->device == 0x9904) ||
         (rdev->pdev->device == 0x990A) ||
@@ -763,6 +767,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
         (rdev->pdev->device == 0x999D)) {
         rdev->config.cayman.max_simds_per_se = 4;
         rdev->config.cayman.max_backends_per_se = 2;
+        rdev->config.cayman.max_hw_contexts = 8;
+        rdev->config.cayman.sx_max_export_size = 256;
+        rdev->config.cayman.sx_max_export_pos_size = 64;
+        rdev->config.cayman.sx_max_export_smx_size = 192;
     } else if ((rdev->pdev->device == 0x9919) ||
         (rdev->pdev->device == 0x9990) ||
        (rdev->pdev->device == 0x9991) ||
@@ -773,9 +781,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
         (rdev->pdev->device == 0x99A0)) {
         rdev->config.cayman.max_simds_per_se = 3;
         rdev->config.cayman.max_backends_per_se = 1;
+        rdev->config.cayman.max_hw_contexts = 4;
+        rdev->config.cayman.sx_max_export_size = 128;
+        rdev->config.cayman.sx_max_export_pos_size = 32;
+        rdev->config.cayman.sx_max_export_smx_size = 96;
     } else {
         rdev->config.cayman.max_simds_per_se = 2;
         rdev->config.cayman.max_backends_per_se = 1;
+        rdev->config.cayman.max_hw_contexts = 4;
+        rdev->config.cayman.sx_max_export_size = 128;
+        rdev->config.cayman.sx_max_export_pos_size = 32;
+        rdev->config.cayman.sx_max_export_smx_size = 96;
     }
     rdev->config.cayman.max_texture_channel_caches = 2;
     rdev->config.cayman.max_gprs = 256;
@@ -783,10 +799,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
     rdev->config.cayman.max_gs_threads = 32;
     rdev->config.cayman.max_stack_entries = 512;
     rdev->config.cayman.sx_num_of_sets = 8;
-    rdev->config.cayman.sx_max_export_size = 256;
-    rdev->config.cayman.sx_max_export_pos_size = 64;
-    rdev->config.cayman.sx_max_export_smx_size = 192;
-    rdev->config.cayman.max_hw_contexts = 8;
     rdev->config.cayman.sq_num_cf_insts = 2;
     rdev->config.cayman.sc_prim_fifo_size = 0x40;

View File

@@ -460,7 +460,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
         return -EINVAL;
     }
-    if ((start >> 28) != (end >> 28)) {
+    if ((start >> 28) != ((end - 1) >> 28)) {
         DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
               start, end);
         return -EINVAL;

View File

@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
     base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
     base = G_000100_MC_FB_START(base) << 16;
     rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+    /* Some boards seem to be configured for 128MB of sideport memory,
+     * but really only have 64MB. Just skip the sideport and use
+     * UMA memory.
+     */
+    if (rdev->mc.igp_sideport_enabled &&
+        (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+        base += 128 * 1024 * 1024;
+        rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+    }
     /* Use K8 direct mapping for fast fb access. */
     rdev->fastfb_working = false;

View File

@@ -409,6 +409,9 @@ static int intel_idle(struct cpuidle_device *dev,
     if (!current_set_polling_and_test()) {
+        if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+            clflush((void *)&current_thread_info()->flags);
+
         __monitor((void *)&current_thread_info()->flags, 0, 0);
         smp_mb();
         if (!need_resched())

View File

@@ -211,7 +211,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
         .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
         .address = 1,
         .scan_index = 1,
-        .scan_type = IIO_ST('u', 12, 16, 0),
+        .scan_type = {
+            .sign = 'u',
+            .realbits = 12,
+            .storagebits = 16,
+            .shift = 0,
+            .endianness = IIO_BE,
+        },
     },
     .channel[1] = {
         .type = IIO_VOLTAGE,
@@ -221,7 +227,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
         .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
         .address = 0,
         .scan_index = 0,
-        .scan_type = IIO_ST('u', 12, 16, 0),
+        .scan_type = {
+            .sign = 'u',
+            .realbits = 12,
+            .storagebits = 16,
+            .shift = 0,
+            .endianness = IIO_BE,
+        },
     },
     .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
     .int_vref_mv = 2500,

View File

@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
         .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
         .address = ADIS16448_BARO_OUT,
         .scan_index = ADIS16400_SCAN_BARO,
-        .scan_type = IIO_ST('s', 16, 16, 0),
+        .scan_type = {
+            .sign = 's',
+            .realbits = 16,
+            .storagebits = 16,
+            .endianness = IIO_BE,
+        },
     },
     ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
     IIO_CHAN_SOFT_TIMESTAMP(11)

View File

@@ -242,21 +242,29 @@ isert_create_device_ib_res(struct isert_device *device)
                         isert_cq_event_callback,
                         (void *)&cq_desc[i],
                         ISER_MAX_RX_CQ_LEN, i);
-        if (IS_ERR(device->dev_rx_cq[i]))
+        if (IS_ERR(device->dev_rx_cq[i])) {
+            ret = PTR_ERR(device->dev_rx_cq[i]);
+            device->dev_rx_cq[i] = NULL;
             goto out_cq;
+        }
         device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                         isert_cq_tx_callback,
                         isert_cq_event_callback,
                         (void *)&cq_desc[i],
                         ISER_MAX_TX_CQ_LEN, i);
-        if (IS_ERR(device->dev_tx_cq[i]))
-            goto out_cq;
+        if (IS_ERR(device->dev_tx_cq[i])) {
+            ret = PTR_ERR(device->dev_tx_cq[i]);
+            device->dev_tx_cq[i] = NULL;
+            goto out_cq;
+        }
-        if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+        ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+        if (ret)
             goto out_cq;
-        if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+        ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+        if (ret)
             goto out_cq;
     }

View File

@@ -1975,6 +1975,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
         break;
     case EV_ABS:
+        input_alloc_absinfo(dev);
+        if (!dev->absinfo)
+            return;
+
         __set_bit(code, dev->absbit);
         break;

View File

@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
     /* set LED in default state (end of init phase) */
     pcan_usb_pro_set_led(dev, 0, 1);
+    kfree(bi);
+    kfree(fi);
+
     return 0;
 err_out:

View File

@@ -7482,7 +7482,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 {
     u32 base = (u32) mapping & 0xffffffff;
-    return (base > 0xffffdcc0) && (base + len + 8 < base);
+    return base + len + 8 < base;
 }
 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
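The simplified test flags any buffer whose end wraps past a 4 GB boundary, not just bases above the old magic threshold. A worked example for illustration (values mine, not from the commit): base = 0xfffffff0 with len = 32 gives base + len + 8 = 0x100000018, which truncates to 0x18 in u32 arithmetic; 0x18 < base, so the overflow is detected where the old two-part condition could miss it.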

View File

@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
     dev->net->ethtool_ops = &dm9601_ethtool_ops;
     dev->net->hard_header_len += DM_TX_OVERHEAD;
     dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-    dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
+
+    /* dm9620/21a require room for 4 byte padding, even in dm9601
+     * mode, so we need +1 to be able to receive full size
+     * ethernet frames.
+     */
+    dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
     dev->mii.dev = dev->net;
     dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                        gfp_t flags)
 {
-    int len;
+    int len, pad;
     /* format:
        b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        b3..n: packet data
     */
-    len = skb->len;
-    if (skb_headroom(skb) < DM_TX_OVERHEAD) {
+    len = skb->len + DM_TX_OVERHEAD;
+
+    /* workaround for dm962x errata with tx fifo getting out of
+     * sync if a USB bulk transfer retry happens right after a
+     * packet with odd / maxpacket length by adding up to 3 bytes
+     * padding.
+     */
+    while ((len & 1) || !(len % dev->maxpacket))
+        len++;
+
+    len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
+    pad = len - skb->len;
+
+    if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
         struct sk_buff *skb2;
-        skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
+        skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
         dev_kfree_skb_any(skb);
         skb = skb2;
         if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
     __skb_push(skb, DM_TX_OVERHEAD);
-    /* usbnet adds padding if length is a multiple of packet size
-       if so, adjust length value in header */
-    if ((skb->len % dev->maxpacket) == 0)
-        len++;
+    if (pad) {
+        memset(skb->data + skb->len, 0, pad);
+        __skb_put(skb, pad);
+    }
     skb->data[0] = len;
     skb->data[1] = len >> 8;

View File

@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
             mask2 |= ATH9K_INT_CST;
         if (isr2 & AR_ISR_S2_TSFOOR)
             mask2 |= ATH9K_INT_TSFOOR;
+
+        if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+            REG_WRITE(ah, AR_ISR_S2, isr2);
+            isr &= ~AR_ISR_BCNMISC;
+        }
     }
-    isr = REG_READ(ah, AR_ISR_RAC);
+    if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+        isr = REG_READ(ah, AR_ISR_RAC);
+
     if (isr == 0xffffffff) {
         *masked = 0;
         return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
         *masked |= ATH9K_INT_TX;
-        s0_s = REG_READ(ah, AR_ISR_S0_S);
+        if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+            s0_s = REG_READ(ah, AR_ISR_S0_S);
+            s1_s = REG_READ(ah, AR_ISR_S1_S);
+        } else {
+            s0_s = REG_READ(ah, AR_ISR_S0);
+            REG_WRITE(ah, AR_ISR_S0, s0_s);
+            s1_s = REG_READ(ah, AR_ISR_S1);
+            REG_WRITE(ah, AR_ISR_S1, s1_s);
+
+            isr &= ~(AR_ISR_TXOK |
+                 AR_ISR_TXDESC |
+                 AR_ISR_TXERR |
+                 AR_ISR_TXEOL);
+        }
+
         ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
         ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-        s1_s = REG_READ(ah, AR_ISR_S1_S);
         ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
         ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
     }
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
         *masked |= mask2;
     }
-    if (AR_SREV_9100(ah))
-        return true;
-
-    if (isr & AR_ISR_GENTMR) {
+    if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
         u32 s5_s;
-        s5_s = REG_READ(ah, AR_ISR_S5_S);
+        if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+            s5_s = REG_READ(ah, AR_ISR_S5_S);
+        } else {
+            s5_s = REG_READ(ah, AR_ISR_S5);
+        }
         ah->intr_gen_timer_trigger =
                 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
         if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
             !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
             *masked |= ATH9K_INT_TIM_TIMER;
+
+        if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+            REG_WRITE(ah, AR_ISR_S5, s5_s);
+            isr &= ~AR_ISR_GENTMR;
+        }
     }
+
+    if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+        REG_WRITE(ah, AR_ISR, isr);
+        REG_READ(ah, AR_ISR);
+    }
+
+    if (AR_SREV_9100(ah))
+        return true;
+
     if (sync_cause) {
         ath9k_debug_sync_cause(common, sync_cause);
         fatal_int =

View File

@@ -734,6 +734,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 	};
 	int index = rtlpci->rx_ring[rx_queue_idx].idx;
 
+	if (rtlpci->driver_is_goingto_unload)
+		return;
 	/*RX NORMAL PKT */
 	while (count--) {
 		/*rx descriptor */
@@ -1630,6 +1632,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
 	 */
 	set_hal_stop(rtlhal);
 
+	rtlpci->driver_is_goingto_unload = true;
 	rtlpriv->cfg->ops->disable_interrupt(hw);
 	cancel_work_sync(&rtlpriv->works.lps_change_work);
@@ -1647,7 +1650,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
 	ppsc->rfchange_inprogress = true;
 	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
 
-	rtlpci->driver_is_goingto_unload = true;
 	rtlpriv->cfg->ops->hw_disable(hw);
 	/* some things are not needed if firmware not available */
 	if (!rtlpriv->max_fw_size)

View File

@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
 		 (unsigned long long)cp, (unsigned long long)s,
 		 (unsigned long long)da);
 
-	/*
-	 * If the number of address cells is larger than 2 we assume the
-	 * mapping doesn't specify a physical address. Rather, the address
-	 * specifies an identifier that must match exactly.
-	 */
-	if (na > 2 && memcmp(range, addr, na * 4) != 0)
-		return OF_BAD_ADDR;
-
 	if (da < cp || da >= (cp + s))
 		return OF_BAD_ADDR;
 	return da - cp;
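
With the exact-match special case removed, the default bus map is a plain range check plus offset. A minimal userspace sketch of the surviving logic (names and sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define OF_BAD_ADDR	((uint64_t)-1)

/* The retained of_bus_default_map() check: the child address 'da' must
 * fall inside [cp, cp + s) and translates to its offset in the range. */
static uint64_t default_map(uint64_t cp, uint64_t s, uint64_t da)
{
	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

int main(void)
{
	printf("%llx\n", (unsigned long long)default_map(0x1000, 0x100, 0x1040)); /* 40 */
	printf("%llx\n", (unsigned long long)default_map(0x1000, 0x100, 0x2000)); /* OF_BAD_ADDR */
	return 0;
}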

View File

@@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
 		return rc;
 	}
 
-	tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
+	tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
 	if (IS_ERR(tp->screen)) {
 		rc = PTR_ERR(tp->screen);
 		raw3270_put_view(&tp->view);

View File

@@ -66,7 +66,8 @@ enum pci_8255_boardid {
 	BOARD_ADLINK_PCI7296,
 	BOARD_CB_PCIDIO24,
 	BOARD_CB_PCIDIO24H,
-	BOARD_CB_PCIDIO48H,
+	BOARD_CB_PCIDIO48H_OLD,
+	BOARD_CB_PCIDIO48H_NEW,
 	BOARD_CB_PCIDIO96H,
 	BOARD_NI_PCIDIO96,
 	BOARD_NI_PCIDIO96B,
@@ -109,11 +110,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
 		.dio_badr	= 2,
 		.n_8255		= 1,
 	},
-	[BOARD_CB_PCIDIO48H] = {
+	[BOARD_CB_PCIDIO48H_OLD] = {
 		.name		= "cb_pci-dio48h",
 		.dio_badr	= 1,
 		.n_8255		= 2,
 	},
+	[BOARD_CB_PCIDIO48H_NEW] = {
+		.name		= "cb_pci-dio48h",
+		.dio_badr	= 2,
+		.n_8255		= 2,
+	},
 	[BOARD_CB_PCIDIO96H] = {
 		.name		= "cb_pci-dio96h",
 		.dio_badr	= 2,
@@ -270,7 +276,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
 	{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
 	{ PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
 	{ PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
-	{ PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
+	  .driver_data = BOARD_CB_PCIDIO48H_OLD },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
+	  .driver_data = BOARD_CB_PCIDIO48H_NEW },
 	{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
 	{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
 	{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },

View File

@@ -838,24 +838,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
 		/*
-		 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
-		 * that adds support for RESERVE/RELEASE.  There is a bug
-		 * add with this new functionality that sets R/W bits when
-		 * neither CDB carries any READ or WRITE datapayloads.
+		 * From RFC-3720 Section 10.3.1:
+		 *
+		 * "Either or both of R and W MAY be 1 when either the
+		 *  Expected Data Transfer Length and/or Bidirectional Read
+		 *  Expected Data Transfer Length are 0"
+		 *
+		 * For this case, go ahead and clear the unnecssary bits
+		 * to avoid any confusion with ->data_direction.
 		 */
-		if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
-			hdr->flags &= ~ISCSI_FLAG_CMD_READ;
-			hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
-			goto done;
-		}
+		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
 
-		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 			" set when Expected Data Transfer Length is 0 for"
-			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-		return iscsit_add_reject_cmd(cmd,
-				ISCSI_REASON_BOOKMARK_INVALID, buf);
+			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
 	}
-done:
+
 	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
 	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {

View File

@@ -1078,6 +1078,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
 	dev->dev_attrib.block_size = block_size;
 	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
 			dev, block_size);
+
+	if (dev->dev_attrib.max_bytes_per_io)
+		dev->dev_attrib.hw_max_sectors =
+			dev->dev_attrib.max_bytes_per_io / block_size;
+
 	return 0;
 }

View File

@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
 		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
 		TARGET_CORE_MOD_VERSION);
-	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
-		" MaxSectors: %u\n",
-		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+		hba->hba_id, fd_host->fd_host_id);
 
 	return 0;
 }
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
 	}
 
 	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
-	dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
 	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
 	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {

View File

@@ -7,7 +7,10 @@
 #define FD_DEVICE_QUEUE_DEPTH	32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE		512
-#define FD_MAX_SECTORS		2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES		8388608
 
 #define RRF_EMULATE_CDB		0x01
 #define RRF_GOT_LBA		0x02
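
The 8388608 figure is the iovec ceiling times the page size: 2048 iovecs covering one 4 KiB page each. A quick check of the arithmetic (assuming 4 KiB pages and the default 512-byte block size):

#include <stdio.h>

int main(void)
{
	/* 2048 iovecs * 4096-byte pages = 8 MiB = FD_MAX_BYTES */
	printf("%d\n", 2048 * 4096);	/* 8388608 */
	/* with 512-byte logical blocks, hw_max_sectors becomes */
	printf("%d\n", 8388608 / 512);	/* 16384 */
	return 0;
}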

View File

@@ -369,6 +369,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
 static const struct acpi_device_id dw8250_acpi_match[] = {
 	{ "INT33C4", 0 },
 	{ "INT33C5", 0 },
+	{ "INT3434", 0 },
+	{ "INT3435", 0 },
 	{ "80860F0A", 0 },
 	{ },
 };

View File

@@ -2051,6 +2051,9 @@ static int __init pmz_console_init(void)
 	/* Probe ports */
 	pmz_probe();
 
+	if (pmz_ports_count == 0)
+		return -ENODEV;
+
 	/* TODO: Autoprobe console based on OF */
 	/* pmz_console.index = i; */
 	register_console(&pmz_console);

View File

@@ -176,14 +176,7 @@ retry:
 		return result;
 	}
 
-	/* Try sending off another urb, unless in irq context (in which case
-	 * there will be no free urb). */
-	if (!in_irq())
-		goto retry;
-
-	clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
-
-	return 0;
+	/* try sending off another urb */
+	goto retry;
 }
 
 /**

View File

@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_MF628			0x0015
 #define ZTE_PRODUCT_MF626			0x0031
 #define ZTE_PRODUCT_MC2718			0xffe8
+#define ZTE_PRODUCT_AC2726			0xfff1
 
 #define BENQ_VENDOR_ID				0x04a5
 #define BENQ_PRODUCT_H10			0x4068
@@ -1456,6 +1457,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
 	{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },

View File

@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x19d2, 0xfffd) },
 	{ USB_DEVICE(0x19d2, 0xfffc) },
 	{ USB_DEVICE(0x19d2, 0xfffb) },
-	/* AC2726, AC8710_V3 */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+	/* AC8710_V3 */
 	{ USB_DEVICE(0x19d2, 0xfff6) },
 	{ USB_DEVICE(0x19d2, 0xfff7) },
 	{ USB_DEVICE(0x19d2, 0xfff8) },

View File

@@ -213,9 +213,13 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 	if (err < 0) {
 		SetPageError(page);
 		goto out;
-	} else if (err < PAGE_CACHE_SIZE) {
+	} else {
+		if (err < PAGE_CACHE_SIZE) {
 		/* zero fill remainder of page */
 		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		} else {
+			flush_dcache_page(page);
+		}
 	}
 
 	SetPageUptodate(page);

View File

@@ -313,9 +313,9 @@ static int striped_read(struct inode *inode,
 {
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	u64 pos, this_len;
+	u64 pos, this_len, left;
 	int io_align, page_align;
-	int left, pages_left;
+	int pages_left;
 	int read;
 	struct page **page_pos;
 	int ret;
@@ -346,47 +346,40 @@ more:
 		ret = 0;
 	hit_stripe = this_len < left;
 	was_short = ret >= 0 && ret < this_len;
-	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
+	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
 	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 
-	if (ret > 0) {
-		int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
-
-		if (read < pos - off) {
-			dout(" zero gap %llu to %llu\n", off + read, pos);
-			ceph_zero_page_vector_range(page_align + read,
-						    pos - off - read, pages);
+	if (ret >= 0) {
+		int didpages;
+		if (was_short && (pos + ret < inode->i_size)) {
+			u64 tmp = min(this_len - ret,
+				      inode->i_size - pos - ret);
+			dout(" zero gap %llu to %llu\n",
+			     pos + ret, pos + ret + tmp);
+			ceph_zero_page_vector_range(page_align + read + ret,
+						    tmp, pages);
+			ret += tmp;
 		}
+
+		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
 		pos += ret;
 		read = pos - off;
 		left -= ret;
 		page_pos += didpages;
 		pages_left -= didpages;
 
-		/* hit stripe? */
-		if (left && hit_stripe)
+		/* hit stripe and need continue*/
+		if (left && hit_stripe && pos < inode->i_size)
 			goto more;
 	}
 
-	if (was_short) {
+	if (read > 0) {
+		ret = read;
 		/* did we bounce off eof? */
 		if (pos + left > inode->i_size)
 			*checkeof = 1;
-
-		/* zero trailing bytes (inside i_size) */
-		if (left > 0 && pos < inode->i_size) {
-			if (pos + left > inode->i_size)
-				left = inode->i_size - pos;
-
-			dout("zero tail %d\n", left);
-			ceph_zero_page_vector_range(page_align + read, left,
-						    pages);
-			read += left;
-		}
 	}
 
-	if (ret >= 0)
-		ret = read;
 	dout("striped_read returns %d\n", ret);
 	return ret;
 }
@@ -618,6 +611,8 @@ out:
 		if (check_caps)
 			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
 					NULL);
+	} else if (ret != -EOLDSNAPC && written > 0) {
+		ret = written;
 	}
 	return ret;
 }
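
The new short-read handling zero-fills at most up to i_size: the gap is min(this_len - ret, i_size - pos - ret) bytes immediately after the data that did arrive. A standalone sketch of that bound (illustrative numbers, plain integers instead of the kernel types):

#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* Bytes to zero-fill after a short striped read: up to the end of the
 * requested extent, but never past the end of the file. */
static uint64_t zero_gap(uint64_t this_len, uint64_t ret,
			 uint64_t pos, uint64_t i_size)
{
	if (ret < this_len && pos + ret < i_size)
		return min_u64(this_len - ret, i_size - pos - ret);
	return 0;
}

int main(void)
{
	/* got 1000 of 4096 requested at pos 0 in a 2000-byte file:
	 * zero only up to i_size, not the whole remaining extent */
	printf("%llu\n", (unsigned long long)zero_gap(4096, 1000, 0, 2000)); /* 1000 */
	return 0;
}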

View File

@@ -211,8 +211,12 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
 	snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
 		 ceph_ino(inode), dl.object_no);
-	ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap,
-		ceph_file_layout_pg_pool(ci->i_layout));
+
+	r = ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap,
+			      ceph_file_layout_pg_pool(ci->i_layout));
+	if (r < 0) {
+		up_read(&osdc->map_sem);
+		return r;
+	}
 
 	dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
 	if (dl.osd >= 0) {

View File

@@ -414,6 +414,9 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
 {
 	struct ceph_mds_session *s;
 
+	if (mds >= mdsc->mdsmap->m_max_mds)
+		return ERR_PTR(-EINVAL);
+
 	s = kzalloc(sizeof(*s), GFP_NOFS);
 	if (!s)
 		return ERR_PTR(-ENOMEM);
@@ -639,6 +642,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
 		req->r_unsafe_dir = NULL;
 	}
 
+	complete_all(&req->r_safe_completion);
+
 	ceph_mdsc_put_request(req);
 }
@@ -1840,8 +1845,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
 	int mds = -1;
 	int err = -EAGAIN;
 
-	if (req->r_err || req->r_got_result)
+	if (req->r_err || req->r_got_result) {
+		if (req->r_aborted)
+			__unregister_request(mdsc, req);
 		goto out;
+	}
 
 	if (req->r_timeout &&
 	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
@@ -2151,7 +2159,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 	if (head->safe) {
 		req->r_got_safe = true;
 		__unregister_request(mdsc, req);
-		complete_all(&req->r_safe_completion);
 
 		if (req->r_got_unsafe) {
 			/*
@@ -3040,8 +3047,10 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
 	fsc->mdsc = mdsc;
 	mutex_init(&mdsc->mutex);
 	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
-	if (mdsc->mdsmap == NULL)
+	if (mdsc->mdsmap == NULL) {
+		kfree(mdsc);
 		return -ENOMEM;
+	}
 
 	init_completion(&mdsc->safe_umount_waiters);
 	init_waitqueue_head(&mdsc->session_close_wq);

View File

@@ -138,6 +138,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 			m->m_info[mds].export_targets =
 				kcalloc(num_export_targets, sizeof(u32),
 					GFP_NOFS);
+			if (m->m_info[mds].export_targets == NULL)
+				goto badmem;
 			for (j = 0; j < num_export_targets; j++)
 				m->m_info[mds].export_targets[j] =
 				       ceph_decode_32(&pexport_targets);
@@ -170,7 +172,7 @@ bad:
 		       DUMP_PREFIX_OFFSET, 16, 1,
 		       start, end - start, true);
 	ceph_mdsmap_destroy(m);
-	return ERR_PTR(-EINVAL);
+	return ERR_PTR(err);
 }
 
 void ceph_mdsmap_destroy(struct ceph_mdsmap *m)

View File

@@ -357,7 +357,7 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
 	}
 	err = -EINVAL;
 	dev_name_end--;		/* back up to ':' separator */
-	if (*dev_name_end != ':') {
+	if (dev_name_end < dev_name || *dev_name_end != ':') {
 		pr_err("device name is missing path (no : separator in %s)\n",
 				dev_name);
 		goto out;

View File

@@ -999,6 +999,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	struct address_space *mapping = inode->i_mapping;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
 	int rv;
@@ -1019,6 +1020,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 	if (rv != 1)
 		goto out; /* dio not valid, fall back to buffered i/o */
 
+	/*
+	 * Now since we are holding a deferred (CW) lock at this point, you
+	 * might be wondering why this is ever needed. There is a case however
+	 * where we've granted a deferred local lock against a cached exclusive
+	 * glock. That is ok provided all granted local locks are deferred, but
+	 * it also means that it is possible to encounter pages which are
+	 * cached and possibly also mapped. So here we check for that and sort
+	 * them out ahead of the dio. The glock state machine will take care of
+	 * everything else.
+	 *
+	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
+	 * the first place, mapping->nr_pages will always be zero.
+	 */
+	if (mapping->nrpages) {
+		loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+		loff_t len = iov_length(iov, nr_segs);
+		loff_t end = PAGE_ALIGN(offset + len) - 1;
+
+		rv = 0;
+		if (len == 0)
+			goto out;
+		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
+		rv = filemap_write_and_wait_range(mapping, lstart, end);
+		if (rv)
+			return rv;
+		truncate_inode_pages_range(mapping, lstart, end);
+	}
+
 	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
 				  offset, nr_segs, gfs2_get_block_direct,
 				  NULL, NULL, 0);
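
The flush/invalidate window in the new block ends at the last byte of the page containing offset + len - 1, per the PAGE_ALIGN(offset + len) - 1 expression. A sketch of that rounding, assuming the usual 4 KiB PAGE_CACHE_SIZE:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset = 5000, len = 100;

	/* offset + len - 1 = 5099 lives in the second page (4096..8191),
	 * so the inclusive end of the window is 8191 */
	printf("end = %lu\n", PAGE_ALIGN(offset + len) - 1);	/* 8191 */
	return 0;
}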

View File

@@ -1317,8 +1317,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
 	if (IS_ERR(s))
 		goto error_bdev;
 
-	if (s->s_root)
+	if (s->s_root) {
+		/*
+		 * s_umount nests inside bd_mutex during
+		 * __invalidate_device().  blkdev_put() acquires
+		 * bd_mutex and can't be called under s_umount.  Drop
+		 * s_umount temporarily.  This is safe as we're
+		 * holding an active reference.
+		 */
+		up_write(&s->s_umount);
 		blkdev_put(bdev, mode);
+		down_write(&s->s_umount);
+	}
 
 	memset(&args, 0, sizeof(args));
 	args.ar_quota = GFS2_QUOTA_DEFAULT;

View File

@@ -220,7 +220,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif
 
 #ifndef pte_accessible
-# define pte_accessible(pte)		((void)(pte),1)
+# define pte_accessible(mm, pte)	((void)(pte), 1)
 #endif
 
 #ifndef flush_tlb_fix_spurious_fault

View File

@@ -32,7 +32,7 @@
 #ifdef CONFIG_ARM_ARCH_TIMER
 
 extern u32 arch_timer_get_rate(void);
-extern u64 (*arch_timer_read_counter)(void);
+extern u64 arch_timer_read_counter(void);
 extern struct timecounter *arch_timer_get_timecounter(void);
 
 #else

View File

@@ -559,7 +559,7 @@
 	{0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
 	{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
-	{0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+	{0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
 	{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \

View File

@@ -3,6 +3,6 @@
 
 #include <uapi/linux/auxvec.h>
 
-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
 
 #endif /* _LINUX_AUXVEC_H */

View File

@@ -145,7 +145,6 @@ struct ceph_osd_request {
 	s32               r_reply_op_result[CEPH_OSD_MAX_OP];
 	int               r_got_reply;
 	int		  r_linger;
-	int		  r_completed;
 
 	struct ceph_osd_client *r_osdc;
 	struct kref       r_kref;
@@ -336,6 +335,8 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
 				  struct ceph_osd_request *req);
 extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
 
+extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+
 extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 			       struct ceph_vino vino,
 			       struct ceph_file_layout *layout,

View File

@@ -399,6 +399,7 @@ enum {
 	ATA_HORKAGE_BROKEN_FPDMA_AA	= (1 << 15),	/* skip AA */
 	ATA_HORKAGE_DUMP_ID		= (1 << 16),	/* dump IDENTIFY data */
 	ATA_HORKAGE_MAX_SEC_LBA48	= (1 << 17),	/* Set max sects to 65535 */
+	ATA_HORKAGE_ATAPI_DMADIR	= (1 << 18),	/* device requires dmadir */
 
 	 /* DMA mask for user DMA control: User visible values; DO NOT
 	    renumber */

View File

@@ -437,6 +437,14 @@ struct mm_struct {
 	 * a different node than Make PTE Scan Go Now.
 	 */
 	int first_nid;
+#endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+	/*
+	 * An operation with batched TLB flushing is going on. Anything that
+	 * can move process memory needs to flush the TLB when moving a
+	 * PROT_NONE or PROT_NUMA mapped page.
+	 */
+	bool tlb_flush_pending;
 #endif
 	struct uprobes_state uprobes_state;
 };
@@ -458,4 +466,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+
+	/*
+	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * critical section updating the page tables
+	 */
+	smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */
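
Stripped of the barriers, the helpers are just a flag protocol. A single-threaded userspace mock (barrier()/smp_mb__before_spinlock() elided, so this shows only the intended set, change PTEs, flush, clear ordering, not the concurrency guarantees):

#include <stdbool.h>
#include <stdio.h>

/* Mock of the new mm_struct field and its accessors. */
struct mm_mock { bool tlb_flush_pending; };

static void set_tlb_flush_pending(struct mm_mock *mm)   { mm->tlb_flush_pending = true; }
static void clear_tlb_flush_pending(struct mm_mock *mm) { mm->tlb_flush_pending = false; }
static bool mm_tlb_flush_pending(struct mm_mock *mm)    { return mm->tlb_flush_pending; }

int main(void)
{
	struct mm_mock mm = { false };

	set_tlb_flush_pending(&mm);
	/* ... change_protection() would rewrite PTEs here ... */
	printf("racing fault must flush: %d\n", mm_tlb_flush_pending(&mm)); /* 1 */
	/* ... flush_tlb_range() runs ... */
	clear_tlb_flush_pending(&mm);
	return 0;
}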

View File

@@ -117,9 +117,17 @@ do {								\
 #endif /*arch_spin_is_contended*/
 #endif
 
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily has to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is the one-way barrier, this LOAD can not escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE can not move into the critical section, smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
 /**

View File

@@ -614,6 +614,7 @@ struct se_dev_attrib {
 	u32		unmap_granularity;
 	u32		unmap_granularity_alignment;
 	u32		max_write_same_len;
+	u32		max_bytes_per_io;
 	struct se_device *da_dev;
 	struct config_group da_group;
 };

View File

@@ -561,6 +561,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 	mm->cached_hole_size = ~0UL;
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
+	clear_tlb_flush_pending(mm);
 
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
View File

@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
 bool pm_freezing;
 bool pm_nosig_freezing;
 
+/*
+ * Temporary export for the deadlock workaround in ata_scsi_hotplug().
+ * Remove once the hack becomes unnecessary.
+ */
+EXPORT_SYMBOL_GPL(pm_freezing);
+
 /* protects freezing and frozen transitions */
 static DEFINE_SPINLOCK(freezer_lock);

View File

@@ -1488,7 +1488,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	unsigned long flags;
 	int cpu, success = 0;
 
-	smp_wmb();
+	/*
+	 * If we are going to wake up a thread waiting for CONDITION we
+	 * need to ensure that CONDITION=1 done by the caller can not be
+	 * reordered with p->state check below. This pairs with mb() in
+	 * set_current_state() the waiting thread does.
+	 */
+	smp_mb__before_spinlock();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	if (!(p->state & state))
 		goto out;
@@ -2995,6 +3001,12 @@ need_resched:
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
+	/*
+	 * Make sure that signal_pending_state()->signal_pending() below
+	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+	 * done by the caller to avoid the race with signal_wake_up().
+	 */
+	smp_mb__before_spinlock();
 	raw_spin_lock_irq(&rq->lock);
 
 	switch_count = &prev->nivcsw;

View File

@@ -936,6 +936,13 @@ void task_numa_work(struct callback_head *work)
 		if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
 			continue;
 
+		/*
+		 * Skip inaccessible VMAs to avoid any confusion between
+		 * PROT_NONE and NUMA hinting ptes
+		 */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);

View File

@@ -924,6 +924,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Change rq's cpupri only if rt_rq is the top queue.
+	 */
+	if (&rq->rt != rt_rq)
+		return;
+#endif
 	if (rq->online && prio < prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
@@ -933,6 +940,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Change rq's cpupri only if rt_rq is the top queue.
+	 */
+	if (&rq->rt != rt_rq)
+		return;
+#endif
 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }

View File

@@ -750,7 +750,7 @@ static int ftrace_profile_init(void)
 	int cpu;
 	int ret = 0;
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ret = ftrace_profile_init_cpu(cpu);
 		if (ret)
 			break;
View File

@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
 			bool migrate_scanner)
 {
 	struct zone *zone = cc->zone;
+
+	if (cc->ignore_skip_hint)
+		return;
+
 	if (!page)
 		return;
View File

@@ -203,9 +203,10 @@ get_write_lock:
 		if (mapping_cap_account_dirty(mapping)) {
 			unsigned long addr;
 			struct file *file = get_file(vma->vm_file);
+			/* mmap_region may free vma; grab the info now */
+			vm_flags = vma->vm_flags;
 
-			addr = mmap_region(file, start, size,
-					vma->vm_flags, pgoff);
+			addr = mmap_region(file, start, size, vm_flags, pgoff);
 			fput(file);
 			if (IS_ERR_VALUE(addr)) {
 				err = addr;
@@ -213,7 +214,7 @@ get_write_lock:
 				BUG_ON(addr != start);
 				err = 0;
 			}
-			goto out;
+			goto out_freed;
 		}
 		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
@@ -248,6 +249,7 @@ get_write_lock:
 out:
 	if (vma)
 		vm_flags = vma->vm_flags;
+out_freed:
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else

View File

@@ -1344,6 +1344,20 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_unlock;
 	}
 
+	/* Bail if we fail to protect against THP splits for any reason */
+	if (unlikely(!anon_vma)) {
+		put_page(page);
+		page_nid = -1;
+		goto clear_pmdnuma;
+	}
+
+	/*
+	 * The page_table_lock above provides a memory barrier
+	 * with change_protection_range.
+	 */
+	if (mm_tlb_flush_pending(mm))
+		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and pmd_numa cleared.

View File

@@ -385,7 +385,7 @@ struct mem_cgroup {
 static size_t memcg_size(void)
 {
 	return sizeof(struct mem_cgroup) +
-		nr_node_ids * sizeof(struct mem_cgroup_per_node);
+		nr_node_ids * sizeof(struct mem_cgroup_per_node *);
 }
 
 /* internal only representation about the status of kmem accounting. */
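
The fix is one character but a large allocation delta: the tail of struct mem_cgroup is an array of per-node pointers, so each slot needs pointer size, not the size of the whole per-node struct. A userspace sketch with a stand-in struct:

#include <stdio.h>

struct mem_cgroup_per_node { char pad[512]; };	/* stand-in size */

int main(void)
{
	int nr_node_ids = 4;

	/* before: one full per-node struct per node is allocated */
	printf("%zu\n", nr_node_ids * sizeof(struct mem_cgroup_per_node));	/* 2048 */
	/* after: one pointer slot per node, matching the array's type */
	printf("%zu\n", nr_node_ids * sizeof(struct mem_cgroup_per_node *));	/* 32 */
	return 0;
}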

View File

@@ -936,6 +936,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 			BUG_ON(!PageHWPoison(p));
 			return SWAP_FAIL;
 		}
+		/*
+		 * We pinned the head page for hwpoison handling,
+		 * now we split the thp and we are interested in
+		 * the hwpoisoned raw page, so move the refcount
+		 * to it.
+		 */
+		if (hpage != p) {
+			put_page(hpage);
+			get_page(p);
+		}
 		/* THP is split, so ppage should be the real poisoned page. */
 		ppage = p;
 	}

Some files were not shown because too many files have changed in this diff.