Updated from Linux LTS 3.10.24 to 3.10.25

Nathan 2025-04-09 19:32:12 -05:00
parent cc92f436cd
commit 92cb237c3b
85 changed files with 642 additions and 313 deletions

View File

@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 24
+SUBLEVEL = 25
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish

View File

@@ -410,6 +410,7 @@ EXPORT_SYMBOL(dump_fpu);
 unsigned long get_wchan(struct task_struct *p)
 {
     struct stackframe frame;
+    unsigned long stack_page;
     int count = 0;
     if (!p || p == current || p->state == TASK_RUNNING)
         return 0;
@@ -418,9 +419,11 @@ unsigned long get_wchan(struct task_struct *p)
     frame.sp = thread_saved_sp(p);
     frame.lr = 0;            /* recovered from the stack */
     frame.pc = thread_saved_pc(p);
+    stack_page = (unsigned long)task_stack_page(p);
     do {
-        int ret = unwind_frame(&frame);
-        if (ret < 0)
+        if (frame.sp < stack_page ||
+            frame.sp >= stack_page + THREAD_SIZE ||
+            unwind_frame(&frame) < 0)
             return 0;
         if (!in_sched_functions(frame.pc))
             return frame.pc;

View File

@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
     high = ALIGN(low, THREAD_SIZE);

     /* check current frame pointer is within bounds */
-    if (fp < (low + 12) || fp + 4 >= high)
+    if (fp < low + 12 || fp > high - 4)
         return -EINVAL;

     /* restore the registers from the stack frame */

View File

@@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
 }

 /**
- * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v
+ * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
  * @oh: struct omap_hwmod *
  * @v: pointer to register contents to modify
  *
@@ -426,6 +426,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
     return 0;
 }

+/**
+ * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
+ * @oh: struct omap_hwmod *
+ * @v: pointer to register contents to modify
+ *
+ * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon
+ * error or 0 upon success.
+ */
+static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
+{
+    u32 softrst_mask;
+
+    if (!oh->class->sysc ||
+        !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
+        return -EINVAL;
+
+    if (!oh->class->sysc->sysc_fields) {
+        WARN(1,
+             "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
+             oh->name);
+        return -EINVAL;
+    }
+
+    softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
+
+    *v &= ~softrst_mask;
+
+    return 0;
+}
+
 /**
  * _wait_softreset_complete - wait for an OCP softreset to complete
  * @oh: struct omap_hwmod * to wait on
@@ -1909,6 +1939,12 @@ static int _ocp_softreset(struct omap_hwmod *oh)
     ret = _set_softreset(oh, &v);
     if (ret)
         goto dis_opt_clks;
+
+    _write_sysconfig(v, oh);
+    ret = _clear_softreset(oh, &v);
+    if (ret)
+        goto dis_opt_clks;
+
     _write_sysconfig(v, oh);

     if (oh->class->sysc->srst_udelay)
@@ -3148,6 +3184,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
         goto error;
     _write_sysconfig(v, oh);

+    ret = _clear_softreset(oh, &v);
+    if (ret)
+        goto error;
+
+    _write_sysconfig(v, oh);
 error:
     return ret;
 }

View File

@@ -1930,7 +1930,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
     .syss_offs    = 0x0014,
     .sysc_flags   = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
                      SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
-                     SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+                     SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+                     SYSS_HAS_RESET_STATUS),
     .idlemodes    = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
                      MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
     .sysc_fields  = &omap_hwmod_sysc_type1,
@@ -2008,15 +2009,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
      * hence HWMOD_SWSUP_MSTANDBY
      */

-    /*
-     * During system boot; If the hwmod framework resets the module
-     * the module will have smart idle settings; which can lead to deadlock
-     * (above Errata Id:i660); so, dont reset the module during boot;
-     * Use HWMOD_INIT_NO_RESET.
-     */
-    .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
-             HWMOD_INIT_NO_RESET,
+    .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
 };

 /*

View File

@@ -13,6 +13,7 @@
 #include <mach/regs-ost.h>
 #include <mach/reset.h>
+#include <mach/smemc.h>

 unsigned int reset_status;
 EXPORT_SYMBOL(reset_status);
@@ -81,6 +82,12 @@ static void do_hw_reset(void)
     writel_relaxed(OSSR_M3, OSSR);
     /* ... in 100 ms */
     writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
+    /*
+     * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71)
+     * we put SDRAM into self-refresh to prevent that
+     */
+    while (1)
+        writel_relaxed(MDREFR_SLFRSH, MDREFR);
 }

 void pxa_restart(char mode, const char *cmd)
@@ -104,4 +111,3 @@ void pxa_restart(char mode, const char *cmd)
         break;
     }
 }
-

View File

@ -424,57 +424,57 @@ static struct platform_device tosa_power_device = {
* Tosa Keyboard * Tosa Keyboard
*/ */
static const uint32_t tosakbd_keymap[] = { static const uint32_t tosakbd_keymap[] = {
KEY(0, 2, KEY_W), KEY(0, 1, KEY_W),
KEY(0, 6, KEY_K), KEY(0, 5, KEY_K),
KEY(0, 7, KEY_BACKSPACE), KEY(0, 6, KEY_BACKSPACE),
KEY(0, 8, KEY_P), KEY(0, 7, KEY_P),
KEY(1, 1, KEY_Q), KEY(1, 0, KEY_Q),
KEY(1, 2, KEY_E), KEY(1, 1, KEY_E),
KEY(1, 3, KEY_T), KEY(1, 2, KEY_T),
KEY(1, 4, KEY_Y), KEY(1, 3, KEY_Y),
KEY(1, 6, KEY_O), KEY(1, 5, KEY_O),
KEY(1, 7, KEY_I), KEY(1, 6, KEY_I),
KEY(1, 8, KEY_COMMA), KEY(1, 7, KEY_COMMA),
KEY(2, 1, KEY_A), KEY(2, 0, KEY_A),
KEY(2, 2, KEY_D), KEY(2, 1, KEY_D),
KEY(2, 3, KEY_G), KEY(2, 2, KEY_G),
KEY(2, 4, KEY_U), KEY(2, 3, KEY_U),
KEY(2, 6, KEY_L), KEY(2, 5, KEY_L),
KEY(2, 7, KEY_ENTER), KEY(2, 6, KEY_ENTER),
KEY(2, 8, KEY_DOT), KEY(2, 7, KEY_DOT),
KEY(3, 1, KEY_Z), KEY(3, 0, KEY_Z),
KEY(3, 2, KEY_C), KEY(3, 1, KEY_C),
KEY(3, 3, KEY_V), KEY(3, 2, KEY_V),
KEY(3, 4, KEY_J), KEY(3, 3, KEY_J),
KEY(3, 5, TOSA_KEY_ADDRESSBOOK), KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
KEY(3, 6, TOSA_KEY_CANCEL), KEY(3, 5, TOSA_KEY_CANCEL),
KEY(3, 7, TOSA_KEY_CENTER), KEY(3, 6, TOSA_KEY_CENTER),
KEY(3, 8, TOSA_KEY_OK), KEY(3, 7, TOSA_KEY_OK),
KEY(3, 9, KEY_LEFTSHIFT), KEY(3, 8, KEY_LEFTSHIFT),
KEY(4, 1, KEY_S), KEY(4, 0, KEY_S),
KEY(4, 2, KEY_R), KEY(4, 1, KEY_R),
KEY(4, 3, KEY_B), KEY(4, 2, KEY_B),
KEY(4, 4, KEY_N), KEY(4, 3, KEY_N),
KEY(4, 5, TOSA_KEY_CALENDAR), KEY(4, 4, TOSA_KEY_CALENDAR),
KEY(4, 6, TOSA_KEY_HOMEPAGE), KEY(4, 5, TOSA_KEY_HOMEPAGE),
KEY(4, 7, KEY_LEFTCTRL), KEY(4, 6, KEY_LEFTCTRL),
KEY(4, 8, TOSA_KEY_LIGHT), KEY(4, 7, TOSA_KEY_LIGHT),
KEY(4, 10, KEY_RIGHTSHIFT), KEY(4, 9, KEY_RIGHTSHIFT),
KEY(5, 1, KEY_TAB), KEY(5, 0, KEY_TAB),
KEY(5, 2, KEY_SLASH), KEY(5, 1, KEY_SLASH),
KEY(5, 3, KEY_H), KEY(5, 2, KEY_H),
KEY(5, 4, KEY_M), KEY(5, 3, KEY_M),
KEY(5, 5, TOSA_KEY_MENU), KEY(5, 4, TOSA_KEY_MENU),
KEY(5, 7, KEY_UP), KEY(5, 6, KEY_UP),
KEY(5, 11, TOSA_KEY_FN), KEY(5, 10, TOSA_KEY_FN),
KEY(6, 1, KEY_X), KEY(6, 0, KEY_X),
KEY(6, 2, KEY_F), KEY(6, 1, KEY_F),
KEY(6, 3, KEY_SPACE), KEY(6, 2, KEY_SPACE),
KEY(6, 4, KEY_APOSTROPHE), KEY(6, 3, KEY_APOSTROPHE),
KEY(6, 5, TOSA_KEY_MAIL), KEY(6, 4, TOSA_KEY_MAIL),
KEY(6, 6, KEY_LEFT), KEY(6, 5, KEY_LEFT),
KEY(6, 7, KEY_DOWN), KEY(6, 6, KEY_DOWN),
KEY(6, 8, KEY_RIGHT), KEY(6, 7, KEY_RIGHT),
}; };
static struct matrix_keymap_data tosakbd_keymap_data = { static struct matrix_keymap_data tosakbd_keymap_data = {

View File

@@ -50,16 +50,20 @@ static inline struct page *dma_addr_to_page(struct device *dev,
 }

 /*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
+ * speculatively fill random cachelines with stale data at any time,
+ * requiring an extra flush post-DMA.
+ *
  * Warning on the terminology - Linux calls an uncached area coherent;
  * MIPS terminology calls memory areas with hardware maintained coherency
  * coherent.
  */
-static inline int cpu_is_noncoherent_r10000(struct device *dev)
+static inline int cpu_needs_post_dma_flush(struct device *dev)
 {
     return !plat_device_is_coherent(dev) &&
            (current_cpu_type() == CPU_R10000 ||
-            current_cpu_type() == CPU_R12000);
+            current_cpu_type() == CPU_R12000 ||
+            current_cpu_type() == CPU_BMIPS5000);
 }

 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
@@ -230,7 +234,7 @@ static inline void __dma_sync(struct page *page,
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
     size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
-    if (cpu_is_noncoherent_r10000(dev))
+    if (cpu_needs_post_dma_flush(dev))
         __dma_sync(dma_addr_to_page(dev, dma_addr),
                    dma_addr & ~PAGE_MASK, size, direction);

@@ -281,7 +285,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 static void mips_dma_sync_single_for_cpu(struct device *dev,
     dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-    if (cpu_is_noncoherent_r10000(dev))
+    if (cpu_needs_post_dma_flush(dev))
         __dma_sync(dma_addr_to_page(dev, dma_handle),
                    dma_handle & ~PAGE_MASK, size, direction);
 }
@@ -302,7 +306,7 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
     /* Make sure that gcc doesn't leave the empty loop body. */
     for (i = 0; i < nelems; i++, sg++) {
-        if (cpu_is_noncoherent_r10000(dev))
+        if (cpu_needs_post_dma_flush(dev))
             __dma_sync(sg_page(sg), sg->offset, sg->length,
                        direction);
     }

View File

@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                   unsigned long address)
 {
-    struct page *page = page_address(table);
-
     tlb_flush_pgtable(tlb, address);
-    pgtable_page_dtor(page);
-    pgtable_free_tlb(tlb, page, 0);
+    pgtable_page_dtor(table);
+    pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_PGALLOC_32_H */

View File

@@ -144,11 +144,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                   unsigned long address)
 {
-    struct page *page = page_address(table);
-
     tlb_flush_pgtable(tlb, address);
-    pgtable_page_dtor(page);
-    pgtable_free_tlb(tlb, page, 0);
+    pgtable_page_dtor(table);
+    pgtable_free_tlb(tlb, page_address(table), 0);
 }

 #else /* if CONFIG_PPC_64K_PAGES */

View File

@@ -31,8 +31,8 @@ ifeq ($(CONFIG_X86_32),y)

         KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return

-        # Don't autogenerate SSE instructions
-        KBUILD_CFLAGS += -mno-sse
+        # Don't autogenerate MMX or SSE instructions
+        KBUILD_CFLAGS += -mno-mmx -mno-sse

         # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
         # with nonstandard options
@@ -60,8 +60,8 @@ else
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64

-        # Don't autogenerate SSE instructions
-        KBUILD_CFLAGS += -mno-sse
+        # Don't autogenerate MMX or SSE instructions
+        KBUILD_CFLAGS += -mno-mmx -mno-sse

         # Use -mpreferred-stack-boundary=3 if supported.
         KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)

View File

@ -153,6 +153,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
} }
#define KVM_X2APIC_CID_BITS 0
static void recalculate_apic_map(struct kvm *kvm) static void recalculate_apic_map(struct kvm *kvm)
{ {
struct kvm_apic_map *new, *old = NULL; struct kvm_apic_map *new, *old = NULL;
@ -190,7 +192,8 @@ static void recalculate_apic_map(struct kvm *kvm)
if (apic_x2apic_mode(apic)) { if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32; new->ldr_bits = 32;
new->cid_shift = 16; new->cid_shift = 16;
new->cid_mask = new->lid_mask = 0xffff; new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) && } else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ && !new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
@ -855,7 +858,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
ASSERT(apic != NULL); ASSERT(apic != NULL);
/* if initial count is 0, current count should also be 0 */ /* if initial count is 0, current count should also be 0 */
if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
apic->lapic_timer.period == 0)
return 0; return 0;
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
@ -1705,7 +1709,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{ {
u32 data; u32 data;
void *vapic;
if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
@ -1713,9 +1716,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return; return;
vapic = kmap_atomic(vcpu->arch.apic->vapic_page); kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); sizeof(u32));
kunmap_atomic(vapic);
apic_set_tpr(vcpu->arch.apic, data & 0xff); apic_set_tpr(vcpu->arch.apic, data & 0xff);
} }
@ -1751,7 +1753,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
u32 data, tpr; u32 data, tpr;
int max_irr, max_isr; int max_irr, max_isr;
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
void *vapic;
apic_sync_pv_eoi_to_guest(vcpu, apic); apic_sync_pv_eoi_to_guest(vcpu, apic);
@ -1767,20 +1768,26 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
max_isr = 0; max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
vapic = kmap_atomic(vcpu->arch.apic->vapic_page); kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; sizeof(u32));
kunmap_atomic(vapic);
} }
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{ {
vcpu->arch.apic->vapic_addr = vapic_addr; if (vapic_addr) {
if (vapic_addr) if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.apic->vapic_cache,
vapic_addr, sizeof(u32)))
return -EINVAL;
__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
else } else {
__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
} }
vcpu->arch.apic->vapic_addr = vapic_addr;
return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;

View File

@@ -34,7 +34,7 @@ struct kvm_lapic {
      */
     void *regs;
     gpa_t vapic_addr;
-    struct page *vapic_page;
+    struct gfn_to_hva_cache vapic_cache;
     unsigned long pending_events;
     unsigned int sipi_vector;
 };
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);

-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);

View File

@ -3138,8 +3138,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT; r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va)) if (copy_from_user(&va, argp, sizeof va))
goto out; goto out;
r = 0; r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break; break;
} }
case KVM_X86_SETUP_MCE: { case KVM_X86_SETUP_MCE: {
@ -5539,36 +5538,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu); !kvm_event_needs_reinjection(vcpu);
} }
static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
if (is_error_page(page))
return -EFAULT;
vcpu->arch.apic->vapic_page = page;
return 0;
}
static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int idx;
if (!apic || !apic->vapic_addr)
return;
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu) static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{ {
int max_irr, tpr; int max_irr, tpr;
@ -5889,11 +5858,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
r = vapic_enter(vcpu);
if (r) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
return r;
}
r = 1; r = 1;
while (r > 0) { while (r > 0) {
@ -5951,8 +5915,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
vapic_exit(vcpu);
return r; return r;
} }

View File

@@ -780,13 +780,6 @@ void __init efi_init(void)
         set_bit(EFI_MEMMAP, &x86_efi_facility);

-#ifdef CONFIG_X86_32
-    if (efi_is_native()) {
-        x86_platform.get_wallclock = efi_get_time;
-        x86_platform.set_wallclock = efi_set_rtc_mmss;
-    }
-#endif
-
 #if EFI_DEBUG
     print_efi_memmap();
 #endif

View File

@@ -114,9 +114,6 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
     struct hash_ctx *ctx = ask->private;
     int err;

-    if (flags & MSG_SENDPAGE_NOTLAST)
-        flags |= MSG_MORE;
-
     if (flags & MSG_SENDPAGE_NOTLAST)
         flags |= MSG_MORE;

View File

@@ -378,9 +378,6 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
     struct skcipher_sg_list *sgl;
     int err = -EINVAL;

-    if (flags & MSG_SENDPAGE_NOTLAST)
-        flags |= MSG_MORE;
-
     if (flags & MSG_SENDPAGE_NOTLAST)
         flags |= MSG_MORE;

View File

@@ -5830,7 +5830,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
     uint16_t postoff = 0;

     if (intel_crtc->config.limited_color_range)
-        postoff = (16 * (1 << 13) / 255) & 0x1fff;
+        postoff = (16 * (1 << 12) / 255) & 0x1fff;

     I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
     I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);

View File

@@ -279,9 +279,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
             WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
             WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
         }
-    } else if (ASIC_IS_DCE3(rdev)) {
+    } else {
         /* according to the reg specs, this should DCE3.2 only, but in
-         * practice it seems to cover DCE3.0/3.1 as well.
+         * practice it seems to cover DCE2.0/3.0/3.1 as well.
          */
         if (dig->dig_encoder == 0) {
             WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@@ -292,10 +292,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
             WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
             WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
         }
-    } else {
-        /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
-        WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
-               AUDIO_DTO_MODULE(clock / 10));
     }
 }

View File

@@ -3615,8 +3615,15 @@ static int si_mc_init(struct radeon_device *rdev)
     rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
     rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
     /* size in MB on si */
-    rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-    rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+    tmp = RREG32(CONFIG_MEMSIZE);
+    /* some boards may have garbage in the upper 16 bits */
+    if (tmp & 0xffff0000) {
+        DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+        if (tmp & 0xffff)
+            tmp &= 0xffff;
+    }
+    rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+    rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
     rdev->mc.visible_vram_size = rdev->mc.aper_size;
     si_vram_gtt_location(rdev, &rdev->mc);
     radeon_update_bandwidth_info(rdev);

View File

@ -43,6 +43,7 @@
* @last_update: time of last update (jiffies) * @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value * @temperature: cached temperature measurement value
* @humidity: cached humidity measurement value * @humidity: cached humidity measurement value
* @write_length: length for I2C measurement request
*/ */
struct hih6130 { struct hih6130 {
struct device *hwmon_dev; struct device *hwmon_dev;
@ -51,6 +52,7 @@ struct hih6130 {
unsigned long last_update; unsigned long last_update;
int temperature; int temperature;
int humidity; int humidity;
size_t write_length;
}; };
/** /**
@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client)
*/ */
if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) { if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
/* write to slave address, no data, to request a measurement */ /*
ret = i2c_master_send(client, tmp, 0); * Write to slave address to request a measurement.
* According with the datasheet it should be with no data, but
* for systems with I2C bus drivers that do not allow zero
* length packets we write one dummy byte to allow sensor
* measurements on them.
*/
tmp[0] = 0;
ret = i2c_master_send(client, tmp, hih6130->write_length);
if (ret < 0) if (ret < 0)
goto out; goto out;
@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client,
goto fail_remove_sysfs; goto fail_remove_sysfs;
} }
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
hih6130->write_length = 1;
return 0; return 0;
fail_remove_sysfs: fail_remove_sysfs:

View File

@@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
 {
     if (rpm <= 0)
         return 255;
+    if (rpm > 1350000)
+        return 1;
     return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }

View File

@@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
 {
     if (rpm <= 0)
         return 255;
+    if (rpm > 1350000)
+        return 1;
     return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }

View File

@@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
  */
 static inline u8 FAN_TO_REG(long rpm, int div)
 {
-    if (rpm == 0)
+    if (rpm <= 0 || rpm > 1310720)
         return 0;
     return clamp_val(1310720 / (rpm * div), 1, 255);
 }

View File

@@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
     if (err)
         return err;
     val = clamp_val(val, 0, 255);
+    val = DIV_ROUND_CLOSEST(val, 0x11);

     mutex_lock(&data->update_lock);
-    data->pwm[nr] = val;
+    data->pwm[nr] = val * 0x11;
+    val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
     w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
     mutex_unlock(&data->update_lock);
     return count;
@@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
     mutex_lock(&data->update_lock);
     reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
     data->pwm_enable[nr] = val;
-    reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
+    reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
     reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
     w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
     mutex_unlock(&data->update_lock);
@@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
                 ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
                 ? 0 : 1;
             data->pwm_enable[i] =
-                ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1;
-            data->pwm[i] = w83l786ng_read_value(client,
-                W83L786NG_REG_PWM[i]);
+                ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
+            data->pwm[i] =
+                (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
+                 & 0x0f) * 0x11;
         }

View File

@@ -167,6 +167,7 @@ static const struct xpad_device {
     { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
     { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
     { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+    { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
     { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
     { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
     { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },

View File

@ -106,6 +106,7 @@ struct usbtouch_device_info {
struct usbtouch_usb { struct usbtouch_usb {
unsigned char *data; unsigned char *data;
dma_addr_t data_dma; dma_addr_t data_dma;
int data_size;
unsigned char *buffer; unsigned char *buffer;
int buf_len; int buf_len;
struct urb *irq; struct urb *irq;
@ -1521,7 +1522,7 @@ static int usbtouch_reset_resume(struct usb_interface *intf)
static void usbtouch_free_buffers(struct usb_device *udev, static void usbtouch_free_buffers(struct usb_device *udev,
struct usbtouch_usb *usbtouch) struct usbtouch_usb *usbtouch)
{ {
usb_free_coherent(udev, usbtouch->type->rept_size, usb_free_coherent(udev, usbtouch->data_size,
usbtouch->data, usbtouch->data_dma); usbtouch->data, usbtouch->data_dma);
kfree(usbtouch->buffer); kfree(usbtouch->buffer);
} }
@ -1566,7 +1567,20 @@ static int usbtouch_probe(struct usb_interface *intf,
if (!type->process_pkt) if (!type->process_pkt)
type->process_pkt = usbtouch_process_pkt; type->process_pkt = usbtouch_process_pkt;
usbtouch->data = usb_alloc_coherent(udev, type->rept_size, usbtouch->data_size = type->rept_size;
if (type->get_pkt_len) {
/*
* When dealing with variable-length packets we should
* not request more than wMaxPacketSize bytes at once
* as we do not know if there is more data coming or
* we filled exactly wMaxPacketSize bytes and there is
* nothing else.
*/
usbtouch->data_size = min(usbtouch->data_size,
usb_endpoint_maxp(endpoint));
}
usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size,
GFP_KERNEL, &usbtouch->data_dma); GFP_KERNEL, &usbtouch->data_dma);
if (!usbtouch->data) if (!usbtouch->data)
goto out_free; goto out_free;
@ -1626,12 +1640,12 @@ static int usbtouch_probe(struct usb_interface *intf,
if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT) if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT)
usb_fill_int_urb(usbtouch->irq, udev, usb_fill_int_urb(usbtouch->irq, udev,
usb_rcvintpipe(udev, endpoint->bEndpointAddress), usb_rcvintpipe(udev, endpoint->bEndpointAddress),
usbtouch->data, type->rept_size, usbtouch->data, usbtouch->data_size,
usbtouch_irq, usbtouch, endpoint->bInterval); usbtouch_irq, usbtouch, endpoint->bInterval);
else else
usb_fill_bulk_urb(usbtouch->irq, udev, usb_fill_bulk_urb(usbtouch->irq, udev,
usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), usb_rcvbulkpipe(udev, endpoint->bEndpointAddress),
usbtouch->data, type->rept_size, usbtouch->data, usbtouch->data_size,
usbtouch_irq, usbtouch); usbtouch_irq, usbtouch);
usbtouch->irq->dev = udev; usbtouch->irq->dev = udev;

View File

@@ -1660,6 +1660,11 @@ static int __init dm_bufio_init(void)
 {
     __u64 mem;

+    dm_bufio_allocated_kmem_cache = 0;
+    dm_bufio_allocated_get_free_pages = 0;
+    dm_bufio_allocated_vmalloc = 0;
+    dm_bufio_current_allocated = 0;
+
     memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
     memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

View File

@ -20,6 +20,7 @@
struct delay_c { struct delay_c {
struct timer_list delay_timer; struct timer_list delay_timer;
struct mutex timer_lock; struct mutex timer_lock;
struct workqueue_struct *kdelayd_wq;
struct work_struct flush_expired_bios; struct work_struct flush_expired_bios;
struct list_head delayed_bios; struct list_head delayed_bios;
atomic_t may_delay; atomic_t may_delay;
@ -45,14 +46,13 @@ struct dm_delay_info {
static DEFINE_MUTEX(delayed_bios_lock); static DEFINE_MUTEX(delayed_bios_lock);
static struct workqueue_struct *kdelayd_wq;
static struct kmem_cache *delayed_cache; static struct kmem_cache *delayed_cache;
static void handle_delayed_timer(unsigned long data) static void handle_delayed_timer(unsigned long data)
{ {
struct delay_c *dc = (struct delay_c *)data; struct delay_c *dc = (struct delay_c *)data;
queue_work(kdelayd_wq, &dc->flush_expired_bios); queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
} }
static void queue_timeout(struct delay_c *dc, unsigned long expires) static void queue_timeout(struct delay_c *dc, unsigned long expires)
@ -191,6 +191,12 @@ out:
goto bad_dev_write; goto bad_dev_write;
} }
dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!dc->kdelayd_wq) {
DMERR("Couldn't start kdelayd");
goto bad_queue;
}
setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
@ -203,6 +209,8 @@ out:
ti->private = dc; ti->private = dc;
return 0; return 0;
bad_queue:
mempool_destroy(dc->delayed_pool);
bad_dev_write: bad_dev_write:
if (dc->dev_write) if (dc->dev_write)
dm_put_device(ti, dc->dev_write); dm_put_device(ti, dc->dev_write);
@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
{ {
struct delay_c *dc = ti->private; struct delay_c *dc = ti->private;
flush_workqueue(kdelayd_wq); destroy_workqueue(dc->kdelayd_wq);
dm_put_device(ti, dc->dev_read); dm_put_device(ti, dc->dev_read);
@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
{ {
int r = -ENOMEM; int r = -ENOMEM;
kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!kdelayd_wq) {
DMERR("Couldn't start kdelayd");
goto bad_queue;
}
delayed_cache = KMEM_CACHE(dm_delay_info, 0); delayed_cache = KMEM_CACHE(dm_delay_info, 0);
if (!delayed_cache) { if (!delayed_cache) {
DMERR("Couldn't create delayed bio cache."); DMERR("Couldn't create delayed bio cache.");
@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
bad_register: bad_register:
kmem_cache_destroy(delayed_cache); kmem_cache_destroy(delayed_cache);
bad_memcache: bad_memcache:
destroy_workqueue(kdelayd_wq);
bad_queue:
return r; return r;
} }
@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
{ {
dm_unregister_target(&delay_target); dm_unregister_target(&delay_target);
kmem_cache_destroy(delayed_cache); kmem_cache_destroy(delayed_cache);
destroy_workqueue(kdelayd_wq);
} }
/* Module hooks */ /* Module hooks */

View File

@ -66,6 +66,18 @@ struct dm_snapshot {
atomic_t pending_exceptions_count; atomic_t pending_exceptions_count;
/* Protected by "lock" */
sector_t exception_start_sequence;
/* Protected by kcopyd single-threaded callback */
sector_t exception_complete_sequence;
/*
* A list of pending exceptions that completed out of order.
* Protected by kcopyd single-threaded callback.
*/
struct list_head out_of_order_list;
mempool_t *pending_pool; mempool_t *pending_pool;
struct dm_exception_table pending; struct dm_exception_table pending;
@ -173,6 +185,14 @@ struct dm_snap_pending_exception {
*/ */
int started; int started;
/* There was copying error. */
int copy_error;
/* A sequence number, it is used for in-order completion. */
sector_t exception_sequence;
struct list_head out_of_order_entry;
/* /*
* For writing a complete chunk, bypassing the copy. * For writing a complete chunk, bypassing the copy.
*/ */
@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->valid = 1; s->valid = 1;
s->active = 0; s->active = 0;
atomic_set(&s->pending_exceptions_count, 0); atomic_set(&s->pending_exceptions_count, 0);
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
INIT_LIST_HEAD(&s->out_of_order_list);
init_rwsem(&s->lock); init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list); INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock); spin_lock_init(&s->pe_lock);
@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success)
pending_complete(pe, success); pending_complete(pe, success);
} }
static void complete_exception(struct dm_snap_pending_exception *pe)
{
struct dm_snapshot *s = pe->snap;
if (unlikely(pe->copy_error))
pending_complete(pe, 0);
else
/* Update the metadata if we are persistent */
s->store->type->commit_exception(s->store, &pe->e,
commit_callback, pe);
}
/* /*
* Called when the copy I/O has finished. kcopyd actually runs * Called when the copy I/O has finished. kcopyd actually runs
* this code so don't block. * this code so don't block.
@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
struct dm_snap_pending_exception *pe = context; struct dm_snap_pending_exception *pe = context;
struct dm_snapshot *s = pe->snap; struct dm_snapshot *s = pe->snap;
if (read_err || write_err) pe->copy_error = read_err || write_err;
pending_complete(pe, 0);
else if (pe->exception_sequence == s->exception_complete_sequence) {
/* Update the metadata if we are persistent */ s->exception_complete_sequence++;
s->store->type->commit_exception(s->store, &pe->e, complete_exception(pe);
commit_callback, pe);
while (!list_empty(&s->out_of_order_list)) {
pe = list_entry(s->out_of_order_list.next,
struct dm_snap_pending_exception, out_of_order_entry);
if (pe->exception_sequence != s->exception_complete_sequence)
break;
s->exception_complete_sequence++;
list_del(&pe->out_of_order_entry);
complete_exception(pe);
}
} else {
struct list_head *lh;
struct dm_snap_pending_exception *pe2;
list_for_each_prev(lh, &s->out_of_order_list) {
pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
if (pe2->exception_sequence < pe->exception_sequence)
break;
}
list_add(&pe->out_of_order_entry, lh);
}
} }
/* /*
@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s,
return NULL; return NULL;
} }
pe->exception_sequence = s->exception_start_sequence++;
dm_insert_exception(&s->pending, &pe->e); dm_insert_exception(&s->pending, &pe->e);
return pe; return pe;
@ -2192,7 +2249,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = { static struct target_type snapshot_target = {
.name = "snapshot", .name = "snapshot",
.version = {1, 11, 1}, .version = {1, 12, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = snapshot_ctr, .ctr = snapshot_ctr,
.dtr = snapshot_dtr, .dtr = snapshot_dtr,

View File

@@ -216,6 +216,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,

     num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

+    if (!num_targets) {
+        kfree(t);
+        return -ENOMEM;
+    }
+
     if (alloc_targets(t, num_targets)) {
         kfree(t);
         return -ENOMEM;

View File

@@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
      */
     r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
     if (r) {
-        DMERR_LIMIT("dm_thin_insert_block() failed");
+        DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
+                    dm_device_name(pool->pool_md), r);
+        set_pool_mode(pool, PM_READ_ONLY);
         cell_error(pool, m->cell);
         goto out;
     }

View File

@@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
      * The shadow op will often be a noop. Only insert if it really
      * copied data.
      */
-    if (dm_block_location(*block) != b)
+    if (dm_block_location(*block) != b) {
+        /*
+         * dm_tm_shadow_block will have already decremented the old
+         * block, but it is still referenced by the btree. We
+         * increment to stop the insert decrementing it below zero
+         * when overwriting the old value.
+         */
+        dm_tm_inc(info->btree_info.tm, b);
         r = insert_ablock(info, index, *block, root);
+    }

     return r;
 }

View File

@@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
     struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

     int r = sm_metadata_new_block_(sm, b);
-    if (r)
+    if (r) {
         DMERR("unable to allocate new metadata block");
+        return r;
+    }

     r = sm_metadata_get_nr_free(sm, &count);
-    if (r)
+    if (r) {
         DMERR("couldn't get free block count");
+        return r;
+    }

     check_threshold(&smm->threshold, count);

View File

@@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
 static int af9033_wr_reg_val_tab(struct af9033_state *state,
         const struct reg_val *tab, int tab_len)
 {
+#define MAX_TAB_LEN 212
     int ret, i, j;
-    u8 buf[MAX_XFER_SIZE];
+    u8 buf[1 + MAX_TAB_LEN];

-    dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
-
     if (tab_len > sizeof(buf)) {
-        dev_warn(&state->i2c->dev,
-             "%s: i2c wr len=%d is too big!\n",
+        dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n",
              KBUILD_MODNAME, tab_len);
         return -EINVAL;
     }

+    dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+
     for (i = 0, j = 0; i < tab_len; i++) {
         buf[j] = tab[i].val;

View File

@@ -34,7 +34,7 @@ static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
         {
             .addr = i2c,
             .flags = 0,
-            .len = sizeof(buf),
+            .len = len + 1,
             .buf = buf,
         }
     };
@@ -75,7 +75,7 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
         }, {
             .addr = i2c,
             .flags = I2C_M_RD,
-            .len = sizeof(buf),
+            .len = len,
             .buf = buf,
         }
     };

View File

@@ -131,12 +131,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
         return -EINVAL;
     }
     state->input = input;
-    if (!v4l2_ctrl_g_ctrl(state->mute))
+    if (v4l2_ctrl_g_ctrl(state->mute))
         return 0;
     if (!v4l2_ctrl_g_ctrl(state->vol))
         return 0;
-    if (!v4l2_ctrl_g_ctrl(state->bal))
-        return 0;
     wm8775_set_audio(sd, 1);
     return 0;
 }

View File

@@ -4226,6 +4226,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
     }
     btv->std = V4L2_STD_PAL;
     init_irqreg(btv);
-    v4l2_ctrl_handler_setup(hdl);
+    if (!bttv_tvcards[btv->c.type].no_video)
+        v4l2_ctrl_handler_setup(hdl);
     if (hdl->error) {
         result = hdl->error;

View File

@@ -1348,10 +1348,12 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
         if (fw_debug) {
             dev->kthread = kthread_run(saa7164_thread_function, dev,
                 "saa7164 debug");
-            if (!dev->kthread)
+            if (IS_ERR(dev->kthread)) {
+                dev->kthread = NULL;
                 printk(KERN_ERR "%s() Failed to create "
                     "debug kernel thread\n", __func__);
             }
+        }
     } /* != BOARD_UNKNOWN */
     else

View File

@ -130,7 +130,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
{ {
u8 wbuf[MAX_XFER_SIZE]; u8 wbuf[MAX_XFER_SIZE];
u8 mbox = (reg >> 16) & 0xff; u8 mbox = (reg >> 16) & 0xff;
struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL }; struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
if (6 + len > sizeof(wbuf)) { if (6 + len > sizeof(wbuf)) {
dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
@ -237,14 +237,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
} else { } else {
/* I2C */ /* I2C */
u8 buf[MAX_XFER_SIZE]; u8 buf[MAX_XFER_SIZE];
struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
buf, msg[1].len, msg[1].buf }; buf, msg[1].len, msg[1].buf };
if (5 + msg[0].len > sizeof(buf)) { if (5 + msg[0].len > sizeof(buf)) {
dev_warn(&d->udev->dev, dev_warn(&d->udev->dev,
"%s: i2c xfer: len=%d is too big!\n", "%s: i2c xfer: len=%d is too big!\n",
KBUILD_MODNAME, msg[0].len); KBUILD_MODNAME, msg[0].len);
return -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto unlock;
} }
req.mbox |= ((msg[0].addr & 0x80) >> 3); req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[1].len; buf[0] = msg[1].len;
@ -273,14 +274,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
} else { } else {
/* I2C */ /* I2C */
u8 buf[MAX_XFER_SIZE]; u8 buf[MAX_XFER_SIZE];
struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf, struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
0, NULL }; buf, 0, NULL };
if (5 + msg[0].len > sizeof(buf)) { if (5 + msg[0].len > sizeof(buf)) {
dev_warn(&d->udev->dev, dev_warn(&d->udev->dev,
"%s: i2c xfer: len=%d is too big!\n", "%s: i2c xfer: len=%d is too big!\n",
KBUILD_MODNAME, msg[0].len); KBUILD_MODNAME, msg[0].len);
return -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto unlock;
} }
req.mbox |= ((msg[0].addr & 0x80) >> 3); req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len; buf[0] = msg[0].len;
@ -300,6 +302,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
} }
unlock:
mutex_unlock(&d->i2c_mutex); mutex_unlock(&d->i2c_mutex);
if (ret < 0) if (ret < 0)
@ -1512,6 +1515,8 @@ static const struct usb_device_id af9035_id_table[] = {
/* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) }, &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
{ } { }
}; };
MODULE_DEVICE_TABLE(usb, af9035_id_table); MODULE_DEVICE_TABLE(usb, af9035_id_table);

View File

@@ -1595,6 +1595,9 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
              * ownership of the resources, wait and try again to
              * see if they have relinquished the resources yet.
              */
-            udelay(usec_interval);
+            if (usec_interval >= 1000)
+                mdelay(usec_interval/1000);
+            else
+                udelay(usec_interval);
         }
         ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);

View File

@@ -3937,12 +3937,13 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
     int quick_drop;
     s32 t[3], f[3] = {5180, 5500, 5785};

-    if (!(pBase->miscConfiguration & BIT(1)))
+    if (!(pBase->miscConfiguration & BIT(4)))
         return;

-    if (freq < 4000)
+    if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) {
+        if (freq < 4000) {
             quick_drop = eep->modalHeader2G.quick_drop;
-    else {
+        } else {
             t[0] = eep->base_ext1.quick_drop_low;
             t[1] = eep->modalHeader5G.quick_drop;
             t[2] = eep->base_ext1.quick_drop_high;
@@ -3950,6 +3951,7 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
         }
         REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
     }
+}

 static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
 {
@@ -3988,7 +3990,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
     struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
     u8 bias;

-    if (!(eep->baseEepHeader.featureEnable & 0x40))
+    if (!(eep->baseEepHeader.miscConfiguration & 0x40))
         return;

     if (!AR_SREV_9300(ah))

View File

@@ -119,6 +119,10 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,

     if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
         return -EINVAL;
+    if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
+        return -EINVAL;
+    if (drain < 0 || drain > 1)
+        return -EINVAL;

     mutex_lock(&mvm->mutex);

View File

@@ -309,8 +309,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
         if (bss_desc && bss_desc->ssid.ssid_len &&
             (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
                                ssid, &bss_desc->ssid))) {
-            kfree(bss_desc);
-            return 0;
+            ret = 0;
+            goto done;
         }

         /* Exit Adhoc mode first */

View File

@@ -19,6 +19,7 @@
 #include <linux/cpu.h>
 #include <linux/pm_runtime.h>
 #include <linux/suspend.h>
+#include <linux/kexec.h>
 #include "pci.h"

 struct pci_dynid {
@@ -388,12 +389,17 @@ static void pci_device_shutdown(struct device *dev)
     pci_msi_shutdown(pci_dev);
     pci_msix_shutdown(pci_dev);

+#ifdef CONFIG_KEXEC
     /*
-     * Turn off Bus Master bit on the device to tell it to not
-     * continue to do DMA. Don't touch devices in D3cold or unknown states.
+     * If this is a kexec reboot, turn off Bus Master bit on the
+     * device to tell it to not continue to do DMA. Don't touch
+     * devices in D3cold or unknown states.
+     * If it is not a kexec reboot, firmware will hit the PCI
+     * devices with big hammer and stop their DMA any way.
      */
-    if (pci_dev->current_state <= PCI_D3hot)
+    if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
         pci_clear_master(pci_dev);
+#endif
 }

 #ifdef CONFIG_PM

View File

@@ -221,6 +221,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)

     at91_alarm_year = tm.tm_year;

+    tm.tm_mon = alrm->time.tm_mon;
+    tm.tm_mday = alrm->time.tm_mday;
     tm.tm_hour = alrm->time.tm_hour;
     tm.tm_min = alrm->time.tm_min;
     tm.tm_sec = alrm->time.tm_sec;

View File

@@ -72,6 +72,9 @@ static int pc263_do_insn_bits(struct comedi_device *dev,
         outb(s->state & 0xFF, dev->iobase);
         outb(s->state >> 8, dev->iobase + 1);
     }
+
+    data[1] = s->state;
+
     return insn->n;
 }

View File

@@ -59,6 +59,9 @@ static int pci263_do_insn_bits(struct comedi_device *dev,
         outb(s->state & 0xFF, dev->iobase);
         outb(s->state >> 8, dev->iobase + 1);
     }
+
+    data[1] = s->state;
+
     return insn->n;
 }

View File

@@ -935,12 +935,13 @@ static void pcmuio_detach(struct comedi_device *dev)
     struct pcmuio_private *devpriv = dev->private;
     int i;

+    if (devpriv) {
         for (i = 0; i < MAX_ASICS; ++i) {
             if (devpriv->asics[i].irq)
                 free_irq(devpriv->asics[i].irq, dev);
         }
-    if (devpriv && devpriv->sprivs)
         kfree(devpriv->sprivs);
+    }
     comedi_legacy_detach(dev);
 }

View File

@@ -87,11 +87,11 @@ static int dnp_dio_insn_bits(struct comedi_device *dev,

     /* on return, data[1] contains the value of the digital input lines. */
     outb(PADR, CSCIR);
-    data[0] = inb(CSCDR);
+    data[1] = inb(CSCDR);
     outb(PBDR, CSCIR);
-    data[0] += inb(CSCDR) << 8;
+    data[1] += inb(CSCDR) << 8;
     outb(PCDR, CSCIR);
-    data[0] += ((inb(CSCDR) & 0xF0) << 12);
+    data[1] += ((inb(CSCDR) & 0xF0) << 12);

     return insn->n;

View File

@@ -478,6 +478,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
         dep = dwc3_wIndex_to_dep(dwc, wIndex);
         if (!dep)
             return -EINVAL;
+        if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
+            break;
         ret = __dwc3_gadget_ep_set_halt(dep, set);
         if (ret)
             return -EINVAL;

View File

@@ -1220,9 +1220,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
         else
             dep->flags |= DWC3_EP_STALL;
     } else {
-        if (dep->flags & DWC3_EP_WEDGE)
-            return 0;
-
         ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
             DWC3_DEPCMD_CLEARSTALL, &params);
         if (ret)
@@ -1230,7 +1227,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
             value ? "set" : "clear",
             dep->name);
         else
-            dep->flags &= ~DWC3_EP_STALL;
+            dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
     }

     return ret;

View File

@@ -593,6 +593,7 @@ static void reset_config(struct usb_composite_dev *cdev)
         bitmap_zero(f->endpoints, 32);
     }
     cdev->config = NULL;
+    cdev->delayed_status = 0;
 }

 static int set_config(struct usb_composite_dev *cdev,

View File

@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K4505 0x1464
#define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_K4605 0x14C6 #define HUAWEI_PRODUCT_K4605 0x14C6
#define HUAWEI_PRODUCT_E173S6 0x1C07
#define QUANTA_VENDOR_ID 0x0408 #define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02 #define QUANTA_PRODUCT_Q101 0xEA02
@ -575,6 +576,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &net_intf1_blacklist }, .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &net_intf2_blacklist }, .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
@ -637,6 +640,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
@ -691,6 +698,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
@ -745,6 +756,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
@ -799,6 +814,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
@ -853,6 +872,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
@ -907,6 +930,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },

View File

@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void)
#if defined CONFIG_PNP #if defined CONFIG_PNP
/* now that the user has specified an IO port and we haven't detected /* now that the user has specified an IO port and we haven't detected
* any devices, disable pnp support */ * any devices, disable pnp support */
isapnp = 0; if (isapnp)
pnp_unregister_driver(&scl200wdt_pnp_driver); pnp_unregister_driver(&scl200wdt_pnp_driver);
isapnp = 0;
#endif #endif
if (!request_region(io, io_len, SC1200_MODULE_NAME)) { if (!request_region(io, io_len, SC1200_MODULE_NAME)) {

View File

@ -227,7 +227,7 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
if (ret > 0) { if (ret > 0) {
/* we need an acl */ /* we need an acl */
ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS); ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
} else { } else if (ret < 0) {
cache_no_acl(inode); cache_no_acl(inode);
} }
} else { } else {

View File

@ -2419,10 +2419,23 @@ out_unlock:
return ret; return ret;
} }
static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
{
struct old_sa_defrag_extent *old, *tmp;
if (!new)
return;
list_for_each_entry_safe(old, tmp, &new->head, list) {
list_del(&old->list);
kfree(old);
}
kfree(new);
}
static void relink_file_extents(struct new_sa_defrag_extent *new) static void relink_file_extents(struct new_sa_defrag_extent *new)
{ {
struct btrfs_path *path; struct btrfs_path *path;
struct old_sa_defrag_extent *old, *tmp;
struct sa_defrag_extent_backref *backref; struct sa_defrag_extent_backref *backref;
struct sa_defrag_extent_backref *prev = NULL; struct sa_defrag_extent_backref *prev = NULL;
struct inode *inode; struct inode *inode;
@ -2465,16 +2478,11 @@ static void relink_file_extents(struct new_sa_defrag_extent *new)
kfree(prev); kfree(prev);
btrfs_free_path(path); btrfs_free_path(path);
list_for_each_entry_safe(old, tmp, &new->head, list) {
list_del(&old->list);
kfree(old);
}
out: out:
free_sa_defrag_extent(new);
atomic_dec(&root->fs_info->defrag_running); atomic_dec(&root->fs_info->defrag_running);
wake_up(&root->fs_info->transaction_wait); wake_up(&root->fs_info->transaction_wait);
kfree(new);
} }
static struct new_sa_defrag_extent * static struct new_sa_defrag_extent *
@ -2484,7 +2492,7 @@ record_old_file_extents(struct inode *inode,
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_key key; struct btrfs_key key;
struct old_sa_defrag_extent *old, *tmp; struct old_sa_defrag_extent *old;
struct new_sa_defrag_extent *new; struct new_sa_defrag_extent *new;
int ret; int ret;
@ -2532,7 +2540,7 @@ record_old_file_extents(struct inode *inode,
if (slot >= btrfs_header_nritems(l)) { if (slot >= btrfs_header_nritems(l)) {
ret = btrfs_next_leaf(root, path); ret = btrfs_next_leaf(root, path);
if (ret < 0) if (ret < 0)
goto out_free_list; goto out_free_path;
else if (ret > 0) else if (ret > 0)
break; break;
continue; continue;
@ -2561,7 +2569,7 @@ record_old_file_extents(struct inode *inode,
old = kmalloc(sizeof(*old), GFP_NOFS); old = kmalloc(sizeof(*old), GFP_NOFS);
if (!old) if (!old)
goto out_free_list; goto out_free_path;
offset = max(new->file_pos, key.offset); offset = max(new->file_pos, key.offset);
end = min(new->file_pos + new->len, key.offset + num_bytes); end = min(new->file_pos + new->len, key.offset + num_bytes);
@ -2583,15 +2591,10 @@ next:
return new; return new;
out_free_list:
list_for_each_entry_safe(old, tmp, &new->head, list) {
list_del(&old->list);
kfree(old);
}
out_free_path: out_free_path:
btrfs_free_path(path); btrfs_free_path(path);
out_kfree: out_kfree:
kfree(new); free_sa_defrag_extent(new);
return NULL; return NULL;
} }
@ -2743,8 +2746,14 @@ out:
btrfs_remove_ordered_extent(inode, ordered_extent); btrfs_remove_ordered_extent(inode, ordered_extent);
/* for snapshot-aware defrag */ /* for snapshot-aware defrag */
if (new) if (new) {
if (ret) {
free_sa_defrag_extent(new);
atomic_dec(&root->fs_info->defrag_running);
} else {
relink_file_extents(new); relink_file_extents(new);
}
}
/* once for us */ /* once for us */
btrfs_put_ordered_extent(ordered_extent); btrfs_put_ordered_extent(ordered_extent);
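
The btrfs change above centralizes cleanup of the snapshot-aware-defrag bookkeeping in free_sa_defrag_extent(), which walks the old-extent list with the "safe" iterator so each node can be freed during the walk. A minimal userspace sketch of that pattern follows; it uses a hand-rolled singly linked list rather than the kernel's list_head, so the names are illustrative only.

/* Sketch: grab the next pointer before freeing the current node, as
 * list_for_each_entry_safe() does, so the traversal survives the free(). */
#include <stdlib.h>

struct old_extent {
    struct old_extent *next;
    /* ... per-extent bookkeeping ... */
};

static void free_old_extents(struct old_extent *head)
{
    struct old_extent *old = head, *tmp;

    while (old) {
        tmp = old->next;    /* saved before the node disappears */
        free(old);
        old = tmp;
    }
}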

View File

@ -2093,7 +2093,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
if (err == -EINTR) if (err == -EINTR)
goto out; goto out_drop_write;
dentry = lookup_one_len(vol_args->name, parent, namelen); dentry = lookup_one_len(vol_args->name, parent, namelen);
if (IS_ERR(dentry)) { if (IS_ERR(dentry)) {
err = PTR_ERR(dentry); err = PTR_ERR(dentry);
@ -2235,6 +2235,7 @@ out_dput:
dput(dentry); dput(dentry);
out_unlock_dir: out_unlock_dir:
mutex_unlock(&dir->i_mutex); mutex_unlock(&dir->i_mutex);
out_drop_write:
mnt_drop_write_file(file); mnt_drop_write_file(file);
out: out:
kfree(vol_args); kfree(vol_args);

View File

@ -4623,8 +4623,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
} }
if (!access_ok(VERIFY_READ, arg->clone_sources, if (!access_ok(VERIFY_READ, arg->clone_sources,
sizeof(*arg->clone_sources * sizeof(*arg->clone_sources) *
arg->clone_sources_count))) { arg->clone_sources_count)) {
ret = -EFAULT; ret = -EFAULT;
goto out; goto out;
} }
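
The btrfs send fix above is a misplaced parenthesis: in the old code the whole multiplication sat inside sizeof(), so its type was just u64 and access_ok() checked only 8 bytes regardless of clone_sources_count. A standalone demonstration of the difference (editor's example, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t *clone_sources = NULL;          /* never dereferenced: sizeof does not evaluate */
    unsigned long count = 1000;

    printf("buggy: %zu\n", sizeof(*clone_sources * count));            /* 8 */
    printf("fixed: %zu\n", (size_t)(sizeof(*clone_sources) * count));  /* 8000 */
    return 0;
}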

View File

@ -3314,7 +3314,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
btrfs_set_token_file_extent_type(leaf, fi, btrfs_set_token_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG, BTRFS_FILE_EXTENT_REG,
&token); &token);
if (em->block_start == 0) if (em->block_start == EXTENT_MAP_HOLE)
skip_csum = true; skip_csum = true;
} }

View File

@ -4248,6 +4248,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
btrfs_emerg(fs_info, "Invalid mapping for %Lu-%Lu, got " btrfs_emerg(fs_info, "Invalid mapping for %Lu-%Lu, got "
"%Lu-%Lu\n", logical, logical+len, em->start, "%Lu-%Lu\n", logical, logical+len, em->start,
em->start + em->len); em->start + em->len);
free_extent_map(em);
return 1; return 1;
} }
@ -4429,6 +4430,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, " btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
"found %Lu-%Lu\n", logical, em->start, "found %Lu-%Lu\n", logical, em->start,
em->start + em->len); em->start + em->len);
free_extent_map(em);
return -EINVAL; return -EINVAL;
} }

View File

@ -44,7 +44,7 @@
static inline sector_t normalize(sector_t s, int base) static inline sector_t normalize(sector_t s, int base)
{ {
sector_t tmp = s; /* Since do_div modifies its argument */ sector_t tmp = s; /* Since do_div modifies its argument */
return s - do_div(tmp, base); return s - sector_div(tmp, base);
} }
static inline sector_t normalize_up(sector_t s, int base) static inline sector_t normalize_up(sector_t s, int base)
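
The blocklayout hunk only swaps do_div() for sector_div(): do_div() expects a 64-bit dividend, while sector_t can be 32-bit, and sector_div() copes with either width. For reference, this is the arithmetic the two helpers implement, written as a plain userspace sketch with explicit widths (illustrative, not the pNFS code):

#include <stdio.h>
#include <stdint.h>

static uint64_t normalize_down(uint64_t s, uint32_t base)
{
    return s - (s % base);                       /* round down to a multiple of base */
}

static uint64_t normalize_up(uint64_t s, uint32_t base)
{
    return normalize_down(s + base - 1, base);   /* round up to a multiple of base */
}

int main(void)
{
    printf("%llu %llu\n",
           (unsigned long long)normalize_down(1000, 512),
           (unsigned long long)normalize_up(1000, 512));   /* 512 1024 */
    return 0;
}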

View File

@ -4222,8 +4222,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
dprintk("%s ERROR %d, Reset session\n", __func__, dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status); task->tk_status);
nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
task->tk_status = 0; goto wait_on_recovery;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */ #endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY: case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY); nfs_inc_server_stats(server, NFSIOS_DELAY);

View File

@ -128,6 +128,13 @@ nfsd_reply_cache_alloc(void)
return rp; return rp;
} }
static void
nfsd_reply_cache_unhash(struct svc_cacherep *rp)
{
hlist_del_init(&rp->c_hash);
list_del_init(&rp->c_lru);
}
static void static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp) nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{ {
@ -403,7 +410,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
if (nfsd_cache_entry_expired(rp) || if (nfsd_cache_entry_expired(rp) ||
num_drc_entries >= max_drc_entries) { num_drc_entries >= max_drc_entries) {
lru_put_end(rp); nfsd_reply_cache_unhash(rp);
prune_cache_entries(); prune_cache_entries();
goto search_cache; goto search_cache;
} }

View File

@ -216,6 +216,8 @@ xfs_growfs_data_private(
*/ */
nfree = 0; nfree = 0;
for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
__be32 *agfl_bno;
/* /*
* AG freespace header block * AG freespace header block
*/ */
@ -275,8 +277,10 @@ xfs_growfs_data_private(
agfl->agfl_seqno = cpu_to_be32(agno); agfl->agfl_seqno = cpu_to_be32(agno);
uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid); uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
} }
agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
error = xfs_bwrite(bp); error = xfs_bwrite(bp);
xfs_buf_relse(bp); xfs_buf_relse(bp);

View File

@ -409,7 +409,8 @@ xfs_attrlist_by_handle(
return -XFS_ERROR(EPERM); return -XFS_ERROR(EPERM);
if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
if (al_hreq.buflen > XATTR_LIST_MAX) if (al_hreq.buflen < sizeof(struct attrlist) ||
al_hreq.buflen > XATTR_LIST_MAX)
return -XFS_ERROR(EINVAL); return -XFS_ERROR(EINVAL);
/* /*

View File

@ -359,7 +359,8 @@ xfs_compat_attrlist_by_handle(
if (copy_from_user(&al_hreq, arg, if (copy_from_user(&al_hreq, arg,
sizeof(compat_xfs_fsop_attrlist_handlereq_t))) sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
if (al_hreq.buflen > XATTR_LIST_MAX) if (al_hreq.buflen < sizeof(struct attrlist) ||
al_hreq.buflen > XATTR_LIST_MAX)
return -XFS_ERROR(EINVAL); return -XFS_ERROR(EINVAL);
/* /*

View File

@ -28,8 +28,6 @@
#endif #endif
#define uninitialized_var(x) x
#ifndef __HAVE_BUILTIN_BSWAP16__ #ifndef __HAVE_BUILTIN_BSWAP16__
/* icc has this, but it's called _bswap16 */ /* icc has this, but it's called _bswap16 */
#define __HAVE_BUILTIN_BSWAP16__ #define __HAVE_BUILTIN_BSWAP16__

View File

@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
extern size_t vmcoreinfo_size; extern size_t vmcoreinfo_size;
extern size_t vmcoreinfo_max_size; extern size_t vmcoreinfo_max_size;
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base); unsigned long long *crash_size, unsigned long long *crash_base);
int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,

View File

@ -103,7 +103,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
{ {
struct snd_sg_buf *sgbuf = dmab->private_data; struct snd_sg_buf *sgbuf = dmab->private_data;
dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
addr &= PAGE_MASK; addr &= ~((dma_addr_t)PAGE_SIZE - 1);
return addr + offset % PAGE_SIZE; return addr + offset % PAGE_SIZE;
} }
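
The memalloc fix above matters because PAGE_MASK is built from an unsigned long: on a 32-bit kernel with a 64-bit dma_addr_t, "addr &= PAGE_MASK" silently clears the upper address bits. An editor's demonstration, with a 32-bit mask standing in for the 32-bit unsigned long:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

int main(void)
{
    uint64_t addr        = 0x1234567890ULL;               /* bus address above 4 GiB */
    uint32_t narrow_mask = ~(PAGE_SIZE - 1);              /* what PAGE_MASK is on 32-bit */
    uint64_t wide_mask   = ~((uint64_t)PAGE_SIZE - 1);    /* what the fix builds */

    printf("broken: %#llx\n", (unsigned long long)(addr & narrow_mask));  /* 0x34567000 */
    printf("fixed:  %#llx\n", (unsigned long long)(addr & wide_mask));    /* 0x1234567000 */
    return 0;
}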

View File

@ -30,7 +30,7 @@
#include <sound/compress_params.h> #include <sound/compress_params.h>
#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1) #define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
/** /**
* struct snd_compressed_buffer: compressed buffer * struct snd_compressed_buffer: compressed buffer
* @fragment_size: size of buffer fragment in bytes * @fragment_size: size of buffer fragment in bytes
@ -67,8 +67,8 @@ struct snd_compr_params {
struct snd_compr_tstamp { struct snd_compr_tstamp {
__u32 byte_offset; __u32 byte_offset;
__u32 copied_total; __u32 copied_total;
snd_pcm_uframes_t pcm_frames; __u32 pcm_frames;
snd_pcm_uframes_t pcm_io_frames; __u32 pcm_io_frames;
__u32 sampling_rate; __u32 sampling_rate;
}; };
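
The compress_offload change above replaces snd_pcm_uframes_t (an unsigned long) with __u32 in a UAPI struct, because the long changes size between 32- and 64-bit userspace and gives the same ioctl two layouts. A stripped-down illustration on an LP64 host (the real struct has more fields; these names are not the kernel's):

#include <stdio.h>
#include <stdint.h>

struct tstamp_old { uint32_t byte_offset; unsigned long pcm_frames; };
struct tstamp_new { uint32_t byte_offset; uint32_t pcm_frames; };

int main(void)
{
    printf("old: %zu bytes, new: %zu bytes\n",
           sizeof(struct tstamp_old), sizeof(struct tstamp_new));
    /* LP64 prints "old: 16 bytes, new: 8 bytes"; a 32-bit build would give
     * 8 and 8, which is exactly the compat mismatch the __u32 fix removes. */
    return 0;
}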

View File

@ -287,7 +287,7 @@ again:
put_page(page); put_page(page);
/* serialize against __split_huge_page_splitting() */ /* serialize against __split_huge_page_splitting() */
local_irq_disable(); local_irq_disable();
if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) { if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
page_head = compound_head(page); page_head = compound_head(page);
/* /*
* page_head is valid pointer but we must pin * page_head is valid pointer but we must pin

View File

@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size; size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;
/* Location of the reserved area for the crash kernel */ /* Location of the reserved area for the crash kernel */
struct resource crashk_res = { struct resource crashk_res = {
.name = "Crash kernel", .name = "Crash kernel",
@ -1678,6 +1681,7 @@ int kernel_kexec(void)
} else } else
#endif #endif
{ {
kexec_in_progress = true;
kernel_restart_prepare(NULL); kernel_restart_prepare(NULL);
printk(KERN_EMERG "Starting new kernel\n"); printk(KERN_EMERG "Starting new kernel\n");
machine_shutdown(); machine_shutdown();
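
The kexec hunks export a kexec_in_progress flag and raise it before machine_shutdown(). A hypothetical consumer is sketched below to show the intended usage pattern only; no specific driver is implied by this patch, and mydrv_shutdown() is an invented name. The flag is defined locally here so the sketch compiles on its own.

#include <stdbool.h>
#include <stdio.h>

bool kexec_in_progress;                 /* in the kernel: the flag added above */

static void mydrv_shutdown(void)        /* hypothetical driver shutdown hook */
{
    if (kexec_in_progress) {
        puts("kexec: skip slow hardware quiesce");
        return;                         /* the new kernel re-initializes the device */
    }
    puts("normal reboot: full quiesce/drain");
}

int main(void)
{
    mydrv_shutdown();                   /* normal reboot path */
    kexec_in_progress = true;           /* what kernel_kexec() now does */
    mydrv_shutdown();                   /* kexec path */
    return 0;
}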

View File

@ -224,6 +224,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg", SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg",
atomic_read(&cfs_rq->tg->runnable_avg)); atomic_read(&cfs_rq->tg->runnable_avg));
#endif #endif
#ifdef CONFIG_CFS_BANDWIDTH
SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
cfs_rq->tg->cfs_bandwidth.timer_active);
SEQ_printf(m, " .%-30s: %d\n", "throttled",
cfs_rq->throttled);
SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
cfs_rq->throttle_count);
#endif
print_cfs_group_stats(m, cpu, cfs_rq->tg); print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif #endif

View File

@ -2288,6 +2288,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq->throttled_clock = rq->clock; cfs_rq->throttled_clock = rq->clock;
raw_spin_lock(&cfs_b->lock); raw_spin_lock(&cfs_b->lock);
list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
if (!cfs_b->timer_active)
__start_cfs_bandwidth(cfs_b);
raw_spin_unlock(&cfs_b->lock); raw_spin_unlock(&cfs_b->lock);
} }

View File

@ -1071,9 +1071,6 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
struct udp_sock *up = udp_sk(sk); struct udp_sock *up = udp_sk(sk);
int ret; int ret;
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
if (flags & MSG_SENDPAGE_NOTLAST) if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE; flags |= MSG_MORE;

View File

@ -1714,8 +1714,6 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
} }
} }
t = rtnl_dereference(ip6n->tnls_wc[0]);
unregister_netdevice_queue(t->dev, &list);
unregister_netdevice_many(&list); unregister_netdevice_many(&list);
} }

View File

@ -2356,8 +2356,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
if (sdata->vif.type != NL80211_IFTYPE_STATION && if (sdata->vif.type != NL80211_IFTYPE_STATION)
sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))

View File

@ -864,7 +864,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
u16 sc; u16 sc;
u8 tid, ack_policy; u8 tid, ack_policy;
if (!ieee80211_is_data_qos(hdr->frame_control)) if (!ieee80211_is_data_qos(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
goto dont_reorder; goto dont_reorder;
/* /*

View File

@ -53,6 +53,7 @@
#include <net/ip.h> /* for local_port_range[] */ #include <net/ip.h> /* for local_port_range[] */
#include <net/sock.h> #include <net/sock.h>
#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
#include <net/inet_connection_sock.h>
#include <net/net_namespace.h> #include <net/net_namespace.h>
#include <net/netlabel.h> #include <net/netlabel.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
@ -3736,6 +3737,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
return 0; return 0;
} }
/**
* selinux_conn_sid - Determine the child socket label for a connection
* @sk_sid: the parent socket's SID
* @skb_sid: the packet's SID
* @conn_sid: the resulting connection SID
*
* If @skb_sid is valid then the user:role:type information from @sk_sid is
* combined with the MLS information from @skb_sid in order to create
* @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
* of @sk_sid. Returns zero on success, negative values on failure.
*
*/
static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
{
int err = 0;
if (skb_sid != SECSID_NULL)
err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
else
*conn_sid = sk_sid;
return err;
}
/* socket security operations */ /* socket security operations */
static int socket_sockcreate_sid(const struct task_security_struct *tsec, static int socket_sockcreate_sid(const struct task_security_struct *tsec,
@ -4342,7 +4367,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct sk_security_struct *sksec = sk->sk_security; struct sk_security_struct *sksec = sk->sk_security;
int err; int err;
u16 family = sk->sk_family; u16 family = sk->sk_family;
u32 newsid; u32 connsid;
u32 peersid; u32 peersid;
/* handle mapped IPv4 packets arriving via IPv6 sockets */ /* handle mapped IPv4 packets arriving via IPv6 sockets */
@ -4352,16 +4377,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
err = selinux_skb_peerlbl_sid(skb, family, &peersid); err = selinux_skb_peerlbl_sid(skb, family, &peersid);
if (err) if (err)
return err; return err;
if (peersid == SECSID_NULL) { err = selinux_conn_sid(sksec->sid, peersid, &connsid);
req->secid = sksec->sid;
req->peer_secid = SECSID_NULL;
} else {
err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
if (err) if (err)
return err; return err;
req->secid = newsid; req->secid = connsid;
req->peer_secid = peersid; req->peer_secid = peersid;
}
return selinux_netlbl_inet_conn_request(req, family); return selinux_netlbl_inet_conn_request(req, family);
} }
@ -4621,6 +4641,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum,
static unsigned int selinux_ip_output(struct sk_buff *skb, static unsigned int selinux_ip_output(struct sk_buff *skb,
u16 family) u16 family)
{ {
struct sock *sk;
u32 sid; u32 sid;
if (!netlbl_enabled()) if (!netlbl_enabled())
@ -4629,8 +4650,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
/* we do this in the LOCAL_OUT path and not the POST_ROUTING path /* we do this in the LOCAL_OUT path and not the POST_ROUTING path
* because we want to make sure we apply the necessary labeling * because we want to make sure we apply the necessary labeling
* before IPsec is applied so we can leverage AH protection */ * before IPsec is applied so we can leverage AH protection */
if (skb->sk) { sk = skb->sk;
struct sk_security_struct *sksec = skb->sk->sk_security; if (sk) {
struct sk_security_struct *sksec;
if (sk->sk_state == TCP_LISTEN)
/* if the socket is in the listening state then this
* packet is a SYN-ACK packet which means it needs to
* be labeled based on the connection/request_sock and
* not the parent socket. unfortunately, we can't
* lookup the request_sock yet as it isn't queued on
* the parent socket until after the SYN-ACK is sent.
* the "solution" is to simply pass the packet as-is
* as any IP option based labeling should be copied
* from the initial connection request (in the IP
* layer). it is far from ideal, but until we get a
* security label in the packet itself this is the
* best we can do. */
return NF_ACCEPT;
/* standard practice, label using the parent socket */
sksec = sk->sk_security;
sid = sksec->sid; sid = sksec->sid;
} else } else
sid = SECINITSID_KERNEL; sid = SECINITSID_KERNEL;
@ -4715,12 +4755,12 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
if (!secmark_active && !peerlbl_active) if (!secmark_active && !peerlbl_active)
return NF_ACCEPT; return NF_ACCEPT;
/* if the packet is being forwarded then get the peer label from the
* packet itself; otherwise check to see if it is from a local
* application or the kernel, if from an application get the peer label
* from the sending socket, otherwise use the kernel's sid */
sk = skb->sk; sk = skb->sk;
if (sk == NULL) { if (sk == NULL) {
/* Without an associated socket the packet is either coming
* from the kernel or it is being forwarded; check the packet
* to determine which and if the packet is being forwarded
* query the packet directly to determine the security label. */
if (skb->skb_iif) { if (skb->skb_iif) {
secmark_perm = PACKET__FORWARD_OUT; secmark_perm = PACKET__FORWARD_OUT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@ -4729,7 +4769,26 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
secmark_perm = PACKET__SEND; secmark_perm = PACKET__SEND;
peer_sid = SECINITSID_KERNEL; peer_sid = SECINITSID_KERNEL;
} }
} else if (sk->sk_state == TCP_LISTEN) {
/* Locally generated packet but the associated socket is in the
* listening state which means this is a SYN-ACK packet. In
* this particular case the correct security label is assigned
* to the connection/request_sock but unfortunately we can't
* query the request_sock as it isn't queued on the parent
* socket until after the SYN-ACK packet is sent; the only
* viable choice is to regenerate the label like we do in
* selinux_inet_conn_request(). See also selinux_ip_output()
* for similar problems. */
u32 skb_sid;
struct sk_security_struct *sksec = sk->sk_security;
if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
return NF_DROP;
if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
return NF_DROP;
secmark_perm = PACKET__SEND;
} else { } else {
/* Locally generated packet, fetch the security label from the
* associated socket. */
struct sk_security_struct *sksec = sk->sk_security; struct sk_security_struct *sksec = sk->sk_security;
peer_sid = sksec->sid; peer_sid = sksec->sid;
secmark_perm = PACKET__SEND; secmark_perm = PACKET__SEND;

View File

@ -469,6 +469,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx)
memset(path, 0, sizeof(*path)); memset(path, 0, sizeof(*path));
} }
/* return a DAC if paired to the given pin by codec driver */
static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin)
{
struct hda_gen_spec *spec = codec->spec;
const hda_nid_t *list = spec->preferred_dacs;
if (!list)
return 0;
for (; *list; list += 2)
if (*list == pin)
return list[1];
return 0;
}
/* look for an empty DAC slot */ /* look for an empty DAC slot */
static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin, static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin,
bool is_digital) bool is_digital)
@ -1135,6 +1149,13 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
continue; continue;
} }
dacs[i] = get_preferred_dac(codec, pin);
if (dacs[i]) {
if (is_dac_already_used(codec, dacs[i]))
badness += bad->shared_primary;
}
if (!dacs[i])
dacs[i] = look_for_dac(codec, pin, false); dacs[i] = look_for_dac(codec, pin, false);
if (!dacs[i] && !i) { if (!dacs[i] && !i) {
/* try to steal the DAC of surrounds for the front */ /* try to steal the DAC of surrounds for the front */
@ -4284,6 +4305,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
return AC_PWRST_D3; return AC_PWRST_D3;
} }
/* mute all aamix inputs initially; parse up to the first leaves */
static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
{
int i, nums;
const hda_nid_t *conn;
bool has_amp;
nums = snd_hda_get_conn_list(codec, mix, &conn);
has_amp = nid_has_mute(codec, mix, HDA_INPUT);
for (i = 0; i < nums; i++) {
if (has_amp)
snd_hda_codec_amp_stereo(codec, mix,
HDA_INPUT, i,
0xff, HDA_AMP_MUTE);
else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
snd_hda_codec_amp_stereo(codec, conn[i],
HDA_OUTPUT, 0,
0xff, HDA_AMP_MUTE);
}
}
/* /*
* Parse the given BIOS configuration and set up the hda_gen_spec * Parse the given BIOS configuration and set up the hda_gen_spec
@ -4422,6 +4463,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
} }
} }
/* mute all aamix input initially */
if (spec->mixer_nid)
mute_all_mixer_nid(codec, spec->mixer_nid);
dig_only: dig_only:
parse_digital(codec); parse_digital(codec);

View File

@ -241,6 +241,9 @@ struct hda_gen_spec {
const struct badness_table *main_out_badness; const struct badness_table *main_out_badness;
const struct badness_table *extra_out_badness; const struct badness_table *extra_out_badness;
/* preferred pin/DAC pairs; an array of paired NIDs */
const hda_nid_t *preferred_dacs;
/* loopback mixing mode */ /* loopback mixing mode */
bool aamix_mode; bool aamix_mode;

View File

@ -1227,6 +1227,14 @@ static int ad1986a_parse_auto_config(struct hda_codec *codec)
{ {
int err; int err;
struct ad198x_spec *spec; struct ad198x_spec *spec;
static hda_nid_t preferred_pairs[] = {
0x1a, 0x03,
0x1b, 0x03,
0x1c, 0x04,
0x1d, 0x05,
0x1e, 0x03,
0
};
err = alloc_ad_spec(codec); err = alloc_ad_spec(codec);
if (err < 0) if (err < 0)
@ -1247,6 +1255,8 @@ static int ad1986a_parse_auto_config(struct hda_codec *codec)
* So, let's disable the shared stream. * So, let's disable the shared stream.
*/ */
spec->gen.multiout.no_share_stream = 1; spec->gen.multiout.no_share_stream = 1;
/* give fixed DAC/pin pairs */
spec->gen.preferred_dacs = preferred_pairs;
snd_hda_pick_fixup(codec, NULL, ad1986a_fixup_tbl, ad1986a_fixups); snd_hda_pick_fixup(codec, NULL, ad1986a_fixup_tbl, ad1986a_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);

View File

@ -2209,8 +2209,9 @@ static int simple_playback_build_controls(struct hda_codec *codec)
int err; int err;
per_cvt = get_cvt(spec, 0); per_cvt = get_cvt(spec, 0);
err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid, err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid,
per_cvt->cvt_nid); per_cvt->cvt_nid,
HDA_PCM_TYPE_HDMI);
if (err < 0) if (err < 0)
return err; return err;
return simple_hdmi_build_jack(codec, 0); return simple_hdmi_build_jack(codec, 0);

View File

@ -1904,6 +1904,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
int r; int r;
struct kvm_vcpu *vcpu, *v; struct kvm_vcpu *vcpu, *v;
if (id >= KVM_MAX_VCPUS)
return -EINVAL;
vcpu = kvm_arch_vcpu_create(kvm, id); vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu)) if (IS_ERR(vcpu))
return PTR_ERR(vcpu); return PTR_ERR(vcpu);