Updated from Linux LTS 3.10.20 to 3.10.21
This commit is contained in:
parent
d67348cc45
commit
8de512f759
2
Makefile
2
Makefile
@ -1,6 +1,6 @@
|
|||||||
VERSION = 3
|
VERSION = 3
|
||||||
PATCHLEVEL = 10
|
PATCHLEVEL = 10
|
||||||
SUBLEVEL = 20
|
SUBLEVEL = 21
|
||||||
EXTRAVERSION =
|
EXTRAVERSION =
|
||||||
NAME = TOSSUG Baby Fish
|
NAME = TOSSUG Baby Fish
|
||||||
|
|
||||||
|
@ -313,6 +313,17 @@ out:
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
|
||||||
|
{
|
||||||
|
if (!is_vmalloc_addr(kaddr)) {
|
||||||
|
BUG_ON(!virt_addr_valid(kaddr));
|
||||||
|
return __pa(kaddr);
|
||||||
|
} else {
|
||||||
|
return page_to_phys(vmalloc_to_page(kaddr)) +
|
||||||
|
offset_in_page(kaddr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
|
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
|
||||||
* @from: The virtual kernel start address of the range
|
* @from: The virtual kernel start address of the range
|
||||||
@ -324,16 +335,27 @@ out:
|
|||||||
*/
|
*/
|
||||||
int create_hyp_mappings(void *from, void *to)
|
int create_hyp_mappings(void *from, void *to)
|
||||||
{
|
{
|
||||||
unsigned long phys_addr = virt_to_phys(from);
|
phys_addr_t phys_addr;
|
||||||
|
unsigned long virt_addr;
|
||||||
unsigned long start = KERN_TO_HYP((unsigned long)from);
|
unsigned long start = KERN_TO_HYP((unsigned long)from);
|
||||||
unsigned long end = KERN_TO_HYP((unsigned long)to);
|
unsigned long end = KERN_TO_HYP((unsigned long)to);
|
||||||
|
|
||||||
/* Check for a valid kernel memory mapping */
|
start = start & PAGE_MASK;
|
||||||
if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
|
end = PAGE_ALIGN(end);
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
return __create_hyp_mappings(hyp_pgd, start, end,
|
for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
|
||||||
__phys_to_pfn(phys_addr), PAGE_HYP);
|
int err;
|
||||||
|
|
||||||
|
phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
|
||||||
|
err = __create_hyp_mappings(hyp_pgd, virt_addr,
|
||||||
|
virt_addr + PAGE_SIZE,
|
||||||
|
__phys_to_pfn(phys_addr),
|
||||||
|
PAGE_HYP);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -3,6 +3,7 @@
|
|||||||
|
|
||||||
#include <asm/page.h> /* for __va, __pa */
|
#include <asm/page.h> /* for __va, __pa */
|
||||||
#include <arch/io.h>
|
#include <arch/io.h>
|
||||||
|
#include <asm-generic/iomap.h>
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
|
|
||||||
struct cris_io_operations
|
struct cris_io_operations
|
||||||
|
@ -319,7 +319,7 @@ struct thread_struct {
|
|||||||
regs->loadrs = 0; \
|
regs->loadrs = 0; \
|
||||||
regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
|
regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
|
||||||
regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
|
regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
|
||||||
if (unlikely(!get_dumpable(current->mm))) { \
|
if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
|
||||||
/* \
|
/* \
|
||||||
* Zap scratch regs to avoid leaking bits between processes with different \
|
* Zap scratch regs to avoid leaking bits between processes with different \
|
||||||
* uid/privileges. \
|
* uid/privileges. \
|
||||||
|
@ -454,7 +454,15 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
|
|||||||
if (copy_vsx_to_user(&frame->mc_vsregs, current))
|
if (copy_vsx_to_user(&frame->mc_vsregs, current))
|
||||||
return 1;
|
return 1;
|
||||||
msr |= MSR_VSX;
|
msr |= MSR_VSX;
|
||||||
}
|
} else if (!ctx_has_vsx_region)
|
||||||
|
/*
|
||||||
|
* With a small context structure we can't hold the VSX
|
||||||
|
* registers, hence clear the MSR value to indicate the state
|
||||||
|
* was not saved.
|
||||||
|
*/
|
||||||
|
msr &= ~MSR_VSX;
|
||||||
|
|
||||||
|
|
||||||
#endif /* CONFIG_VSX */
|
#endif /* CONFIG_VSX */
|
||||||
#ifdef CONFIG_SPE
|
#ifdef CONFIG_SPE
|
||||||
/* save spe registers */
|
/* save spe registers */
|
||||||
|
@ -1530,12 +1530,12 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
|
|||||||
|
|
||||||
dn = dev->of_node;
|
dn = dev->of_node;
|
||||||
if (!dn) {
|
if (!dn) {
|
||||||
strcat(buf, "\n");
|
strcpy(buf, "\n");
|
||||||
return strlen(buf);
|
return strlen(buf);
|
||||||
}
|
}
|
||||||
cp = of_get_property(dn, "compatible", NULL);
|
cp = of_get_property(dn, "compatible", NULL);
|
||||||
if (!cp) {
|
if (!cp) {
|
||||||
strcat(buf, "\n");
|
strcpy(buf, "\n");
|
||||||
return strlen(buf);
|
return strlen(buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr,
|
|||||||
slice = GET_HIGH_SLICE_INDEX(addr);
|
slice = GET_HIGH_SLICE_INDEX(addr);
|
||||||
*boundary_addr = (slice + end) ?
|
*boundary_addr = (slice + end) ?
|
||||||
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
|
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
|
||||||
return !!(available.high_slices & (1u << slice));
|
return !!(available.high_slices & (1ul << slice));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -57,5 +57,5 @@ config PPC_MPC5200_BUGFIX
|
|||||||
|
|
||||||
config PPC_MPC5200_LPBFIFO
|
config PPC_MPC5200_LPBFIFO
|
||||||
tristate "MPC5200 LocalPlus bus FIFO driver"
|
tristate "MPC5200 LocalPlus bus FIFO driver"
|
||||||
depends on PPC_MPC52xx
|
depends on PPC_MPC52xx && PPC_BESTCOMM
|
||||||
select PPC_BESTCOMM_GEN_BD
|
select PPC_BESTCOMM_GEN_BD
|
||||||
|
@ -151,13 +151,23 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
|
|||||||
rid_end = pe->rid + 1;
|
rid_end = pe->rid + 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Associate PE in PELT */
|
/*
|
||||||
|
* Associate PE in PELT. We need add the PE into the
|
||||||
|
* corresponding PELT-V as well. Otherwise, the error
|
||||||
|
* originated from the PE might contribute to other
|
||||||
|
* PEs.
|
||||||
|
*/
|
||||||
rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
|
rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
|
||||||
bcomp, dcomp, fcomp, OPAL_MAP_PE);
|
bcomp, dcomp, fcomp, OPAL_MAP_PE);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
|
pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
|
||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
|
||||||
|
pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
|
||||||
|
if (rc)
|
||||||
|
pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc);
|
||||||
opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
|
opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
|
||||||
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
|
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
|
||||||
|
|
||||||
|
@ -35,7 +35,6 @@ static u8 *ctrblk;
|
|||||||
static char keylen_flag;
|
static char keylen_flag;
|
||||||
|
|
||||||
struct s390_aes_ctx {
|
struct s390_aes_ctx {
|
||||||
u8 iv[AES_BLOCK_SIZE];
|
|
||||||
u8 key[AES_MAX_KEY_SIZE];
|
u8 key[AES_MAX_KEY_SIZE];
|
||||||
long enc;
|
long enc;
|
||||||
long dec;
|
long dec;
|
||||||
@ -441,30 +440,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
|||||||
return aes_set_key(tfm, in_key, key_len);
|
return aes_set_key(tfm, in_key, key_len);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
|
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
|
||||||
struct blkcipher_walk *walk)
|
struct blkcipher_walk *walk)
|
||||||
{
|
{
|
||||||
|
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||||
int ret = blkcipher_walk_virt(desc, walk);
|
int ret = blkcipher_walk_virt(desc, walk);
|
||||||
unsigned int nbytes = walk->nbytes;
|
unsigned int nbytes = walk->nbytes;
|
||||||
|
struct {
|
||||||
|
u8 iv[AES_BLOCK_SIZE];
|
||||||
|
u8 key[AES_MAX_KEY_SIZE];
|
||||||
|
} param;
|
||||||
|
|
||||||
if (!nbytes)
|
if (!nbytes)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
memcpy(param, walk->iv, AES_BLOCK_SIZE);
|
memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
|
||||||
|
memcpy(param.key, sctx->key, sctx->key_len);
|
||||||
do {
|
do {
|
||||||
/* only use complete blocks */
|
/* only use complete blocks */
|
||||||
unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
|
unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
|
||||||
u8 *out = walk->dst.virt.addr;
|
u8 *out = walk->dst.virt.addr;
|
||||||
u8 *in = walk->src.virt.addr;
|
u8 *in = walk->src.virt.addr;
|
||||||
|
|
||||||
ret = crypt_s390_kmc(func, param, out, in, n);
|
ret = crypt_s390_kmc(func, ¶m, out, in, n);
|
||||||
if (ret < 0 || ret != n)
|
if (ret < 0 || ret != n)
|
||||||
return -EIO;
|
return -EIO;
|
||||||
|
|
||||||
nbytes &= AES_BLOCK_SIZE - 1;
|
nbytes &= AES_BLOCK_SIZE - 1;
|
||||||
ret = blkcipher_walk_done(desc, walk, nbytes);
|
ret = blkcipher_walk_done(desc, walk, nbytes);
|
||||||
} while ((nbytes = walk->nbytes));
|
} while ((nbytes = walk->nbytes));
|
||||||
memcpy(walk->iv, param, AES_BLOCK_SIZE);
|
memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
return ret;
|
return ret;
|
||||||
@ -481,7 +486,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
|
|||||||
return fallback_blk_enc(desc, dst, src, nbytes);
|
return fallback_blk_enc(desc, dst, src, nbytes);
|
||||||
|
|
||||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||||
return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
|
return cbc_aes_crypt(desc, sctx->enc, &walk);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
|
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
|
||||||
@ -495,7 +500,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
|
|||||||
return fallback_blk_dec(desc, dst, src, nbytes);
|
return fallback_blk_dec(desc, dst, src, nbytes);
|
||||||
|
|
||||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||||
return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
|
return cbc_aes_crypt(desc, sctx->dec, &walk);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct crypto_alg cbc_aes_alg = {
|
static struct crypto_alg cbc_aes_alg = {
|
||||||
|
@ -933,7 +933,7 @@ static ssize_t show_idle_count(struct device *dev,
|
|||||||
idle_count = ACCESS_ONCE(idle->idle_count);
|
idle_count = ACCESS_ONCE(idle->idle_count);
|
||||||
if (ACCESS_ONCE(idle->clock_idle_enter))
|
if (ACCESS_ONCE(idle->clock_idle_enter))
|
||||||
idle_count++;
|
idle_count++;
|
||||||
} while ((sequence & 1) || (idle->sequence != sequence));
|
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
|
||||||
return sprintf(buf, "%llu\n", idle_count);
|
return sprintf(buf, "%llu\n", idle_count);
|
||||||
}
|
}
|
||||||
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
|
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
|
||||||
@ -951,7 +951,7 @@ static ssize_t show_idle_time(struct device *dev,
|
|||||||
idle_time = ACCESS_ONCE(idle->idle_time);
|
idle_time = ACCESS_ONCE(idle->idle_time);
|
||||||
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
|
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
|
||||||
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
|
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
|
||||||
} while ((sequence & 1) || (idle->sequence != sequence));
|
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
|
||||||
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
|
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
|
||||||
return sprintf(buf, "%llu\n", idle_time >> 12);
|
return sprintf(buf, "%llu\n", idle_time >> 12);
|
||||||
}
|
}
|
||||||
|
@ -190,7 +190,7 @@ cputime64_t s390_get_idle_time(int cpu)
|
|||||||
sequence = ACCESS_ONCE(idle->sequence);
|
sequence = ACCESS_ONCE(idle->sequence);
|
||||||
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
|
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
|
||||||
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
|
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
|
||||||
} while ((sequence & 1) || (idle->sequence != sequence));
|
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
|
||||||
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
|
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -248,6 +248,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int is_ftrace_caller(unsigned long ip)
|
||||||
|
{
|
||||||
|
if (ip == (unsigned long)(&ftrace_call) ||
|
||||||
|
ip == (unsigned long)(&ftrace_regs_call))
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* A breakpoint was added to the code address we are about to
|
* A breakpoint was added to the code address we are about to
|
||||||
* modify, and this is the handle that will just skip over it.
|
* modify, and this is the handle that will just skip over it.
|
||||||
@ -257,10 +266,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
|
|||||||
*/
|
*/
|
||||||
int ftrace_int3_handler(struct pt_regs *regs)
|
int ftrace_int3_handler(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
|
unsigned long ip;
|
||||||
|
|
||||||
if (WARN_ON_ONCE(!regs))
|
if (WARN_ON_ONCE(!regs))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (!ftrace_location(regs->ip - 1))
|
ip = regs->ip - 1;
|
||||||
|
if (!ftrace_location(ip) && !is_ftrace_caller(ip))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
regs->ip += MCOUNT_INSN_SIZE - 1;
|
regs->ip += MCOUNT_INSN_SIZE - 1;
|
||||||
|
@ -430,7 +430,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
|
|||||||
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
|
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
|
||||||
|
|
||||||
if (request_firmware(&fw, (const char *)fw_name, device)) {
|
if (request_firmware(&fw, (const char *)fw_name, device)) {
|
||||||
pr_err("failed to load file %s\n", fw_name);
|
pr_debug("failed to load file %s\n", fw_name);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -378,9 +378,9 @@ static void amd_e400_idle(void)
|
|||||||
* The switch back from broadcast mode needs to be
|
* The switch back from broadcast mode needs to be
|
||||||
* called with interrupts disabled.
|
* called with interrupts disabled.
|
||||||
*/
|
*/
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
|
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
} else
|
} else
|
||||||
default_idle();
|
default_idle();
|
||||||
}
|
}
|
||||||
|
@ -4207,7 +4207,10 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
|
|||||||
case OpMem8:
|
case OpMem8:
|
||||||
ctxt->memop.bytes = 1;
|
ctxt->memop.bytes = 1;
|
||||||
if (ctxt->memop.type == OP_REG) {
|
if (ctxt->memop.type == OP_REG) {
|
||||||
ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1);
|
int highbyte_regs = ctxt->rex_prefix == 0;
|
||||||
|
|
||||||
|
ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm,
|
||||||
|
highbyte_regs);
|
||||||
fetch_register_operand(&ctxt->memop);
|
fetch_register_operand(&ctxt->memop);
|
||||||
}
|
}
|
||||||
goto mem_common;
|
goto mem_common;
|
||||||
|
@ -2229,6 +2229,7 @@ void blk_start_request(struct request *req)
|
|||||||
if (unlikely(blk_bidi_rq(req)))
|
if (unlikely(blk_bidi_rq(req)))
|
||||||
req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
|
req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
|
||||||
|
|
||||||
|
BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
|
||||||
blk_add_timer(req);
|
blk_add_timer(req);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(blk_start_request);
|
EXPORT_SYMBOL(blk_start_request);
|
||||||
|
@ -144,6 +144,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
|
|||||||
lim->discard_zeroes_data = 1;
|
lim->discard_zeroes_data = 1;
|
||||||
lim->max_segments = USHRT_MAX;
|
lim->max_segments = USHRT_MAX;
|
||||||
lim->max_hw_sectors = UINT_MAX;
|
lim->max_hw_sectors = UINT_MAX;
|
||||||
|
lim->max_segment_size = UINT_MAX;
|
||||||
lim->max_sectors = UINT_MAX;
|
lim->max_sectors = UINT_MAX;
|
||||||
lim->max_write_same_sectors = UINT_MAX;
|
lim->max_write_same_sectors = UINT_MAX;
|
||||||
}
|
}
|
||||||
|
@ -90,8 +90,8 @@ static void blk_rq_timed_out(struct request *req)
|
|||||||
__blk_complete_request(req);
|
__blk_complete_request(req);
|
||||||
break;
|
break;
|
||||||
case BLK_EH_RESET_TIMER:
|
case BLK_EH_RESET_TIMER:
|
||||||
blk_clear_rq_complete(req);
|
|
||||||
blk_add_timer(req);
|
blk_add_timer(req);
|
||||||
|
blk_clear_rq_complete(req);
|
||||||
break;
|
break;
|
||||||
case BLK_EH_NOT_HANDLED:
|
case BLK_EH_NOT_HANDLED:
|
||||||
/*
|
/*
|
||||||
@ -173,7 +173,6 @@ void blk_add_timer(struct request *req)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
BUG_ON(!list_empty(&req->timeout_list));
|
BUG_ON(!list_empty(&req->timeout_list));
|
||||||
BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Some LLDs, like scsi, peek at the timeout to prevent a
|
* Some LLDs, like scsi, peek at the timeout to prevent a
|
||||||
|
@ -963,10 +963,17 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
|
|||||||
*/
|
*/
|
||||||
return_desc =
|
return_desc =
|
||||||
*(operand[0]->reference.where);
|
*(operand[0]->reference.where);
|
||||||
if (return_desc) {
|
if (!return_desc) {
|
||||||
acpi_ut_add_reference
|
/*
|
||||||
(return_desc);
|
* Element is NULL, do not allow the dereference.
|
||||||
|
* This provides compatibility with other ACPI
|
||||||
|
* implementations.
|
||||||
|
*/
|
||||||
|
return_ACPI_STATUS
|
||||||
|
(AE_AML_UNINITIALIZED_ELEMENT);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
acpi_ut_add_reference(return_desc);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
@ -991,11 +998,40 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
|
|||||||
acpi_namespace_node
|
acpi_namespace_node
|
||||||
*)
|
*)
|
||||||
return_desc);
|
return_desc);
|
||||||
|
if (!return_desc) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* June 2013:
|
||||||
|
* buffer_fields/field_units require additional resolution
|
||||||
|
*/
|
||||||
|
switch (return_desc->common.type) {
|
||||||
|
case ACPI_TYPE_BUFFER_FIELD:
|
||||||
|
case ACPI_TYPE_LOCAL_REGION_FIELD:
|
||||||
|
case ACPI_TYPE_LOCAL_BANK_FIELD:
|
||||||
|
case ACPI_TYPE_LOCAL_INDEX_FIELD:
|
||||||
|
|
||||||
|
status =
|
||||||
|
acpi_ex_read_data_from_field
|
||||||
|
(walk_state, return_desc,
|
||||||
|
&temp_desc);
|
||||||
|
if (ACPI_FAILURE(status)) {
|
||||||
|
goto cleanup;
|
||||||
|
}
|
||||||
|
|
||||||
|
return_desc = temp_desc;
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
|
||||||
|
/* Add another reference to the object */
|
||||||
|
|
||||||
|
acpi_ut_add_reference
|
||||||
|
(return_desc);
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Add another reference to the object! */
|
|
||||||
|
|
||||||
acpi_ut_add_reference(return_desc);
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
|
@ -57,6 +57,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
|
|||||||
union acpi_operand_object *dest_desc,
|
union acpi_operand_object *dest_desc,
|
||||||
struct acpi_walk_state *walk_state);
|
struct acpi_walk_state *walk_state);
|
||||||
|
|
||||||
|
static acpi_status
|
||||||
|
acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
|
||||||
|
struct acpi_namespace_node *node,
|
||||||
|
struct acpi_walk_state *walk_state);
|
||||||
|
|
||||||
/*******************************************************************************
|
/*******************************************************************************
|
||||||
*
|
*
|
||||||
* FUNCTION: acpi_ex_store
|
* FUNCTION: acpi_ex_store
|
||||||
@ -376,7 +381,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
|
|||||||
* When storing into an object the data is converted to the
|
* When storing into an object the data is converted to the
|
||||||
* target object type then stored in the object. This means
|
* target object type then stored in the object. This means
|
||||||
* that the target object type (for an initialized target) will
|
* that the target object type (for an initialized target) will
|
||||||
* not be changed by a store operation.
|
* not be changed by a store operation. A copy_object can change
|
||||||
|
* the target type, however.
|
||||||
|
*
|
||||||
|
* The implicit_conversion flag is set to NO/FALSE only when
|
||||||
|
* storing to an arg_x -- as per the rules of the ACPI spec.
|
||||||
*
|
*
|
||||||
* Assumes parameters are already validated.
|
* Assumes parameters are already validated.
|
||||||
*
|
*
|
||||||
@ -400,7 +409,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
|
|||||||
target_type = acpi_ns_get_type(node);
|
target_type = acpi_ns_get_type(node);
|
||||||
target_desc = acpi_ns_get_attached_object(node);
|
target_desc = acpi_ns_get_attached_object(node);
|
||||||
|
|
||||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
|
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p (%s) to node %p (%s)\n",
|
||||||
source_desc,
|
source_desc,
|
||||||
acpi_ut_get_object_type_name(source_desc), node,
|
acpi_ut_get_object_type_name(source_desc), node,
|
||||||
acpi_ut_get_type_name(target_type)));
|
acpi_ut_get_type_name(target_type)));
|
||||||
@ -414,46 +423,31 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
|
|||||||
return_ACPI_STATUS(status);
|
return_ACPI_STATUS(status);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* If no implicit conversion, drop into the default case below */
|
|
||||||
|
|
||||||
if ((!implicit_conversion) ||
|
|
||||||
((walk_state->opcode == AML_COPY_OP) &&
|
|
||||||
(target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
|
|
||||||
(target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
|
|
||||||
(target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
|
|
||||||
/*
|
|
||||||
* Force execution of default (no implicit conversion). Note:
|
|
||||||
* copy_object does not perform an implicit conversion, as per the ACPI
|
|
||||||
* spec -- except in case of region/bank/index fields -- because these
|
|
||||||
* objects must retain their original type permanently.
|
|
||||||
*/
|
|
||||||
target_type = ACPI_TYPE_ANY;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Do the actual store operation */
|
/* Do the actual store operation */
|
||||||
|
|
||||||
switch (target_type) {
|
switch (target_type) {
|
||||||
case ACPI_TYPE_BUFFER_FIELD:
|
|
||||||
case ACPI_TYPE_LOCAL_REGION_FIELD:
|
|
||||||
case ACPI_TYPE_LOCAL_BANK_FIELD:
|
|
||||||
case ACPI_TYPE_LOCAL_INDEX_FIELD:
|
|
||||||
|
|
||||||
/* For fields, copy the source data to the target field. */
|
|
||||||
|
|
||||||
status = acpi_ex_write_data_to_field(source_desc, target_desc,
|
|
||||||
&walk_state->result_obj);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case ACPI_TYPE_INTEGER:
|
case ACPI_TYPE_INTEGER:
|
||||||
case ACPI_TYPE_STRING:
|
case ACPI_TYPE_STRING:
|
||||||
case ACPI_TYPE_BUFFER:
|
case ACPI_TYPE_BUFFER:
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* These target types are all of type Integer/String/Buffer, and
|
* The simple data types all support implicit source operand
|
||||||
* therefore support implicit conversion before the store.
|
* conversion before the store.
|
||||||
*
|
|
||||||
* Copy and/or convert the source object to a new target object
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
if ((walk_state->opcode == AML_COPY_OP) || !implicit_conversion) {
|
||||||
|
/*
|
||||||
|
* However, copy_object and Stores to arg_x do not perform
|
||||||
|
* an implicit conversion, as per the ACPI specification.
|
||||||
|
* A direct store is performed instead.
|
||||||
|
*/
|
||||||
|
status = acpi_ex_store_direct_to_node(source_desc, node,
|
||||||
|
walk_state);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Store with implicit source operand conversion support */
|
||||||
|
|
||||||
status =
|
status =
|
||||||
acpi_ex_store_object_to_object(source_desc, target_desc,
|
acpi_ex_store_object_to_object(source_desc, target_desc,
|
||||||
&new_desc, walk_state);
|
&new_desc, walk_state);
|
||||||
@ -467,13 +461,12 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
|
|||||||
* the Name's type to that of the value being stored in it.
|
* the Name's type to that of the value being stored in it.
|
||||||
* source_desc reference count is incremented by attach_object.
|
* source_desc reference count is incremented by attach_object.
|
||||||
*
|
*
|
||||||
* Note: This may change the type of the node if an explicit store
|
* Note: This may change the type of the node if an explicit
|
||||||
* has been performed such that the node/object type has been
|
* store has been performed such that the node/object type
|
||||||
* changed.
|
* has been changed.
|
||||||
*/
|
*/
|
||||||
status =
|
status = acpi_ns_attach_object(node, new_desc,
|
||||||
acpi_ns_attach_object(node, new_desc,
|
new_desc->common.type);
|
||||||
new_desc->common.type);
|
|
||||||
|
|
||||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
|
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
|
||||||
"Store %s into %s via Convert/Attach\n",
|
"Store %s into %s via Convert/Attach\n",
|
||||||
@ -484,38 +477,83 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
|
|||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
case ACPI_TYPE_BUFFER_FIELD:
|
||||||
|
case ACPI_TYPE_LOCAL_REGION_FIELD:
|
||||||
|
case ACPI_TYPE_LOCAL_BANK_FIELD:
|
||||||
|
case ACPI_TYPE_LOCAL_INDEX_FIELD:
|
||||||
|
/*
|
||||||
|
* For all fields, always write the source data to the target
|
||||||
|
* field. Any required implicit source operand conversion is
|
||||||
|
* performed in the function below as necessary. Note, field
|
||||||
|
* objects must retain their original type permanently.
|
||||||
|
*/
|
||||||
|
status = acpi_ex_write_data_to_field(source_desc, target_desc,
|
||||||
|
&walk_state->result_obj);
|
||||||
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
|
|
||||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
|
|
||||||
"Storing [%s] (%p) directly into node [%s] (%p)"
|
|
||||||
" with no implicit conversion\n",
|
|
||||||
acpi_ut_get_object_type_name(source_desc),
|
|
||||||
source_desc,
|
|
||||||
acpi_ut_get_object_type_name(target_desc),
|
|
||||||
node));
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* No conversions for all other types. Directly store a copy of
|
* No conversions for all other types. Directly store a copy of
|
||||||
* the source object. NOTE: This is a departure from the ACPI
|
* the source object. This is the ACPI spec-defined behavior for
|
||||||
* spec, which states "If conversion is impossible, abort the
|
* the copy_object operator.
|
||||||
* running control method".
|
|
||||||
*
|
*
|
||||||
* This code implements "If conversion is impossible, treat the
|
* NOTE: For the Store operator, this is a departure from the
|
||||||
* Store operation as a CopyObject".
|
* ACPI spec, which states "If conversion is impossible, abort
|
||||||
|
* the running control method". Instead, this code implements
|
||||||
|
* "If conversion is impossible, treat the Store operation as
|
||||||
|
* a CopyObject".
|
||||||
*/
|
*/
|
||||||
status =
|
status = acpi_ex_store_direct_to_node(source_desc, node,
|
||||||
acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc,
|
walk_state);
|
||||||
walk_state);
|
|
||||||
if (ACPI_FAILURE(status)) {
|
|
||||||
return_ACPI_STATUS(status);
|
|
||||||
}
|
|
||||||
|
|
||||||
status =
|
|
||||||
acpi_ns_attach_object(node, new_desc,
|
|
||||||
new_desc->common.type);
|
|
||||||
acpi_ut_remove_reference(new_desc);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
return_ACPI_STATUS(status);
|
return_ACPI_STATUS(status);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*******************************************************************************
|
||||||
|
*
|
||||||
|
* FUNCTION: acpi_ex_store_direct_to_node
|
||||||
|
*
|
||||||
|
* PARAMETERS: source_desc - Value to be stored
|
||||||
|
* node - Named object to receive the value
|
||||||
|
* walk_state - Current walk state
|
||||||
|
*
|
||||||
|
* RETURN: Status
|
||||||
|
*
|
||||||
|
* DESCRIPTION: "Store" an object directly to a node. This involves a copy
|
||||||
|
* and an attach.
|
||||||
|
*
|
||||||
|
******************************************************************************/
|
||||||
|
|
||||||
|
static acpi_status
|
||||||
|
acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
|
||||||
|
struct acpi_namespace_node *node,
|
||||||
|
struct acpi_walk_state *walk_state)
|
||||||
|
{
|
||||||
|
acpi_status status;
|
||||||
|
union acpi_operand_object *new_desc;
|
||||||
|
|
||||||
|
ACPI_FUNCTION_TRACE(ex_store_direct_to_node);
|
||||||
|
|
||||||
|
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
|
||||||
|
"Storing [%s] (%p) directly into node [%s] (%p)"
|
||||||
|
" with no implicit conversion\n",
|
||||||
|
acpi_ut_get_object_type_name(source_desc),
|
||||||
|
source_desc, acpi_ut_get_type_name(node->type),
|
||||||
|
node));
|
||||||
|
|
||||||
|
/* Copy the source object to a new object */
|
||||||
|
|
||||||
|
status =
|
||||||
|
acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, walk_state);
|
||||||
|
if (ACPI_FAILURE(status)) {
|
||||||
|
return_ACPI_STATUS(status);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Attach the new object to the node */
|
||||||
|
|
||||||
|
status = acpi_ns_attach_object(node, new_desc, new_desc->common.type);
|
||||||
|
acpi_ut_remove_reference(new_desc);
|
||||||
|
return_ACPI_STATUS(status);
|
||||||
|
}
|
||||||
|
@ -636,9 +636,12 @@ static void handle_root_bridge_removal(struct acpi_device *device)
|
|||||||
ej_event->device = device;
|
ej_event->device = device;
|
||||||
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
|
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
|
||||||
|
|
||||||
|
get_device(&device->dev);
|
||||||
status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
|
status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
|
||||||
if (ACPI_FAILURE(status))
|
if (ACPI_FAILURE(status)) {
|
||||||
|
put_device(&device->dev);
|
||||||
kfree(ej_event);
|
kfree(ej_event);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void _handle_hotplug_event_root(struct work_struct *work)
|
static void _handle_hotplug_event_root(struct work_struct *work)
|
||||||
|
@ -121,17 +121,10 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
|
|||||||
*/
|
*/
|
||||||
static void acpi_safe_halt(void)
|
static void acpi_safe_halt(void)
|
||||||
{
|
{
|
||||||
current_thread_info()->status &= ~TS_POLLING;
|
if (!tif_need_resched()) {
|
||||||
/*
|
|
||||||
* TS_POLLING-cleared state must be visible before we
|
|
||||||
* test NEED_RESCHED:
|
|
||||||
*/
|
|
||||||
smp_mb();
|
|
||||||
if (!need_resched()) {
|
|
||||||
safe_halt();
|
safe_halt();
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
}
|
}
|
||||||
current_thread_info()->status |= TS_POLLING;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef ARCH_APICTIMER_STOPS_ON_C3
|
#ifdef ARCH_APICTIMER_STOPS_ON_C3
|
||||||
@ -739,6 +732,11 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
|
|||||||
if (unlikely(!pr))
|
if (unlikely(!pr))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (cx->entry_method == ACPI_CSTATE_FFH) {
|
||||||
|
if (current_set_polling_and_test())
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
lapic_timer_state_broadcast(pr, cx, 1);
|
lapic_timer_state_broadcast(pr, cx, 1);
|
||||||
acpi_idle_do_entry(cx);
|
acpi_idle_do_entry(cx);
|
||||||
|
|
||||||
@ -792,18 +790,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
|
|||||||
if (unlikely(!pr))
|
if (unlikely(!pr))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (cx->entry_method != ACPI_CSTATE_FFH) {
|
if (cx->entry_method == ACPI_CSTATE_FFH) {
|
||||||
current_thread_info()->status &= ~TS_POLLING;
|
if (current_set_polling_and_test())
|
||||||
/*
|
|
||||||
* TS_POLLING-cleared state must be visible before we test
|
|
||||||
* NEED_RESCHED:
|
|
||||||
*/
|
|
||||||
smp_mb();
|
|
||||||
|
|
||||||
if (unlikely(need_resched())) {
|
|
||||||
current_thread_info()->status |= TS_POLLING;
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -821,9 +810,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
|
|||||||
|
|
||||||
sched_clock_idle_wakeup_event(0);
|
sched_clock_idle_wakeup_event(0);
|
||||||
|
|
||||||
if (cx->entry_method != ACPI_CSTATE_FFH)
|
|
||||||
current_thread_info()->status |= TS_POLLING;
|
|
||||||
|
|
||||||
lapic_timer_state_broadcast(pr, cx, 0);
|
lapic_timer_state_broadcast(pr, cx, 0);
|
||||||
return index;
|
return index;
|
||||||
}
|
}
|
||||||
@ -860,18 +846,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cx->entry_method != ACPI_CSTATE_FFH) {
|
if (cx->entry_method == ACPI_CSTATE_FFH) {
|
||||||
current_thread_info()->status &= ~TS_POLLING;
|
if (current_set_polling_and_test())
|
||||||
/*
|
|
||||||
* TS_POLLING-cleared state must be visible before we test
|
|
||||||
* NEED_RESCHED:
|
|
||||||
*/
|
|
||||||
smp_mb();
|
|
||||||
|
|
||||||
if (unlikely(need_resched())) {
|
|
||||||
current_thread_info()->status |= TS_POLLING;
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
acpi_unlazy_tlb(smp_processor_id());
|
acpi_unlazy_tlb(smp_processor_id());
|
||||||
@ -917,9 +894,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
|
|||||||
|
|
||||||
sched_clock_idle_wakeup_event(0);
|
sched_clock_idle_wakeup_event(0);
|
||||||
|
|
||||||
if (cx->entry_method != ACPI_CSTATE_FFH)
|
|
||||||
current_thread_info()->status |= TS_POLLING;
|
|
||||||
|
|
||||||
lapic_timer_state_broadcast(pr, cx, 0);
|
lapic_timer_state_broadcast(pr, cx, 0);
|
||||||
return index;
|
return index;
|
||||||
}
|
}
|
||||||
|
@ -244,8 +244,6 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
acpi_evaluate_hotplug_ost(handle, ost_source,
|
|
||||||
ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
|
|
||||||
error = acpi_bus_scan(handle);
|
error = acpi_bus_scan(handle);
|
||||||
if (error) {
|
if (error) {
|
||||||
acpi_handle_warn(handle, "Namespace scan failure\n");
|
acpi_handle_warn(handle, "Namespace scan failure\n");
|
||||||
|
@ -846,7 +846,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
|
|||||||
for (i = 2; i < br->count; i++)
|
for (i = 2; i < br->count; i++)
|
||||||
if (level_old == br->levels[i])
|
if (level_old == br->levels[i])
|
||||||
break;
|
break;
|
||||||
if (i == br->count)
|
if (i == br->count || !level)
|
||||||
level = max_level;
|
level = max_level;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -545,7 +545,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
|
|||||||
|
|
||||||
mutex_lock(&brd_devices_mutex);
|
mutex_lock(&brd_devices_mutex);
|
||||||
brd = brd_init_one(MINOR(dev) >> part_shift);
|
brd = brd_init_one(MINOR(dev) >> part_shift);
|
||||||
kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
|
kobj = brd ? get_disk(brd->brd_disk) : NULL;
|
||||||
mutex_unlock(&brd_devices_mutex);
|
mutex_unlock(&brd_devices_mutex);
|
||||||
|
|
||||||
*part = 0;
|
*part = 0;
|
||||||
|
@ -1747,7 +1747,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
|
|||||||
if (err < 0)
|
if (err < 0)
|
||||||
err = loop_add(&lo, MINOR(dev) >> part_shift);
|
err = loop_add(&lo, MINOR(dev) >> part_shift);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
kobj = ERR_PTR(err);
|
kobj = NULL;
|
||||||
else
|
else
|
||||||
kobj = get_disk(lo->lo_disk);
|
kobj = get_disk(lo->lo_disk);
|
||||||
mutex_unlock(&loop_index_mutex);
|
mutex_unlock(&loop_index_mutex);
|
||||||
|
@ -724,6 +724,30 @@ static const struct dmi_system_id intel_no_lvds[] = {
|
|||||||
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
|
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
.callback = intel_no_lvds_dmi_callback,
|
||||||
|
.ident = "Intel D410PT",
|
||||||
|
.matches = {
|
||||||
|
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
|
||||||
|
DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
.callback = intel_no_lvds_dmi_callback,
|
||||||
|
.ident = "Intel D425KT",
|
||||||
|
.matches = {
|
||||||
|
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
|
||||||
|
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
.callback = intel_no_lvds_dmi_callback,
|
||||||
|
.ident = "Intel D510MO",
|
||||||
|
.matches = {
|
||||||
|
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
|
||||||
|
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
{ } /* terminating entry */
|
{ } /* terminating entry */
|
||||||
};
|
};
|
||||||
|
@ -297,7 +297,7 @@ static const struct lm90_params lm90_params[] = {
|
|||||||
[max6696] = {
|
[max6696] = {
|
||||||
.flags = LM90_HAVE_EMERGENCY
|
.flags = LM90_HAVE_EMERGENCY
|
||||||
| LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
|
| LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
|
||||||
.alert_alarms = 0x187c,
|
.alert_alarms = 0x1c7c,
|
||||||
.max_convrate = 6,
|
.max_convrate = 6,
|
||||||
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
|
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
|
||||||
},
|
},
|
||||||
@ -1666,19 +1666,22 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
|
|||||||
if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
|
if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
|
||||||
dev_info(&client->dev, "Everything OK\n");
|
dev_info(&client->dev, "Everything OK\n");
|
||||||
} else {
|
} else {
|
||||||
if (alarms & 0x61)
|
if ((alarms & 0x61) || (alarms2 & 0x80))
|
||||||
dev_warn(&client->dev,
|
dev_warn(&client->dev,
|
||||||
"temp%d out of range, please check!\n", 1);
|
"temp%d out of range, please check!\n", 1);
|
||||||
if (alarms & 0x1a)
|
if ((alarms & 0x1a) || (alarms2 & 0x20))
|
||||||
dev_warn(&client->dev,
|
dev_warn(&client->dev,
|
||||||
"temp%d out of range, please check!\n", 2);
|
"temp%d out of range, please check!\n", 2);
|
||||||
if (alarms & 0x04)
|
if (alarms & 0x04)
|
||||||
dev_warn(&client->dev,
|
dev_warn(&client->dev,
|
||||||
"temp%d diode open, please check!\n", 2);
|
"temp%d diode open, please check!\n", 2);
|
||||||
|
|
||||||
if (alarms2 & 0x18)
|
if (alarms2 & 0x5a)
|
||||||
dev_warn(&client->dev,
|
dev_warn(&client->dev,
|
||||||
"temp%d out of range, please check!\n", 3);
|
"temp%d out of range, please check!\n", 3);
|
||||||
|
if (alarms2 & 0x04)
|
||||||
|
dev_warn(&client->dev,
|
||||||
|
"temp%d diode open, please check!\n", 3);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Disable ALERT# output, because these chips don't implement
|
* Disable ALERT# output, because these chips don't implement
|
||||||
|
@ -407,7 +407,7 @@ static int intel_idle(struct cpuidle_device *dev,
|
|||||||
if (!(lapic_timer_reliable_states & (1 << (cstate))))
|
if (!(lapic_timer_reliable_states & (1 << (cstate))))
|
||||||
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
|
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
|
||||||
|
|
||||||
if (!need_resched()) {
|
if (!current_set_polling_and_test()) {
|
||||||
|
|
||||||
__monitor((void *)¤t_thread_info()->flags, 0, 0);
|
__monitor((void *)¤t_thread_info()->flags, 0, 0);
|
||||||
smp_mb();
|
smp_mb();
|
||||||
|
@ -485,8 +485,11 @@ int mei_nfc_host_init(struct mei_device *dev)
|
|||||||
if (ndev->cl_info)
|
if (ndev->cl_info)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
cl_info = mei_cl_allocate(dev);
|
ndev->cl_info = mei_cl_allocate(dev);
|
||||||
cl = mei_cl_allocate(dev);
|
ndev->cl = mei_cl_allocate(dev);
|
||||||
|
|
||||||
|
cl = ndev->cl;
|
||||||
|
cl_info = ndev->cl_info;
|
||||||
|
|
||||||
if (!cl || !cl_info) {
|
if (!cl || !cl_info) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
@ -527,10 +530,9 @@ int mei_nfc_host_init(struct mei_device *dev)
|
|||||||
|
|
||||||
cl->device_uuid = mei_nfc_guid;
|
cl->device_uuid = mei_nfc_guid;
|
||||||
|
|
||||||
|
|
||||||
list_add_tail(&cl->device_link, &dev->device_list);
|
list_add_tail(&cl->device_link, &dev->device_list);
|
||||||
|
|
||||||
ndev->cl_info = cl_info;
|
|
||||||
ndev->cl = cl;
|
|
||||||
ndev->req_id = 1;
|
ndev->req_id = 1;
|
||||||
|
|
||||||
INIT_WORK(&ndev->init_work, mei_nfc_init);
|
INIT_WORK(&ndev->init_work, mei_nfc_init);
|
||||||
|
@ -814,9 +814,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
|
|||||||
msg_ctrl_save = priv->read_reg(priv,
|
msg_ctrl_save = priv->read_reg(priv,
|
||||||
C_CAN_IFACE(MSGCTRL_REG, 0));
|
C_CAN_IFACE(MSGCTRL_REG, 0));
|
||||||
|
|
||||||
if (msg_ctrl_save & IF_MCONT_EOB)
|
|
||||||
return num_rx_pkts;
|
|
||||||
|
|
||||||
if (msg_ctrl_save & IF_MCONT_MSGLST) {
|
if (msg_ctrl_save & IF_MCONT_MSGLST) {
|
||||||
c_can_handle_lost_msg_obj(dev, 0, msg_obj);
|
c_can_handle_lost_msg_obj(dev, 0, msg_obj);
|
||||||
num_rx_pkts++;
|
num_rx_pkts++;
|
||||||
@ -824,6 +821,9 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (msg_ctrl_save & IF_MCONT_EOB)
|
||||||
|
return num_rx_pkts;
|
||||||
|
|
||||||
if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
|
if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
@ -1544,9 +1544,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
|
static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
|
||||||
struct usb_endpoint_descriptor **in,
|
struct usb_endpoint_descriptor **in,
|
||||||
struct usb_endpoint_descriptor **out)
|
struct usb_endpoint_descriptor **out)
|
||||||
{
|
{
|
||||||
const struct usb_host_interface *iface_desc;
|
const struct usb_host_interface *iface_desc;
|
||||||
struct usb_endpoint_descriptor *endpoint;
|
struct usb_endpoint_descriptor *endpoint;
|
||||||
@ -1557,12 +1557,18 @@ static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
|
|||||||
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
|
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
|
||||||
endpoint = &iface_desc->endpoint[i].desc;
|
endpoint = &iface_desc->endpoint[i].desc;
|
||||||
|
|
||||||
if (usb_endpoint_is_bulk_in(endpoint))
|
if (!*in && usb_endpoint_is_bulk_in(endpoint))
|
||||||
*in = endpoint;
|
*in = endpoint;
|
||||||
|
|
||||||
if (usb_endpoint_is_bulk_out(endpoint))
|
if (!*out && usb_endpoint_is_bulk_out(endpoint))
|
||||||
*out = endpoint;
|
*out = endpoint;
|
||||||
|
|
||||||
|
/* use first bulk endpoint for in and out */
|
||||||
|
if (*in && *out)
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int kvaser_usb_probe(struct usb_interface *intf,
|
static int kvaser_usb_probe(struct usb_interface *intf,
|
||||||
@ -1576,8 +1582,8 @@ static int kvaser_usb_probe(struct usb_interface *intf,
|
|||||||
if (!dev)
|
if (!dev)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
|
err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
|
||||||
if (!dev->bulk_in || !dev->bulk_out) {
|
if (err) {
|
||||||
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
|
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
@ -3400,10 +3400,13 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
|
|||||||
|
|
||||||
vgc = rt2800_get_default_vgc(rt2x00dev);
|
vgc = rt2800_get_default_vgc(rt2x00dev);
|
||||||
|
|
||||||
if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
|
if (rt2x00_rt(rt2x00dev, RT5592)) {
|
||||||
vgc += 0x20;
|
if (qual->rssi > -65)
|
||||||
else if (qual->rssi > -80)
|
vgc += 0x20;
|
||||||
vgc += 0x10;
|
} else {
|
||||||
|
if (qual->rssi > -80)
|
||||||
|
vgc += 0x10;
|
||||||
|
}
|
||||||
|
|
||||||
rt2800_set_vgc(rt2x00dev, qual, vgc);
|
rt2800_set_vgc(rt2x00dev, qual, vgc);
|
||||||
}
|
}
|
||||||
|
@ -148,6 +148,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define TXSTATUS_READ_INTERVAL 1000000
|
||||||
|
|
||||||
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
|
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
|
||||||
int urb_status, u32 tx_status)
|
int urb_status, u32 tx_status)
|
||||||
{
|
{
|
||||||
@ -176,8 +178,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
|
|||||||
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
|
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
|
||||||
|
|
||||||
if (rt2800usb_txstatus_pending(rt2x00dev)) {
|
if (rt2800usb_txstatus_pending(rt2x00dev)) {
|
||||||
/* Read register after 250 us */
|
/* Read register after 1 ms */
|
||||||
hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
|
hrtimer_start(&rt2x00dev->txstatus_timer,
|
||||||
|
ktime_set(0, TXSTATUS_READ_INTERVAL),
|
||||||
HRTIMER_MODE_REL);
|
HRTIMER_MODE_REL);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -202,8 +205,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
|
|||||||
if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
|
if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Read TX_STA_FIFO register after 500 us */
|
/* Read TX_STA_FIFO register after 2 ms */
|
||||||
hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
|
hrtimer_start(&rt2x00dev->txstatus_timer,
|
||||||
|
ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
|
||||||
HRTIMER_MODE_REL);
|
HRTIMER_MODE_REL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -181,6 +181,7 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
|
|||||||
static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
|
static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
|
||||||
struct ieee80211_vif *vif)
|
struct ieee80211_vif *vif)
|
||||||
{
|
{
|
||||||
|
struct ieee80211_tx_control control = {};
|
||||||
struct rt2x00_dev *rt2x00dev = data;
|
struct rt2x00_dev *rt2x00dev = data;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
@ -195,7 +196,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
|
|||||||
*/
|
*/
|
||||||
skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
|
skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
|
||||||
while (skb) {
|
while (skb) {
|
||||||
rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
|
rt2x00mac_tx(rt2x00dev->hw, &control, skb);
|
||||||
skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
|
skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -146,7 +146,7 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length);
|
|||||||
* @local: frame is not from mac80211
|
* @local: frame is not from mac80211
|
||||||
*/
|
*/
|
||||||
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
|
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
|
||||||
bool local);
|
struct ieee80211_sta *sta, bool local);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* rt2x00queue_update_beacon - Send new beacon from mac80211
|
* rt2x00queue_update_beacon - Send new beacon from mac80211
|
||||||
|
@ -90,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
|
|||||||
frag_skb->data, data_length, tx_info,
|
frag_skb->data, data_length, tx_info,
|
||||||
(struct ieee80211_rts *)(skb->data));
|
(struct ieee80211_rts *)(skb->data));
|
||||||
|
|
||||||
retval = rt2x00queue_write_tx_frame(queue, skb, true);
|
retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
|
||||||
if (retval) {
|
if (retval) {
|
||||||
dev_kfree_skb_any(skb);
|
dev_kfree_skb_any(skb);
|
||||||
rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
|
rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
|
||||||
@ -151,7 +151,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
|
|||||||
goto exit_fail;
|
goto exit_fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
|
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
|
||||||
goto exit_fail;
|
goto exit_fail;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -754,6 +754,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
|
|||||||
struct rt2x00_dev *rt2x00dev = hw->priv;
|
struct rt2x00_dev *rt2x00dev = hw->priv;
|
||||||
struct data_queue *queue;
|
struct data_queue *queue;
|
||||||
|
|
||||||
|
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
|
||||||
|
return;
|
||||||
|
|
||||||
tx_queue_for_each(rt2x00dev, queue)
|
tx_queue_for_each(rt2x00dev, queue)
|
||||||
rt2x00queue_flush_queue(queue, drop);
|
rt2x00queue_flush_queue(queue, drop);
|
||||||
}
|
}
|
||||||
|
@ -635,7 +635,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry)
|
|||||||
}
|
}
|
||||||
|
|
||||||
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
|
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
|
||||||
bool local)
|
struct ieee80211_sta *sta, bool local)
|
||||||
{
|
{
|
||||||
struct ieee80211_tx_info *tx_info;
|
struct ieee80211_tx_info *tx_info;
|
||||||
struct queue_entry *entry;
|
struct queue_entry *entry;
|
||||||
@ -649,7 +649,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
|
|||||||
* after that we are free to use the skb->cb array
|
* after that we are free to use the skb->cb array
|
||||||
* for our information.
|
* for our information.
|
||||||
*/
|
*/
|
||||||
rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
|
rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* All information is retrieved from the skb->cb array,
|
* All information is retrieved from the skb->cb array,
|
||||||
|
@ -484,28 +484,29 @@ static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
|
|||||||
{
|
{
|
||||||
int type = pci_pcie_type(dev);
|
int type = pci_pcie_type(dev);
|
||||||
|
|
||||||
return pcie_cap_version(dev) > 1 ||
|
return type == PCI_EXP_TYPE_ENDPOINT ||
|
||||||
|
type == PCI_EXP_TYPE_LEG_END ||
|
||||||
type == PCI_EXP_TYPE_ROOT_PORT ||
|
type == PCI_EXP_TYPE_ROOT_PORT ||
|
||||||
type == PCI_EXP_TYPE_ENDPOINT ||
|
type == PCI_EXP_TYPE_UPSTREAM ||
|
||||||
type == PCI_EXP_TYPE_LEG_END;
|
type == PCI_EXP_TYPE_DOWNSTREAM ||
|
||||||
|
type == PCI_EXP_TYPE_PCI_BRIDGE ||
|
||||||
|
type == PCI_EXP_TYPE_PCIE_BRIDGE;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
|
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
|
||||||
{
|
{
|
||||||
int type = pci_pcie_type(dev);
|
int type = pci_pcie_type(dev);
|
||||||
|
|
||||||
return pcie_cap_version(dev) > 1 ||
|
return (type == PCI_EXP_TYPE_ROOT_PORT ||
|
||||||
type == PCI_EXP_TYPE_ROOT_PORT ||
|
type == PCI_EXP_TYPE_DOWNSTREAM) &&
|
||||||
(type == PCI_EXP_TYPE_DOWNSTREAM &&
|
pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
|
||||||
pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
|
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
|
||||||
{
|
{
|
||||||
int type = pci_pcie_type(dev);
|
int type = pci_pcie_type(dev);
|
||||||
|
|
||||||
return pcie_cap_version(dev) > 1 ||
|
return type == PCI_EXP_TYPE_ROOT_PORT ||
|
||||||
type == PCI_EXP_TYPE_ROOT_PORT ||
|
|
||||||
type == PCI_EXP_TYPE_RC_EC;
|
type == PCI_EXP_TYPE_RC_EC;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -510,7 +510,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
|||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
|
if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
|
||||||
|
(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
|
||||||
rcode = -EINVAL;
|
rcode = -EINVAL;
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
|
@ -1593,7 +1593,11 @@ static int mos7840_tiocmget(struct tty_struct *tty)
|
|||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
|
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
|
||||||
|
if (status != 1)
|
||||||
|
return -EIO;
|
||||||
status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
|
status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
|
||||||
|
if (status != 1)
|
||||||
|
return -EIO;
|
||||||
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
|
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
|
||||||
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
|
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
|
||||||
| ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
|
| ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
|
||||||
|
@ -56,10 +56,19 @@ static void configfs_d_iput(struct dentry * dentry,
|
|||||||
struct configfs_dirent *sd = dentry->d_fsdata;
|
struct configfs_dirent *sd = dentry->d_fsdata;
|
||||||
|
|
||||||
if (sd) {
|
if (sd) {
|
||||||
BUG_ON(sd->s_dentry != dentry);
|
|
||||||
/* Coordinate with configfs_readdir */
|
/* Coordinate with configfs_readdir */
|
||||||
spin_lock(&configfs_dirent_lock);
|
spin_lock(&configfs_dirent_lock);
|
||||||
sd->s_dentry = NULL;
|
/* Coordinate with configfs_attach_attr where will increase
|
||||||
|
* sd->s_count and update sd->s_dentry to new allocated one.
|
||||||
|
* Only set sd->dentry to null when this dentry is the only
|
||||||
|
* sd owner.
|
||||||
|
* If not do so, configfs_d_iput may run just after
|
||||||
|
* configfs_attach_attr and set sd->s_dentry to null
|
||||||
|
* even it's still in use.
|
||||||
|
*/
|
||||||
|
if (atomic_read(&sd->s_count) <= 2)
|
||||||
|
sd->s_dentry = NULL;
|
||||||
|
|
||||||
spin_unlock(&configfs_dirent_lock);
|
spin_unlock(&configfs_dirent_lock);
|
||||||
configfs_put(sd);
|
configfs_put(sd);
|
||||||
}
|
}
|
||||||
@ -426,8 +435,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
|
|||||||
struct configfs_attribute * attr = sd->s_element;
|
struct configfs_attribute * attr = sd->s_element;
|
||||||
int error;
|
int error;
|
||||||
|
|
||||||
|
spin_lock(&configfs_dirent_lock);
|
||||||
dentry->d_fsdata = configfs_get(sd);
|
dentry->d_fsdata = configfs_get(sd);
|
||||||
sd->s_dentry = dentry;
|
sd->s_dentry = dentry;
|
||||||
|
spin_unlock(&configfs_dirent_lock);
|
||||||
|
|
||||||
error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
|
error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
|
||||||
configfs_init_file);
|
configfs_init_file);
|
||||||
if (error) {
|
if (error) {
|
||||||
|
@ -1671,6 +1671,12 @@ int __get_dumpable(unsigned long mm_flags)
|
|||||||
return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
|
return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This returns the actual value of the suid_dumpable flag. For things
|
||||||
|
* that are using this for checking for privilege transitions, it must
|
||||||
|
* test against SUID_DUMP_USER rather than treating it as a boolean
|
||||||
|
* value.
|
||||||
|
*/
|
||||||
int get_dumpable(struct mm_struct *mm)
|
int get_dumpable(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
return __get_dumpable(mm->flags);
|
return __get_dumpable(mm->flags);
|
||||||
|
@ -1160,29 +1160,24 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
 	int ret;
 
 	if (!data->rpc_done) {
-		ret = data->rpc_status;
-		goto err;
+		if (data->rpc_status) {
+			ret = data->rpc_status;
+			goto err;
+		}
+		/* cached opens have already been processed */
+		goto update;
 	}
 
-	ret = -ESTALE;
-	if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
-	    !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
-	    !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
-		goto err;
-
-	ret = -ENOMEM;
-	state = nfs4_get_open_state(inode, data->owner);
-	if (state == NULL)
-		goto err;
-
 	ret = nfs_refresh_inode(inode, &data->f_attr);
 	if (ret)
 		goto err;
 
 	if (data->o_res.delegation_type != 0)
 		nfs4_opendata_check_deleg(data, state);
+update:
 	update_open_stateid(state, &data->o_res.stateid, NULL,
 			data->o_arg.fmode);
+	atomic_inc(&state->count);
 
 	return state;
 err:
@ -4572,6 +4567,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
 		status = 0;
 	}
 	request->fl_ops->fl_release_private(request);
+	request->fl_ops = NULL;
 out:
 	return status;
 }
@ -536,16 +536,12 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
 	if (err)
 		goto out3;
 	exp.ex_anon_uid= make_kuid(&init_user_ns, an_int);
-	if (!uid_valid(exp.ex_anon_uid))
-		goto out3;
 
 	/* anon gid */
 	err = get_int(&mesg, &an_int);
 	if (err)
 		goto out3;
 	exp.ex_anon_gid= make_kgid(&init_user_ns, an_int);
-	if (!gid_valid(exp.ex_anon_gid))
-		goto out3;
 
 	/* fsid */
 	err = get_int(&mesg, &an_int);
@ -583,6 +579,17 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
 				   exp.ex_uuid);
 		if (err)
 			goto out4;
+		/*
+		 * For some reason exportfs has been passing down an
+		 * invalid (-1) uid & gid on the "dummy" export which it
+		 * uses to test export support. To make sure exportfs
+		 * sees errors from check_export we therefore need to
+		 * delay these checks till after check_export:
+		 */
+		if (!uid_valid(exp.ex_anon_uid))
+			goto out4;
+		if (!gid_valid(exp.ex_anon_gid))
+			goto out4;
 	}
 
 	expp = svc_export_lookup(&exp);
215
fs/nfsd/vfs.c
@ -297,8 +297,104 @@ commit_metadata(struct svc_fh *fhp)
 }
 
 /*
- * Set various file attributes.
- * N.B. After this call fhp needs an fh_put
+ * Go over the attributes and take care of the small differences between
+ * NFS semantics and what Linux expects.
+ */
+static void
+nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
+{
+	/*
+	 * NFSv2 does not differentiate between "set-[ac]time-to-now"
+	 * which only requires access, and "set-[ac]time-to-X" which
+	 * requires ownership.
+	 * So if it looks like it might be "set both to the same time which
+	 * is close to now", and if inode_change_ok fails, then we
+	 * convert to "set to now" instead of "set to explicit time"
+	 *
+	 * We only call inode_change_ok as the last test as technically
+	 * it is not an interface that we should be using.
+	 */
+#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
+#define	MAX_TOUCH_TIME_ERROR (30*60)
+	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
+	    iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
+		/*
+		 * Looks probable.
+		 *
+		 * Now just make sure time is in the right ballpark.
+		 * Solaris, at least, doesn't seem to care what the time
+		 * request is. We require it be within 30 minutes of now.
+		 */
+		time_t delta = iap->ia_atime.tv_sec - get_seconds();
+		if (delta < 0)
+			delta = -delta;
+		if (delta < MAX_TOUCH_TIME_ERROR &&
+		    inode_change_ok(inode, iap) != 0) {
+			/*
+			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
+			 * This will cause notify_change to set these times
+			 * to "now"
+			 */
+			iap->ia_valid &= ~BOTH_TIME_SET;
+		}
+	}
+
+	/* sanitize the mode change */
+	if (iap->ia_valid & ATTR_MODE) {
+		iap->ia_mode &= S_IALLUGO;
+		iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
+	}
+
+	/* Revoke setuid/setgid on chown */
+	if (!S_ISDIR(inode->i_mode) &&
+	    (((iap->ia_valid & ATTR_UID) && !uid_eq(iap->ia_uid, inode->i_uid)) ||
+	     ((iap->ia_valid & ATTR_GID) && !gid_eq(iap->ia_gid, inode->i_gid)))) {
+		iap->ia_valid |= ATTR_KILL_PRIV;
+		if (iap->ia_valid & ATTR_MODE) {
+			/* we're setting mode too, just clear the s*id bits */
+			iap->ia_mode &= ~S_ISUID;
+			if (iap->ia_mode & S_IXGRP)
+				iap->ia_mode &= ~S_ISGID;
+		} else {
+			/* set ATTR_KILL_* bits and let VFS handle it */
+			iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
+		}
+	}
+}
+
+static __be32
+nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
+		struct iattr *iap)
+{
+	struct inode *inode = fhp->fh_dentry->d_inode;
+	int host_err;
+
+	if (iap->ia_size < inode->i_size) {
+		__be32 err;
+
+		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
+				NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
+		if (err)
+			return err;
+	}
+
+	host_err = get_write_access(inode);
+	if (host_err)
+		goto out_nfserrno;
+
+	host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
+	if (host_err)
+		goto out_put_write_access;
+	return 0;
+
+out_put_write_access:
+	put_write_access(inode);
+out_nfserrno:
+	return nfserrno(host_err);
+}
+
+/*
+ * Set various file attributes. After this call fhp needs an fh_put.
  */
 __be32
 nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
@ -332,114 +428,43 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 	if (!iap->ia_valid)
 		goto out;
 
+	nfsd_sanitize_attrs(inode, iap);
+
 	/*
-	 * NFSv2 does not differentiate between "set-[ac]time-to-now"
-	 * which only requires access, and "set-[ac]time-to-X" which
-	 * requires ownership.
-	 * So if it looks like it might be "set both to the same time which
-	 * is close to now", and if inode_change_ok fails, then we
-	 * convert to "set to now" instead of "set to explicit time"
-	 *
-	 * We only call inode_change_ok as the last test as technically
-	 * it is not an interface that we should be using. It is only
-	 * valid if the filesystem does not define it's own i_op->setattr.
-	 */
-#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
-#define	MAX_TOUCH_TIME_ERROR (30*60)
-	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
-	    iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
-		/*
-		 * Looks probable.
-		 *
-		 * Now just make sure time is in the right ballpark.
-		 * Solaris, at least, doesn't seem to care what the time
-		 * request is. We require it be within 30 minutes of now.
-		 */
-		time_t delta = iap->ia_atime.tv_sec - get_seconds();
-		if (delta < 0)
-			delta = -delta;
-		if (delta < MAX_TOUCH_TIME_ERROR &&
-		    inode_change_ok(inode, iap) != 0) {
-			/*
-			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
-			 * This will cause notify_change to set these times
-			 * to "now"
-			 */
-			iap->ia_valid &= ~BOTH_TIME_SET;
-		}
-	}
-
-	/*
-	 * The size case is special.
-	 * It changes the file as well as the attributes.
+	 * The size case is special, it changes the file in addition to the
+	 * attributes.
 	 */
 	if (iap->ia_valid & ATTR_SIZE) {
-		if (iap->ia_size < inode->i_size) {
-			err = nfsd_permission(rqstp, fhp->fh_export, dentry,
-					NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
-			if (err)
-				goto out;
-		}
-
-		host_err = get_write_access(inode);
-		if (host_err)
-			goto out_nfserr;
-
+		err = nfsd_get_write_access(rqstp, fhp, iap);
+		if (err)
+			goto out;
 		size_change = 1;
-		host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-		if (host_err) {
-			put_write_access(inode);
-			goto out_nfserr;
-		}
 	}
 
-	/* sanitize the mode change */
-	if (iap->ia_valid & ATTR_MODE) {
-		iap->ia_mode &= S_IALLUGO;
-		iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
-	}
-
-	/* Revoke setuid/setgid on chown */
-	if (!S_ISDIR(inode->i_mode) &&
-	    (((iap->ia_valid & ATTR_UID) && !uid_eq(iap->ia_uid, inode->i_uid)) ||
-	     ((iap->ia_valid & ATTR_GID) && !gid_eq(iap->ia_gid, inode->i_gid)))) {
-		iap->ia_valid |= ATTR_KILL_PRIV;
-		if (iap->ia_valid & ATTR_MODE) {
-			/* we're setting mode too, just clear the s*id bits */
-			iap->ia_mode &= ~S_ISUID;
-			if (iap->ia_mode & S_IXGRP)
-				iap->ia_mode &= ~S_ISGID;
-		} else {
-			/* set ATTR_KILL_* bits and let VFS handle it */
-			iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
-		}
-	}
-
-	/* Change the attributes. */
-
 	iap->ia_valid |= ATTR_CTIME;
 
-	err = nfserr_notsync;
-	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
-		host_err = nfsd_break_lease(inode);
-		if (host_err)
-			goto out_nfserr;
-		fh_lock(fhp);
-
-		host_err = notify_change(dentry, iap);
-		err = nfserrno(host_err);
-		fh_unlock(fhp);
+	if (check_guard && guardtime != inode->i_ctime.tv_sec) {
+		err = nfserr_notsync;
+		goto out_put_write_access;
 	}
+
+	host_err = nfsd_break_lease(inode);
+	if (host_err)
+		goto out_put_write_access_nfserror;
+
+	fh_lock(fhp);
+	host_err = notify_change(dentry, iap);
+	fh_unlock(fhp);
+
+out_put_write_access_nfserror:
+	err = nfserrno(host_err);
+out_put_write_access:
 	if (size_change)
 		put_write_access(inode);
 	if (!err)
 		commit_metadata(fhp);
 out:
 	return err;
-
-out_nfserr:
-	err = nfserrno(host_err);
-	goto out;
 }
 
 #if defined(CONFIG_NFSD_V2_ACL) || \
@ -99,9 +99,6 @@ extern void setup_new_exec(struct linux_binprm * bprm);
 extern void would_dump(struct linux_binprm *, struct file *);
 
 extern int suid_dumpable;
-#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
-#define SUID_DUMP_USER		1	/* Dump as user of process */
-#define SUID_DUMP_ROOT		2	/* Dump as root */
 
 /* Stack area protections */
 #define EXSTACK_DEFAULT   0	/* Whatever the arch defaults to */
@ -338,6 +338,10 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
+#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
+#define SUID_DUMP_USER		1	/* Dump as user of process */
+#define SUID_DUMP_ROOT		2	/* Dump as root */
+
 /* mm flags */
 /* dumpable bits */
 #define MMF_DUMPABLE      0  /* core dump is permitted */
@ -2503,34 +2507,98 @@ static inline int tsk_is_polling(struct task_struct *p)
 {
 	return task_thread_info(p)->status & TS_POLLING;
 }
-static inline void current_set_polling(void)
+static inline void __current_set_polling(void)
 {
 	current_thread_info()->status |= TS_POLLING;
 }
 
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+	__current_set_polling();
+
+	/*
+	 * Polling state must be visible before we test NEED_RESCHED,
+	 * paired by resched_task()
+	 */
+	smp_mb();
+
+	return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
 {
 	current_thread_info()->status &= ~TS_POLLING;
-	smp_mb__after_clear_bit();
+}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+	__current_clr_polling();
+
+	/*
+	 * Polling state must be visible before we test NEED_RESCHED,
+	 * paired by resched_task()
+	 */
+	smp_mb();
+
+	return unlikely(tif_need_resched());
 }
 #elif defined(TIF_POLLING_NRFLAG)
 static inline int tsk_is_polling(struct task_struct *p)
 {
 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
 }
-static inline void current_set_polling(void)
+
+static inline void __current_set_polling(void)
 {
 	set_thread_flag(TIF_POLLING_NRFLAG);
 }
 
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+	__current_set_polling();
+
+	/*
+	 * Polling state must be visible before we test NEED_RESCHED,
+	 * paired by resched_task()
+	 *
+	 * XXX: assumes set/clear bit are identical barrier wise.
+	 */
+	smp_mb__after_clear_bit();
+
+	return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
 {
 	clear_thread_flag(TIF_POLLING_NRFLAG);
 }
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+	__current_clr_polling();
+
+	/*
+	 * Polling state must be visible before we test NEED_RESCHED,
+	 * paired by resched_task()
+	 */
+	smp_mb__after_clear_bit();
+
+	return unlikely(tif_need_resched());
+}
+
 #else
 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
-static inline void current_set_polling(void) { }
-static inline void current_clr_polling(void) { }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+	return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+	return unlikely(tif_need_resched());
+}
 #endif
 
 /*
@ -107,6 +107,8 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 #define set_need_resched()	set_thread_flag(TIF_NEED_RESCHED)
 #define clear_need_resched()	clear_thread_flag(TIF_NEED_RESCHED)
 
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+
 #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
 /*
  * An arch can define its own version of set_restore_sigmask() to get the
@ -171,4 +171,13 @@ static inline void snd_compr_fragment_elapsed(struct snd_compr_stream *stream)
 	wake_up(&stream->runtime->sleep);
 }
 
+static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
+{
+	if (snd_BUG_ON(!stream))
+		return;
+
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	wake_up(&stream->runtime->sleep);
+}
+
 #endif
37
ipc/shm.c
@ -208,15 +208,18 @@ static void shm_open(struct vm_area_struct *vma)
  */
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 {
+	struct file *shm_file;
+
+	shm_file = shp->shm_file;
+	shp->shm_file = NULL;
 	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	shm_rmid(ns, shp);
 	shm_unlock(shp);
-	if (!is_file_hugepages(shp->shm_file))
-		shmem_lock(shp->shm_file, 0, shp->mlock_user);
+	if (!is_file_hugepages(shm_file))
+		shmem_lock(shm_file, 0, shp->mlock_user);
 	else if (shp->mlock_user)
-		user_shm_unlock(file_inode(shp->shm_file)->i_size,
-				shp->mlock_user);
-	fput (shp->shm_file);
+		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
+	fput(shm_file);
 	ipc_rcu_putref(shp, shm_rcu_free);
 }
 
@ -974,15 +977,25 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 		ipc_lock_object(&shp->shm_perm);
 		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 			kuid_t euid = current_euid();
-			err = -EPERM;
 			if (!uid_eq(euid, shp->shm_perm.uid) &&
-			    !uid_eq(euid, shp->shm_perm.cuid))
+			    !uid_eq(euid, shp->shm_perm.cuid)) {
+				err = -EPERM;
 				goto out_unlock0;
-			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
+			}
+			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
+				err = -EPERM;
 				goto out_unlock0;
+			}
 		}
 
 		shm_file = shp->shm_file;
+
+		/* check if shm_destroy() is tearing down shp */
+		if (shm_file == NULL) {
+			err = -EIDRM;
+			goto out_unlock0;
+		}
+
 		if (is_file_hugepages(shm_file))
 			goto out_unlock0;
 
@ -1101,6 +1114,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
 		goto out_unlock;
 
 	ipc_lock_object(&shp->shm_perm);
+
+	/* check if shm_destroy() is tearing down shp */
+	if (shp->shm_file == NULL) {
+		ipc_unlock_object(&shp->shm_perm);
+		err = -EIDRM;
+		goto out_unlock;
+	}
+
 	path = shp->shm_file->f_path;
 	path_get(&path);
 	shp->shm_nattch++;
@ -44,7 +44,7 @@ static inline int cpu_idle_poll(void)
 	rcu_idle_enter();
 	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
-	while (!need_resched())
+	while (!tif_need_resched())
 		cpu_relax();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	rcu_idle_exit();
@ -92,8 +92,7 @@ static void cpu_idle_loop(void)
 			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
 				cpu_idle_poll();
 			} else {
-				current_clr_polling();
-				if (!need_resched()) {
+				if (!current_clr_polling_and_test()) {
 					stop_critical_timings();
 					rcu_idle_enter();
 					arch_cpu_idle();
@ -103,7 +102,7 @@ static void cpu_idle_loop(void)
 				} else {
 					local_irq_enable();
 				}
-				current_set_polling();
+				__current_set_polling();
 			}
 			arch_cpu_idle_exit();
 		}
||||||
@ -129,7 +128,7 @@ void cpu_startup_entry(enum cpuhp_state state)
|
|||||||
*/
|
*/
|
||||||
boot_init_stack_canary();
|
boot_init_stack_canary();
|
||||||
#endif
|
#endif
|
||||||
current_set_polling();
|
__current_set_polling();
|
||||||
arch_cpu_idle_prepare();
|
arch_cpu_idle_prepare();
|
||||||
cpu_idle_loop();
|
cpu_idle_loop();
|
||||||
}
|
}
|
||||||
|
@ -257,7 +257,8 @@ ok:
|
|||||||
if (task->mm)
|
if (task->mm)
|
||||||
dumpable = get_dumpable(task->mm);
|
dumpable = get_dumpable(task->mm);
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
|
if (dumpable != SUID_DUMP_USER &&
|
||||||
|
!ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
}
|
}
|
||||||
|
@ -1201,8 +1201,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
 	/*
 	 * Enable debugging if selected on the kernel commandline.
 	 */
-	if (slub_debug && (!slub_debug_slabs ||
-		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+	if (slub_debug && (!slub_debug_slabs || (name &&
+		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
 		flags |= slub_debug;
 
 	return flags;
@ -1407,9 +1407,9 @@ call_refreshresult(struct rpc_task *task)
 		return;
 	case -ETIMEDOUT:
 		rpc_delay(task, 3*HZ);
-	case -EKEYEXPIRED:
 	case -EAGAIN:
 		status = -EACCES;
+	case -EKEYEXPIRED:
 		if (!task->tk_cred_retry)
 			break;
 		task->tk_cred_retry--;
@ -391,8 +391,10 @@ static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen,
 	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
 }
 
-static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
+static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy)
 {
+	ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
+		int offset, size_t size, int flags);
 	struct page **ppage;
 	unsigned int remainder;
 	int err, sent = 0;
@ -401,6 +403,9 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
 	base += xdr->page_base;
 	ppage = xdr->pages + (base >> PAGE_SHIFT);
 	base &= ~PAGE_MASK;
+	do_sendpage = sock->ops->sendpage;
+	if (!zerocopy)
+		do_sendpage = sock_no_sendpage;
 	for(;;) {
 		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
 		int flags = XS_SENDMSG_FLAGS;
@ -408,7 +413,7 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
 		remainder -= len;
 		if (remainder != 0 || more)
 			flags |= MSG_MORE;
-		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
+		err = do_sendpage(sock, *ppage, base, len, flags);
 		if (remainder == 0 || err != len)
 			break;
 		sent += err;
@ -429,9 +434,10 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
  * @addrlen: UDP only -- length of destination address
  * @xdr: buffer containing this request
  * @base: starting position in the buffer
+ * @zerocopy: true if it is safe to use sendpage()
  *
  */
-static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
+static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy)
 {
 	unsigned int remainder = xdr->len - base;
 	int err, sent = 0;
@ -459,7 +465,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
 	if (base < xdr->page_len) {
 		unsigned int len = xdr->page_len - base;
 		remainder -= len;
-		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
+		err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy);
 		if (remainder == 0 || err != len)
 			goto out;
 		sent += err;
@ -562,7 +568,7 @@ static int xs_local_send_request(struct rpc_task *task)
 			req->rq_svec->iov_base, req->rq_svec->iov_len);
 
 	status = xs_sendpages(transport->sock, NULL, 0,
-						xdr, req->rq_bytes_sent);
+						xdr, req->rq_bytes_sent, true);
 	dprintk("RPC: %s(%u) = %d\n",
 			__func__, xdr->len - req->rq_bytes_sent, status);
 	if (likely(status >= 0)) {
@ -618,7 +624,7 @@ static int xs_udp_send_request(struct rpc_task *task)
 	status = xs_sendpages(transport->sock,
 			      xs_addr(xprt),
 			      xprt->addrlen, xdr,
-			      req->rq_bytes_sent);
+			      req->rq_bytes_sent, true);
 
 	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
 			xdr->len - req->rq_bytes_sent, status);
@ -689,6 +695,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 	struct xdr_buf *xdr = &req->rq_snd_buf;
+	bool zerocopy = true;
 	int status;
 
 	xs_encode_stream_record_marker(&req->rq_snd_buf);
@ -696,13 +703,20 @@ static int xs_tcp_send_request(struct rpc_task *task)
 	xs_pktdump("packet data:",
 				req->rq_svec->iov_base,
 				req->rq_svec->iov_len);
+	/* Don't use zero copy if this is a resend. If the RPC call
+	 * completes while the socket holds a reference to the pages,
+	 * then we may end up resending corrupted data.
+	 */
+	if (task->tk_flags & RPC_TASK_SENT)
+		zerocopy = false;
 
 	/* Continue transmitting the packet/record. We must be careful
 	 * to cope with writespace callbacks arriving _after_ we have
 	 * called sendmsg(). */
 	while (1) {
 		status = xs_sendpages(transport->sock,
-					NULL, 0, xdr, req->rq_bytes_sent);
+					NULL, 0, xdr, req->rq_bytes_sent,
+					zerocopy);
 
 		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
 				xdr->len - req->rq_bytes_sent, status);
@ -73,7 +73,6 @@ static struct ima_rule_entry default_rules[] = {
 	{.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
 	{.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
 	{.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
-	{.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
 	{.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
 	{.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
 	{.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
@ -668,14 +668,48 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
 		return -EPERM;
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 	if (!retval) {
-		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-		wake_up(&stream->runtime->sleep);
+		snd_compr_drain_notify(stream);
 		stream->runtime->total_bytes_available = 0;
 		stream->runtime->total_bytes_transferred = 0;
 	}
 	return retval;
 }
 
+static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
+{
+	int ret;
+
+	/*
+	 * We are called with lock held. So drop the lock while we wait for
+	 * drain complete notfication from the driver
+	 *
+	 * It is expected that driver will notify the drain completion and then
+	 * stream will be moved to SETUP state, even if draining resulted in an
+	 * error. We can trigger next track after this.
+	 */
+	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
+	mutex_unlock(&stream->device->lock);
+
+	/* we wait for drain to complete here, drain can return when
+	 * interruption occurred, wait returned error or success.
+	 * For the first two cases we don't do anything different here and
+	 * return after waking up
+	 */
+
+	ret = wait_event_interruptible(stream->runtime->sleep,
+			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
+	if (ret == -ERESTARTSYS)
+		pr_debug("wait aborted by a signal");
+	else if (ret)
+		pr_debug("wait for drain failed with %d\n", ret);
+
+
+	wake_up(&stream->runtime->sleep);
+	mutex_lock(&stream->device->lock);
+
+	return ret;
+}
+
 static int snd_compr_drain(struct snd_compr_stream *stream)
 {
 	int retval;
@ -683,12 +717,15 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
 	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
 			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
+
 	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
-	if (!retval) {
-		stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
+	if (retval) {
+		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
 		wake_up(&stream->runtime->sleep);
+		return retval;
 	}
-	return retval;
+
+	return snd_compress_wait_for_drain(stream);
 }
 
 static int snd_compr_next_track(struct snd_compr_stream *stream)
@ -724,9 +761,14 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 		return -EPERM;
 
 	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
+	if (retval) {
+		pr_debug("Partial drain returned failure\n");
+		wake_up(&stream->runtime->sleep);
+		return retval;
+	}
 
 	stream->next_track = false;
-	return retval;
+	return snd_compress_wait_for_drain(stream);
 }
 
 static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
@ -73,9 +73,11 @@
 #ifdef MSND_CLASSIC
 #  include "msnd_classic.h"
 #  define LOGNAME		"msnd_classic"
+#  define DEV_NAME		"msnd-classic"
 #else
 #  include "msnd_pinnacle.h"
 #  define LOGNAME		"snd_msnd_pinnacle"
+#  define DEV_NAME		"msnd-pinnacle"
 #endif
 
 static void set_default_audio_parameters(struct snd_msnd *chip)
@ -1068,8 +1070,6 @@ static int snd_msnd_isa_remove(struct device *pdev, unsigned int dev)
 	return 0;
 }
 
-#define DEV_NAME "msnd-pinnacle"
-
 static struct isa_driver snd_msnd_driver = {
 	.match		= snd_msnd_isa_match,
 	.probe		= snd_msnd_isa_probe,
@ -2636,9 +2636,6 @@ int snd_hda_codec_reset(struct hda_codec *codec)
 	cancel_delayed_work_sync(&codec->jackpoll_work);
 #ifdef CONFIG_PM
 	cancel_delayed_work_sync(&codec->power_work);
-	codec->power_on = 0;
-	codec->power_transition = 0;
-	codec->power_jiffies = jiffies;
 	flush_workqueue(bus->workq);
 #endif
 	snd_hda_ctls_clear(codec);
@ -787,10 +787,10 @@ static void set_pin_eapd(struct hda_codec *codec, hda_nid_t pin, bool enable)
 	if (spec->own_eapd_ctl ||
 	    !(snd_hda_query_pin_caps(codec, pin) & AC_PINCAP_EAPD))
 		return;
-	if (codec->inv_eapd)
-		enable = !enable;
 	if (spec->keep_eapd_on && !enable)
 		return;
+	if (codec->inv_eapd)
+		enable = !enable;
 	snd_hda_codec_update_cache(codec, pin, 0,
 				   AC_VERB_SET_EAPD_BTLENABLE,
 				   enable ? 0x02 : 0x00);
@ -1197,8 +1197,12 @@ static int alloc_ad_spec(struct hda_codec *codec)
 static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
 				     const struct hda_fixup *fix, int action)
 {
-	if (action == HDA_FIXUP_ACT_PRE_PROBE)
+	struct ad198x_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
 		codec->inv_jack_detect = 1;
+		spec->gen.keep_eapd_on = 1;
+	}
 }
 
 enum {
|
@ -3491,6 +3491,8 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
|
|||||||
.patch = patch_conexant_auto },
|
.patch = patch_conexant_auto },
|
||||||
{ .id = 0x14f15115, .name = "CX20757",
|
{ .id = 0x14f15115, .name = "CX20757",
|
||||||
.patch = patch_conexant_auto },
|
.patch = patch_conexant_auto },
|
||||||
|
{ .id = 0x14f151d7, .name = "CX20952",
|
||||||
|
.patch = patch_conexant_auto },
|
||||||
{} /* terminator */
|
{} /* terminator */
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -3517,6 +3519,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15111");
 MODULE_ALIAS("snd-hda-codec-id:14f15113");
 MODULE_ALIAS("snd-hda-codec-id:14f15114");
 MODULE_ALIAS("snd-hda-codec-id:14f15115");
+MODULE_ALIAS("snd-hda-codec-id:14f151d7");
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Conexant HD-audio codec");
@ -1046,6 +1046,7 @@ enum {
 	ALC880_FIXUP_UNIWILL,
 	ALC880_FIXUP_UNIWILL_DIG,
 	ALC880_FIXUP_Z71V,
+	ALC880_FIXUP_ASUS_W5A,
 	ALC880_FIXUP_3ST_BASE,
 	ALC880_FIXUP_3ST,
 	ALC880_FIXUP_3ST_DIG,
@ -1216,6 +1217,26 @@ static const struct hda_fixup alc880_fixups[] = {
 			{ }
 		}
 	},
+	[ALC880_FIXUP_ASUS_W5A] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			/* set up the whole pins as BIOS is utterly broken */
+			{ 0x14, 0x0121411f }, /* HP */
+			{ 0x15, 0x411111f0 }, /* N/A */
+			{ 0x16, 0x411111f0 }, /* N/A */
+			{ 0x17, 0x411111f0 }, /* N/A */
+			{ 0x18, 0x90a60160 }, /* mic */
+			{ 0x19, 0x411111f0 }, /* N/A */
+			{ 0x1a, 0x411111f0 }, /* N/A */
+			{ 0x1b, 0x411111f0 }, /* N/A */
+			{ 0x1c, 0x411111f0 }, /* N/A */
+			{ 0x1d, 0x411111f0 }, /* N/A */
+			{ 0x1e, 0xb743111e }, /* SPDIF out */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC880_FIXUP_GPIO1,
+	},
 	[ALC880_FIXUP_3ST_BASE] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@ -1337,6 +1358,7 @@ static const struct hda_fixup alc880_fixups[] = {
 
 static const struct snd_pci_quirk alc880_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1019, 0x0f69, "Coeus G610P", ALC880_FIXUP_W810),
+	SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS W5A", ALC880_FIXUP_ASUS_W5A),
 	SND_PCI_QUIRK(0x1043, 0x1964, "ASUS Z71V", ALC880_FIXUP_Z71V),
 	SND_PCI_QUIRK_VENDOR(0x1043, "ASUS", ALC880_FIXUP_GPIO1),
 	SND_PCI_QUIRK(0x1558, 0x5401, "Clevo GPIO2", ALC880_FIXUP_GPIO2),
@ -1482,6 +1504,7 @@ enum {
 	ALC260_FIXUP_KN1,
 	ALC260_FIXUP_FSC_S7020,
 	ALC260_FIXUP_FSC_S7020_JWSE,
+	ALC260_FIXUP_VAIO_PINS,
 };
 
 static void alc260_gpio1_automute(struct hda_codec *codec)
@ -1622,6 +1645,24 @@ static const struct hda_fixup alc260_fixups[] = {
 		.chained = true,
 		.chain_id = ALC260_FIXUP_FSC_S7020,
 	},
+	[ALC260_FIXUP_VAIO_PINS] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			/* Pin configs are missing completely on some VAIOs */
+			{ 0x0f, 0x01211020 },
+			{ 0x10, 0x0001003f },
+			{ 0x11, 0x411111f0 },
+			{ 0x12, 0x01a15930 },
+			{ 0x13, 0x411111f0 },
+			{ 0x14, 0x411111f0 },
+			{ 0x15, 0x411111f0 },
+			{ 0x16, 0x411111f0 },
+			{ 0x17, 0x411111f0 },
+			{ 0x18, 0x411111f0 },
+			{ 0x19, 0x411111f0 },
+			{ }
+		}
+	},
 };
 
 static const struct snd_pci_quirk alc260_fixup_tbl[] = {
@ -1630,6 +1671,8 @@ static const struct snd_pci_quirk alc260_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_FIXUP_GPIO1),
 	SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750),
 	SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900),
+	SND_PCI_QUIRK(0x104d, 0x81bb, "Sony VAIO", ALC260_FIXUP_VAIO_PINS),
+	SND_PCI_QUIRK(0x104d, 0x81e2, "Sony VAIO TX", ALC260_FIXUP_HP_PIN_0F),
 	SND_PCI_QUIRK(0x10cf, 0x1326, "FSC LifeBook S7020", ALC260_FIXUP_FSC_S7020),
 	SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1),
 	SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1),
@ -2389,6 +2432,7 @@ static const struct hda_verb alc268_beep_init_verbs[] = {
 enum {
 	ALC268_FIXUP_INV_DMIC,
 	ALC268_FIXUP_HP_EAPD,
+	ALC268_FIXUP_SPDIF,
 };
 
 static const struct hda_fixup alc268_fixups[] = {
@ -2403,6 +2447,13 @@ static const struct hda_fixup alc268_fixups[] = {
 			{}
 		}
 	},
+	[ALC268_FIXUP_SPDIF] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1e, 0x014b1180 }, /* enable SPDIF out */
+			{}
+		}
+	},
 };
 
 static const struct hda_model_fixup alc268_fixup_models[] = {
@ -2412,6 +2463,7 @@ static const struct hda_model_fixup alc268_fixup_models[] = {
 };
 
 static const struct snd_pci_quirk alc268_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x1025, 0x0139, "Acer TravelMate 6293", ALC268_FIXUP_SPDIF),
 	SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC),
 	/* below is codec SSID since multiple Toshiba laptops have the
 	 * same PCI SSID 1179:ff00
@ -2541,6 +2593,7 @@ enum {
 	ALC269_TYPE_ALC283,
 	ALC269_TYPE_ALC284,
 	ALC269_TYPE_ALC286,
+	ALC269_TYPE_ALC255,
 };
 
 /*
@ -2565,6 +2618,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
 	case ALC269_TYPE_ALC269VD:
 	case ALC269_TYPE_ALC282:
 	case ALC269_TYPE_ALC286:
+	case ALC269_TYPE_ALC255:
 	case ALC269_TYPE_ALC283:
 		ssids = alc269_ssids;
 		break;
@ -2826,6 +2880,23 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
 	snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
 }
 
+/* Make sure the led works even in runtime suspend */
+static unsigned int led_power_filter(struct hda_codec *codec,
+				     hda_nid_t nid,
+				     unsigned int power_state)
+{
+	struct alc_spec *spec = codec->spec;
+
+	if (power_state != AC_PWRST_D3 || nid != spec->mute_led_nid)
+		return power_state;
+
+	/* Set pin ctl again, it might have just been set to 0 */
+	snd_hda_set_pin_ctl(codec, nid,
+			    snd_hda_codec_get_pin_target(codec, nid));
+
+	return AC_PWRST_D0;
+}
+
 static void alc269_fixup_hp_mute_led(struct hda_codec *codec,
 				    const struct hda_fixup *fix, int action)
 {
@ -2845,6 +2916,7 @@ static void alc269_fixup_hp_mute_led(struct hda_codec *codec,
 		spec->mute_led_nid = pin - 0x0a + 0x18;
 		spec->gen.vmaster_mute.hook = alc269_fixup_mic_mute_hook;
 		spec->gen.vmaster_mute_enum = 1;
+		codec->power_filter = led_power_filter;
 		snd_printd("Detected mute LED for %x:%d\n", spec->mute_led_nid,
 			   spec->mute_led_polarity);
 		break;
@ -2860,6 +2932,7 @@ static void alc269_fixup_hp_mute_led_mic1(struct hda_codec *codec,
 		spec->mute_led_nid = 0x18;
 		spec->gen.vmaster_mute.hook = alc269_fixup_mic_mute_hook;
 		spec->gen.vmaster_mute_enum = 1;
+		codec->power_filter = led_power_filter;
 	}
 }
 
@ -2872,6 +2945,7 @@ static void alc269_fixup_hp_mute_led_mic2(struct hda_codec *codec,
 		spec->mute_led_nid = 0x19;
 		spec->gen.vmaster_mute.hook = alc269_fixup_mic_mute_hook;
 		spec->gen.vmaster_mute_enum = 1;
+		codec->power_filter = led_power_filter;
 	}
 }
 
@ -3112,8 +3186,10 @@ static void alc_update_headset_mode(struct hda_codec *codec)
 	else
 		new_headset_mode = ALC_HEADSET_MODE_HEADPHONE;
 
-	if (new_headset_mode == spec->current_headset_mode)
+	if (new_headset_mode == spec->current_headset_mode) {
+		snd_hda_gen_update_outputs(codec);
 		return;
+	}
 
 	switch (new_headset_mode) {
 	case ALC_HEADSET_MODE_UNPLUGGED:
@ -3642,6 +3718,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
 	SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
@ -3867,6 +3944,9 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0286:
 		spec->codec_variant = ALC269_TYPE_ALC286;
 		break;
+	case 0x10ec0255:
+		spec->codec_variant = ALC269_TYPE_ALC255;
+		break;
 	}
 
 	/* automatic parse from the BIOS config */
@ -4575,6 +4655,7 @@ static int patch_alc680(struct hda_codec *codec)
 static const struct hda_codec_preset snd_hda_preset_realtek[] = {
 	{ .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
 	{ .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
+	{ .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
 	{ .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
 	{ .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
 	{ .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
@ -101,7 +101,7 @@ static int usb6fire_chip_probe(struct usb_interface *intf,
 			usb_set_intfdata(intf, chips[i]);
 			mutex_unlock(&register_mutex);
 			return 0;
-		} else if (regidx < 0)
+		} else if (!devices[i] && regidx < 0)
 			regidx = i;
 	}
 	if (regidx < 0) {
@ -103,6 +103,10 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		while ((gfn << PAGE_SHIFT) & (page_size - 1))
 			page_size >>= 1;
 
+		/* Make sure hva is aligned to the page size we want to map */
+		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
+			page_size >>= 1;
+
 		/*
 		 * Pin all pages we are about to map in memory. This is
 		 * important because we unmap and unpin in 4kb steps later.