Updated from Linux LTS 3.10.22 to 3.10.23

commit 943c6a9f68 (parent 74577147bc)
Author: Nathan
Date: 2025-04-09 17:19:07 -05:00

64 changed files with 510 additions and 466 deletions

View File

@@ -571,9 +571,6 @@ tcp_limit_output_bytes - INTEGER
     typical pfifo_fast qdiscs.
     tcp_limit_output_bytes limits the number of bytes on qdisc
     or device to reduce artificial RTT/cwnd and reduce bufferbloat.
-    Note: For GSO/TSO enabled flows, we try to have at least two
-    packets in flight. Reducing tcp_limit_output_bytes might also
-    reduce the size of individual GSO packet (64KB being the max)
     Default: 131072

 tcp_challenge_ack_limit - INTEGER

View File

@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 22
+SUBLEVEL = 23
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish

View File

@@ -741,9 +741,17 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
     q->sg_reserved_size = INT_MAX;

+    /* Protect q->elevator from elevator_change */
+    mutex_lock(&q->sysfs_lock);
+
     /* init elevator */
-    if (elevator_init(q, NULL))
+    if (elevator_init(q, NULL)) {
+        mutex_unlock(&q->sysfs_lock);
         return NULL;
+    }
+
+    mutex_unlock(&q->sysfs_lock);
+
     return q;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);

View File

@@ -186,6 +186,12 @@ int elevator_init(struct request_queue *q, char *name)
     struct elevator_type *e = NULL;
     int err;

+    /*
+     * q->sysfs_lock must be held to provide mutual exclusion between
+     * elevator_switch() and here.
+     */
+    lockdep_assert_held(&q->sysfs_lock);
+
     if (unlikely(q->elevator))
         return 0;
@@ -959,7 +965,7 @@ fail_init:
 /*
  * Switch this queue to the given IO scheduler.
  */
-int elevator_change(struct request_queue *q, const char *name)
+static int __elevator_change(struct request_queue *q, const char *name)
 {
     char elevator_name[ELV_NAME_MAX];
     struct elevator_type *e;
@@ -981,6 +987,18 @@ int elevator_change(struct request_queue *q, const char *name)
     return elevator_switch(q, e);
 }

+int elevator_change(struct request_queue *q, const char *name)
+{
+    int ret;
+
+    /* Protect q->elevator from elevator_init() */
+    mutex_lock(&q->sysfs_lock);
+    ret = __elevator_change(q, name);
+    mutex_unlock(&q->sysfs_lock);
+
+    return ret;
+}
 EXPORT_SYMBOL(elevator_change);

 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
@@ -991,7 +1009,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
     if (!q->elevator)
         return count;

-    ret = elevator_change(q, name);
+    ret = __elevator_change(q, name);
     if (!ret)
         return count;

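The two hunks above fix a race by making one lock cover both paths that touch q->elevator: initialization takes q->sysfs_lock directly, and the public elevator_change() becomes a locked wrapper around an internal helper. Below is a minimal userspace model of that pattern, using a pthread mutex in place of the kernel mutex; all names are illustrative stand-ins, not the kernel API.

/* Public wrapper takes the lock; the internal helper assumes it is held. */
#include <pthread.h>
#include <stdio.h>

struct queue {
    pthread_mutex_t sysfs_lock;
    const char *elevator;            /* models q->elevator */
};

/* Caller must hold q->sysfs_lock (the kernel asserts this with lockdep). */
static int elevator_init_locked(struct queue *q, const char *name)
{
    if (q->elevator)                 /* already initialized: nothing to do */
        return 0;
    q->elevator = name ? name : "noop";
    return 0;
}

static int __elevator_change(struct queue *q, const char *name)
{
    q->elevator = name;              /* switch scheduler; lock already held */
    return 0;
}

/* Public entry point: takes the lock so init and change serialize. */
static int elevator_change(struct queue *q, const char *name)
{
    pthread_mutex_lock(&q->sysfs_lock);
    int ret = __elevator_change(q, name);
    pthread_mutex_unlock(&q->sysfs_lock);
    return ret;
}

int main(void)
{
    struct queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };

    pthread_mutex_lock(&q.sysfs_lock);
    elevator_init_locked(&q, NULL);  /* init under the lock, as in blk-core.c */
    pthread_mutex_unlock(&q.sysfs_lock);

    elevator_change(&q, "deadline");
    printf("elevator: %s\n", q.elevator);
    return 0;
}

The sysfs store path calls the unlocked __elevator_change() because sysfs already holds the lock, mirroring the last hunk above.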
View File

@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
     struct hash_ctx *ctx = ask->private;
     int err;

+    if (flags & MSG_SENDPAGE_NOTLAST)
+        flags |= MSG_MORE;
+
     lock_sock(sk);
     sg_init_table(ctx->sgl.sg, 1);
     sg_set_page(ctx->sgl.sg, page, size, offset);

View File

@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
     struct skcipher_sg_list *sgl;
     int err = -EINVAL;

+    if (flags & MSG_SENDPAGE_NOTLAST)
+        flags |= MSG_MORE;
+
     lock_sock(sk);
     if (!ctx->more && ctx->used)
         goto unlock;

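Both algif fixes address the same problem: ->sendpage() is invoked once per page during a splice, and intermediate pages carry MSG_SENDPAGE_NOTLAST rather than MSG_MORE, yet the hash/skcipher code decides whether to finalize the operation based on MSG_MORE alone. The sketch below models that flag translation; the flag values mirror the kernel's definitions in 3.10, but the decision function is an illustrative model, not kernel code.

#include <stdio.h>

#define MSG_MORE             0x8000
#define MSG_SENDPAGE_NOTLAST 0x20000   /* kernel-internal sendpage flag */

static int would_finalize(int flags)
{
    if (flags & MSG_SENDPAGE_NOTLAST)
        flags |= MSG_MORE;             /* the fix applied in both hunks */
    return !(flags & MSG_MORE);        /* finalize only on the last chunk */
}

int main(void)
{
    /* middle page of a splice: must NOT finalize the hash/cipher */
    printf("middle page finalizes: %d\n", would_finalize(MSG_SENDPAGE_NOTLAST));
    /* last page: no flags set, finalize now */
    printf("last page finalizes:   %d\n", would_finalize(0));
    return 0;
}

Without the mapping, every intermediate page looked like the end of the message, so a multi-page splice into an AF_ALG socket produced a digest or ciphertext of only the first page.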
View File

@@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev)
     tmp = dev_get_by_name(&init_net, tname);    /* jhs: was "tmp = dev_get(tname);" */
     if (tmp) {
         memcpy(card->atmdev->esi, tmp->dev_addr, 6);
-
+        dev_put(tmp);
         printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
     }
     /*

View File

@@ -32,11 +32,23 @@
 #include <linux/atomic.h>
 #include <linux/pid_namespace.h>

-#include <asm/unaligned.h>
-
 #include <linux/cn_proc.h>

-#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
+/*
+ * Size of a cn_msg followed by a proc_event structure. Since the
+ * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
+ * add one 4-byte word to the size here, and then start the actual
+ * cn_msg structure 4 bytes into the stack buffer. The result is that
+ * the immediately following proc_event structure is aligned to 8 bytes.
+ */
+#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
+
+/* See comment above; we test our assumption about sizeof struct cn_msg here. */
+static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
+{
+    BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
+    return (struct cn_msg *)(buffer + 4);
+}

 static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
 static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
@@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task)
 {
     struct cn_msg *msg;
     struct proc_event *ev;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
     struct timespec ts;
     struct task_struct *parent;

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     get_seq(&msg->seq, &ev->cpu);
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);
     ev->what = PROC_EVENT_FORK;
     rcu_read_lock();
     parent = rcu_dereference(task->real_parent);
@@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task)
     struct cn_msg *msg;
     struct proc_event *ev;
     struct timespec ts;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     get_seq(&msg->seq, &ev->cpu);
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);
     ev->what = PROC_EVENT_EXEC;
     ev->event_data.exec.process_pid = task->pid;
     ev->event_data.exec.process_tgid = task->tgid;
@@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id)
 {
     struct cn_msg *msg;
     struct proc_event *ev;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
     struct timespec ts;
     const struct cred *cred;

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     ev->what = which_id;
@@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
     rcu_read_unlock();
     get_seq(&msg->seq, &ev->cpu);
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);

     memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
     msg->ack = 0; /* not used */
@@ -159,17 +171,17 @@ void proc_sid_connector(struct task_struct *task)
     struct cn_msg *msg;
     struct proc_event *ev;
     struct timespec ts;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     get_seq(&msg->seq, &ev->cpu);
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);
     ev->what = PROC_EVENT_SID;
     ev->event_data.sid.process_pid = task->pid;
     ev->event_data.sid.process_tgid = task->tgid;
@@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
     struct cn_msg *msg;
     struct proc_event *ev;
     struct timespec ts;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     get_seq(&msg->seq, &ev->cpu);
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);
     ev->what = PROC_EVENT_PTRACE;
     ev->event_data.ptrace.process_pid = task->pid;
     ev->event_data.ptrace.process_tgid = task->tgid;
@@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task)
     struct cn_msg *msg;
     struct proc_event *ev;
     struct timespec ts;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     get_seq(&msg->seq, &ev->cpu);
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);
     ev->what = PROC_EVENT_COMM;
     ev->event_data.comm.process_pid = task->pid;
     ev->event_data.comm.process_tgid = task->tgid;
@@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task)
 {
     struct cn_msg *msg;
     struct proc_event *ev;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
     struct timespec ts;

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     get_seq(&msg->seq, &ev->cpu);
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);
     ev->what = PROC_EVENT_COREDUMP;
     ev->event_data.coredump.process_pid = task->pid;
     ev->event_data.coredump.process_tgid = task->tgid;
@@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task)
 {
     struct cn_msg *msg;
     struct proc_event *ev;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
     struct timespec ts;

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     get_seq(&msg->seq, &ev->cpu);
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);
     ev->what = PROC_EVENT_EXIT;
     ev->event_data.exit.process_pid = task->pid;
     ev->event_data.exit.process_tgid = task->tgid;
@@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 {
     struct cn_msg *msg;
     struct proc_event *ev;
-    __u8 buffer[CN_PROC_MSG_SIZE];
+    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
     struct timespec ts;

     if (atomic_read(&proc_event_num_listeners) < 1)
         return;

-    msg = (struct cn_msg *)buffer;
+    msg = buffer_to_cn_msg(buffer);
     ev = (struct proc_event *)msg->data;
     memset(&ev->event_data, 0, sizeof(ev->event_data));
     msg->seq = rcvd_seq;
     ktime_get_ts(&ts); /* get high res monotonic timestamp */
-    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->timestamp_ns = timespec_to_ns(&ts);
     ev->cpu = -1;
     ev->what = PROC_EVENT_NONE;
     ev->event_data.ack.err = err;

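The trick here: struct cn_msg is 20 bytes, so starting it 4 bytes into an 8-byte-aligned buffer puts its payload (the proc_event) at offset 24, which is 8-byte aligned, letting the 64-bit timestamp be stored directly instead of via put_unaligned(). The standalone check below demonstrates the arithmetic; the struct is a simplified 20-byte stand-in for the real UAPI layout.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct cn_msg_model {               /* 20 bytes, like the real struct cn_msg */
    uint32_t idx, val, seq, ack;
    uint16_t len, flags;
    /* payload (proc_event) follows immediately */
};

int main(void)
{
    _Alignas(8) uint8_t buffer[64]; /* models the __aligned(8) stack buffer */

    static_assert(sizeof(struct cn_msg_model) == 20, "layout assumption");

    struct cn_msg_model *msg = (struct cn_msg_model *)(buffer + 4);
    uintptr_t payload = (uintptr_t)(msg + 1);

    printf("payload offset into buffer: %zu (8-aligned: %s)\n",
           (size_t)(payload - (uintptr_t)buffer),
           (payload % 8 == 0) ? "yes" : "no");
    return 0;
}

On architectures that trap on unaligned 64-bit stores, the previous layout (cn_msg at offset 0, payload at offset 20) made the timestamp write unaligned; shifting the message by 4 bytes removes the problem without changing the wire format.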
View File

@@ -24,6 +24,7 @@
  * Authors: Christian König
  */
 #include <linux/hdmi.h>
+#include <linux/gcd.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
@@ -57,35 +58,57 @@ enum r600_hdmi_iec_status_bits {
 static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
     /*       32kHz   44.1kHz  48kHz    */
     /* Clock      N     CTS      N     CTS      N     CTS */
-    {  25175,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
+    {  25175,  4096,  25175, 28224, 125875,  6144,  25175 }, /*  25,20/1.001 MHz */
     {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
     {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
     {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
     {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
     {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
-    {  74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
+    {  74176,  4096,  74176,  5733,  75335,  6144,  74176 }, /*  74.25/1.001 MHz */
     {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
-    { 148352, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
+    { 148352,  4096, 148352,  5733, 150670,  6144, 148352 }, /* 148.50/1.001 MHz */
     { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
-    {      0,  4096,      0,  6272,      0,  6144,      0 }  /* Other */
 };

 /*
- * calculate CTS value if it's not found in the table
+ * calculate CTS and N values if they are not found in the table
  */
-static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
 {
-    u64 n;
-    u32 d;
+    int n, cts;
+    unsigned long div, mul;

-    if (*CTS == 0) {
-        n = (u64)clock * (u64)N * 1000ULL;
-        d = 128 * freq;
-        do_div(n, d);
-        *CTS = n;
-    }
-    DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
-          N, *CTS, freq);
+    /* Safe, but overly large values */
+    n = 128 * freq;
+    cts = clock * 1000;
+
+    /* Smallest valid fraction */
+    div = gcd(n, cts);
+
+    n /= div;
+    cts /= div;
+
+    /*
+     * The optimal N is 128*freq/1000. Calculate the closest larger
+     * value that doesn't truncate any bits.
+     */
+    mul = ((128*freq/1000) + (n-1))/n;
+
+    n *= mul;
+    cts *= mul;
+
+    /* Check that we are in spec (not always possible) */
+    if (n < (128*freq/1500))
+        printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+    if (n > (128*freq/300))
+        printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+
+    *N = n;
+    *CTS = cts;
+
+    DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+          *N, *CTS, freq);
 }

 struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
@@ -93,15 +116,16 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
     struct radeon_hdmi_acr res;
     u8 i;

-    for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
-         r600_hdmi_predefined_acr[i].clock != 0; i++)
-        ;
-    res = r600_hdmi_predefined_acr[i];
+    /* Precalculated values for common clocks */
+    for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
+        if (r600_hdmi_predefined_acr[i].clock == clock)
+            return r600_hdmi_predefined_acr[i];
+    }

-    /* In case some CTS are missing */
-    r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
-    r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
-    r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+    /* And odd clocks get manually calculated */
+    r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+    r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+    r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);

     return res;
 }

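The HDMI ACR relation CTS = (clock × N) / (128 × freq) must hold exactly, so the new code starts from the trivially exact fraction n = 128×freq, cts = clock×1000, reduces it by the gcd, then scales back up toward the optimal N = 128×freq/1000. The standalone check below reproduces that derivation outside the kernel and verifies it against the updated table rows; compile with gcc and run.

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
    while (b) { unsigned long t = a % b; a = b; b = t; }
    return a;
}

static void calc_acr(unsigned int clock_khz, int freq, int *N, int *CTS)
{
    int n = 128 * freq;             /* safe but overly large N */
    int cts = clock_khz * 1000;     /* matching overly large CTS */
    unsigned long div = gcd(n, cts);
    unsigned long mul;

    n /= div;                       /* smallest exact fraction */
    cts /= div;

    /* scale toward the optimal N = 128*freq/1000 without losing exactness */
    mul = ((128 * freq / 1000) + (n - 1)) / n;
    *N = n * mul;
    *CTS = cts * mul;
}

int main(void)
{
    int N, CTS;
    calc_acr(25175, 44100, &N, &CTS);
    printf("25.175 MHz @ 44.1 kHz -> N=%d CTS=%d (table: 28224/125875)\n",
           N, CTS);
    calc_acr(25175, 48000, &N, &CTS);
    printf("25.175 MHz @ 48 kHz   -> N=%d CTS=%d (table: 6144/25175)\n",
           N, CTS);
    return 0;
}

For 44.1 kHz at a 25.175 MHz clock this yields exactly N=28224, CTS=125875, matching the first corrected table row; the old table entries were not exact, which is what caused audio problems at the *1.001 video clocks.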
View File

@@ -47,6 +47,7 @@
 #define DFP_RDESC_ORIG_SIZE    97
 #define FV_RDESC_ORIG_SIZE    130
 #define MOMO_RDESC_ORIG_SIZE   87
+#define MOMO2_RDESC_ORIG_SIZE  87

 /* Fixed report descriptors for Logitech Driving Force (and Pro)
  * wheel controllers
@@ -284,6 +285,54 @@ static __u8 momo_rdesc_fixed[] = {
 0xC0                /* End Collection */
 };

+static __u8 momo2_rdesc_fixed[] = {
+0x05, 0x01,         /* Usage Page (Desktop),       */
+0x09, 0x04,         /* Usage (Joystik),            */
+0xA1, 0x01,         /* Collection (Application),   */
+0xA1, 0x02,         /* Collection (Logical),       */
+0x95, 0x01,         /* Report Count (1),           */
+0x75, 0x0A,         /* Report Size (10),           */
+0x15, 0x00,         /* Logical Minimum (0),        */
+0x26, 0xFF, 0x03,   /* Logical Maximum (1023),     */
+0x35, 0x00,         /* Physical Minimum (0),       */
+0x46, 0xFF, 0x03,   /* Physical Maximum (1023),    */
+0x09, 0x30,         /* Usage (X),                  */
+0x81, 0x02,         /* Input (Variable),           */
+0x95, 0x0A,         /* Report Count (10),          */
+0x75, 0x01,         /* Report Size (1),            */
+0x25, 0x01,         /* Logical Maximum (1),        */
+0x45, 0x01,         /* Physical Maximum (1),       */
+0x05, 0x09,         /* Usage Page (Button),        */
+0x19, 0x01,         /* Usage Minimum (01h),        */
+0x29, 0x0A,         /* Usage Maximum (0Ah),        */
+0x81, 0x02,         /* Input (Variable),           */
+0x06, 0x00, 0xFF,   /* Usage Page (FF00h),         */
+0x09, 0x00,         /* Usage (00h),                */
+0x95, 0x04,         /* Report Count (4),           */
+0x81, 0x02,         /* Input (Variable),           */
+0x95, 0x01,         /* Report Count (1),           */
+0x75, 0x08,         /* Report Size (8),            */
+0x26, 0xFF, 0x00,   /* Logical Maximum (255),      */
+0x46, 0xFF, 0x00,   /* Physical Maximum (255),     */
+0x09, 0x01,         /* Usage (01h),                */
+0x81, 0x02,         /* Input (Variable),           */
+0x05, 0x01,         /* Usage Page (Desktop),       */
+0x09, 0x31,         /* Usage (Y),                  */
+0x81, 0x02,         /* Input (Variable),           */
+0x09, 0x32,         /* Usage (Z),                  */
+0x81, 0x02,         /* Input (Variable),           */
+0x06, 0x00, 0xFF,   /* Usage Page (FF00h),         */
+0x09, 0x00,         /* Usage (00h),                */
+0x81, 0x02,         /* Input (Variable),           */
+0xC0,               /* End Collection,             */
+0xA1, 0x02,         /* Collection (Logical),       */
+0x09, 0x02,         /* Usage (02h),                */
+0x95, 0x07,         /* Report Count (7),           */
+0x91, 0x02,         /* Output (Variable),          */
+0xC0,               /* End Collection,             */
+0xC0                /* End Collection              */
+};
+
 /*
  * Certain Logitech keyboards send in report #3 keys which are far
  * above the logical maximum described in descriptor. This extends
@@ -343,6 +392,15 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
         }
         break;

+    case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+        if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
+            hid_info(hdev,
+                "fixing up Logitech Momo Racing Force (Black) report descriptor\n");
+            rdesc = momo2_rdesc_fixed;
+            *rsize = sizeof(momo2_rdesc_fixed);
+        }
+        break;
+
     case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
         if (*rsize == FV_RDESC_ORIG_SIZE) {
             hid_info(hdev,

View File

@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
     int offset;

     BUG_ON(!domain->pgd);
-    BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+
+    if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+        /* Address beyond IOMMU's addressing capabilities. */
+        return NULL;
+
     parent = domain->pgd;

     while (level > 0) {

View File

@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
     if (disable_irq_remap)
         return 0;
     if (irq_remap_broken) {
-        WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
-               "This system BIOS has enabled interrupt remapping\n"
-               "on a chipset that contains an erratum making that\n"
-               "feature unstable.  To maintain system stability\n"
-               "interrupt remapping is being disabled.  Please\n"
-               "contact your BIOS vendor for an update\n");
+        printk(KERN_WARNING
+            "This system BIOS has enabled interrupt remapping\n"
+            "on a chipset that contains an erratum making that\n"
+            "feature unstable.  To maintain system stability\n"
+            "interrupt remapping is being disabled.  Please\n"
+            "contact your BIOS vendor for an update\n");
+        add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
         disable_irq_remap = 1;
         return 0;
     }

View File

@@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
             spin_unlock_irqrestore(&card->isdnloop_lock, flags);
             return -ENOMEM;
         }
-        for (i = 0; i < 3; i++)
-            strcpy(card->s0num[i], sdef.num[i]);
+        for (i = 0; i < 3; i++) {
+            strlcpy(card->s0num[i], sdef.num[i],
+                sizeof(card->s0num[0]));
+        }
         break;
     case ISDN_PTYPE_1TR6:
         if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
@@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
             spin_unlock_irqrestore(&card->isdnloop_lock, flags);
             return -ENOMEM;
         }
-        strcpy(card->s0num[0], sdef.num[0]);
+        strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
         card->s0num[1][0] = '\0';
         card->s0num[2][0] = '\0';
         break;

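The point of the change above is that sdef.num[] comes from an ioctl, so user-controlled strings of arbitrary length were strcpy()'d into fixed-size s0num buffers. A bounded copy truncates and NUL-terminates instead. strlcpy() is a BSD/kernel function absent from glibc, so the userspace sketch below includes a minimal local equivalent; buffer sizes are illustrative.

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);
    if (size) {
        size_t n = (len >= size) ? size - 1 : len;
        memcpy(dst, src, n);
        dst[n] = '\0';               /* always NUL-terminated */
    }
    return len;  /* length it tried to create, as strlcpy reports */
}

int main(void)
{
    char s0num[3][16];                               /* models card->s0num */
    const char *evil = "0123456789ABCDEFOVERFLOW";   /* longer than 16 */

    my_strlcpy(s0num[0], evil, sizeof(s0num[0]));
    printf("stored: \"%s\" (%zu bytes incl. NUL)\n",
           s0num[0], strlen(s0num[0]) + 1);
    return 0;
}

With the old strcpy(), the same input would have written past s0num[0] into the adjacent array entries and whatever followed them.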
View File

@@ -784,7 +784,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
  * Otherwise we don't understand what happened, so abort.
  */
 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
-    struct mmc_blk_request *brq, int *ecc_err)
+    struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
 {
     bool prev_cmd_status_valid = true;
     u32 status, stop_status = 0;
@@ -822,6 +822,16 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
         (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
         *ecc_err = 1;

+    /* Flag General errors */
+    if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+        if ((status & R1_ERROR) ||
+            (brq->stop.resp[0] & R1_ERROR)) {
+            pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+                   req->rq_disk->disk_name, __func__,
+                   brq->stop.resp[0], status);
+            *gen_err = 1;
+        }
+
     /*
      * Check the current card state.  If it is in some data transfer
      * mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -841,6 +851,13 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
             return ERR_ABORT;
         if (stop_status & R1_CARD_ECC_FAILED)
             *ecc_err = 1;
+        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+            if (stop_status & R1_ERROR) {
+                pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+                       req->rq_disk->disk_name, __func__,
+                       stop_status);
+                *gen_err = 1;
+            }
     }

     /* Check for set block count errors */
@@ -1084,7 +1101,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
                             mmc_active);
     struct mmc_blk_request *brq = &mq_mrq->brq;
     struct request *req = mq_mrq->req;
-    int ecc_err = 0;
+    int ecc_err = 0, gen_err = 0;

     /*
      * sbc.error indicates a problem with the set block count
@@ -1098,7 +1115,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
      */
     if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
         brq->data.error) {
-        switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+        switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
         case ERR_RETRY:
             return MMC_BLK_RETRY;
         case ERR_ABORT:
@@ -1130,6 +1147,14 @@ static int mmc_blk_err_check(struct mmc_card *card,
         u32 status;
         unsigned long timeout;

+        /* Check stop command response */
+        if (brq->stop.resp[0] & R1_ERROR) {
+            pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+                   req->rq_disk->disk_name, __func__,
+                   brq->stop.resp[0]);
+            gen_err = 1;
+        }
+
         timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
         do {
             int err = get_card_status(card, &status, 5);
@@ -1139,6 +1164,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
                 return MMC_BLK_CMD_ERR;
             }

+            if (status & R1_ERROR) {
+                pr_err("%s: %s: general error sending status command, card status %#x\n",
+                       req->rq_disk->disk_name, __func__,
+                       status);
+                gen_err = 1;
+            }
+
             /* Timeout if the device never becomes ready for data
              * and never leaves the program state.
              */
@@ -1158,6 +1190,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
              (R1_CURRENT_STATE(status) == R1_STATE_PRG));
     }

+    /* if general error occurs, retry the write operation. */
+    if (gen_err) {
+        pr_warn("%s: retrying write for general error\n",
+            req->rq_disk->disk_name);
+        return MMC_BLK_RETRY;
+    }
+
     if (brq->data.error) {
         pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
                req->rq_disk->disk_name, brq->data.error,

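The thread running through all these hunks: any R1_ERROR bit observed in a stop or status response during a write marks the request with a "general error", and a marked write is retried instead of being reported as successful. A minimal model of that decision is sketched below; the enum and function names are illustrative, while R1_ERROR's bit position matches the MMC definition.

#include <stdio.h>

#define R1_ERROR (1u << 19)       /* generic card error bit in R1 status */

enum blk_result { BLK_SUCCESS, BLK_RETRY };

static enum blk_result check_write(unsigned int stop_resp,
                                   unsigned int status)
{
    int gen_err = 0;

    if (stop_resp & R1_ERROR)     /* error reported while stopping */
        gen_err = 1;
    if (status & R1_ERROR)        /* error reported while polling */
        gen_err = 1;

    return gen_err ? BLK_RETRY : BLK_SUCCESS;
}

int main(void)
{
    printf("clean write:   %s\n",
           check_write(0, 0) == BLK_SUCCESS ? "success" : "retry");
    printf("flagged write: %s\n",
           check_write(R1_ERROR, 0) == BLK_RETRY ? "retry" : "success");
    return 0;
}

The check is restricted to non-SPI hosts and non-READ requests in the recovery path because only writes can be silently corrupted this way.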
View File

@@ -537,8 +537,9 @@ static ssize_t bonding_store_arp_interval(struct device *d,
         goto out;
     }
     if (bond->params.mode == BOND_MODE_ALB ||
-        bond->params.mode == BOND_MODE_TLB) {
-        pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
+        bond->params.mode == BOND_MODE_TLB ||
+        bond->params.mode == BOND_MODE_8023AD) {
+        pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
             bond->dev->name, bond->dev->name);
         ret = -EINVAL;
         goto out;
@@ -696,6 +697,8 @@ static ssize_t bonding_store_downdelay(struct device *d,
     int new_value, ret = count;
     struct bonding *bond = to_bond(d);

+    if (!rtnl_trylock())
+        return restart_syscall();
     if (!(bond->params.miimon)) {
         pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
                bond->dev->name);
@@ -729,6 +732,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
     }

 out:
+    rtnl_unlock();
     return ret;
 }
 static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
@@ -751,6 +755,8 @@ static ssize_t bonding_store_updelay(struct device *d,
     int new_value, ret = count;
     struct bonding *bond = to_bond(d);

+    if (!rtnl_trylock())
+        return restart_syscall();
     if (!(bond->params.miimon)) {
         pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
                bond->dev->name);
@@ -784,6 +790,7 @@ static ssize_t bonding_store_updelay(struct device *d,
     }

 out:
+    rtnl_unlock();
     return ret;
 }
 static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,

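The rtnl_trylock()/restart_syscall() pair above is a standard kernel idiom: a sysfs store handler must not block waiting for RTNL, because another task may hold RTNL while waiting on sysfs, so on contention the handler backs out and the syscall is transparently restarted. The userspace sketch below models the shape of that pattern with pthreads standing in for RTNL and EAGAIN standing in for restart_syscall(); all names are illustrative.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static int store_downdelay(int new_value, int *downdelay)
{
    if (pthread_mutex_trylock(&rtnl) != 0)
        return -EAGAIN;           /* models restart_syscall() */

    *downdelay = new_value;       /* modify bond state under the lock */

    pthread_mutex_unlock(&rtnl);
    return 0;
}

int main(void)
{
    int downdelay = 0;

    while (store_downdelay(200, &downdelay) == -EAGAIN)
        ;                         /* the caller retries, as the syscall would */
    printf("downdelay = %d\n", downdelay);
    return 0;
}

The key property is that the handler never sleeps on the contended lock, so the lock-ordering deadlock between RTNL and the sysfs removal path cannot occur.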
View File

@@ -264,6 +264,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
     mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
         mdev->port_cnt++;

+    /* Initialize time stamp mechanism */
+    if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+        mlx4_en_init_timestamp(mdev);
+
     mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
         if (!dev->caps.comp_pool) {
             mdev->profile.prof[i].rx_ring_num =
@@ -301,10 +305,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
             mdev->pndev[i] = NULL;
     }

-    /* Initialize time stamp mechanism */
-    if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
-        mlx4_en_init_timestamp(mdev);
-
     return mdev;

 err_mr:

View File

@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
                 le32_to_cpu(txd->opts1) & 0xffff,
                 PCI_DMA_TODEVICE);

-        bytes_compl += skb->len;
-        pkts_compl++;
-
         if (status & LastFrag) {
             if (status & (TxError | TxFIFOUnder)) {
                 netif_dbg(cp, tx_err, cp->dev,
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
                 netif_dbg(cp, tx_done, cp->dev,
                       "tx done, slot %d\n", tx_tail);
             }
+            bytes_compl += skb->len;
+            pkts_compl++;
             dev_kfree_skb_irq(skb);
         }

View File

@@ -3461,6 +3461,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
     rtl_writephy(tp, 0x14, 0x9065);
     rtl_writephy(tp, 0x14, 0x1065);

+    /* Check ALDPS bit, disable it if enabled */
+    rtl_writephy(tp, 0x1f, 0x0a43);
+    if (rtl_readphy(tp, 0x10) & 0x0004)
+        rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
     rtl_writephy(tp, 0x1f, 0x0000);
 }

View File

@@ -661,6 +661,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                 const struct iovec *iv, unsigned long total_len,
                 size_t count, int noblock)
 {
+    int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
     struct sk_buff *skb;
     struct macvlan_dev *vlan;
     unsigned long len = total_len;
@@ -703,6 +704,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,

     if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
         copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+        if (copylen > good_linear)
+            copylen = good_linear;
         linear = copylen;
         if (iov_pages(iv, vnet_hdr_len + copylen, count)
             <= MAX_SKB_FRAGS)
@@ -711,6 +714,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,

     if (!zerocopy) {
         copylen = len;
-        linear = vnet_hdr.hdr_len;
+        if (vnet_hdr.hdr_len > good_linear)
+            linear = good_linear;
+        else
+            linear = vnet_hdr.hdr_len;
     }

View File

@@ -1217,6 +1217,8 @@ static int team_user_linkup_option_get(struct team *team,
     return 0;
 }

+static void __team_carrier_check(struct team *team);
+
 static int team_user_linkup_option_set(struct team *team,
                        struct team_gsetter_ctx *ctx)
 {
@@ -1224,6 +1226,7 @@ static int team_user_linkup_option_set(struct team *team,

     port->user.linkup = ctx->data.bool_val;
     team_refresh_port_linkup(port);
+    __team_carrier_check(port->team);
     return 0;
 }

@@ -1243,6 +1246,7 @@ static int team_user_linkup_en_option_set(struct team *team,

     port->user.linkup_enabled = ctx->data.bool_val;
     team_refresh_port_linkup(port);
+    __team_carrier_check(port->team);
     return 0;
 }

View File

@@ -1069,6 +1069,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
     struct sk_buff *skb;
     size_t len = total_len, align = NET_SKB_PAD, linear;
     struct virtio_net_hdr gso = { 0 };
+    int good_linear;
     int offset = 0;
     int copylen;
     bool zerocopy = false;
@@ -1109,12 +1110,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
             return -EINVAL;
     }

+    good_linear = SKB_MAX_HEAD(align);
+
     if (msg_control) {
         /* There are 256 bytes to be copied in skb, so there is
          * enough room for skb expand head in case it is used.
          * The rest of the buffer is mapped from userspace.
          */
         copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+        if (copylen > good_linear)
+            copylen = good_linear;
         linear = copylen;
         if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
             zerocopy = true;
@@ -1122,6 +1127,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,

     if (!zerocopy) {
         copylen = len;
-        linear = gso.hdr_len;
+        if (gso.hdr_len > good_linear)
+            linear = good_linear;
+        else
+            linear = gso.hdr_len;
     }

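The macvtap and tun changes are the same fix: the guest-supplied virtio-net header can claim an arbitrarily large hdr_len, but the linear head of an skb is bounded, so both the copy length and the requested linear size must be clamped. A sketch of the clamping logic follows; GOOD_LINEAR is a placeholder constant standing in for SKB_MAX_HEAD(align), and the function is an illustrative model rather than the driver code.

#include <stdio.h>

#define GOOD_LINEAR  16384        /* stand-in for SKB_MAX_HEAD(align) */
#define GOODCOPY_LEN 128

static void size_request(size_t hdr_len, size_t *copylen, size_t *linear)
{
    *copylen = hdr_len ? hdr_len : GOODCOPY_LEN;
    if (*copylen > GOOD_LINEAR)
        *copylen = GOOD_LINEAR;   /* the fix: never exceed max head room */

    if (hdr_len > GOOD_LINEAR)
        *linear = GOOD_LINEAR;
    else
        *linear = hdr_len;
}

int main(void)
{
    size_t copylen, linear;
    size_request(1 << 20, &copylen, &linear);   /* hostile 1 MB hdr_len */
    printf("copylen=%zu linear=%zu (both capped)\n", copylen, linear);
    return 0;
}

Without the cap, a malicious guest could force an oversized linear allocation and crash the host allocator path.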
View File

@@ -203,9 +203,6 @@ static void intr_complete (struct urb *urb)
         break;
     }

-    if (!netif_running (dev->net))
-        return;
-
     status = usb_submit_urb (urb, GFP_ATOMIC);
     if (status != 0)
         netif_err(dev, timer, dev->net,

View File

@@ -625,15 +625,15 @@ static int kyrofb_ioctl(struct fb_info *info,
         }
         break;
     case KYRO_IOCTL_UVSTRIDE:
-        if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long)))
+        if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(deviceInfo.ulOverlayUVStride)))
             return -EFAULT;
         break;
     case KYRO_IOCTL_STRIDE:
-        if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(unsigned long)))
+        if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(deviceInfo.ulOverlayStride)))
             return -EFAULT;
         break;
     case KYRO_IOCTL_OVERLAY_OFFSET:
-        if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(unsigned long)))
+        if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(deviceInfo.ulOverlayOffset)))
             return -EFAULT;
         break;
     }

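This is the classic sizeof(type) versus sizeof(member) bug: if the member is 32-bit but sizeof(unsigned long) is 8 (as on LP64 kernels), copy_to_user() reads 4 bytes past the member and discloses adjacent memory to userspace. The demonstration below uses a simplified stand-in struct, not the driver's real layout, to show the size mismatch.

#include <stdint.h>
#include <stdio.h>

struct overlay_info {
    uint32_t ulOverlayUVStride;   /* 4 bytes */
    uint32_t secret;              /* would be leaked by the old code */
};

int main(void)
{
    struct overlay_info info = { 0x1000, 0xdeadbeef };

    printf("sizeof(unsigned long)          = %zu\n", sizeof(unsigned long));
    printf("sizeof(info.ulOverlayUVStride) = %zu\n",
           sizeof(info.ulOverlayUVStride));
    /* Old code: copy_to_user(argp, &info.ulOverlayUVStride,
     *                        sizeof(unsigned long))
     * on LP64 copies 8 bytes and exposes 'secret' to userspace. */
    return 0;
}

Using sizeof(deviceInfo.member) is immune to the architecture's word size and survives future changes to the member's type.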
View File

@@ -423,10 +423,12 @@ static void kill_ioctx_rcu(struct rcu_head *head)
  * when the processes owning a context have all exited to encourage
  * the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
 {
     if (!atomic_xchg(&ctx->dead, 1)) {
+        spin_lock(&mm->ioctx_lock);
         hlist_del_rcu(&ctx->list);
+        spin_unlock(&mm->ioctx_lock);

         /*
          * It'd be more correct to do this in free_ioctx(), after all
@@ -494,7 +496,7 @@ void exit_aio(struct mm_struct *mm)
          */
         ctx->mmap_size = 0;

-        kill_ioctx(ctx);
+        kill_ioctx(mm, ctx);
     }
 }
@@ -854,7 +856,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
     if (!IS_ERR(ioctx)) {
         ret = put_user(ioctx->user_id, ctxp);
         if (ret)
-            kill_ioctx(ioctx);
+            kill_ioctx(current->mm, ioctx);
         put_ioctx(ioctx);
     }
@@ -872,7 +874,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
     struct kioctx *ioctx = lookup_ioctx(ctx);
     if (likely(NULL != ioctx)) {
-        kill_ioctx(ioctx);
+        kill_ioctx(current->mm, ioctx);
         put_ioctx(ioctx);
         return 0;
     }

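The shape of the fix: removing a kioctx from the mm's context list must happen under mm->ioctx_lock, so the function now takes the owning mm explicitly instead of assuming the caller's context. The userspace model below reproduces that structure with a pthread mutex and a simple singly-linked list; the GCC __atomic builtin stands in for the kernel's atomic_xchg(), and everything else is an illustrative stand-in.

#include <pthread.h>
#include <stdio.h>

struct kioctx { struct kioctx *next; int dead; };

struct mm { pthread_mutex_t ioctx_lock; struct kioctx *ioctx_list; };

static void kill_ioctx(struct mm *mm, struct kioctx *ctx)
{
    /* models atomic_xchg(&ctx->dead, 1): only the first caller proceeds */
    if (__atomic_exchange_n(&ctx->dead, 1, __ATOMIC_SEQ_CST))
        return;

    pthread_mutex_lock(&mm->ioctx_lock);       /* the locking added above */
    for (struct kioctx **p = &mm->ioctx_list; *p; p = &(*p)->next) {
        if (*p == ctx) { *p = ctx->next; break; }
    }
    pthread_mutex_unlock(&mm->ioctx_lock);
}

int main(void)
{
    struct mm mm = { PTHREAD_MUTEX_INITIALIZER, NULL };
    struct kioctx ctx = { NULL, 0 };

    mm.ioctx_list = &ctx;
    kill_ioctx(&mm, &ctx);                     /* callers now pass the mm */
    printf("list empty: %s\n", mm.ioctx_list == NULL ? "yes" : "no");
    return 0;
}

Without the lock, a concurrent io_destroy() and exit_aio() could both walk and unlink from the list at once, corrupting it.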
View File

@@ -1612,6 +1612,12 @@ xfs_file_ioctl(
     case XFS_IOC_FREE_EOFBLOCKS: {
         struct xfs_eofblocks eofb;

+        if (!capable(CAP_SYS_ADMIN))
+            return -EPERM;
+
+        if (mp->m_flags & XFS_MOUNT_RDONLY)
+            return -XFS_ERROR(EROFS);
+
         if (copy_from_user(&eofb, arg, sizeof(eofb)))
             return -XFS_ERROR(EFAULT);

View File

@@ -30,6 +30,7 @@ enum clock_event_nofitiers {
 #include <linux/notifier.h>

 struct clock_event_device;
+struct module;

 /* Clock event mode commands */
 enum clock_event_mode {
@@ -83,6 +84,7 @@ enum clock_event_mode {
  * @irq:        IRQ number (only for non CPU local devices)
  * @cpumask:        cpumask to indicate for which CPUs this device works
  * @list:        list head for the management code
+ * @owner:        module reference
  */
 struct clock_event_device {
     void        (*event_handler)(struct clock_event_device *);
@@ -112,6 +114,7 @@ struct clock_event_device {
     int            irq;
     const struct cpumask    *cpumask;
     struct list_head    list;
+    struct module        *owner;
 } ____cacheline_aligned;

 /*
@@ -150,7 +153,6 @@ extern void clockevents_exchange_device(struct clock_event_device *old,
                     struct clock_event_device *new);
 extern void clockevents_set_mode(struct clock_event_device *dev,
                  enum clock_event_mode mode);
-extern int clockevents_register_notifier(struct notifier_block *nb);
 extern int clockevents_program_event(struct clock_event_device *dev,
                      ktime_t expires, bool force);

View File

@@ -331,11 +331,6 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif

-#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
-    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
-#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
-#endif
-
 /**
  *    struct sk_buff - socket buffer
  *    @next: Next buffer in list
@@ -368,7 +363,6 @@ typedef unsigned char *sk_buff_data_t;
  *    @protocol: Packet protocol from driver
  *    @destructor: Destruct function
  *    @nfct: Associated connection, if any
- *    @nfct_reasm: netfilter conntrack re-assembly pointer
  *    @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  *    @skb_iif: ifindex of device we arrived on
  *    @tc_index: Traffic control index
@@ -455,9 +449,6 @@ struct sk_buff {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
     struct nf_conntrack    *nfct;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-    struct sk_buff        *nfct_reasm;
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
     struct nf_bridge_info    *nf_bridge;
 #endif
@@ -2724,18 +2715,6 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
     atomic_inc(&nfct->use);
 }
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
-{
-    if (skb)
-        atomic_inc(&skb->users);
-}
-static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
-{
-    if (skb)
-        kfree_skb(skb);
-}
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
 {
@@ -2754,10 +2733,6 @@ static inline void nf_reset(struct sk_buff *skb)
     nf_conntrack_put(skb->nfct);
     skb->nfct = NULL;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-    nf_conntrack_put_reasm(skb->nfct_reasm);
-    skb->nfct_reasm = NULL;
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
     nf_bridge_put(skb->nf_bridge);
     skb->nf_bridge = NULL;
@@ -2779,10 +2754,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
     nf_conntrack_get(src->nfct);
     dst->nfctinfo = src->nfctinfo;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-    dst->nfct_reasm = src->nfct_reasm;
-    nf_conntrack_get_reasm(src->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
     dst->nf_bridge = src->nf_bridge;
     nf_bridge_get(src->nf_bridge);
@@ -2794,9 +2765,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
     nf_conntrack_put(dst->nfct);
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-    nf_conntrack_put_reasm(dst->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
     nf_bridge_put(dst->nf_bridge);
 #endif

View File

@@ -41,6 +41,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
         OOM_KILL,
 #ifdef CONFIG_NUMA_BALANCING
         NUMA_PTE_UPDATES,
+        NUMA_HUGE_PTE_UPDATES,
         NUMA_HINT_FAULTS,
         NUMA_HINT_FAULTS_LOCAL,
         NUMA_PAGE_MIGRATE,

View File

@@ -109,7 +109,6 @@ extern int ip_vs_conn_tab_size;
 struct ip_vs_iphdr {
     __u32 len;    /* IPv4 simply where L4 starts
                IPv6 where L4 Transport Header starts */
-    __u32 thoff_reasm; /* Transport Header Offset in nfct_reasm skb */
     __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
     __s16 protocol;
     __s32 flags;
@@ -117,34 +116,12 @@ struct ip_vs_iphdr {
     union nf_inet_addr daddr;
 };

-/* Dependency to module: nf_defrag_ipv6 */
-#if defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
-static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
-{
-    return skb->nfct_reasm;
-}
-static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
-                      int len, void *buffer,
-                      const struct ip_vs_iphdr *ipvsh)
-{
-    if (unlikely(ipvsh->fragoffs && skb_nfct_reasm(skb)))
-        return skb_header_pointer(skb_nfct_reasm(skb),
-                      ipvsh->thoff_reasm, len, buffer);
-
-    return skb_header_pointer(skb, offset, len, buffer);
-}
-#else
-static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
-{
-    return NULL;
-}
 static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
                       int len, void *buffer,
                       const struct ip_vs_iphdr *ipvsh)
 {
     return skb_header_pointer(skb, offset, len, buffer);
 }
-#endif

 static inline void
 ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
@@ -171,19 +148,12 @@ ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr)
             (struct ipv6hdr *)skb_network_header(skb);
         iphdr->saddr.in6 = iph->saddr;
         iphdr->daddr.in6 = iph->daddr;
-        /* ipv6_find_hdr() updates len, flags, thoff_reasm */
-        iphdr->thoff_reasm = 0;
+        /* ipv6_find_hdr() updates len, flags */
         iphdr->len     = 0;
         iphdr->flags     = 0;
         iphdr->protocol  = ipv6_find_hdr(skb, &iphdr->len, -1,
                          &iphdr->fragoffs,
                          &iphdr->flags);
-        /* get proto from re-assembled packet and it's offset */
-        if (skb_nfct_reasm(skb))
-            iphdr->protocol = ipv6_find_hdr(skb_nfct_reasm(skb),
-                            &iphdr->thoff_reasm,
-                            -1, NULL, NULL);
-
     } else
 #endif
     {

View File

@@ -6,10 +6,7 @@ extern void nf_defrag_ipv6_enable(void);
 extern int nf_ct_frag6_init(void);
 extern void nf_ct_frag6_cleanup(void);
 extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-                   struct net_device *in,
-                   struct net_device *out,
-                   int (*okfn)(struct sk_buff *));
+extern void nf_ct_frag6_consume_orig(struct sk_buff *skb);

 struct inet_frags_ctl;

View File

@@ -15,7 +15,6 @@
 #include <linux/hrtimer.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/notifier.h>
 #include <linux/smp.h>

 #include "tick-internal.h"
@@ -23,10 +22,6 @@
 /* The registered clock event devices */
 static LIST_HEAD(clockevent_devices);
 static LIST_HEAD(clockevents_released);
-
-/* Notification for clock events */
-static RAW_NOTIFIER_HEAD(clockevents_chain);
-
 /* Protection for the above */
 static DEFINE_RAW_SPINLOCK(clockevents_lock);
@@ -267,30 +262,6 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
     return (rc && force) ? clockevents_program_min_delta(dev) : rc;
 }

-/**
- * clockevents_register_notifier - register a clock events change listener
- */
-int clockevents_register_notifier(struct notifier_block *nb)
-{
-    unsigned long flags;
-    int ret;
-
-    raw_spin_lock_irqsave(&clockevents_lock, flags);
-    ret = raw_notifier_chain_register(&clockevents_chain, nb);
-    raw_spin_unlock_irqrestore(&clockevents_lock, flags);
-
-    return ret;
-}
-
-/*
- * Notify about a clock event change. Called with clockevents_lock
- * held.
- */
-static void clockevents_do_notify(unsigned long reason, void *dev)
-{
-    raw_notifier_call_chain(&clockevents_chain, reason, dev);
-}
-
 /*
  * Called after a notify add to make devices available which were
  * released from the notifier call.
@@ -304,7 +275,7 @@ static void clockevents_notify_released(void)
                  struct clock_event_device, list);
         list_del(&dev->list);
         list_add(&dev->list, &clockevent_devices);
-        clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
+        tick_check_new_device(dev);
     }
 }
@@ -325,7 +296,7 @@ void clockevents_register_device(struct clock_event_device *dev)
     raw_spin_lock_irqsave(&clockevents_lock, flags);

     list_add(&dev->list, &clockevent_devices);
-    clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
+    tick_check_new_device(dev);
     clockevents_notify_released();

     raw_spin_unlock_irqrestore(&clockevents_lock, flags);
@@ -421,6 +392,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
      * released list and do a notify add later.
      */
     if (old) {
+        module_put(old->owner);
         clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
         list_del(&old->list);
         list_add(&old->list, &clockevents_released);
@@ -468,7 +440,7 @@ void clockevents_notify(unsigned long reason, void *arg)
     int cpu;

     raw_spin_lock_irqsave(&clockevents_lock, flags);
-    clockevents_do_notify(reason, arg);
+    tick_notify(reason, arg);

     switch (reason) {
     case CLOCK_EVT_NOTIFY_CPU_DEAD:

View File

@@ -475,6 +475,7 @@ static void sync_cmos_clock(struct work_struct *work)
      * called as close as possible to 500 ms before the new second starts.
      * This code is run on a timer.  If the clock is set, that timer
      * may not expire at the correct time.  Thus, we adjust...
+     * We want the clock to be within a couple of ticks from the target.
      */
     if (!ntp_synced()) {
         /*
@@ -485,7 +486,7 @@ static void sync_cmos_clock(struct work_struct *work)
     }

     getnstimeofday(&now);
-    if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
+    if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
         struct timespec adjust = now;

         fail = -ENODEV;

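Worked numbers for the widened tolerance: the CMOS update is aimed at the instant 500 ms before the new second, and the old acceptance window of tick_nsec/2 around that target was narrow enough that a slightly late timer missed it forever. The sketch below computes both windows; HZ=100 is an assumption chosen for illustration, not something the patch specifies.

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L
#define HZ 100L                    /* assumed tick rate for this example */

int main(void)
{
    long tick_nsec = NSEC_PER_SEC / HZ;   /* 10 ms per tick at HZ=100 */

    printf("old window: +/- %ld ms\n", tick_nsec / 2 / 1000000);
    printf("new window: +/- %ld ms\n", tick_nsec * 5 / 1000000);
    return 0;
}

At HZ=100 that is a move from a +/-5 ms window to +/-50 ms, so a timer that fires a few ticks late still lands inside the window and the hardware clock keeps getting synced.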
View File

@@ -19,6 +19,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/module.h>

 #include "tick-internal.h"
@@ -65,17 +66,34 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
 /*
  * Check, if the device can be utilized as broadcast device:
  */
-int tick_check_broadcast_device(struct clock_event_device *dev)
+static bool tick_check_broadcast_device(struct clock_event_device *curdev,
+                    struct clock_event_device *newdev)
+{
+    if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+        (newdev->features & CLOCK_EVT_FEAT_C3STOP))
+        return false;
+
+    if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
+        !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
+        return false;
+
+    return !curdev || newdev->rating > curdev->rating;
+}
+
+/*
+ * Conditionally install/replace broadcast device
+ */
+void tick_install_broadcast_device(struct clock_event_device *dev)
 {
     struct clock_event_device *cur = tick_broadcast_device.evtdev;

-    if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
-        (tick_broadcast_device.evtdev &&
-         tick_broadcast_device.evtdev->rating >= dev->rating) ||
-         (dev->features & CLOCK_EVT_FEAT_C3STOP))
-        return 0;
+    if (!tick_check_broadcast_device(cur, dev))
+        return;

-    clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
+    if (!try_module_get(dev->owner))
+        return;
+
+    clockevents_exchange_device(cur, dev);
     if (cur)
         cur->event_handler = clockevents_handle_noop;
     tick_broadcast_device.evtdev = dev;
@@ -91,7 +109,6 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
      */
     if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
         tick_clock_notify();
-    return 1;
 }
/* /*

View File

@@ -18,6 +18,7 @@
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/profile.h> #include <linux/profile.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/module.h>
#include <asm/irq_regs.h> #include <asm/irq_regs.h>
@@ -206,14 +207,50 @@ static void tick_setup_device(struct tick_device *td,
tick_setup_oneshot(newdev, handler, next_event); tick_setup_oneshot(newdev, handler, next_event);
} }
static bool tick_check_percpu(struct clock_event_device *curdev,
struct clock_event_device *newdev, int cpu)
{
if (!cpumask_test_cpu(cpu, newdev->cpumask))
return false;
if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
return true;
/* Check if irq affinity can be set */
if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
return false;
/* Prefer an existing cpu local device */
if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
return false;
return true;
}
static bool tick_check_preferred(struct clock_event_device *curdev,
struct clock_event_device *newdev)
{
/* Prefer oneshot capable device */
if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
return false;
if (tick_oneshot_mode_active())
return false;
}
/*
* Use the higher rated one, but prefer a CPU local device with a lower
* rating than a non-CPU local device
*/
return !curdev ||
newdev->rating > curdev->rating ||
!cpumask_equal(curdev->cpumask, newdev->cpumask);
}
/* /*
* Check, if the new registered device should be used. * Check, if the new registered device should be used.
*/ */
static int tick_check_new_device(struct clock_event_device *newdev) void tick_check_new_device(struct clock_event_device *newdev)
{ {
struct clock_event_device *curdev; struct clock_event_device *curdev;
struct tick_device *td; struct tick_device *td;
int cpu, ret = NOTIFY_OK; int cpu;
unsigned long flags; unsigned long flags;
raw_spin_lock_irqsave(&tick_device_lock, flags); raw_spin_lock_irqsave(&tick_device_lock, flags);
@@ -226,40 +263,15 @@ static int tick_check_new_device(struct clock_event_device *newdev)
curdev = td->evtdev; curdev = td->evtdev;
/* cpu local device ? */ /* cpu local device ? */
if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) { if (!tick_check_percpu(curdev, newdev, cpu))
/*
* If the cpu affinity of the device interrupt can not
* be set, ignore it.
*/
if (!irq_can_set_affinity(newdev->irq))
goto out_bc; goto out_bc;
/* /* Preference decision */
* If we have a cpu local device already, do not replace it if (!tick_check_preferred(curdev, newdev))
* by a non cpu local device
*/
if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
goto out_bc; goto out_bc;
}
/* if (!try_module_get(newdev->owner))
* If we have an active device, then check the rating and the oneshot return;
* feature.
*/
if (curdev) {
/*
* Prefer one shot capable devices !
*/
if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
!(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
goto out_bc;
/*
* Check the rating
*/
if (curdev->rating >= newdev->rating)
goto out_bc;
}
/* /*
* Replace the eventually existing device by the new * Replace the eventually existing device by the new
@@ -276,18 +288,14 @@ static int tick_check_new_device(struct clock_event_device *newdev)
tick_oneshot_notify(); tick_oneshot_notify();
raw_spin_unlock_irqrestore(&tick_device_lock, flags); raw_spin_unlock_irqrestore(&tick_device_lock, flags);
return NOTIFY_STOP; return;
out_bc: out_bc:
/* /*
* Can the new device be used as a broadcast device ? * Can the new device be used as a broadcast device ?
*/ */
if (tick_check_broadcast_device(newdev)) tick_install_broadcast_device(newdev);
ret = NOTIFY_STOP;
raw_spin_unlock_irqrestore(&tick_device_lock, flags); raw_spin_unlock_irqrestore(&tick_device_lock, flags);
return ret;
} }
/* /*
@@ -361,17 +369,10 @@ static void tick_resume(void)
raw_spin_unlock_irqrestore(&tick_device_lock, flags); raw_spin_unlock_irqrestore(&tick_device_lock, flags);
} }
/* void tick_notify(unsigned long reason, void *dev)
* Notification about clock event devices
*/
static int tick_notify(struct notifier_block *nb, unsigned long reason,
void *dev)
{ {
switch (reason) { switch (reason) {
case CLOCK_EVT_NOTIFY_ADD:
return tick_check_new_device(dev);
case CLOCK_EVT_NOTIFY_BROADCAST_ON: case CLOCK_EVT_NOTIFY_BROADCAST_ON:
case CLOCK_EVT_NOTIFY_BROADCAST_OFF: case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
@@ -405,21 +406,12 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
default: default:
break; break;
} }
return NOTIFY_OK;
} }
static struct notifier_block tick_notifier = {
.notifier_call = tick_notify,
};
/** /**
* tick_init - initialize the tick control * tick_init - initialize the tick control
*
* Register the notifier with the clockevents framework
*/ */
void __init tick_init(void) void __init tick_init(void)
{ {
clockevents_register_notifier(&tick_notifier);
tick_broadcast_init(); tick_broadcast_init();
} }
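
The tick_check_percpu()/tick_check_preferred() helpers added above reduce the replacement decision to a few predicates. A simplified standalone model (the cpumask comparison is folded into a single per-cpu flag, so this approximates the kernel logic rather than copying it):

#include <stdbool.h>

struct dev {
	bool percpu;	/* affine to exactly this CPU */
	bool oneshot;	/* CLOCK_EVT_FEAT_ONESHOT */
	int  rating;
};

static bool prefer_new(const struct dev *cur, const struct dev *new)
{
	/* never trade a oneshot-capable device for a periodic-only one */
	if (!new->oneshot && cur && cur->oneshot)
		return false;
	/* higher rating wins; a CPU-local device also beats a shared one */
	return !cur || new->rating > cur->rating ||
	       (new->percpu && !cur->percpu);
}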

View File

@@ -18,6 +18,8 @@ extern int tick_do_timer_cpu __read_mostly;
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast); extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev); extern void tick_handle_periodic(struct clock_event_device *dev);
extern void tick_notify(unsigned long reason, void *dev);
extern void tick_check_new_device(struct clock_event_device *dev);
extern void clockevents_shutdown(struct clock_event_device *dev); extern void clockevents_shutdown(struct clock_event_device *dev);
@@ -90,7 +92,7 @@ static inline bool tick_broadcast_oneshot_available(void) { return false; }
*/ */
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu); extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
extern int tick_check_broadcast_device(struct clock_event_device *dev); extern void tick_install_broadcast_device(struct clock_event_device *dev);
extern int tick_is_broadcast_device(struct clock_event_device *dev); extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
extern void tick_shutdown_broadcast(unsigned int *cpup); extern void tick_shutdown_broadcast(unsigned int *cpup);
@@ -102,9 +104,8 @@ tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
#else /* !BROADCAST */ #else /* !BROADCAST */
static inline int tick_check_broadcast_device(struct clock_event_device *dev) static inline void tick_install_broadcast_device(struct clock_event_device *dev)
{ {
return 0;
} }
static inline int tick_is_broadcast_device(struct clock_event_device *dev) static inline int tick_is_broadcast_device(struct clock_event_device *dev)

View File

@@ -135,6 +135,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pmd_t *pmd; pmd_t *pmd;
unsigned long next; unsigned long next;
unsigned long pages = 0; unsigned long pages = 0;
unsigned long nr_huge_updates = 0;
bool all_same_node; bool all_same_node;
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
@@ -145,7 +146,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
split_huge_page_pmd(vma, addr, pmd); split_huge_page_pmd(vma, addr, pmd);
else if (change_huge_pmd(vma, pmd, addr, newprot, else if (change_huge_pmd(vma, pmd, addr, newprot,
prot_numa)) { prot_numa)) {
pages++; pages += HPAGE_PMD_NR;
nr_huge_updates++;
continue; continue;
} }
/* fall through */ /* fall through */
@@ -165,6 +167,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
change_pmd_protnuma(vma->vm_mm, addr, pmd); change_pmd_protnuma(vma->vm_mm, addr, pmd);
} while (pmd++, addr = next, addr != end); } while (pmd++, addr = next, addr != end);
if (nr_huge_updates)
count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
return pages; return pages;
} }
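
The accounting change means one successfully protection-changed huge PMD now contributes HPAGE_PMD_NR base pages to the return value instead of one, and feeds the new numa_huge_pte_updates counter separately. In miniature:

#define HPAGE_PMD_NR 512	/* 2M huge page / 4K base page, x86-64 defaults */

struct numa_stats { unsigned long pages, huge_updates; };

static void account_huge_pmd(struct numa_stats *s)
{
	s->pages += HPAGE_PMD_NR;	/* was: s->pages += 1 */
	s->huge_updates++;
}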

View File

@@ -783,6 +783,7 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_NUMA_BALANCING #ifdef CONFIG_NUMA_BALANCING
"numa_pte_updates", "numa_pte_updates",
"numa_huge_pte_updates",
"numa_hint_faults", "numa_hint_faults",
"numa_hint_faults_local", "numa_hint_faults_local",
"numa_pages_migrated", "numa_pages_migrated",

View File

@@ -172,6 +172,8 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
del_nbp(p); del_nbp(p);
} }
br_fdb_delete_by_port(br, NULL, 1);
del_timer_sync(&br->gc_timer); del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev); br_sysfs_delbr(br->dev);

View File

@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
__get_user(kmsg->msg_flags, &umsg->msg_flags)) __get_user(kmsg->msg_flags, &umsg->msg_flags))
return -EFAULT; return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
return -EINVAL; kmsg->msg_namelen = sizeof(struct sockaddr_storage);
kmsg->msg_name = compat_ptr(tmp1); kmsg->msg_name = compat_ptr(tmp1);
kmsg->msg_iov = compat_ptr(tmp2); kmsg->msg_iov = compat_ptr(tmp2);
kmsg->msg_control = compat_ptr(tmp3); kmsg->msg_control = compat_ptr(tmp3);
@@ -93,6 +93,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
if (err < 0) if (err < 0)
return err; return err;
} }
if (kern_msg->msg_name)
kern_msg->msg_name = kern_address; kern_msg->msg_name = kern_address;
} else { } else {
kern_msg->msg_name = NULL; kern_msg->msg_name = NULL;
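
This hunk and the matching net/core/iovec.c one below apply the same two rules: cap a user-supplied msg_namelen at sizeof(struct sockaddr_storage) instead of failing with -EINVAL, and only point msg_name at the kernel buffer when the caller actually supplied a name. A minimal userspace sketch of the clamp:

#include <stddef.h>
#include <sys/socket.h>

static size_t clamp_namelen(size_t namelen)
{
	if (namelen > sizeof(struct sockaddr_storage))
		namelen = sizeof(struct sockaddr_storage);
	return namelen;
}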

View File

@@ -4478,7 +4478,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
{ {
const struct net_device_ops *ops = dev->netdev_ops; const struct net_device_ops *ops = dev->netdev_ops;
if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) if (ops->ndo_change_rx_flags)
ops->ndo_change_rx_flags(dev, flags); ops->ndo_change_rx_flags(dev, flags);
} }

View File

@@ -507,7 +507,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
if (frh->action && (frh->action != rule->action)) if (frh->action && (frh->action != rule->action))
continue; continue;
if (frh->table && (frh_get_table(frh, tb) != rule->table)) if (frh_get_table(frh, tb) &&
(frh_get_table(frh, tb) != rule->table))
continue; continue;
if (tb[FRA_PRIORITY] && if (tb[FRA_PRIORITY] &&

View File

@@ -48,6 +48,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
if (err < 0) if (err < 0)
return err; return err;
} }
if (m->msg_name)
m->msg_name = address; m->msg_name = address;
} else { } else {
m->msg_name = NULL; m->msg_name = NULL;

View File

@@ -2515,6 +2515,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
if (x) { if (x) {
int ret; int ret;
__u8 *eth; __u8 *eth;
struct iphdr *iph;
nhead = x->props.header_len - skb_headroom(skb); nhead = x->props.header_len - skb_headroom(skb);
if (nhead > 0) { if (nhead > 0) {
ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2536,6 +2538,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
eth = (__u8 *) skb_push(skb, ETH_HLEN); eth = (__u8 *) skb_push(skb, ETH_HLEN);
memcpy(eth, pkt_dev->hh, 12); memcpy(eth, pkt_dev->hh, 12);
*(u16 *) &eth[12] = protocol; *(u16 *) &eth[12] = protocol;
/* Update IPv4 header len as well as checksum value */
iph = ip_hdr(skb);
iph->tot_len = htons(skb->len - ETH_HLEN);
ip_send_check(iph);
} }
} }
return 1; return 1;
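
The added lines recompute the IPv4 total length and header checksum after the ESP transform has grown the packet; without them the emitted frames carry stale values and get dropped. A self-contained model of the fixup using the standard 16-bit ones'-complement sum (the header layout is simplified and this is not the kernel's ip_send_check()):

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>

struct ipv4_hdr {
	uint8_t  vihl, tos;
	uint16_t tot_len, id, frag;
	uint8_t  ttl, proto;
	uint16_t check;
	uint32_t saddr, daddr;
};	/* 20 bytes, no options */

static void ip_fixup(struct ipv4_hdr *iph, size_t payload_len)
{
	const uint16_t *p = (const uint16_t *)iph;
	uint32_t sum = 0;

	iph->tot_len = htons(sizeof(*iph) + payload_len);
	iph->check = 0;
	for (size_t i = 0; i < sizeof(*iph) / 2; i++)
		sum += p[i];
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	iph->check = (uint16_t)~sum;
}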

View File

@@ -585,9 +585,6 @@ static void skb_release_head_state(struct sk_buff *skb)
#if IS_ENABLED(CONFIG_NF_CONNTRACK) #if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_conntrack_put(skb->nfct); nf_conntrack_put(skb->nfct);
#endif #endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER #ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(skb->nf_bridge); nf_bridge_put(skb->nf_bridge);
#endif #endif

View File

@@ -862,7 +862,7 @@ lowpan_process_data(struct sk_buff *skb)
* Traffic class carried in-line * Traffic class carried in-line
* ECN + DSCP (1 byte), Flow Label is elided * ECN + DSCP (1 byte), Flow Label is elided
*/ */
case 1: /* 10b */ case 2: /* 10b */
if (lowpan_fetch_skb_u8(skb, &tmp)) if (lowpan_fetch_skb_u8(skb, &tmp))
goto drop; goto drop;
@@ -875,7 +875,7 @@ lowpan_process_data(struct sk_buff *skb)
* Flow Label carried in-line * Flow Label carried in-line
* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
*/ */
case 2: /* 01b */ case 1: /* 01b */
if (lowpan_fetch_skb_u8(skb, &tmp)) if (lowpan_fetch_skb_u8(skb, &tmp))
goto drop; goto drop;

View File

@@ -57,7 +57,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(rt)) { if (IS_ERR(rt)) {
err = PTR_ERR(rt); err = PTR_ERR(rt);
if (err == -ENETUNREACH) if (err == -ENETUNREACH)
IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
goto out; goto out;
} }

View File

@@ -351,6 +351,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (!rt->dst.xfrm || if (!rt->dst.xfrm ||
rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) { rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
dev->stats.tx_carrier_errors++; dev->stats.tx_carrier_errors++;
ip_rt_put(rt);
goto tx_error_icmp; goto tx_error_icmp;
} }
tdev = rt->dst.dev; tdev = rt->dst.dev;

View File

@@ -583,7 +583,7 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
err = PTR_ERR(rt); err = PTR_ERR(rt);
rt = NULL; rt = NULL;
if (err == -ENETUNREACH) if (err == -ENETUNREACH)
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out; goto out;
} }

View File

@@ -1725,8 +1725,12 @@ local_input:
rth->dst.error= -err; rth->dst.error= -err;
rth->rt_flags &= ~RTCF_LOCAL; rth->rt_flags &= ~RTCF_LOCAL;
} }
if (do_cache) if (do_cache) {
rt_cache_route(&FIB_RES_NH(res), rth); if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
rth->dst.flags |= DST_NOCACHE;
rt_add_uncached_list(rth);
}
}
skb_dst_set(skb, &rth->dst); skb_dst_set(skb, &rth->dst);
err = 0; err = 0;
goto out; goto out;

View File

@@ -813,12 +813,6 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
xmit_size_goal = min_t(u32, gso_size, xmit_size_goal = min_t(u32, gso_size,
sk->sk_gso_max_size - 1 - hlen); sk->sk_gso_max_size - 1 - hlen);
/* TSQ : try to have at least two segments in flight
* (one in NIC TX ring, another in Qdisc)
*/
xmit_size_goal = min_t(u32, xmit_size_goal,
sysctl_tcp_limit_output_bytes >> 1);
xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
/* We try hard to avoid divides here */ /* We try hard to avoid divides here */
@@ -2915,6 +2909,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
netdev_features_t features) netdev_features_t features)
{ {
struct sk_buff *segs = ERR_PTR(-EINVAL); struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int sum_truesize = 0;
struct tcphdr *th; struct tcphdr *th;
unsigned int thlen; unsigned int thlen;
unsigned int seq; unsigned int seq;
@@ -2998,13 +2993,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
if (copy_destructor) { if (copy_destructor) {
skb->destructor = gso_skb->destructor; skb->destructor = gso_skb->destructor;
skb->sk = gso_skb->sk; skb->sk = gso_skb->sk;
/* {tcp|sock}_wfree() use exact truesize accounting : sum_truesize += skb->truesize;
* sum(skb->truesize) MUST be exactly be gso_skb->truesize
* So we account mss bytes of 'true size' for each segment.
* The last segment will contain the remaining.
*/
skb->truesize = mss;
gso_skb->truesize -= mss;
} }
skb = skb->next; skb = skb->next;
th = tcp_hdr(skb); th = tcp_hdr(skb);
@@ -3021,7 +3010,9 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
if (copy_destructor) { if (copy_destructor) {
swap(gso_skb->sk, skb->sk); swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor); swap(gso_skb->destructor, skb->destructor);
swap(gso_skb->truesize, skb->truesize); sum_truesize += skb->truesize;
atomic_add(sum_truesize - gso_skb->truesize,
&skb->sk->sk_wmem_alloc);
} }
delta = htonl(oldlen + (skb->tail - skb->transport_header) + delta = htonl(oldlen + (skb->tail - skb->transport_header) +
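
The replacement accounting sums the real truesize of every segment and corrects sk_wmem_alloc once by the difference against the original GSO skb, instead of pretending each segment's truesize is mss. The restored invariant, as a sketch:

struct seg { unsigned int truesize; struct seg *next; };

static long wmem_delta(const struct seg *segs, unsigned int gso_truesize)
{
	unsigned long sum = 0;

	for (const struct seg *s = segs; s; s = s->next)
		sum += s->truesize;
	/* value atomically added to sk_wmem_alloc in the hunk above */
	return (long)sum - (long)gso_truesize;
}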

View File

@@ -176,7 +176,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(rt)) { if (IS_ERR(rt)) {
err = PTR_ERR(rt); err = PTR_ERR(rt);
if (err == -ENETUNREACH) if (err == -ENETUNREACH)
IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
return err; return err;
} }

View File

@@ -1877,8 +1877,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
* - better RTT estimation and ACK scheduling * - better RTT estimation and ACK scheduling
* - faster recovery * - faster recovery
* - high rates * - high rates
* Alas, some drivers / subsystems require a fair amount
* of queued bytes to ensure line rate.
* One example is wifi aggregation (802.11 AMPDU)
*/ */
limit = max(skb->truesize, sk->sk_pacing_rate >> 10); limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
sk->sk_pacing_rate >> 10);
if (atomic_read(&sk->sk_wmem_alloc) > limit) { if (atomic_read(&sk->sk_wmem_alloc) > limit) {
set_bit(TSQ_THROTTLED, &tp->tsq_flags); set_bit(TSQ_THROTTLED, &tp->tsq_flags);
@@ -3111,7 +3115,6 @@ void tcp_send_window_probe(struct sock *sk)
{ {
if (sk->sk_state == TCP_ESTABLISHED) { if (sk->sk_state == TCP_ESTABLISHED) {
tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
tcp_xmit_probe_skb(sk, 0); tcp_xmit_probe_skb(sk, 0);
} }
} }
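
The new limit is roughly one millisecond of data at the current pacing rate (rate >> 10 is rate/1024, just under 1 ms worth), floored at tcp_limit_output_bytes so that drivers needing deep queues, such as wifi AMPDU aggregation, still reach line rate. As a standalone helper with illustrative parameter names:

static unsigned int tsq_limit(unsigned int pacing_rate_bps,	/* bytes/sec */
			      unsigned int limit_output_bytes)
{
	unsigned int onems = pacing_rate_bps >> 10;	/* ~1/1024 s of data */

	return onems > limit_output_bytes ? onems : limit_output_bytes;
}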

View File

@@ -972,7 +972,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
err = PTR_ERR(rt); err = PTR_ERR(rt);
rt = NULL; rt = NULL;
if (err == -ENETUNREACH) if (err == -ENETUNREACH)
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out; goto out;
} }
@@ -1071,6 +1071,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
struct udp_sock *up = udp_sk(sk); struct udp_sock *up = udp_sk(sk);
int ret; int ret;
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
if (!up->pending) { if (!up->pending) {
struct msghdr msg = { .msg_flags = flags|MSG_MORE }; struct msghdr msg = { .msg_flags = flags|MSG_MORE };
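
This mirrors the af_alg hash_sendpage() change in this same release: sendpage callers such as splice() tag every page except the final one with MSG_SENDPAGE_NOTLAST, and mapping that onto MSG_MORE stops UDP from flushing one datagram per page. A sketch of the mapping (the flag values mirror the kernel's socket.h definitions):

#define MSG_MORE	     0x8000
#define MSG_SENDPAGE_NOTLAST 0x20000	/* sendpage() internal: not the last page */

static int sendpage_flags(int flags)
{
	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;	/* more data follows: do not flush yet */
	return flags;
}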

View File

@@ -453,8 +453,10 @@ static int mem_check(struct sock *sk)
if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK) if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
return 0; return 0;
rcu_read_lock_bh();
for_each_sk_fl_rcu(np, sfl) for_each_sk_fl_rcu(np, sfl)
count++; count++;
rcu_read_unlock_bh();
if (room <= 0 || if (room <= 0 ||
((count >= FL_MAX_PER_SOCK || ((count >= FL_MAX_PER_SOCK ||

View File

@@ -141,7 +141,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
} }
rcu_read_unlock_bh(); rcu_read_unlock_bh();
IP6_INC_STATS_BH(dev_net(dst->dev), IP6_INC_STATS(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb); kfree_skb(skb);
return -EINVAL; return -EINVAL;
@@ -150,7 +150,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
static int ip6_finish_output(struct sk_buff *skb) static int ip6_finish_output(struct sk_buff *skb)
{ {
if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb))) dst_allfrag(skb_dst(skb)) ||
(IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
return ip6_fragment(skb, ip6_finish_output2); return ip6_fragment(skb, ip6_finish_output2);
else else
return ip6_finish_output2(skb); return ip6_finish_output2(skb);

View File

@@ -172,63 +172,13 @@ out:
return nf_conntrack_confirm(skb); return nf_conntrack_confirm(skb);
} }
static unsigned int __ipv6_conntrack_in(struct net *net,
unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct sk_buff *reasm = skb->nfct_reasm;
const struct nf_conn_help *help;
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
/* This packet is fragmented and has reassembled packet. */
if (reasm) {
/* Reassembled packet isn't parsed yet ? */
if (!reasm->nfct) {
unsigned int ret;
ret = nf_conntrack_in(net, PF_INET6, hooknum, reasm);
if (ret != NF_ACCEPT)
return ret;
}
/* Conntrack helpers need the entire reassembled packet in the
* POST_ROUTING hook. In case of unconfirmed connections NAT
* might reassign a helper, so the entire packet is also
* required.
*/
ct = nf_ct_get(reasm, &ctinfo);
if (ct != NULL && !nf_ct_is_untracked(ct)) {
help = nfct_help(ct);
if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
nf_conntrack_get_reasm(reasm);
NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
(struct net_device *)in,
(struct net_device *)out,
okfn, NF_IP6_PRI_CONNTRACK + 1);
return NF_DROP_ERR(-ECANCELED);
}
}
nf_conntrack_get(reasm->nfct);
skb->nfct = reasm->nfct;
skb->nfctinfo = reasm->nfctinfo;
return NF_ACCEPT;
}
return nf_conntrack_in(net, PF_INET6, hooknum, skb);
}
static unsigned int ipv6_conntrack_in(unsigned int hooknum, static unsigned int ipv6_conntrack_in(unsigned int hooknum,
struct sk_buff *skb, struct sk_buff *skb,
const struct net_device *in, const struct net_device *in,
const struct net_device *out, const struct net_device *out,
int (*okfn)(struct sk_buff *)) int (*okfn)(struct sk_buff *))
{ {
return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn); return nf_conntrack_in(dev_net(in), PF_INET6, hooknum, skb);
} }
static unsigned int ipv6_conntrack_local(unsigned int hooknum, static unsigned int ipv6_conntrack_local(unsigned int hooknum,
@@ -242,7 +192,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
net_notice_ratelimited("ipv6_conntrack_local: packet too short\n"); net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
return NF_ACCEPT; return NF_ACCEPT;
} }
return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn); return nf_conntrack_in(dev_net(out), PF_INET6, hooknum, skb);
} }
static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {

View File

@@ -621,31 +621,16 @@ ret_orig:
return skb; return skb;
} }
void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, void nf_ct_frag6_consume_orig(struct sk_buff *skb)
struct net_device *in, struct net_device *out,
int (*okfn)(struct sk_buff *))
{ {
struct sk_buff *s, *s2; struct sk_buff *s, *s2;
unsigned int ret = 0;
for (s = NFCT_FRAG6_CB(skb)->orig; s;) { for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
nf_conntrack_put_reasm(s->nfct_reasm);
nf_conntrack_get_reasm(skb);
s->nfct_reasm = skb;
s2 = s->next; s2 = s->next;
s->next = NULL; s->next = NULL;
consume_skb(s);
if (ret != -ECANCELED)
ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
in, out, okfn,
NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
else
kfree_skb(s);
s = s2; s = s2;
} }
nf_conntrack_put_reasm(skb);
} }
static int nf_ct_net_init(struct net *net) static int nf_ct_net_init(struct net *net)

View File

@@ -75,8 +75,11 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
if (reasm == skb) if (reasm == skb)
return NF_ACCEPT; return NF_ACCEPT;
nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in, nf_ct_frag6_consume_orig(reasm);
(struct net_device *)out, okfn);
NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
(struct net_device *) in, (struct net_device *) out,
okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
return NF_STOLEN; return NF_STOLEN;
} }

View File

@@ -728,8 +728,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
prefix = &prefix_buf; prefix = &prefix_buf;
} }
rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr, if (rinfo->prefix_len == 0)
dev->ifindex); rt = rt6_get_dflt_router(gwaddr, dev);
else
rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
gwaddr, dev->ifindex);
if (rt && !lifetime) { if (rt && !lifetime) {
ip6_del_rt(rt); ip6_del_rt(rt);

View File

@@ -1131,12 +1131,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
ip_vs_fill_iph_skb(af, skb, &iph); ip_vs_fill_iph_skb(af, skb, &iph);
#ifdef CONFIG_IP_VS_IPV6 #ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) { if (af == AF_INET6) {
if (!iph.fragoffs && skb_nfct_reasm(skb)) {
struct sk_buff *reasm = skb_nfct_reasm(skb);
/* Save fw mark for coming frags */
reasm->ipvs_property = 1;
reasm->mark = skb->mark;
}
if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
int related; int related;
int verdict = ip_vs_out_icmp_v6(skb, &related, int verdict = ip_vs_out_icmp_v6(skb, &related,
@@ -1606,12 +1600,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
#ifdef CONFIG_IP_VS_IPV6 #ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) { if (af == AF_INET6) {
if (!iph.fragoffs && skb_nfct_reasm(skb)) {
struct sk_buff *reasm = skb_nfct_reasm(skb);
/* Save fw mark for coming frags. */
reasm->ipvs_property = 1;
reasm->mark = skb->mark;
}
if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
int related; int related;
int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum, int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
@@ -1663,9 +1651,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
/* sorry, all this trouble for a no-hit :) */ /* sorry, all this trouble for a no-hit :) */
IP_VS_DBG_PKT(12, af, pp, skb, 0, IP_VS_DBG_PKT(12, af, pp, skb, 0,
"ip_vs_in: packet continues traversal as normal"); "ip_vs_in: packet continues traversal as normal");
if (iph.fragoffs && !skb_nfct_reasm(skb)) { if (iph.fragoffs) {
/* Fragment that couldn't be mapped to a conn entry /* Fragment that couldn't be mapped to a conn entry
* and don't have any pointer to a reasm skb
* is missing module nf_defrag_ipv6 * is missing module nf_defrag_ipv6
*/ */
IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n"); IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
@@ -1747,38 +1734,6 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
#ifdef CONFIG_IP_VS_IPV6 #ifdef CONFIG_IP_VS_IPV6
/*
* AF_INET6 fragment handling
* Copy info from first fragment, to the rest of them.
*/
static unsigned int
ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct sk_buff *reasm = skb_nfct_reasm(skb);
struct net *net;
/* Skip if not a "replay" from nf_ct_frag6_output or first fragment.
* ipvs_property is set when checking first fragment
* in ip_vs_in() and ip_vs_out().
*/
if (reasm)
IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property);
if (!reasm || !reasm->ipvs_property)
return NF_ACCEPT;
net = skb_net(skb);
if (!net_ipvs(net)->enable)
return NF_ACCEPT;
/* Copy stored fw mark, saved in ip_vs_{in,out} */
skb->mark = reasm->mark;
return NF_ACCEPT;
}
/* /*
* AF_INET6 handler in NF_INET_LOCAL_IN chain * AF_INET6 handler in NF_INET_LOCAL_IN chain
* Schedule and forward packets from remote clients * Schedule and forward packets from remote clients
@@ -1916,14 +1871,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.priority = 100, .priority = 100,
}, },
#ifdef CONFIG_IP_VS_IPV6 #ifdef CONFIG_IP_VS_IPV6
/* After mangle & nat fetch 2:nd fragment and following */
{
.hook = ip_vs_preroute_frag6,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_NAT_DST + 1,
},
/* After packet filtering, change source only for VS/NAT */ /* After packet filtering, change source only for VS/NAT */
{ {
.hook = ip_vs_reply6, .hook = ip_vs_reply6,

View File

@@ -65,7 +65,6 @@ static int get_callid(const char *dptr, unsigned int dataoff,
static int static int
ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb) ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
{ {
struct sk_buff *reasm = skb_nfct_reasm(skb);
struct ip_vs_iphdr iph; struct ip_vs_iphdr iph;
unsigned int dataoff, datalen, matchoff, matchlen; unsigned int dataoff, datalen, matchoff, matchlen;
const char *dptr; const char *dptr;
@@ -79,15 +78,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
/* todo: IPv6 fragments: /* todo: IPv6 fragments:
* I think this only should be done for the first fragment. /HS * I think this only should be done for the first fragment. /HS
*/ */
if (reasm) {
skb = reasm;
dataoff = iph.thoff_reasm + sizeof(struct udphdr);
} else
dataoff = iph.len + sizeof(struct udphdr); dataoff = iph.len + sizeof(struct udphdr);
if (dataoff >= skb->len) if (dataoff >= skb->len)
return -EINVAL; return -EINVAL;
/* todo: Check if this will mess-up the reasm skb !!! /HS */
retc = skb_linearize(skb); retc = skb_linearize(skb);
if (retc < 0) if (retc < 0)
return retc; return retc;

View File

@@ -244,11 +244,15 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
static void register_prot_hook(struct sock *sk) static void register_prot_hook(struct sock *sk)
{ {
struct packet_sock *po = pkt_sk(sk); struct packet_sock *po = pkt_sk(sk);
if (!po->running) { if (!po->running) {
if (po->fanout) if (po->fanout) {
__fanout_link(sk, po); __fanout_link(sk, po);
else } else {
dev_add_pack(&po->prot_hook); dev_add_pack(&po->prot_hook);
rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
}
sock_hold(sk); sock_hold(sk);
po->running = 1; po->running = 1;
} }
@@ -266,10 +270,13 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
struct packet_sock *po = pkt_sk(sk); struct packet_sock *po = pkt_sk(sk);
po->running = 0; po->running = 0;
if (po->fanout) if (po->fanout) {
__fanout_unlink(sk, po); __fanout_unlink(sk, po);
else } else {
__dev_remove_pack(&po->prot_hook); __dev_remove_pack(&po->prot_hook);
RCU_INIT_POINTER(po->cached_dev, NULL);
}
__sock_put(sk); __sock_put(sk);
if (sync) { if (sync) {
@@ -432,9 +439,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc; pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
spin_lock(&rb_queue->lock); spin_lock_bh(&rb_queue->lock);
pkc->delete_blk_timer = 1; pkc->delete_blk_timer = 1;
spin_unlock(&rb_queue->lock); spin_unlock_bh(&rb_queue->lock);
prb_del_retire_blk_timer(pkc); prb_del_retire_blk_timer(pkc);
} }
@@ -2043,12 +2050,24 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
return tp_len; return tp_len;
} }
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
struct net_device *dev;
rcu_read_lock();
dev = rcu_dereference(po->cached_dev);
if (dev)
dev_hold(dev);
rcu_read_unlock();
return dev;
}
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct net_device *dev; struct net_device *dev;
__be16 proto; __be16 proto;
bool need_rls_dev = false;
int err, reserve = 0; int err, reserve = 0;
void *ph; void *ph;
struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
@@ -2061,7 +2080,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
mutex_lock(&po->pg_vec_lock); mutex_lock(&po->pg_vec_lock);
if (saddr == NULL) { if (saddr == NULL) {
dev = po->prot_hook.dev; dev = packet_cached_dev_get(po);
proto = po->num; proto = po->num;
addr = NULL; addr = NULL;
} else { } else {
@@ -2075,19 +2094,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
proto = saddr->sll_protocol; proto = saddr->sll_protocol;
addr = saddr->sll_addr; addr = saddr->sll_addr;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
need_rls_dev = true;
} }
err = -ENXIO; err = -ENXIO;
if (unlikely(dev == NULL)) if (unlikely(dev == NULL))
goto out; goto out;
reserve = dev->hard_header_len;
err = -ENETDOWN; err = -ENETDOWN;
if (unlikely(!(dev->flags & IFF_UP))) if (unlikely(!(dev->flags & IFF_UP)))
goto out_put; goto out_put;
reserve = dev->hard_header_len;
size_max = po->tx_ring.frame_size size_max = po->tx_ring.frame_size
- (po->tp_hdrlen - sizeof(struct sockaddr_ll)); - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
@@ -2164,7 +2181,6 @@ out_status:
__packet_set_status(po, ph, status); __packet_set_status(po, ph, status);
kfree_skb(skb); kfree_skb(skb);
out_put: out_put:
if (need_rls_dev)
dev_put(dev); dev_put(dev);
out: out:
mutex_unlock(&po->pg_vec_lock); mutex_unlock(&po->pg_vec_lock);
@@ -2203,7 +2219,6 @@ static int packet_snd(struct socket *sock,
struct sk_buff *skb; struct sk_buff *skb;
struct net_device *dev; struct net_device *dev;
__be16 proto; __be16 proto;
bool need_rls_dev = false;
unsigned char *addr; unsigned char *addr;
int err, reserve = 0; int err, reserve = 0;
struct virtio_net_hdr vnet_hdr = { 0 }; struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2219,7 +2234,7 @@ static int packet_snd(struct socket *sock,
*/ */
if (saddr == NULL) { if (saddr == NULL) {
dev = po->prot_hook.dev; dev = packet_cached_dev_get(po);
proto = po->num; proto = po->num;
addr = NULL; addr = NULL;
} else { } else {
@@ -2231,19 +2246,17 @@ static int packet_snd(struct socket *sock,
proto = saddr->sll_protocol; proto = saddr->sll_protocol;
addr = saddr->sll_addr; addr = saddr->sll_addr;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
need_rls_dev = true;
} }
err = -ENXIO; err = -ENXIO;
if (dev == NULL) if (unlikely(dev == NULL))
goto out_unlock; goto out_unlock;
err = -ENETDOWN;
if (unlikely(!(dev->flags & IFF_UP)))
goto out_unlock;
if (sock->type == SOCK_RAW) if (sock->type == SOCK_RAW)
reserve = dev->hard_header_len; reserve = dev->hard_header_len;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_unlock;
if (po->has_vnet_hdr) { if (po->has_vnet_hdr) {
vnet_hdr_len = sizeof(vnet_hdr); vnet_hdr_len = sizeof(vnet_hdr);
@@ -2377,7 +2390,6 @@ static int packet_snd(struct socket *sock,
if (err > 0 && (err = net_xmit_errno(err)) != 0) if (err > 0 && (err = net_xmit_errno(err)) != 0)
goto out_unlock; goto out_unlock;
if (need_rls_dev)
dev_put(dev); dev_put(dev);
return len; return len;
@@ -2385,7 +2397,7 @@ static int packet_snd(struct socket *sock,
out_free: out_free:
kfree_skb(skb); kfree_skb(skb);
out_unlock: out_unlock:
if (dev && need_rls_dev) if (dev)
dev_put(dev); dev_put(dev);
out: out:
return err; return err;
@@ -2605,6 +2617,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
po = pkt_sk(sk); po = pkt_sk(sk);
sk->sk_family = PF_PACKET; sk->sk_family = PF_PACKET;
po->num = proto; po->num = proto;
RCU_INIT_POINTER(po->cached_dev, NULL);
sk->sk_destruct = packet_sock_destruct; sk->sk_destruct = packet_sock_destruct;
sk_refcnt_debug_inc(sk); sk_refcnt_debug_inc(sk);
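
The cached_dev field added in this file is a classic RCU-published pointer: the writer installs it with rcu_assign_pointer() while the protocol hook is registered, and readers take a device reference inside an RCU read-side section so the device cannot be freed between lookup and use. A compilable sketch with no-op stand-ins for the kernel primitives:

#define rcu_read_lock()	   do { } while (0)	/* stand-in for the kernel API */
#define rcu_read_unlock()  do { } while (0)
#define rcu_dereference(p) (p)

struct net_device { int refcnt; };

static struct net_device *cached_dev;	/* written with rcu_assign_pointer() */

static void dev_hold(struct net_device *dev) { dev->refcnt++; }

static struct net_device *cached_dev_get(void)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(cached_dev);
	if (dev)
		dev_hold(dev);	/* pin before leaving the read-side section */
	rcu_read_unlock();
	return dev;
}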

View File

@@ -113,6 +113,7 @@ struct packet_sock {
unsigned int tp_loss:1; unsigned int tp_loss:1;
unsigned int tp_tx_has_off:1; unsigned int tp_tx_has_off:1;
unsigned int tp_tstamp; unsigned int tp_tstamp;
struct net_device __rcu *cached_dev;
struct packet_type prot_hook ____cacheline_aligned_in_smp; struct packet_type prot_hook ____cacheline_aligned_in_smp;
}; };

View File

@@ -215,12 +215,13 @@ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
int err; int err;
int len; int len;
BUG_ON(klen > sizeof(struct sockaddr_storage));
err = get_user(len, ulen); err = get_user(len, ulen);
if (err) if (err)
return err; return err;
if (len > klen) if (len > klen)
len = klen; len = klen;
if (len < 0 || len > sizeof(struct sockaddr_storage)) if (len < 0)
return -EINVAL; return -EINVAL;
if (len) { if (len) {
if (audit_sockaddr(klen, kaddr)) if (audit_sockaddr(klen, kaddr))
@@ -1995,7 +1996,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT; return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
return -EINVAL; kmsg->msg_namelen = sizeof(struct sockaddr_storage);
return 0; return 0;
} }
@@ -2265,6 +2266,9 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
/* We assume all kernel code knows the size of sockaddr_storage */
msg_sys->msg_namelen = 0;
if (sock->file->f_flags & O_NONBLOCK) if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT; flags |= MSG_DONTWAIT;
err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,