net: backported xt_qtaguid and xt_quota2 to support data usage.
parent df93a2fc56
commit 99dbc5c163
arch/arm/boot/compressed/decompress.c (new file, 50 lines)
@@ -0,0 +1,50 @@
+#define _LINUX_STRING_H_
+
+#include <linux/compiler.h>	/* for inline */
+#include <linux/types.h>	/* for size_t */
+#include <linux/stddef.h>	/* for NULL */
+#include <linux/linkage.h>
+#include <asm/string.h>
+
+extern unsigned long free_mem_ptr;
+extern unsigned long free_mem_end_ptr;
+extern void error(char *);
+
+#define STATIC static
+#define STATIC_RW_DATA	/* non-static please */
+
+#define ARCH_HAS_DECOMP_WDOG
+
+/* Diagnostic functions */
+#ifdef DEBUG
+#  define Assert(cond,msg) {if(!(cond)) error(msg);}
+#  define Trace(x) fprintf x
+#  define Tracev(x) {if (verbose) fprintf x ;}
+#  define Tracevv(x) {if (verbose>1) fprintf x ;}
+#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+#  define Assert(cond,msg)
+#  define Trace(x)
+#  define Tracev(x)
+#  define Tracevv(x)
+#  define Tracec(c,x)
+#  define Tracecv(c,x)
+#endif
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
+{
+	decompress(input, len, NULL, NULL, output, NULL, error);
+}
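Note: the wrapper above only selects a decompressor at build time and funnels
everything through do_decompress(). A minimal caller sketch follows; the symbol
names input_data/input_data_end are borrowed from the usual piggy-data
conventions and are assumptions here, not part of this patch.

	/* Hedged sketch of a do_decompress() caller. */
	extern unsigned char input_data[], input_data_end[];
	extern void error(char *x);

	static void decompress_kernel_sketch(u8 *output_start)
	{
		/* Feed the whole compressed payload; errors land in error(). */
		do_decompress(input_data, input_data_end - input_data,
			      output_start, error);
	}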
arch/arm/boot/compressed/lib1funcs.S (new file, 348 lines)
@@ -0,0 +1,348 @@
+/*
+ * linux/arch/arm/lib/lib1funcs.S: Optimized ARM division routines
+ *
+ * Author: Nicolas Pitre <nico@fluxnic.net>
+ *   - contributed to gcc-3.4 on Sep 30, 2003
+ *   - adapted for the Linux kernel on Oct 2, 2003
+ */
+
+/* Copyright 1995, 1996, 1998, 1999, 2000, 2003 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file.  (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+
+.macro ARM_DIV_BODY dividend, divisor, result, curbit
+
+#if __LINUX_ARM_ARCH__ >= 5
+
+	clz	\curbit, \divisor
+	clz	\result, \dividend
+	sub	\result, \curbit, \result
+	mov	\curbit, #1
+	mov	\divisor, \divisor, lsl \result
+	mov	\curbit, \curbit, lsl \result
+	mov	\result, #0
+
+#else
+
+	@ Initially shift the divisor left 3 bits if possible,
+	@ set curbit accordingly.  This allows for curbit to be located
+	@ at the left end of each 4 bit nibbles in the division loop
+	@ to save one loop in most cases.
+	tst	\divisor, #0xe0000000
+	moveq	\divisor, \divisor, lsl #3
+	moveq	\curbit, #8
+	movne	\curbit, #1
+
+	@ Unless the divisor is very big, shift it up in multiples of
+	@ four bits, since this is the amount of unwinding in the main
+	@ division loop.  Continue shifting until the divisor is
+	@ larger than the dividend.
+1:	cmp	\divisor, #0x10000000
+	cmplo	\divisor, \dividend
+	movlo	\divisor, \divisor, lsl #4
+	movlo	\curbit, \curbit, lsl #4
+	blo	1b
+
+	@ For very big divisors, we must shift it a bit at a time, or
+	@ we will be in danger of overflowing.
+1:	cmp	\divisor, #0x80000000
+	cmplo	\divisor, \dividend
+	movlo	\divisor, \divisor, lsl #1
+	movlo	\curbit, \curbit, lsl #1
+	blo	1b
+
+	mov	\result, #0
+
+#endif
+
+	@ Division loop
+1:	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+	orrhs	\result,   \result,   \curbit
+	cmp	\dividend, \divisor,  lsr #1
+	subhs	\dividend, \dividend, \divisor, lsr #1
+	orrhs	\result,   \result,   \curbit,  lsr #1
+	cmp	\dividend, \divisor,  lsr #2
+	subhs	\dividend, \dividend, \divisor, lsr #2
+	orrhs	\result,   \result,   \curbit,  lsr #2
+	cmp	\dividend, \divisor,  lsr #3
+	subhs	\dividend, \dividend, \divisor, lsr #3
+	orrhs	\result,   \result,   \curbit,  lsr #3
+	cmp	\dividend, #0			@ Early termination?
+	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
+	movne	\divisor,  \divisor, lsr #4
+	bne	1b
+
+.endm
+
+
+.macro ARM_DIV2_ORDER divisor, order
+
+#if __LINUX_ARM_ARCH__ >= 5
+
+	clz	\order, \divisor
+	rsb	\order, \order, #31
+
+#else
+
+	cmp	\divisor, #(1 << 16)
+	movhs	\divisor, \divisor, lsr #16
+	movhs	\order, #16
+	movlo	\order, #0
+
+	cmp	\divisor, #(1 << 8)
+	movhs	\divisor, \divisor, lsr #8
+	addhs	\order, \order, #8
+
+	cmp	\divisor, #(1 << 4)
+	movhs	\divisor, \divisor, lsr #4
+	addhs	\order, \order, #4
+
+	cmp	\divisor, #(1 << 2)
+	addhi	\order, \order, #3
+	addls	\order, \order, \divisor, lsr #1
+
+#endif
+
+.endm
+
+
+.macro ARM_MOD_BODY dividend, divisor, order, spare
+
+#if __LINUX_ARM_ARCH__ >= 5
+
+	clz	\order, \divisor
+	clz	\spare, \dividend
+	sub	\order, \order, \spare
+	mov	\divisor, \divisor, lsl \order
+
+#else
+
+	mov	\order, #0
+
+	@ Unless the divisor is very big, shift it up in multiples of
+	@ four bits, since this is the amount of unwinding in the main
+	@ division loop.  Continue shifting until the divisor is
+	@ larger than the dividend.
+1:	cmp	\divisor, #0x10000000
+	cmplo	\divisor, \dividend
+	movlo	\divisor, \divisor, lsl #4
+	addlo	\order, \order, #4
+	blo	1b
+
+	@ For very big divisors, we must shift it a bit at a time, or
+	@ we will be in danger of overflowing.
+1:	cmp	\divisor, #0x80000000
+	cmplo	\divisor, \dividend
+	movlo	\divisor, \divisor, lsl #1
+	addlo	\order, \order, #1
+	blo	1b
+
+#endif
+
+	@ Perform all needed substractions to keep only the reminder.
+	@ Do comparisons in batch of 4 first.
+	subs	\order, \order, #3		@ yes, 3 is intended here
+	blt	2f
+
+1:	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+	cmp	\dividend, \divisor,  lsr #1
+	subhs	\dividend, \dividend, \divisor, lsr #1
+	cmp	\dividend, \divisor,  lsr #2
+	subhs	\dividend, \dividend, \divisor, lsr #2
+	cmp	\dividend, \divisor,  lsr #3
+	subhs	\dividend, \dividend, \divisor, lsr #3
+	cmp	\dividend, #1
+	mov	\divisor, \divisor, lsr #4
+	subges	\order, \order, #4
+	bge	1b
+
+	tst	\order, #3
+	teqne	\dividend, #0
+	beq	5f
+
+	@ Either 1, 2 or 3 comparison/substractions are left.
+2:	cmn	\order, #2
+	blt	4f
+	beq	3f
+	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+	mov	\divisor,  \divisor,  lsr #1
+3:	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+	mov	\divisor,  \divisor,  lsr #1
+4:	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+5:
+.endm
+
+
+ENTRY(__udivsi3)
+ENTRY(__aeabi_uidiv)
+
+	subs	r2, r1, #1
+	moveq	pc, lr
+	bcc	Ldiv0
+	cmp	r0, r1
+	bls	11f
+	tst	r1, r2
+	beq	12f
+
+	ARM_DIV_BODY r0, r1, r2, r3
+
+	mov	r0, r2
+	mov	pc, lr
+
+11:	moveq	r0, #1
+	movne	r0, #0
+	mov	pc, lr
+
+12:	ARM_DIV2_ORDER r1, r2
+
+	mov	r0, r0, lsr r2
+	mov	pc, lr
+
+ENDPROC(__udivsi3)
+ENDPROC(__aeabi_uidiv)
+
+ENTRY(__umodsi3)
+
+	subs	r2, r1, #1			@ compare divisor with 1
+	bcc	Ldiv0
+	cmpne	r0, r1				@ compare dividend with divisor
+	moveq	r0, #0
+	tsthi	r1, r2				@ see if divisor is power of 2
+	andeq	r0, r0, r2
+	movls	pc, lr
+
+	ARM_MOD_BODY r0, r1, r2, r3
+
+	mov	pc, lr
+
+ENDPROC(__umodsi3)
+
+ENTRY(__divsi3)
+ENTRY(__aeabi_idiv)
+
+	cmp	r1, #0
+	eor	ip, r0, r1			@ save the sign of the result.
+	beq	Ldiv0
+	rsbmi	r1, r1, #0			@ loops below use unsigned.
+	subs	r2, r1, #1			@ division by 1 or -1 ?
+	beq	10f
+	movs	r3, r0
+	rsbmi	r3, r0, #0			@ positive dividend value
+	cmp	r3, r1
+	bls	11f
+	tst	r1, r2				@ divisor is power of 2 ?
+	beq	12f
+
+	ARM_DIV_BODY r3, r1, r0, r2
+
+	cmp	ip, #0
+	rsbmi	r0, r0, #0
+	mov	pc, lr
+
+10:	teq	ip, r0				@ same sign ?
+	rsbmi	r0, r0, #0
+	mov	pc, lr
+
+11:	movlo	r0, #0
+	moveq	r0, ip, asr #31
+	orreq	r0, r0, #1
+	mov	pc, lr
+
+12:	ARM_DIV2_ORDER r1, r2
+
+	cmp	ip, #0
+	mov	r0, r3, lsr r2
+	rsbmi	r0, r0, #0
+	mov	pc, lr
+
+ENDPROC(__divsi3)
+ENDPROC(__aeabi_idiv)
+
+ENTRY(__modsi3)
+
+	cmp	r1, #0
+	beq	Ldiv0
+	rsbmi	r1, r1, #0			@ loops below use unsigned.
+	movs	ip, r0				@ preserve sign of dividend
+	rsbmi	r0, r0, #0			@ if negative make positive
+	subs	r2, r1, #1			@ compare divisor with 1
+	cmpne	r0, r1				@ compare dividend with divisor
+	moveq	r0, #0
+	tsthi	r1, r2				@ see if divisor is power of 2
+	andeq	r0, r0, r2
+	bls	10f
+
+	ARM_MOD_BODY r0, r1, r2, r3
+
+10:	cmp	ip, #0
+	rsbmi	r0, r0, #0
+	mov	pc, lr
+
+ENDPROC(__modsi3)
+
+#ifdef CONFIG_AEABI
+
+ENTRY(__aeabi_uidivmod)
+
+	stmfd	sp!, {r0, r1, ip, lr}
+	bl	__aeabi_uidiv
+	ldmfd	sp!, {r1, r2, ip, lr}
+	mul	r3, r0, r2
+	sub	r1, r1, r3
+	mov	pc, lr
+
+ENDPROC(__aeabi_uidivmod)
+
+ENTRY(__aeabi_idivmod)
+
+	stmfd	sp!, {r0, r1, ip, lr}
+	bl	__aeabi_idiv
+	ldmfd	sp!, {r1, r2, ip, lr}
+	mul	r3, r0, r2
+	sub	r1, r1, r3
+	mov	pc, lr
+
+ENDPROC(__aeabi_idivmod)
+
+#endif
+
+Ldiv0:
+
+	str	lr, [sp, #-8]!
+	bl	__div0
+	mov	r0, #0			@ About as wrong as it could be.
+	ldr	pc, [sp], #8
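Note: these routines implement the EABI division helpers that gcc emits for
integer division on ARM cores without a hardware divider; the compiler calls
them implicitly, so kernel C code never references them by name. A small
sketch (the function names are invented for illustration):

	unsigned int chunks(unsigned int bytes, unsigned int chunk_size)
	{
		return bytes / chunk_size;	/* compiles to a __aeabi_uidiv call */
	}

	unsigned int leftover(unsigned int bytes, unsigned int chunk_size)
	{
		return bytes % chunk_size;	/* compiles to a __aeabi_uidivmod call */
	}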
arch/arm/include/asm/atomic.h

@@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }

@@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 

@@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }

@@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 

@@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
-		"ldrex	%1, [%2]\n"
+		"ldrex	%1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"strexeq %0, %4, [%2]\n"
-		    : "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
 		    : "cc");
 	} while (res);

@@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%2]\n"
-"	bic	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
+	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
 	: "r" (addr), "Ir" (mask)
 	: "cc");
 }

@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ atomic64_set\n"
+"1:	ldrexd	%0, %H0, [%1]\n"
+"	strexd	%0, %2, %H2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_add\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_sub\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_sub_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+	u64 oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg\n"
+		"ldrexd		%1, %H1, [%2]\n"
+		"mov		%0, #0\n"
+		"teq		%1, %3\n"
+		"teqeq		%H1, %H3\n"
+		"strexdeq	%0, %4, %H4, [%2]"
+		: "=&r" (res), "=&r" (oldval)
+		: "r" (&ptr->counter), "r" (old), "r" (new)
+		: "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_xchg\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%1, %3, %H3, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&ptr->counter), "r" (new)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, #1\n"
+"	sbc	%H0, %H0, #0\n"
+"	teq	%H0, #0\n"
+"	bmi	2f\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+	u64 val;
+	unsigned long tmp;
+	int ret = 1;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_unless\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	teq	%0, %4\n"
+"	teqeq	%H0, %H4\n"
+"	moveq	%1, #0\n"
+"	beq	2f\n"
+"	adds	%0, %0, %5\n"
+"	adc	%H0, %H0, %H5\n"
+"	strexd	%2, %0, %H0, [%3]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (ret)
+		smp_mb();
+
+	return ret;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
 #include <asm-generic/atomic-long.h>
 #endif
 #endif
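Note: the atomic64_t backport above gives accounting code 64-bit counters that
update lock-free via ldrexd/strexd; 32-bit byte counters would wrap after
4 GiB of traffic. A minimal usage sketch (the counter name is illustrative,
not from this patch):

	static atomic64_t rx_bytes = ATOMIC64_INIT(0);

	static void account_rx(unsigned int len)
	{
		atomic64_add(len, &rx_bytes);		/* lock-free read-modify-write */
	}

	static u64 snapshot_rx(void)
	{
		return atomic64_read(&rx_bytes);	/* atomic 64-bit load */
	}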
drivers/net/ppp_deflate.c

@@ -306,7 +306,7 @@ static void z_decomp_free(void *arg)
 
 	if (state) {
 		zlib_inflateEnd(&state->strm);
-		vfree(state->strm.workspace);
+		kfree(state->strm.workspace);
 		kfree(state);
 	}
 }

@@ -346,7 +346,8 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
 
 	state->w_size         = w_size;
 	state->strm.next_out  = NULL;
-	state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
+	state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
+					GFP_KERNEL|__GFP_REPEAT);
 	if (state->strm.workspace == NULL)
 		goto out_free;
 
drivers/net/pppol2tp.c

@@ -756,6 +756,7 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
 
 	/* Try to dequeue as many skbs from reorder_q as we can. */
 	pppol2tp_recv_dequeue(session);
+	sock_put(sock);
 
 	return 0;
 

@@ -772,6 +773,7 @@ discard_bad_csum:
 	UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
 	tunnel->stats.rx_errors++;
 	kfree_skb(skb);
+	sock_put(sock);
 
 	return 0;
 

@@ -1178,7 +1180,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	/* Calculate UDP checksum if configured to do so */
 	if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
 		skb->ip_summed = CHECKSUM_NONE;
-	else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
+	else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
+		 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		csum = skb_checksum(skb, 0, udp_len, 0);
 		uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,

@@ -1657,6 +1660,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 		if (tunnel_sock == NULL)
 			goto end;
 
+		sock_hold(tunnel_sock);
 		tunnel = tunnel_sock->sk_user_data;
 	} else {
 		tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
drivers/net/pppolac.c

@@ -15,12 +15,15 @@
  */
 
 /* This driver handles L2TP data packets between a UDP socket and a PPP channel.
- * To keep things simple, only one session per socket is permitted. Packets are
- * sent via the socket, so it must keep connected to the same address. One must
- * not set sequencing in ICCN but let LNS controll it. Currently this driver
- * only works on IPv4 due to the lack of UDP encapsulation support in IPv6. */
+ * The socket must keep connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
 
 #include <linux/module.h>
+#include <linux/jiffies.h>
 #include <linux/workqueue.h>
 #include <linux/skbuff.h>
 #include <linux/file.h>

@@ -53,14 +56,28 @@ static inline union unaligned *unaligned(void *ptr)
 	return (union unaligned *)ptr;
 }
 
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
 static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
 {
 	struct sock *sk = (struct sock *)sk_udp->sk_user_data;
 	struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
 	__u8 bits;
 	__u8 *ptr;
 
-	/* Drop the packet if it is too short. */
+	/* Drop the packet if L2TP header is missing. */
 	if (skb->len < sizeof(struct udphdr) + 6)
 		goto drop;
 

@@ -99,9 +116,12 @@ static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
 	if (unaligned(ptr)->u32 != opt->local)
 		goto drop;
 
-	/* Check the sequence if it is present. According to RFC 2661 section
-	 * 5.4, the only thing to do is to update opt->sequencing. */
-	opt->sequencing = bits & L2TP_SEQUENCE_BIT;
+	/* Check the sequence if it is present. */
+	if (bits & L2TP_SEQUENCE_BIT) {
+		meta->sequence = ptr[4] << 8 | ptr[5];
+		if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
 
 	/* Skip PPP address and control if they are present. */
 	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&

@@ -112,7 +132,54 @@ static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
 	if (skb->len >= 1 && skb->data[0] & 1)
 		skb_push(skb, 1)[0] = 0;
 
-	/* Finally, deliver the packet to PPP channel. */
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+	if (bits & L2TP_SEQUENCE_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s16 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = (__u16)(meta->sequence + 1);
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
 	skb_orphan(skb);
 	ppp_input(&pppox_sk(sk)->chan, skb);
 	return NET_RX_SUCCESS;

@@ -163,14 +230,14 @@ static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	skb->data[1] = PPP_CTRL;
 
 	/* Install L2TP header. */
-	if (opt->sequencing) {
+	if (atomic_read(&opt->sequencing)) {
 		skb_push(skb, 10);
 		skb->data[0] = L2TP_SEQUENCE_BIT;
-		skb->data[6] = opt->sequence >> 8;
-		skb->data[7] = opt->sequence;
+		skb->data[6] = opt->xmit_sequence >> 8;
+		skb->data[7] = opt->xmit_sequence;
 		skb->data[8] = 0;
 		skb->data[9] = 0;
-		opt->sequence++;
+		opt->xmit_sequence++;
 	} else {
 		skb_push(skb, 6);
 		skb->data[0] = 0;

@@ -246,6 +313,7 @@ static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
 	po->chan.mtu = PPP_MTU - 80;
 	po->proto.lac.local = unaligned(&addr->local)->u32;
 	po->proto.lac.remote = unaligned(&addr->remote)->u32;
+	atomic_set(&po->proto.lac.sequencing, 1);
 	po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
 
 	error = ppp_register_channel(&po->chan);

@@ -283,6 +351,7 @@ static int pppolac_release(struct socket *sock)
 	if (sk->sk_state != PPPOX_NONE) {
 		struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
 		lock_sock(sk_udp);
+		skb_queue_purge(&sk->sk_receive_queue);
 		pppox_unbind_sock(sk);
 		udp_sk(sk_udp)->encap_type = 0;
 		udp_sk(sk_udp)->encap_rcv = NULL;
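Note: the reordering logic above compares 16-bit L2TP sequence numbers by
subtracting and casting to a signed type, the standard wraparound-safe
serial-number comparison. A small sketch of the idiom (the helper name is
invented for illustration):

	/* True when a is older than b, even across the 0xffff -> 0x0000 wrap. */
	static int seq16_before(__u16 a, __u16 b)
	{
		return (__s16)(a - b) < 0;
	}

	/* Example: seq16_before(0xfffe, 0x0003) is true, because
	 * 0xfffe - 0x0003 = 0xfffb, which is negative as a signed 16-bit
	 * value, so 0xfffe counts as 5 steps behind 0x0003. */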
drivers/net/pppopns.c

@@ -16,11 +16,14 @@
 
 /* This driver handles PPTP data packets between a RAW socket and a PPP channel.
  * The socket is created in the kernel space and connected to the same address
- * of the control socket. To keep things simple, packets are always sent with
- * sequence but without acknowledgement. This driver should work on both IPv4
- * and IPv6. */
+ * of the control socket. Outgoing packets are always sent with sequences but
+ * without acknowledgements. Incoming packets with sequences are reordered
+ * within a sliding window of one second. Currently reordering only happens when
+ * a packet is received. It is done for simplicity since no additional locks or
+ * threads are required. This driver should work on both IPv4 and IPv6. */
 
 #include <linux/module.h>
+#include <linux/jiffies.h>
 #include <linux/workqueue.h>
 #include <linux/skbuff.h>
 #include <linux/file.h>

@@ -52,21 +55,35 @@ struct header {
 	__u32 sequence;
 } __attribute__((packed));
 
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
 static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
 {
 	struct sock *sk = (struct sock *)sk_raw->sk_user_data;
 	struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
 	struct header *hdr;
 
 	/* Skip transport header */
 	skb_pull(skb, skb_transport_header(skb) - skb->data);
 
-	/* Drop the packet if it is too short. */
+	/* Drop the packet if GRE header is missing. */
 	if (skb->len < GRE_HEADER_SIZE)
 		goto drop;
+	hdr = (struct header *)skb->data;
 
 	/* Check the header. */
-	hdr = (struct header *)skb->data;
 	if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
 	    (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
 		goto drop;

@@ -81,6 +98,13 @@ static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
 	if (skb->len != ntohs(hdr->length))
 		goto drop;
 
+	/* Check the sequence if it is present. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		meta->sequence = ntohl(hdr->sequence);
+		if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
 	/* Skip PPP address and control if they are present. */
 	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
 	    skb->data[1] == PPP_CTRL)

@@ -90,7 +114,53 @@ static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
 	if (skb->len >= 1 && skb->data[0] & 1)
 		skb_push(skb, 1)[0] = 0;
 
-	/* Finally, deliver the packet to PPP channel. */
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s32 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = meta->sequence + 1;
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
 	skb_orphan(skb);
 	ppp_input(&pppox_sk(sk)->chan, skb);
 	return NET_RX_SUCCESS;

@@ -151,8 +221,8 @@ static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	hdr->type = PPTP_GRE_TYPE;
 	hdr->length = htons(length);
 	hdr->call = opt->remote;
-	hdr->sequence = htonl(opt->sequence);
-	opt->sequence++;
+	hdr->sequence = htonl(opt->xmit_sequence);
+	opt->xmit_sequence++;
 
 	/* Now send the packet via the delivery queue. */
 	skb_set_owner_w(skb, sk_raw);

@@ -261,6 +331,7 @@ static int pppopns_release(struct socket *sock)
 	if (sk->sk_state != PPPOX_NONE) {
 		struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
 		lock_sock(sk_raw);
+		skb_queue_purge(&sk->sk_receive_queue);
 		pppox_unbind_sock(sk);
 		sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
 		sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
include/linux/android_aid.h

@@ -22,5 +22,7 @@
 #define AID_INET        3003
 #define AID_NET_RAW     3004
 #define AID_NET_ADMIN   3005
+#define AID_NET_BW_STATS 3006  /* read bandwidth statistics */
+#define AID_NET_BW_ACCT  3007  /* change bandwidth statistics accounting */
 
 #endif
include/linux/err.h

@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
 	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
+static inline long IS_ERR_OR_NULL(const void *ptr)
+{
+	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
 /**
  * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
  * @ptr: The pointer to cast.
include/linux/if_packet.h

@@ -63,6 +63,7 @@ struct tpacket_auxdata
 	__u16		tp_mac;
 	__u16		tp_net;
 	__u16		tp_vlan_tci;
+	__u16		tp_padding;
 };
 
 /* Rx ring - header status */

@@ -103,6 +104,7 @@ struct tpacket2_hdr
 	__u32		tp_sec;
 	__u32		tp_nsec;
 	__u16		tp_vlan_tci;
+	__u16		tp_padding;
 };
 
 #define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
include/linux/if_pppox.h

@@ -146,19 +146,21 @@ struct pppoe_opt {
 };
 
 struct pppolac_opt {
-	__u32	local;
-	__u32	remote;
-	__u16	sequence;
-	__u8	sequencing;
-	int	(*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+	__u32		local;
+	__u32		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	atomic_t	sequencing;
+	int		(*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
 };
 
 struct pppopns_opt {
-	__u16	local;
-	__u16	remote;
-	__u32	sequence;
-	void	(*data_ready)(struct sock *sk_raw, int length);
-	int	(*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+	__u16		local;
+	__u16		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	void		(*data_ready)(struct sock *sk_raw, int length);
+	int		(*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
 };
 
 #include <net/sock.h>
include/linux/netdevice.h

@@ -1164,9 +1164,12 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 					unsigned int offset)
 {
+	if (!pskb_may_pull(skb, hlen))
+		return NULL;
+
 	NAPI_GRO_CB(skb)->frag0 = NULL;
 	NAPI_GRO_CB(skb)->frag0_len = 0;
-	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
+	return skb->data + offset;
 }
 
 static inline void *skb_gro_mac_header(struct sk_buff *skb)

@@ -1560,6 +1563,8 @@ extern void netif_carrier_on(struct net_device *dev);
 
 extern void		netif_carrier_off(struct net_device *dev);
 
+extern void		netif_notify_peers(struct net_device *dev);
+
 /**
  *	netif_dormant_on - mark device as dormant.
  *	@dev: network device

@@ -2013,6 +2018,10 @@ static inline u32 dev_ethtool_get_flags(struct net_device *dev)
 		return 0;
 	return dev->ethtool_ops->get_flags(dev);
 }
+
+#define MODULE_ALIAS_NETDEV(device) \
+	MODULE_ALIAS("netdev-" device)
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_NETDEVICE_H */
include/linux/netfilter/Kbuild

@@ -6,6 +6,7 @@ header-y += nfnetlink_queue.h
 header-y += xt_CLASSIFY.h
 header-y += xt_CONNMARK.h
 header-y += xt_CONNSECMARK.h
+header-y += xt_CT.h
 header-y += xt_DSCP.h
 header-y += xt_LED.h
 header-y += xt_MARK.h

@@ -15,6 +16,7 @@ header-y += xt_RATEEST.h
 header-y += xt_SECMARK.h
 header-y += xt_TCPMSS.h
 header-y += xt_TCPOPTSTRIP.h
+header-y += xt_TEE.h
 header-y += xt_TPROXY.h
 header-y += xt_comment.h
 header-y += xt_connbytes.h
include/linux/netfilter/nf_conntrack_common.h

@@ -19,6 +19,9 @@ enum ip_conntrack_info
 	/* >= this indicates reply direction */
 	IP_CT_IS_REPLY,
 
+	IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
+	IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
+	IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY,
 	/* Number of distinct IP_CT types (no NEW in reply dirn). */
 	IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
 };

@@ -73,6 +76,32 @@ enum ip_conntrack_status {
 	/* Connection has fixed timeout. */
 	IPS_FIXED_TIMEOUT_BIT = 10,
 	IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),
+
+	/* Conntrack is a template */
+	IPS_TEMPLATE_BIT = 11,
+	IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
+
+	/* Conntrack is a fake untracked entry */
+	IPS_UNTRACKED_BIT = 12,
+	IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
 };
 
+/* Connection tracking event types */
+enum ip_conntrack_events {
+	IPCT_NEW,		/* new conntrack */
+	IPCT_RELATED,		/* related conntrack */
+	IPCT_DESTROY,		/* destroyed conntrack */
+	IPCT_REPLY,		/* connection has seen two-way traffic */
+	IPCT_ASSURED,		/* connection status has changed to assured */
+	IPCT_PROTOINFO,		/* protocol information has changed */
+	IPCT_HELPER,		/* new helper has been set */
+	IPCT_MARK,		/* new mark has been set */
+	IPCT_NATSEQADJ,		/* NAT is doing sequence adjustment */
+	IPCT_SECMARK,		/* new security mark has been set */
+};
+
+enum ip_conntrack_expect_events {
+	IPEXP_NEW,		/* new expectation */
+};
+
 #ifdef __KERNEL__
include/linux/netfilter/nfnetlink.h

@@ -75,11 +75,11 @@ struct nfnetlink_subsystem
 extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
 extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
 
-extern int nfnetlink_has_listeners(unsigned int group);
-extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
+extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
+extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group,
 			  int echo, gfp_t flags);
-extern void nfnetlink_set_err(u32 pid, u32 group, int error);
-extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
+extern void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error);
+extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags);
 
 extern void nfnl_lock(void);
 extern void nfnl_unlock(void);
include/linux/netfilter/x_tables.h

@@ -128,6 +128,7 @@ struct xt_counters_info
 
 #define XT_INV_PROTO		0x40	/* Invert the sense of PROTO. */
 
+#ifndef __KERNEL__
 /* fn returns 0 to continue iteration */
 #define XT_MATCH_ITERATE(type, e, fn, args...) \
 ({ \

@@ -171,17 +172,37 @@ struct xt_counters_info
 #define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
 	XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)
 
+#endif /* !__KERNEL__ */
+
+/* pos is normally a struct ipt_entry/ip6t_entry/etc. */
+#define xt_entry_foreach(pos, ehead, esize) \
+	for ((pos) = (typeof(pos))(ehead); \
+	     (pos) < (typeof(pos))((char *)(ehead) + (esize)); \
+	     (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset))
+
+/* can only be xt_entry_match, so no use of typeof here */
+#define xt_ematch_foreach(pos, entry) \
+	for ((pos) = (struct xt_entry_match *)entry->elems; \
+	     (pos) < (struct xt_entry_match *)((char *)(entry) + \
+	             (entry)->target_offset); \
+	     (pos) = (struct xt_entry_match *)((char *)(pos) + \
+	             (pos)->u.match_size))
+
 #ifdef __KERNEL__
 
 #include <linux/netdevice.h>
 
+#define xt_match_param xt_action_param
+#define xt_target_param xt_action_param
 /**
- * struct xt_match_param - parameters for match extensions' match functions
+ * struct xt_action_param - parameters for matches/targets
+ *
+ * @match:	the match extension
+ * @target:	the target extension
+ * @matchinfo:	per-match data
+ * @targetinfo: per-target data
  *
  * @in:		input netdevice
  * @out:	output netdevice
- * @match:	struct xt_match through which this function was invoked
- * @matchinfo:	per-match data
  * @fragoff:	packet is a fragment, this is the data offset
  * @thoff:	position of transport header relative to skb->data
  * @hook:	hook number given packet came from

@@ -189,10 +210,15 @@ struct xt_counters_info
  *		(helpful when match->family == NFPROTO_UNSPEC)
  * @hotdrop:	drop packet if we had inspection problems
  */
-struct xt_match_param {
+struct xt_action_param {
+	union {
+		const struct xt_match *match;
+		const struct xt_target *target;
+	};
+	union {
+		const void *matchinfo, *targinfo;
+	};
 	const struct net_device *in, *out;
-	const struct xt_match *match;
-	const void *matchinfo;
 	int fragoff;
 	unsigned int thoff;
 	unsigned int hooknum;

@@ -212,6 +238,7 @@ struct xt_match_param {
  * @hook_mask:	via which hooks the new rule is reachable
  */
 struct xt_mtchk_param {
+	struct net *net;
 	const char *table;
 	const void *entryinfo;
 	const struct xt_match *match;

@@ -222,28 +249,12 @@ struct xt_mtchk_param {
 
 /* Match destructor parameters */
 struct xt_mtdtor_param {
+	struct net *net;
 	const struct xt_match *match;
 	void *matchinfo;
 	u_int8_t family;
 };
 
-/**
- * struct xt_target_param - parameters for target extensions' target functions
- *
- * @hooknum:	hook through which this target was invoked
- * @target:	struct xt_target through which this function was invoked
- * @targinfo:	per-target data
- *
- * Other fields see above.
- */
-struct xt_target_param {
-	const struct net_device *in, *out;
-	const struct xt_target *target;
-	const void *targinfo;
-	unsigned int hooknum;
-	u_int8_t family;
-};
-
 /**
  * struct xt_tgchk_param - parameters for target extensions'
  * checkentry functions

@@ -254,6 +265,7 @@ struct xt_target_param {
  * Other fields see above.
  */
 struct xt_tgchk_param {
+	struct net *net;
 	const char *table;
 	const void *entryinfo;
 	const struct xt_target *target;

@@ -264,6 +276,7 @@ struct xt_tgchk_param {
 
 /* Target destructor parameters */
 struct xt_tgdtor_param {
+	struct net *net;
 	const struct xt_target *target;
 	void *targinfo;
 	u_int8_t family;

@@ -282,10 +295,10 @@ struct xt_match
 	   non-linear skb, using skb_header_pointer and
 	   skb_ip_make_writable. */
 	bool (*match)(const struct sk_buff *skb,
-		      const struct xt_match_param *);
+		      const struct xt_action_param *);
 
 	/* Called when user tries to insert an entry of this type. */
-	bool (*checkentry)(const struct xt_mtchk_param *);
+	int (*checkentry)(const struct xt_mtchk_param *);
 
 	/* Called when entry of this type deleted. */
 	void (*destroy)(const struct xt_mtdtor_param *);

@@ -320,7 +333,7 @@ struct xt_target
 	   must now handle non-linear skbs, using skb_copy_bits and
 	   skb_ip_make_writable. */
 	unsigned int (*target)(struct sk_buff *skb,
-			       const struct xt_target_param *);
+			       const struct xt_action_param *);
 
 	/* Called when user tries to insert an entry of this type:
 	   hook_mask is a bitmask of hooks from which it can be

@@ -384,6 +397,13 @@ struct xt_table_info
 	unsigned int hook_entry[NF_INET_NUMHOOKS];
 	unsigned int underflow[NF_INET_NUMHOOKS];
 
+	/*
+	 * Number of user chains. Since tables cannot have loops, at most
+	 * @stacksize jumps (number of user chains) can possibly be made.
+	 */
+	unsigned int stacksize;
+	unsigned int *stackptr;
+	void ***jumpstack;
 	/* ipt_entry tables: one per CPU */
 	/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
 	void *entries[1];

@@ -419,6 +439,8 @@ extern struct xt_table_info *xt_replace_table(struct xt_table *table,
 
 extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
 extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+extern struct xt_match *xt_request_find_match(u8 af, const char *name,
+					      u8 revision);
 extern struct xt_target *xt_request_find_target(u8 af, const char *name,
 						u8 revision);
 extern int xt_find_revision(u8 af, const char *name, u8 revision,
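Note: with xt_match_param and xt_target_param folded into xt_action_param and
checkentry now returning int, a match extension against the reworked interface
looks roughly like this. A hedged sketch only: the "example" match and its
trivial logic are invented for illustration.

	#include <linux/module.h>
	#include <linux/netfilter/x_tables.h>

	static bool example_mt(const struct sk_buff *skb,
			       const struct xt_action_param *par)
	{
		/* par->match and par->matchinfo now live in unions. */
		return skb->len > 64;
	}

	static int example_mt_check(const struct xt_mtchk_param *par)
	{
		return 0;	/* 0 on success instead of returning bool */
	}

	static struct xt_match example_mt_reg __read_mostly = {
		.name       = "example",
		.revision   = 0,
		.family     = NFPROTO_UNSPEC,
		.match      = example_mt,
		.checkentry = example_mt_check,
		.me         = THIS_MODULE,
	};

	static int __init example_mt_init(void)
	{
		return xt_register_match(&example_mt_reg);
	}

	static void __exit example_mt_exit(void)
	{
		xt_unregister_match(&example_mt_reg);
	}

	module_init(example_mt_init);
	module_exit(example_mt_exit);
	MODULE_LICENSE("GPL");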
include/linux/netfilter/xt_CONNMARK.h

@@ -1,26 +1,6 @@
 #ifndef _XT_CONNMARK_H_target
 #define _XT_CONNMARK_H_target
 
-#include <linux/types.h>
-
-/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-enum {
-	XT_CONNMARK_SET = 0,
-	XT_CONNMARK_SAVE,
-	XT_CONNMARK_RESTORE
-};
-
-struct xt_connmark_tginfo1 {
-	__u32 ctmark, ctmask, nfmask;
-	__u8 mode;
-};
+#include <linux/netfilter/xt_connmark.h>
 
 #endif /*_XT_CONNMARK_H_target*/
include/linux/netfilter/xt_CT.h (new file, 17 lines)
@@ -0,0 +1,17 @@
+#ifndef _XT_CT_H
+#define _XT_CT_H
+
+#define XT_CT_NOTRACK	0x1
+
+struct xt_ct_target_info {
+	u_int16_t	flags;
+	u_int16_t	zone;
+	u_int32_t	ct_events;
+	u_int32_t	exp_events;
+	char		helper[16];
+
+	/* Used internally by the kernel */
+	struct nf_conn	*ct __attribute__((aligned(8)));
+};
+
+#endif /* _XT_CT_H */
include/linux/netfilter/xt_MARK.h

@@ -1,10 +1,6 @@
 #ifndef _XT_MARK_H_target
 #define _XT_MARK_H_target
 
-#include <linux/types.h>
-
-struct xt_mark_tginfo2 {
-	__u32 mark, mask;
-};
+#include <linux/netfilter/xt_mark.h>
 
 #endif /*_XT_MARK_H_target */
include/linux/netfilter/xt_TEE.h (new file, 12 lines)
@@ -0,0 +1,12 @@
+#ifndef _XT_TEE_TARGET_H
+#define _XT_TEE_TARGET_H
+
+struct xt_tee_tginfo {
+	union nf_inet_addr gw;
+	char oif[16];
+
+	/* used internally by the kernel */
+	struct xt_tee_priv *priv __attribute__((aligned(8)));
+};
+
+#endif /* _XT_TEE_TARGET_H */
include/linux/netfilter/xt_TPROXY.h

@@ -1,5 +1,5 @@
-#ifndef _XT_TPROXY_H_target
-#define _XT_TPROXY_H_target
+#ifndef _XT_TPROXY_H
+#define _XT_TPROXY_H
 
 /* TPROXY target is capable of marking the packet to perform
  * redirection. We can get rid of that whenever we get support for

@@ -11,4 +11,11 @@ struct xt_tproxy_target_info {
 	__be16 lport;
 };
 
-#endif /* _XT_TPROXY_H_target */
+struct xt_tproxy_target_info_v1 {
+	u_int32_t mark_mask;
+	u_int32_t mark_value;
+	union nf_inet_addr laddr;
+	__be16 lport;
+};
+
+#endif /* _XT_TPROXY_H */
include/linux/netfilter/xt_connmark.h

@@ -12,6 +12,17 @@
  * (at your option) any later version.
  */
 
+enum {
+	XT_CONNMARK_SET = 0,
+	XT_CONNMARK_SAVE,
+	XT_CONNMARK_RESTORE
+};
+
+struct xt_connmark_tginfo1 {
+	__u32 ctmark, ctmask, nfmask;
+	__u8 mode;
+};
+
 struct xt_connmark_mtinfo1 {
 	__u32 mark, mask;
 	__u8 invert;
include/linux/netfilter/xt_mark.h

@@ -3,6 +3,10 @@
 
 #include <linux/types.h>
 
+struct xt_mark_tginfo2 {
+	__u32 mark, mask;
+};
+
 struct xt_mark_mtinfo1 {
 	__u32 mark, mask;
 	__u8 invert;
include/linux/netfilter/xt_qtaguid.h (new file, 13 lines)
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID    XT_OWNER_UID
+#define XT_QTAGUID_GID    XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
include/linux/netfilter/xt_quota2.h (new file, 25 lines)
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+	XT_QUOTA_INVERT    = 1 << 0,
+	XT_QUOTA_GROW      = 1 << 1,
+	XT_QUOTA_PACKET    = 1 << 2,
+	XT_QUOTA_NO_CHANGE = 1 << 3,
+	XT_QUOTA_MASK      = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+	char name[15];
+	u_int8_t flags;
+
+	/* Comparison-invariant */
+	aligned_u64 quota;
+
+	/* Used internally by the kernel */
+	struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
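Note: the flag bits above select how a quota2 counter is interpreted. The
following is a hedged sketch of the accounting semantics only, not the actual
kernel implementation (which additionally has to lock the shared counter named
by q->name):

	static bool quota2_account_sketch(const struct xt_quota_mtinfo2 *q,
					  u64 *counter, unsigned int skb_len)
	{
		u64 cost = (q->flags & XT_QUOTA_PACKET) ? 1 : skb_len;
		bool ret;

		if (q->flags & XT_QUOTA_GROW) {
			/* Count upward and keep matching; useful as a meter. */
			*counter += cost;
			ret = true;
		} else {
			/* Drain the remaining quota; stop matching once empty. */
			ret = *counter >= cost;
			if (ret)
				*counter -= cost;
		}
		return ret ^ !!(q->flags & XT_QUOTA_INVERT);
	}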
include/linux/netfilter/xt_socket.h

@@ -9,4 +9,10 @@ struct xt_socket_mtinfo1 {
 	__u8 flags;
 };
 
+void xt_socket_put_sk(struct sock *sk);
+struct sock *xt_socket_get4_sk(const struct sk_buff *skb,
+			       struct xt_action_param *par);
+struct sock *xt_socket_get6_sk(const struct sk_buff *skb,
+			       struct xt_action_param *par);
+
 #endif /* _XT_SOCKET_H */
include/linux/netfilter_arp/arp_tables.h

@@ -214,9 +214,11 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e)
 	return (void *)e + e->target_offset;
 }
 
+#ifndef __KERNEL__
 /* fn returns 0 to continue iteration */
 #define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \
 	XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args)
+#endif
 
 /*
  * Main firewall chains definitions and global var's definitions.

@@ -264,6 +266,7 @@ struct arpt_error
 	.target.errorname = "ERROR", \
 }
 
+extern void *arpt_alloc_initial_table(const struct xt_table *);
 extern struct xt_table *arpt_register_table(struct net *net,
 					    const struct xt_table *table,
 					    const struct arpt_replace *repl);

@@ -297,14 +300,6 @@ compat_arpt_get_target(struct compat_arpt_entry *e)
 
 #define COMPAT_ARPT_ALIGN(s) COMPAT_XT_ALIGN(s)
 
-/* fn returns 0 to continue iteration */
-#define COMPAT_ARPT_ENTRY_ITERATE(entries, size, fn, args...) \
-	XT_ENTRY_ITERATE(struct compat_arpt_entry, entries, size, fn, ## args)
-
-#define COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \
-	XT_ENTRY_ITERATE_CONTINUE(struct compat_arpt_entry, entries, size, n, \
-				  fn, ## args)
-
 #endif /* CONFIG_COMPAT */
 #endif /*__KERNEL__*/
 #endif /* _ARPTABLES_H */
include/linux/netfilter_bridge/ebtables.h

@@ -302,7 +302,7 @@ struct ebt_table
 	~(__alignof__(struct ebt_replace)-1))
 extern struct ebt_table *ebt_register_table(struct net *net,
 					    const struct ebt_table *table);
-extern void ebt_unregister_table(struct ebt_table *table);
+extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
 extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
    const struct net_device *in, const struct net_device *out,
    struct ebt_table *table);
include/linux/netfilter_ipv4/ip_tables.h

@@ -228,6 +228,7 @@ ipt_get_target(struct ipt_entry *e)
 	return (void *)e + e->target_offset;
 }
 
+#ifndef __KERNEL__
 /* fn returns 0 to continue iteration */
 #define IPT_MATCH_ITERATE(e, fn, args...) \
 	XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args)

@@ -235,6 +236,7 @@ ipt_get_target(struct ipt_entry *e)
 /* fn returns 0 to continue iteration */
 #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \
 	XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args)
+#endif
 
 /*
  * Main firewall chains definitions and global var's definitions.

@@ -247,7 +249,7 @@ extern void ipt_init(void) __init;
 extern struct xt_table *ipt_register_table(struct net *net,
 					   const struct xt_table *table,
 					   const struct ipt_replace *repl);
-extern void ipt_unregister_table(struct xt_table *table);
+extern void ipt_unregister_table(struct net *net, struct xt_table *table);
 
 /* Standard entry. */
 struct ipt_standard

@@ -290,6 +292,7 @@ struct ipt_error
 	.target.errorname = "ERROR", \
 }
 
+extern void *ipt_alloc_initial_table(const struct xt_table *);
 extern unsigned int ipt_do_table(struct sk_buff *skb,
 				 unsigned int hook,
 				 const struct net_device *in,

@@ -321,19 +324,6 @@ compat_ipt_get_target(struct compat_ipt_entry *e)
 
 #define COMPAT_IPT_ALIGN(s) COMPAT_XT_ALIGN(s)
 
-/* fn returns 0 to continue iteration */
-#define COMPAT_IPT_MATCH_ITERATE(e, fn, args...) \
-	XT_MATCH_ITERATE(struct compat_ipt_entry, e, fn, ## args)
-
-/* fn returns 0 to continue iteration */
-#define COMPAT_IPT_ENTRY_ITERATE(entries, size, fn, args...) \
-	XT_ENTRY_ITERATE(struct compat_ipt_entry, entries, size, fn, ## args)
-
-/* fn returns 0 to continue iteration */
-#define COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \
-	XT_ENTRY_ITERATE_CONTINUE(struct compat_ipt_entry, entries, size, n, \
-				  fn, ## args)
-
 #endif /* CONFIG_COMPAT */
 #endif /*__KERNEL__*/
 #endif /* _IPTABLES_H */
@@ -288,6 +288,7 @@ ip6t_get_target(struct ip6t_entry *e)
	return (void *)e + e->target_offset;
}

#ifndef __KERNEL__
/* fn returns 0 to continue iteration */
#define IP6T_MATCH_ITERATE(e, fn, args...) \
	XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args)
@@ -295,6 +296,7 @@ ip6t_get_target(struct ip6t_entry *e)
/* fn returns 0 to continue iteration */
#define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \
	XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args)
#endif

/*
 * Main firewall chains definitions and global var's definitions.
@@ -305,10 +307,11 @@ ip6t_get_target(struct ip6t_entry *e)
#include <linux/init.h>
extern void ip6t_init(void) __init;

extern void *ip6t_alloc_initial_table(const struct xt_table *);
extern struct xt_table *ip6t_register_table(struct net *net,
					    const struct xt_table *table,
					    const struct ip6t_replace *repl);
extern void ip6t_unregister_table(struct xt_table *table);
extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
				  unsigned int hook,
				  const struct net_device *in,
@@ -349,18 +352,6 @@ compat_ip6t_get_target(struct compat_ip6t_entry *e)

#define COMPAT_IP6T_ALIGN(s) COMPAT_XT_ALIGN(s)

/* fn returns 0 to continue iteration */
#define COMPAT_IP6T_MATCH_ITERATE(e, fn, args...) \
	XT_MATCH_ITERATE(struct compat_ip6t_entry, e, fn, ## args)

/* fn returns 0 to continue iteration */
#define COMPAT_IP6T_ENTRY_ITERATE(entries, size, fn, args...) \
	XT_ENTRY_ITERATE(struct compat_ip6t_entry, entries, size, fn, ## args)

#define COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \
	XT_ENTRY_ITERATE_CONTINUE(struct compat_ip6t_entry, entries, size, n, \
				  fn, ## args)

#endif /* CONFIG_COMPAT */
#endif /*__KERNEL__*/
#endif /* _IP6_TABLES_H */
@@ -201,6 +201,7 @@ static inline int notifier_to_errno(int ret)
#define NETDEV_PRE_UP		0x000D
#define NETDEV_BONDING_OLDTYPE	0x000E
#define NETDEV_BONDING_NEWTYPE	0x000F
#define NETDEV_NOTIFY_PEERS	0x0013

#define SYS_DOWN	0x0001	/* Notify of system down */
#define SYS_RESTART	SYS_DOWN
@@ -84,14 +84,122 @@ extern int rcu_scheduler_active;
} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC

extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire() \
		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif

extern struct lockdep_map rcu_bh_lock_map;
# define rcu_read_acquire_bh() \
		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_sched_lock_map;
# define rcu_read_acquire_sched() \
		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_sched() \
		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)

/**
 * rcu_read_lock_held - might we be in RCU read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
 * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.
 */
static inline int rcu_read_lock_held(void)
{
	if (debug_locks)
		return lock_is_held(&rcu_lock_map);
	return 1;
}

/**
 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
 * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
 * this assumes we are in an RCU-bh read-side critical section unless it can
 * prove otherwise.
 */
static inline int rcu_read_lock_bh_held(void)
{
	if (debug_locks)
		return lock_is_held(&rcu_bh_lock_map);
	return 1;
}

/**
 * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
 * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING,
 * this assumes we are in an RCU-sched read-side critical section unless it
 * can prove otherwise. Note that disabling of preemption (including
 * disabling irqs) counts as an RCU-sched read-side critical section.
 */
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0;
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_read_acquire()		do { } while (0)
# define rcu_read_release()		do { } while (0)
# define rcu_read_acquire_bh()		do { } while (0)
# define rcu_read_release_bh()		do { } while (0)
# define rcu_read_acquire_sched()	do { } while (0)
# define rcu_read_release_sched()	do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

extern int rcu_my_thread_group_empty(void);

/**
 * rcu_dereference_check - rcu_dereference with debug checking
 *
 * Do an rcu_dereference(), but check that the context is correct.
 * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
 * ensure that the rcu_dereference_check() executes within an RCU
 * read-side critical section. It is also possible to check for
 * locks being held, for example, by using lockdep_is_held().
 */
#define rcu_dereference_check(p, c) \
	({ \
		if (debug_locks && !(c)) \
			lockdep_rcu_dereference(__FILE__, __LINE__); \
		rcu_dereference_raw(p); \
	})

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
@@ -166,7 +274,7 @@ static inline void rcu_read_lock_bh(void)
{
	__rcu_read_lock_bh();
	__acquire(RCU_BH);
	rcu_read_acquire();
	rcu_read_acquire_bh();
}

/*
@@ -176,7 +284,7 @@ static inline void rcu_read_lock_bh(void)
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_read_release();
	rcu_read_release_bh();
	__release(RCU_BH);
	__rcu_read_unlock_bh();
}
@@ -194,7 +302,7 @@ static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_read_acquire();
	rcu_read_acquire_sched();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -211,7 +319,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_read_release();
	rcu_read_release_sched();
	__release(RCU_SCHED);
	preempt_enable();
}
@@ -225,21 +333,48 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 * rcu_dereference_raw - fetch an RCU-protected pointer
 *
 * The caller must be within some flavor of RCU read-side critical
 * section, or must be otherwise preventing the pointer from changing,
 * for example, by holding an appropriate lock. This pointer may later
 * be safely dereferenced. It is the caller's responsibility to have
 * done the right thing, as this primitive does no checking of any kind.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p) ({ \
#define rcu_dereference_raw(p) ({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference(p) \
	rcu_dereference_check(p, rcu_read_lock_held())

/**
 * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) \
	rcu_dereference_check(p, rcu_read_lock_bh_held())

/**
 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) \
	rcu_dereference_check(p, rcu_read_lock_sched_held())

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
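A minimal usage sketch of rcu_dereference_check() as documented above; the
struct, pointer, and lock names here are hypothetical, not from this patch:

	struct foo {
		int data;
	};
	static struct foo *gp;			/* hypothetical RCU-protected pointer */
	static DEFINE_SPINLOCK(my_lock);	/* hypothetical update-side lock */

	static int read_foo_data(void)
	{
		int val;

		rcu_read_lock();
		/* lockdep complains unless we hold rcu_read_lock() or my_lock */
		val = rcu_dereference_check(gp,
					    rcu_read_lock_held() ||
					    lockdep_is_held(&my_lock))->data;
		rcu_read_unlock();
		return val;
	}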
@@ -754,6 +754,9 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
#ifdef CONFIG_PROVE_LOCKING
extern int lockdep_rtnl_is_held(void);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
@@ -177,7 +177,9 @@ extern int unregister_inet6addr_notifier(struct notifier_block *nb);
static inline struct inet6_dev *
__in6_dev_get(struct net_device *dev)
{
	return rcu_dereference(dev->ip6_ptr);
	return rcu_dereference_check(dev->ip6_ptr,
				     rcu_read_lock_held() ||
				     lockdep_rtnl_is_held());
}

static inline struct inet6_dev *
@@ -10,6 +10,7 @@ extern void unix_inflight(struct file *fp);
extern void unix_notinflight(struct file *fp);
extern void unix_gc(void);
extern void wait_for_unix_gc(void);
extern struct sock *unix_get_socket(struct file *filp);

#define UNIX_HASH_SIZE	256

@@ -56,6 +57,7 @@ struct unix_sock {
	spinlock_t		lock;
	unsigned int		gc_candidate : 1;
	unsigned int		gc_maybe_cycle : 1;
	unsigned char		recursion_level;
	wait_queue_head_t	peer_wait;
};
#define unix_sk(__sk) ((struct unix_sock *)__sk)
@@ -341,8 +341,11 @@ enum ip_defrag_users
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHORT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHORT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD
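The USHORT_MAX-sized gaps reserved after each conntrack user above exist so
a conntrack zone id can be folded into the defrag "user"; a sketch of the
intended arithmetic (illustrative helper, not a function from this patch):

	/* one defrag user per (direction, zone) pair */
	static inline enum ip_defrag_users
	defrag_user(bool out, u16 zone)
	{
		return (out ? IP_DEFRAG_CONNTRACK_OUT
			    : IP_DEFRAG_CONNTRACK_IN) + zone;
	}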
@@ -357,8 +357,11 @@ struct inet_frag_queue;
enum ip6_defrag_users {
	IP6_DEFRAG_LOCAL_DELIVER,
	IP6_DEFRAG_CONNTRACK_IN,
	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHORT_MAX,
	IP6_DEFRAG_CONNTRACK_OUT,
	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHORT_MAX,
	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX,
};

struct ip6_create_arg {
@@ -908,6 +908,9 @@ enum ieee80211_tkip_key_type {
 * @IEEE80211_HW_BEACON_FILTER:
 *	Hardware supports dropping of irrelevant beacon frames to
 *	avoid waking up cpu.
 * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
 *	Hardware can provide ack status reports of Tx frames to
 *	the stack.
 */
enum ieee80211_hw_flags {
	IEEE80211_HW_RX_INCLUDES_FCS			= 1<<1,
@@ -924,6 +927,7 @@ enum ieee80211_hw_flags {
	IEEE80211_HW_SUPPORTS_DYNAMIC_PS		= 1<<12,
	IEEE80211_HW_MFP_CAPABLE			= 1<<13,
	IEEE80211_HW_BEACON_FILTER			= 1<<14,
	IEEE80211_HW_REPORTS_TX_ACK_STATUS		= 1<<15,
};

/**
@@ -1696,6 +1700,12 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
 */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);

/*
 * The TX headroom reserved by mac80211 for its own tx_status functions.
 * This is enough for the radiotap header.
 */
#define IEEE80211_TX_STATUS_HEADROOM	13

/**
 * ieee80211_tx_status - transmit status callback
 *
@@ -28,6 +28,10 @@ struct ctl_table_header;
struct net_generic;
struct sock;

#define NETDEV_HASHBITS    8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

struct net {
	atomic_t		count;		/* To decided when the network
						 * namespace should be freed.
@@ -38,7 +42,8 @@ struct net {
						 */
#endif
	struct list_head	list;		/* list of network namespaces */
	struct work_struct	work;		/* work struct for freeing */
	struct list_head	cleanup_list;	/* namespaces on death row */
	struct list_head	exit_list;	/* Use only net_mutex */

	struct proc_dir_entry 	*proc_net;
	struct proc_dir_entry 	*proc_net_stat;
@@ -76,6 +81,8 @@ struct net {
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct netns_ct		ct;
#endif
	struct sock		*nfnl;
	struct sock		*nfnl_stash;
#endif
#ifdef CONFIG_XFRM
	struct netns_xfrm	xfrm;
@@ -232,6 +239,9 @@ struct pernet_operations {
	struct list_head list;
	int (*init)(struct net *net);
	void (*exit)(struct net *net);
	void (*exit_batch)(struct list_head *net_exit_list);
	int *id;
	size_t size;
};

/*
@@ -198,7 +198,8 @@ extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int null
extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);

extern struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple);
__nf_conntrack_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple);

extern void nf_conntrack_hash_insert(struct nf_conn *ct);
extern void nf_ct_delete_from_lists(struct nf_conn *ct);
@@ -260,18 +261,29 @@ extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
			       u32 seq);

/* Fake conntrack entry for untracked connections */
extern struct nf_conn nf_conntrack_untracked;
static inline struct nf_conn *nf_ct_untracked_get(void)
{
	extern struct nf_conn nf_conntrack_untracked;

	return &nf_conntrack_untracked;
}
extern void nf_ct_untracked_status_or(unsigned long bits);

/* Iterate over all conntracks: if iter returns true, it's deleted. */
extern void
nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
extern void nf_conntrack_free(struct nf_conn *ct);
extern struct nf_conn *
nf_conntrack_alloc(struct net *net,
nf_conntrack_alloc(struct net *net, u16 zone,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_tuple *repl,
		   gfp_t gfp);

static inline int nf_ct_is_template(const struct nf_conn *ct)
{
	return test_bit(IPS_TEMPLATE_BIT, &ct->status);
}

/* It's confirmed if it is, or has been in the hash table. */
static inline int nf_ct_is_confirmed(struct nf_conn *ct)
{
@@ -283,9 +295,9 @@ static inline int nf_ct_is_dying(struct nf_conn *ct)
	return test_bit(IPS_DYING_BIT, &ct->status);
}

static inline int nf_ct_is_untracked(const struct sk_buff *skb)
static inline int nf_ct_is_untracked(const struct nf_conn *ct)
{
	return (skb->nfct == &nf_conntrack_untracked.ct_general);
	return test_bit(IPS_UNTRACKED_BIT, &ct->status);
}

extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
@@ -49,7 +49,8 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,

/* Find a connection corresponding to a tuple. */
extern struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple);
nf_conntrack_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple);

extern int __nf_conntrack_confirm(struct sk_buff *skb);

@@ -59,7 +60,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
	int ret = NF_ACCEPT;

	if (ct && ct != &nf_conntrack_untracked) {
	if (ct && !nf_ct_is_untracked(ct)) {
		if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
			ret = __nf_conntrack_confirm(skb);
		if (likely(ret == NF_ACCEPT))
@@ -12,28 +12,12 @@
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack_extend.h>

/* Connection tracking event types */
enum ip_conntrack_events
{
	IPCT_NEW	= 0,	/* new conntrack */
	IPCT_RELATED	= 1,	/* related conntrack */
	IPCT_DESTROY	= 2,	/* destroyed conntrack */
	IPCT_STATUS	= 3,	/* status has changed */
	IPCT_PROTOINFO	= 4,	/* protocol information has changed */
	IPCT_HELPER	= 5,	/* new helper has been set */
	IPCT_MARK	= 6,	/* new mark has been set */
	IPCT_NATSEQADJ	= 7,	/* NAT is doing sequence adjustment */
	IPCT_SECMARK	= 8,	/* new security mark has been set */
};

enum ip_conntrack_expect_events {
	IPEXP_NEW	= 0,	/* new expectation */
};

struct nf_conntrack_ecache {
	unsigned long cache;	/* bitops want long */
	unsigned long missed;	/* missed events */
	u32 pid;		/* netlink pid of destroyer */
	unsigned long cache;	/* bitops want long */
	unsigned long missed;	/* missed events */
	u16 ctmask;		/* bitmask of ct events to be delivered */
	u16 expmask;		/* bitmask of expect events to be delivered */
	u32 pid;		/* netlink pid of destroyer */
};

static inline struct nf_conntrack_ecache *
@@ -43,14 +27,24 @@ nf_ct_ecache_find(const struct nf_conn *ct)
}

static inline struct nf_conntrack_ecache *
nf_ct_ecache_ext_add(struct nf_conn *ct, gfp_t gfp)
nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_ecache *e;

	if (!net->ct.sysctl_events)
	if (!ctmask && !expmask && net->ct.sysctl_events) {
		ctmask = ~0;
		expmask = ~0;
	}
	if (!ctmask && !expmask)
		return NULL;

	return nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
	if (e) {
		e->ctmask = ctmask;
		e->expmask = expmask;
	}
	return e;
};

#ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -83,6 +77,9 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
	if (e == NULL)
		return;

	if (!(e->ctmask & (1 << event)))
		return;

	set_bit(event, &e->cache);
}

@@ -93,7 +90,6 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
			      int report)
{
	int ret = 0;
	struct net *net = nf_ct_net(ct);
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;

@@ -102,9 +98,6 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
	if (notify == NULL)
		goto out_unlock;

	if (!net->ct.sysctl_events)
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;
@@ -118,6 +111,9 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
		/* This is a resent of a destroy event? If so, skip missed */
		unsigned long missed = e->pid ? 0 : e->missed;

		if (!((eventmask | missed) & e->ctmask))
			goto out_unlock;

		ret = notify->fcn(eventmask | missed, &item);
		if (unlikely(ret < 0 || missed)) {
			spin_lock_bh(&ct->lock);
@@ -173,18 +169,19 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
			  u32 pid,
			  int report)
{
	struct net *net = nf_ct_exp_net(exp);
	struct nf_exp_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(nf_expect_event_cb);
	if (notify == NULL)
		goto out_unlock;

	if (!net->ct.sysctl_events)
	e = nf_ct_ecache_find(exp->master);
	if (e == NULL)
		goto out_unlock;

	{
	if (e->expmask & (1 << event)) {
		struct nf_exp_event item = {
			.exp	= exp,
			.pid	= pid,
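A sketch of how a caller might use the new ctmask/expmask parameters to
subscribe to conntrack DESTROY events only (mask choice is illustrative,
and the helper itself is hypothetical, not from this patch):

	static int want_destroy_events_only(struct nf_conn *ct)
	{
		struct nf_conntrack_ecache *e;

		/* deliver only IPCT_DESTROY; no expectation events */
		e = nf_ct_ecache_ext_add(ct, 1 << IPCT_DESTROY, 0, GFP_ATOMIC);
		/* with a non-zero mask, NULL here means the extension
		 * could not be allocated */
		return e ? 0 : -ENOMEM;
	}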
@@ -79,13 +79,16 @@ int nf_conntrack_expect_init(struct net *net);
void nf_conntrack_expect_fini(struct net *net);

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple);
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple);

struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple);
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple);

struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple);
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple);

void nf_ct_unlink_expect(struct nf_conntrack_expect *exp);
void nf_ct_remove_expectations(struct nf_conn *ct);
@@ -9,6 +9,7 @@ enum nf_ct_ext_id
	NF_CT_EXT_NAT,
	NF_CT_EXT_ACCT,
	NF_CT_EXT_ECACHE,
	NF_CT_EXT_ZONE,
	NF_CT_EXT_NUM,
};

@@ -16,6 +17,7 @@ enum nf_ct_ext_id
#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone

/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
@@ -41,14 +41,18 @@ struct nf_conntrack_helper
};

extern struct nf_conntrack_helper *
__nf_conntrack_helper_find_byname(const char *name);
__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum);

extern struct nf_conntrack_helper *
nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);

extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);

extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);

extern int __nf_ct_try_assign_helper(struct nf_conn *ct, gfp_t flags);
extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
				     gfp_t flags);

extern void nf_ct_helper_destroy(struct nf_conn *ct);
@@ -50,8 +50,8 @@ struct nf_conntrack_l4proto
	/* Called when a conntrack entry is destroyed */
	void (*destroy)(struct nf_conn *ct);

	int (*error)(struct net *net, struct sk_buff *skb, unsigned int dataoff,
		     enum ip_conntrack_info *ctinfo,
	int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
		     unsigned int dataoff, enum ip_conntrack_info *ctinfo,
		     u_int8_t pf, unsigned int hooknum);

	/* Print out the per-protocol part of the tuple. Return like seq_* */
23
include/net/netfilter/nf_conntrack_zones.h
Normal file
@@ -0,0 +1,23 @@
#ifndef _NF_CONNTRACK_ZONES_H
#define _NF_CONNTRACK_ZONES_H

#include <net/netfilter/nf_conntrack_extend.h>

#define NF_CT_DEFAULT_ZONE	0

struct nf_conntrack_zone {
	u16	id;
};

static inline u16 nf_ct_zone(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
	struct nf_conntrack_zone *nf_ct_zone;
	nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
	if (nf_ct_zone)
		return nf_ct_zone->id;
#endif
	return NF_CT_DEFAULT_ZONE;
}

#endif /* _NF_CONNTRACK_ZONES_H */
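Callers then just pass nf_ct_zone() wherever the new u16 zone parameters
appear; a sketch (surrounding variables assumed, not from this patch):

	struct nf_conntrack_tuple_hash *h;

	/* look the tuple up in the same zone as an existing conntrack;
	 * falls back to NF_CT_DEFAULT_ZONE if ct has no zone extension */
	h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);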
@@ -8,25 +8,131 @@
#include <net/inet_sock.h>
#include <net/tcp.h>

#define NFT_LOOKUP_ANY         0
#define NFT_LOOKUP_LISTENER    1
#define NFT_LOOKUP_ESTABLISHED 2

/* look up and get a reference to a matching socket */
extern struct sock *

/* This function is used by the 'TPROXY' target and the 'socket'
 * match. The following lookups are supported:
 *
 * Explicit TProxy target rule
 * ===========================
 *
 * This is used when the user wants to intercept a connection matching
 * an explicit iptables rule. In this case the sockets are assumed
 * matching in preference order:
 *
 *   - match: if there's a fully established connection matching the
 *     _packet_ tuple, it is returned, assuming the redirection
 *     already took place and we process a packet belonging to an
 *     established connection
 *
 *   - match: if there's a listening socket matching the redirection
 *     (e.g. on-port & on-ip of the connection), it is returned,
 *     regardless if it was bound to 0.0.0.0 or an explicit
 *     address. The reasoning is that if there's an explicit rule, it
 *     does not really matter if the listener is bound to an interface
 *     or to 0. The user already stated that he wants redirection
 *     (since he added the rule).
 *
 * "socket" match based redirection (no specific rule)
 * ===================================================
 *
 * There are connections with dynamic endpoints (e.g. FTP data
 * connection) that the user is unable to add explicit rules
 * for. These are taken care of by a generic "socket" rule. It is
 * assumed that the proxy application is trusted to open such
 * connections without explicit iptables rule (except of course the
 * generic 'socket' rule). In this case the following sockets are
 * matched in preference order:
 *
 *   - match: if there's a fully established connection matching the
 *     _packet_ tuple
 *
 *   - match: if there's a non-zero bound listener (possibly with a
 *     non-local address) We don't accept zero-bound listeners, since
 *     then local services could intercept traffic going through the
 *     box.
 *
 * Please note that there's an overlap between what a TPROXY target
 * and a socket match will match. Normally if you have both rules the
 * "socket" match will be the first one, effectively all packets
 * belonging to established connections going through that one.
 */
static inline struct sock *
nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
		      const __be32 saddr, const __be32 daddr,
		      const __be16 sport, const __be16 dport,
		      const struct net_device *in, bool listening);

static inline void
nf_tproxy_put_sock(struct sock *sk)
		      const struct net_device *in, int lookup_type)
{
	/* TIME_WAIT inet sockets have to be handled differently */
	if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT))
		inet_twsk_put(inet_twsk(sk));
	else
		sock_put(sk);
	struct sock *sk;

	/* look up socket */
	switch (protocol) {
	case IPPROTO_TCP:
		switch (lookup_type) {
		case NFT_LOOKUP_ANY:
			sk = __inet_lookup(net, &tcp_hashinfo,
					   saddr, sport, daddr, dport,
					   in->ifindex);
			break;
		case NFT_LOOKUP_LISTENER:
			sk = inet_lookup_listener(net, &tcp_hashinfo,
						  daddr, dport,
						  in->ifindex);

			/* NOTE: we return listeners even if bound to
			 * 0.0.0.0, those are filtered out in
			 * xt_socket, since xt_TPROXY needs 0 bound
			 * listeners too */

			break;
		case NFT_LOOKUP_ESTABLISHED:
			sk = inet_lookup_established(net, &tcp_hashinfo,
						     saddr, sport, daddr, dport,
						     in->ifindex);
			break;
		default:
			WARN_ON(1);
			sk = NULL;
			break;
		}
		break;
	case IPPROTO_UDP:
		sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
				     in->ifindex);
		if (sk && lookup_type != NFT_LOOKUP_ANY) {
			int connected = (sk->sk_state == TCP_ESTABLISHED);
			int wildcard = (inet_sk(sk)->rcv_saddr == 0);

			/* NOTE: we return listeners even if bound to
			 * 0.0.0.0, those are filtered out in
			 * xt_socket, since xt_TPROXY needs 0 bound
			 * listeners too */
			if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
			    (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
				sock_put(sk);
				sk = NULL;
			}
		}
		break;
	default:
		WARN_ON(1);
		sk = NULL;
	}

	pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
		 protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);

	return sk;
}

/* assign a socket to the skb -- consumes sk */
int
void
nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);

#endif
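A sketch of how a TPROXY-style caller might use the reworked helper,
established-first then listener, per the comment above; laddr, lport, hp,
and par are assumed local variables, not from this patch:

	sk = nf_tproxy_get_sock_v4(net, iph->protocol,
				   iph->saddr, laddr,
				   hp->source, lport,
				   par->in, NFT_LOOKUP_ESTABLISHED);
	if (sk == NULL)
		sk = nf_tproxy_get_sock_v4(net, iph->protocol,
					   iph->saddr, laddr,
					   hp->source, lport,
					   par->in, NFT_LOOKUP_LISTENER);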
@@ -384,7 +384,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
 *
 * Returns the first attribute which matches the specified type.
 */
static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
					     int hdrlen, int attrtype)
{
	return nla_find(nlmsg_attrdata(nlh, hdrlen),
@@ -107,6 +107,7 @@ typedef enum {
	SCTP_CMD_T1_RETRAN,	 /* Mark for retransmission after T1 timeout  */
	SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
	SCTP_CMD_SEND_MSG,	 /* Send the whole use message */
	SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
	SCTP_CMD_LAST
} sctp_verb_t;
@@ -278,6 +278,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
/* 2nd level prototypes */
void sctp_generate_t3_rtx_event(unsigned long peer);
void sctp_generate_heartbeat_event(unsigned long peer);
void sctp_generate_proto_unreach_event(unsigned long peer);

void sctp_ootb_pkt_free(struct sctp_packet *);
@@ -772,6 +772,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
			  struct iovec *data);
void sctp_chunk_free(struct sctp_chunk *);
void  *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
void  *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
				 const struct sctp_association *,
				 struct sock *);
@@ -987,7 +988,7 @@ struct sctp_transport {
	int init_sent_count;

	/* state       : The current state of this destination,
	 *             : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKOWN.
	 *             : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKNOWN.
	 */
	int state;

@@ -1007,6 +1008,9 @@ struct sctp_transport {
	/* Heartbeat timer is per destination. */
	struct timer_list hb_timer;

	/* Timer to handle ICMP proto unreachable envets */
	struct timer_list proto_unreach_timer;

	/* Since we're using per-destination retransmission timers
	 * (see above), we're also using per-destination "transmitted"
	 * queues.  This probably ought to be a private struct
20
include/net/secure_seq.h
Normal file
@@ -0,0 +1,20 @@
#ifndef _NET_SECURE_SEQ
#define _NET_SECURE_SEQ

#include <linux/types.h>

extern __u32 secure_ip_id(__be32 daddr);
extern __u32 secure_ipv6_id(const __be32 daddr[4]);
extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
				      __be16 dport);
extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
					__be16 sport, __be16 dport);
extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
					  __be16 sport, __be16 dport);
extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
				       __be16 sport, __be16 dport);
extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
					 __be16 sport, __be16 dport);

#endif /* _NET_SECURE_SEQ */
@@ -259,11 +259,21 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
	return seq3 - seq2 >= seq1 - seq2;
}

static inline int tcp_too_many_orphans(struct sock *sk, int num)
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}

	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
		return true;
	return false;
}

/* syncookies: remember time of last synqueue overflow */
@@ -501,8 +511,22 @@ extern unsigned int tcp_current_mss(struct sock *sk);
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
@@ -1260,7 +1284,7 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsucessful, exponentially backed-off
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static inline bool retransmits_timed_out(struct sock *sk,
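Restating the cutoff rule above outside kernel types makes the two regimes
easy to check; a standalone sketch with made-up sample values:

	/* standalone restatement of tcp_bound_to_half_wnd()'s rule */
	static int bound_to_half_wnd(int max_window, int pktsize, int hdr_len)
	{
		int cutoff = (max_window >= 512) ? max_window >> 1 : max_window;

		if (cutoff && pktsize > cutoff)
			return cutoff > 68 - hdr_len ? cutoff : 68 - hdr_len;
		return pktsize;
	}

	/* bound_to_half_wnd(400, 1460, 20)    == 400   (tiny window, no halving)
	 * bound_to_half_wnd(65535, 65000, 20) == 32767 (large window, halved)  */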
@@ -79,7 +79,7 @@
 * drivers have to only report state changes due to external
 * conditions.
 *
 * All API operations are 'atomic', serialized thorough a mutex in the
 * All API operations are 'atomic', serialized through a mutex in the
 * `struct wimax_dev`.
 *
 * EXPORTING TO USER SPACE THROUGH GENERIC NETLINK
@@ -182,6 +182,10 @@ extern int  sysctl_x25_clear_request_timeout;
extern int  sysctl_x25_ack_holdback_timeout;
extern int  sysctl_x25_forward;

extern int x25_parse_address_block(struct sk_buff *skb,
				   struct x25_address *called_addr,
				   struct x25_address *calling_addr);

extern int  x25_addr_ntoa(unsigned char *, struct x25_address *,
			  struct x25_address *);
extern int  x25_addr_aton(unsigned char *, struct x25_address *,
95
lib/md5.c
Normal file
@@ -0,0 +1,95 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cryptohash.h>

#define F1(x, y, z)	(z ^ (x & (y ^ z)))
#define F2(x, y, z)	F1(z, x, y)
#define F3(x, y, z)	(x ^ y ^ z)
#define F4(x, y, z)	(y ^ (x | ~z))

#define MD5STEP(f, w, x, y, z, in, s) \
	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)

void md5_transform(__u32 *hash, __u32 const *in)
{
	u32 a, b, c, d;

	a = hash[0];
	b = hash[1];
	c = hash[2];
	d = hash[3];

	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);

	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);

	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);

	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);

	hash[0] += a;
	hash[1] += b;
	hash[2] += c;
	hash[3] += d;
}
EXPORT_SYMBOL(md5_transform);
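A sketch of calling md5_transform() on one raw 64-byte block; callers are
responsible for their own buffering and padding, and the helper below is
illustrative, not part of this patch:

	#include <linux/cryptohash.h>	/* declares md5_transform() */

	/* hash a single raw 16-word block with the standard MD5 init state */
	static void md5_one_block(const __u32 block[16], __u32 hash[4])
	{
		hash[0] = 0x67452301;
		hash[1] = 0xefcdab89;
		hash[2] = 0x98badcfe;
		hash[3] = 0x10325476;
		md5_transform(hash, block);
	}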
@@ -477,6 +477,9 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	if (addr->sin_family != AF_INET)
		goto out;

	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);

	/* Not specified by any standard per-se, however it breaks too
@@ -54,7 +54,6 @@
#include <linux/sysctl.h>
#endif
#include <linux/kmod.h>
#include <linux/iface_stat.h>

#include <net/arp.h>
#include <net/ip.h>
@@ -374,9 +373,6 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, pid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);

	/* Start persistent interface stat monitoring. Ignores if loopback. */
	create_iface_stat(in_dev);

	return 0;
}

@@ -1036,6 +1032,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu)
	return mtu >= 68;
}

static void inetdev_send_gratuitous_arp(struct net_device *dev,
					struct in_device *in_dev)
{
	struct in_ifaddr *ifa = in_dev->ifa_list;

	if (!ifa)
		return;

	arp_send(ARPOP_REQUEST, ETH_P_ARP,
		 ifa->ifa_address, dev,
		 ifa->ifa_address, NULL,
		 dev->dev_addr, NULL);
}

/* Called only under RTNL semaphore */

static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1088,16 +1099,12 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
		ip_mc_up(in_dev);
		/* fall through */
	case NETDEV_CHANGEADDR:
		if (!IN_DEV_ARP_NOTIFY(in_dev))
			break;
		/* fall through */
	case NETDEV_NOTIFY_PEERS:
		/* Send gratuitous ARP to notify of link change */
		if (IN_DEV_ARP_NOTIFY(in_dev)) {
			struct in_ifaddr *ifa = in_dev->ifa_list;

			if (ifa)
				arp_send(ARPOP_REQUEST, ETH_P_ARP,
					 ifa->ifa_address, dev,
					 ifa->ifa_address, NULL,
					 dev->dev_addr, NULL);
		}
		inetdev_send_gratuitous_arp(dev, in_dev);
		break;
	case NETDEV_DOWN:
		ip_mc_down(in_dev);
@@ -1899,8 +1899,9 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
	err = -EADDRNOTAVAIL;

	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
		if (pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr
		    && pmc->multi.imr_ifindex == imr.imr_ifindex)
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
@@ -436,7 +436,7 @@ static int valid_cc(const void *bc, int len, int cc)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4)
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc  += op->yes;
@@ -446,11 +446,11 @@ static int valid_cc(const void *bc, int len, int cc)

static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const unsigned char *bc = bytecode;
	const void *bc = bytecode;
	int  len = bytecode_len;

	while (len > 0) {
		struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
		const struct inet_diag_bc_op *op = bc;

//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
		switch (op->code) {
@@ -461,22 +461,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (op->yes < 4 || op->yes > len + 4)
				return -EINVAL;
		case INET_DIAG_BC_JMP:
			if (op->no < 4 || op->no > len + 4)
			if (op->no < 4 || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
			break;
		case INET_DIAG_BC_NOP:
			if (op->yes < 4 || op->yes > len + 4)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc  += op->yes;
		len -= op->yes;
	}
@@ -489,9 +487,11 @@ static int inet_csk_diag_dump(struct sock *sk,
{
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);

	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
		struct inet_diag_entry entry;
		struct rtattr *bc = (struct rtattr *)(r + 1);
		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
							  sizeof(*r),
							  INET_DIAG_REQ_BYTECODE);
		struct inet_sock *inet = inet_sk(sk);

		entry.family = sk->sk_family;
@@ -511,7 +511,7 @@ static int inet_csk_diag_dump(struct sock *sk,
		entry.dport = ntohs(inet->dport);
		entry.userlocks = sk->sk_userlocks;

		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
			return 0;
	}

@@ -526,9 +526,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
{
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);

	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
		struct inet_diag_entry entry;
		struct rtattr *bc = (struct rtattr *)(r + 1);
		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
							  sizeof(*r),
							  INET_DIAG_REQ_BYTECODE);

		entry.family = tw->tw_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -547,7 +549,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
		entry.dport = ntohs(tw->tw_dport);
		entry.userlocks = 0;

		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
			return 0;
	}

@@ -617,7 +619,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct rtattr *bc = NULL;
	const struct nlattr *bc = NULL;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
@@ -637,8 +639,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
	if (!lopt || !lopt->qlen)
		goto out;

	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
		bc = (struct rtattr *)(r + 1);
	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
		bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
				     INET_DIAG_REQ_BYTECODE);
		entry.sport = inet->num;
		entry.userlocks = sk->sk_userlocks;
	}
@@ -671,8 +674,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
				&ireq->rmt_addr;
			entry.dport = ntohs(ireq->rmt_port);

			if (!inet_diag_bc_run(RTA_DATA(bc),
					      RTA_PAYLOAD(bc), &entry))
			if (!inet_diag_bc_run(nla_data(bc),
					      nla_len(bc), &entry))
				continue;
		}

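The tightened audit above now rejects unaligned jumps (op->yes & 3,
op->no & 3) as well as short ones; a minimal single-op program that still
passes it, for reference (illustrative only):

	/* struct inet_diag_bc_op is 4 bytes, so every jump must be
	 * at least 4 and a multiple of 4 */
	struct inet_diag_bc_op nop = {
		.code = INET_DIAG_BC_NOP,
		.yes  = sizeof(nop),	/* 4: step over this op */
		.no   = sizeof(nop),
	};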
@@ -21,6 +21,7 @@

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>

/*
@@ -19,6 +19,7 @@
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
@@ -1665,14 +1665,15 @@ static int __init ipgre_init(void)

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		return -EAGAIN;
	}

	err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
	if (err < 0)
		goto gen_device_failed;
		return err;

	err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
	if (err < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
@@ -1688,9 +1689,9 @@ out:
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
gen_device_failed:
	inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
	goto out;
}

@@ -1698,9 +1699,9 @@ static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
	if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");
	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
}

module_init(ipgre_init);
@@ -1708,3 +1709,4 @@ module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
@@ -476,9 +476,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frags(skb)) {
		struct sk_buff *frag;
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);
		int truesizes = 0;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
@@ -491,18 +490,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			truesizes += frag->truesize;
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
@@ -512,7 +511,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->truesize -= truesizes;
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
@@ -564,6 +562,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
@@ -860,8 +867,10 @@ int ip_append_data(struct sock *sk,
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	skb = skb_peek_tail(&sk->sk_write_queue);

	inet->cork.length += length;
	if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
@@ -879,7 +888,7 @@ int ip_append_data(struct sock *sk,
	 * adding appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
@@ -1108,7 +1117,8 @@ ssize_t	ip_append_page(struct sock *sk, struct page *page,
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
@@ -853,3 +853,4 @@ static void __exit ipip_fini(void)
module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETDEV("tunl0");
@@ -155,10 +155,10 @@ static int nf_ip_reroute(struct sk_buff *skb,
	if (entry->hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		if (!(iph->tos == rt_info->tos
		      && skb->mark == rt_info->mark
		      && iph->daddr == rt_info->daddr
		      && iph->saddr == rt_info->saddr))
		if (!(iph->tos == rt_info->tos &&
		      skb->mark == rt_info->mark &&
		      iph->daddr == rt_info->daddr &&
		      iph->saddr == rt_info->saddr))
			return ip_route_me_harder(skb, RTN_UNSPEC);
	}
	return 0;
@@ -27,6 +27,7 @@

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
@@ -58,6 +59,12 @@ do { \
#define ARP_NF_ASSERT(x)
#endif

void *arpt_alloc_initial_table(const struct xt_table *info)
{
return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);

static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
const char *hdr_addr, int len)
{
@@ -217,7 +224,7 @@ static inline int arp_checkentry(const struct arpt_arp *arp)
}

static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
if (net_ratelimit())
printk("arp_tables: error: '%s'\n",
@@ -226,7 +233,14 @@ arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
return NF_DROP;
}

static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
static inline const struct arpt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
return arpt_get_target((struct arpt_entry *)e);
}

static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
return (struct arpt_entry *)(base + offset);
}
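Aside (illustration only, not from the patch): the arpt_get_target_c() wrapper added above is the usual const-accessor idiom -- cast away const in exactly one place so read-only callers need no casts of their own. A self-contained sketch with hypothetical entry/target types:

#include <stddef.h>

struct sketch_target { int verdict; };
struct sketch_entry {
	unsigned int target_offset;
	unsigned char elems[8];
};

static struct sketch_target *entry_get_target(struct sketch_entry *e)
{
	return (struct sketch_target *)((char *)e + e->target_offset);
}

static const struct sketch_target *
entry_get_target_c(const struct sketch_entry *e)
{
	/* The single, deliberate const cast lives here. */
	return entry_get_target((struct sketch_entry *)e);
}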
@@ -251,7 +265,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
const char *indev, *outdev;
void *table_base;
const struct xt_table_info *private;
struct xt_target_param tgpar;
struct xt_action_param acpar;

if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
return NF_DROP;
@@ -266,14 +280,14 @@ unsigned int arpt_do_table(struct sk_buff *skb,
e = get_entry(table_base, private->hook_entry[hook]);
back = get_entry(table_base, private->underflow[hook]);

tgpar.in = in;
tgpar.out = out;
tgpar.hooknum = hook;
tgpar.family = NFPROTO_ARP;
acpar.in = in;
acpar.out = out;
acpar.hooknum = hook;
acpar.family = NFPROTO_ARP;

arp = arp_hdr(skb);
do {
struct arpt_entry_target *t;
const struct arpt_entry_target *t;
int hdr_len;

if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
@@ -285,7 +299,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
(2 * skb->dev->addr_len);
ADD_COUNTER(e->counters, hdr_len, 1);

t = arpt_get_target(e);
t = arpt_get_target_c(e);

/* Standard target? */
if (!t->u.kernel.target->target) {
@@ -319,9 +333,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
/* Targets which reenter must return
* abs. verdicts
*/
tgpar.target = t->u.kernel.target;
tgpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &tgpar);
acpar.target = t->u.kernel.target;
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);

/* Target might have changed stuff. */
arp = arp_hdr(skb);
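Aside (hedged sketch, not from the patch): the tgpar-to-acpar hunk above is part of the upstream xt_target_param/xt_match_param consolidation into a single xt_action_param. A simplified stand-in showing the shape of the shared parameter block; field types are placeholders, not the kernel definitions:

/* One parameter block serves both match and target hooks, so
 * arpt_do_table() fills it once instead of keeping separate
 * per-kind copies in sync. */
struct action_param_sketch {
	const void *in;       /* input net_device in the kernel */
	const void *out;      /* output net_device in the kernel */
	unsigned int hooknum;
	unsigned char family;
	const void *targinfo; /* per-rule target data */
};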
@ -351,7 +365,7 @@ static inline bool unconditional(const struct arpt_arp *arp)
|
||||
/* Figures out from what hook each rule can be called: returns 0 if
|
||||
* there are loops. Puts hook bitmask in comefrom.
|
||||
*/
|
||||
static int mark_source_chains(struct xt_table_info *newinfo,
|
||||
static int mark_source_chains(const struct xt_table_info *newinfo,
|
||||
unsigned int valid_hooks, void *entry0)
|
||||
{
|
||||
unsigned int hook;
|
||||
@ -372,7 +386,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
|
||||
|
||||
for (;;) {
|
||||
const struct arpt_standard_target *t
|
||||
= (void *)arpt_get_target(e);
|
||||
= (void *)arpt_get_target_c(e);
|
||||
int visited = e->comefrom & (1 << hook);
|
||||
|
||||
if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
|
||||
@ -456,7 +470,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int check_entry(struct arpt_entry *e, const char *name)
|
||||
static inline int check_entry(const struct arpt_entry *e, const char *name)
|
||||
{
|
||||
const struct arpt_entry_target *t;
|
||||
|
||||
@ -468,7 +482,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
|
||||
if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
|
||||
return -EINVAL;
|
||||
|
||||
t = arpt_get_target(e);
|
||||
t = arpt_get_target_c(e);
|
||||
if (e->target_offset + t->u.target_size > e->next_offset)
|
||||
return -EINVAL;
|
||||
|
||||
@ -498,8 +512,7 @@ static inline int check_target(struct arpt_entry *e, const char *name)
|
||||
}
|
||||
|
||||
static inline int
|
||||
find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
|
||||
unsigned int *i)
|
||||
find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
|
||||
{
|
||||
struct arpt_entry_target *t;
|
||||
struct xt_target *target;
|
||||
@ -510,13 +523,11 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
|
||||
return ret;
|
||||
|
||||
t = arpt_get_target(e);
|
||||
target = try_then_request_module(xt_find_target(NFPROTO_ARP,
|
||||
t->u.user.name,
|
||||
t->u.user.revision),
|
||||
"arpt_%s", t->u.user.name);
|
||||
if (IS_ERR(target) || !target) {
|
||||
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
|
||||
t->u.user.revision);
|
||||
if (IS_ERR(target)) {
|
||||
duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
|
||||
ret = target ? PTR_ERR(target) : -ENOENT;
|
||||
ret = PTR_ERR(target);
|
||||
goto out;
|
||||
}
|
||||
t->u.kernel.target = target;
|
||||
@ -524,8 +535,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
|
||||
ret = check_target(e, name);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
(*i)++;
|
||||
return 0;
|
||||
err:
|
||||
module_put(t->u.kernel.target->me);
|
||||
@ -533,14 +542,14 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool check_underflow(struct arpt_entry *e)
|
||||
static bool check_underflow(const struct arpt_entry *e)
|
||||
{
|
||||
const struct arpt_entry_target *t;
|
||||
unsigned int verdict;
|
||||
|
||||
if (!unconditional(&e->arp))
|
||||
return false;
|
||||
t = arpt_get_target(e);
|
||||
t = arpt_get_target_c(e);
|
||||
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
|
||||
return false;
|
||||
verdict = ((struct arpt_standard_target *)t)->verdict;
|
||||
@ -550,12 +559,11 @@ static bool check_underflow(struct arpt_entry *e)
|
||||
|
||||
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
|
||||
struct xt_table_info *newinfo,
|
||||
unsigned char *base,
|
||||
unsigned char *limit,
|
||||
const unsigned char *base,
|
||||
const unsigned char *limit,
|
||||
const unsigned int *hook_entries,
|
||||
const unsigned int *underflows,
|
||||
unsigned int valid_hooks,
|
||||
unsigned int *i)
|
||||
unsigned int valid_hooks)
|
||||
{
|
||||
unsigned int h;
|
||||
|
||||
@ -592,19 +600,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
|
||||
/* Clear counters and comefrom */
|
||||
e->counters = ((struct xt_counters) { 0, 0 });
|
||||
e->comefrom = 0;
|
||||
|
||||
(*i)++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
|
||||
static inline void cleanup_entry(struct arpt_entry *e)
|
||||
{
|
||||
struct xt_tgdtor_param par;
|
||||
struct arpt_entry_target *t;
|
||||
|
||||
if (i && (*i)-- == 0)
|
||||
return 1;
|
||||
|
||||
t = arpt_get_target(e);
|
||||
par.target = t->u.kernel.target;
|
||||
par.targinfo = t->data;
|
||||
@ -612,26 +615,20 @@ static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
|
||||
if (par.target->destroy != NULL)
|
||||
par.target->destroy(&par);
|
||||
module_put(par.target->me);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Checks and translates the user-supplied table segment (held in
* newinfo).
*/
static int translate_table(const char *name,
unsigned int valid_hooks,
struct xt_table_info *newinfo,
void *entry0,
unsigned int size,
unsigned int number,
const unsigned int *hook_entries,
const unsigned int *underflows)
static int translate_table(struct xt_table_info *newinfo, void *entry0,
const struct arpt_replace *repl)
{
struct arpt_entry *iter;
unsigned int i;
int ret;
int ret = 0;

newinfo->size = size;
newinfo->number = number;
newinfo->size = repl->size;
newinfo->number = repl->num_entries;

/* Init all hooks to impossible value. */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
@@ -643,52 +640,64 @@ static int translate_table(const char *name,
i = 0;

/* Walk through entries, checking offsets. */
ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
check_entry_size_and_hooks,
newinfo,
entry0,
entry0 + size,
hook_entries, underflows, valid_hooks, &i);
xt_entry_foreach(iter, entry0, newinfo->size) {
ret = check_entry_size_and_hooks(iter, newinfo, entry0,
entry0 + repl->size, repl->hook_entry, repl->underflow,
repl->valid_hooks);
if (ret != 0)
break;
++i;
if (strcmp(arpt_get_target(iter)->u.user.name,
XT_ERROR_TARGET) == 0)
++newinfo->stacksize;
}
duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
if (ret != 0)
return ret;

if (i != number) {
if (i != repl->num_entries) {
duprintf("translate_table: %u not %u entries\n",
i, number);
i, repl->num_entries);
return -EINVAL;
}

/* Check hooks all assigned */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
/* Only hooks which are valid */
if (!(valid_hooks & (1 << i)))
if (!(repl->valid_hooks & (1 << i)))
continue;
if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
duprintf("Invalid hook entry %u %u\n",
i, hook_entries[i]);
i, repl->hook_entry[i]);
return -EINVAL;
}
if (newinfo->underflow[i] == 0xFFFFFFFF) {
duprintf("Invalid underflow %u %u\n",
i, underflows[i]);
i, repl->underflow[i]);
return -EINVAL;
}
}

if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
duprintf("Looping hook\n");
return -ELOOP;
}

/* Finally, each sanity check must pass */
i = 0;
ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
find_check_entry, name, size, &i);
xt_entry_foreach(iter, entry0, newinfo->size) {
ret = find_check_entry(iter, repl->name, repl->size);
if (ret != 0)
break;
++i;
}

if (ret != 0) {
ARPT_ENTRY_ITERATE(entry0, newinfo->size,
cleanup_entry, &i);
xt_entry_foreach(iter, entry0, newinfo->size) {
if (i-- == 0)
break;
cleanup_entry(iter);
}
return ret;
}
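Aside (illustration only): translate_table() above replaces the callback-style ARPT_ENTRY_ITERATE with the open-coded xt_entry_foreach walk. Below is a minimal user-space model of such an offset-based iterator; the macro and type names are simplified stand-ins, not the kernel's.

#include <stddef.h>

struct walk_entry { unsigned int next_offset; };

/* Advance by each entry's next_offset until `size` bytes are consumed,
 * mirroring the shape of the kernel's xt_entry_foreach() macro. */
#define walk_foreach(pos, base, size)					\
	for ((pos) = (struct walk_entry *)(base);			\
	     (char *)(pos) < (char *)(base) + (size);			\
	     (pos) = (struct walk_entry *)((char *)(pos) +		\
					   (pos)->next_offset))

static int count_entries(void *base, size_t size)
{
	struct walk_entry *iter;
	int n = 0;

	walk_foreach(iter, base, size) {
		if (iter->next_offset == 0)	/* malformed blob: bail */
			return -1;
		++n;
	}
	return n;
}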
@ -701,30 +710,10 @@ static int translate_table(const char *name,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Gets counters. */
|
||||
static inline int add_entry_to_counter(const struct arpt_entry *e,
|
||||
struct xt_counters total[],
|
||||
unsigned int *i)
|
||||
{
|
||||
ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
|
||||
|
||||
(*i)++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int set_entry_to_counter(const struct arpt_entry *e,
|
||||
struct xt_counters total[],
|
||||
unsigned int *i)
|
||||
{
|
||||
SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
|
||||
|
||||
(*i)++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void get_counters(const struct xt_table_info *t,
|
||||
struct xt_counters counters[])
|
||||
{
|
||||
struct arpt_entry *iter;
|
||||
unsigned int cpu;
|
||||
unsigned int i;
|
||||
unsigned int curcpu;
|
||||
@ -740,32 +729,32 @@ static void get_counters(const struct xt_table_info *t,
|
||||
curcpu = smp_processor_id();
|
||||
|
||||
i = 0;
|
||||
ARPT_ENTRY_ITERATE(t->entries[curcpu],
|
||||
t->size,
|
||||
set_entry_to_counter,
|
||||
counters,
|
||||
&i);
|
||||
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
|
||||
SET_COUNTER(counters[i], iter->counters.bcnt,
|
||||
iter->counters.pcnt);
|
||||
++i;
|
||||
}
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
if (cpu == curcpu)
|
||||
continue;
|
||||
i = 0;
|
||||
xt_info_wrlock(cpu);
|
||||
ARPT_ENTRY_ITERATE(t->entries[cpu],
|
||||
t->size,
|
||||
add_entry_to_counter,
|
||||
counters,
|
||||
&i);
|
||||
xt_entry_foreach(iter, t->entries[cpu], t->size) {
|
||||
ADD_COUNTER(counters[i], iter->counters.bcnt,
|
||||
iter->counters.pcnt);
|
||||
++i;
|
||||
}
|
||||
xt_info_wrunlock(cpu);
|
||||
}
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
static struct xt_counters *alloc_counters(struct xt_table *table)
|
||||
static struct xt_counters *alloc_counters(const struct xt_table *table)
|
||||
{
|
||||
unsigned int countersize;
|
||||
struct xt_counters *counters;
|
||||
struct xt_table_info *private = table->private;
|
||||
const struct xt_table_info *private = table->private;
|
||||
|
||||
/* We need atomic snapshot of counters: rest doesn't change
|
||||
* (other than comefrom, which userspace doesn't care
|
||||
@ -783,11 +772,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
|
||||
}
|
||||
|
||||
static int copy_entries_to_user(unsigned int total_size,
|
||||
struct xt_table *table,
|
||||
const struct xt_table *table,
|
||||
void __user *userptr)
|
||||
{
|
||||
unsigned int off, num;
|
||||
struct arpt_entry *e;
|
||||
const struct arpt_entry *e;
|
||||
struct xt_counters *counters;
|
||||
struct xt_table_info *private = table->private;
|
||||
int ret = 0;
|
||||
@ -807,7 +796,7 @@ static int copy_entries_to_user(unsigned int total_size,
|
||||
/* FIXME: use iterator macros --RR */
|
||||
/* ... then go back and fix counters and names */
|
||||
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
|
||||
struct arpt_entry_target *t;
|
||||
const struct arpt_entry_target *t;
|
||||
|
||||
e = (struct arpt_entry *)(loc_cpu_entry + off);
|
||||
if (copy_to_user(userptr + off
|
||||
@ -818,7 +807,7 @@ static int copy_entries_to_user(unsigned int total_size,
|
||||
goto free_counters;
|
||||
}
|
||||
|
||||
t = arpt_get_target(e);
|
||||
t = arpt_get_target_c(e);
|
||||
if (copy_to_user(userptr + off + e->target_offset
|
||||
+ offsetof(struct arpt_entry_target,
|
||||
u.user.name),
|
||||
@ -853,18 +842,18 @@ static int compat_standard_to_user(void __user *dst, void *src)
|
||||
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
static int compat_calc_entry(struct arpt_entry *e,
|
||||
static int compat_calc_entry(const struct arpt_entry *e,
|
||||
const struct xt_table_info *info,
|
||||
void *base, struct xt_table_info *newinfo)
|
||||
const void *base, struct xt_table_info *newinfo)
|
||||
{
|
||||
struct arpt_entry_target *t;
|
||||
const struct arpt_entry_target *t;
|
||||
unsigned int entry_offset;
|
||||
int off, i, ret;
|
||||
|
||||
off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
|
||||
entry_offset = (void *)e - base;
|
||||
|
||||
t = arpt_get_target(e);
|
||||
t = arpt_get_target_c(e);
|
||||
off += xt_compat_target_offset(t->u.kernel.target);
|
||||
newinfo->size -= off;
|
||||
ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
|
||||
@ -885,7 +874,9 @@ static int compat_calc_entry(struct arpt_entry *e,
|
||||
static int compat_table_info(const struct xt_table_info *info,
|
||||
struct xt_table_info *newinfo)
|
||||
{
|
||||
struct arpt_entry *iter;
|
||||
void *loc_cpu_entry;
|
||||
int ret;
|
||||
|
||||
if (!newinfo || !info)
|
||||
return -EINVAL;
|
||||
@ -894,13 +885,17 @@ static int compat_table_info(const struct xt_table_info *info,
|
||||
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
|
||||
newinfo->initial_entries = 0;
|
||||
loc_cpu_entry = info->entries[raw_smp_processor_id()];
|
||||
return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
|
||||
compat_calc_entry, info, loc_cpu_entry,
|
||||
newinfo);
|
||||
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
|
||||
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int get_info(struct net *net, void __user *user, int *len, int compat)
|
||||
static int get_info(struct net *net, void __user *user,
|
||||
const int *len, int compat)
|
||||
{
|
||||
char name[ARPT_TABLE_MAXNAMELEN];
|
||||
struct xt_table *t;
|
||||
@ -959,7 +954,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
|
||||
}
|
||||
|
||||
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
|
||||
int *len)
|
||||
const int *len)
|
||||
{
|
||||
int ret;
|
||||
struct arpt_get_entries get;
|
||||
@ -1010,6 +1005,7 @@ static int __do_replace(struct net *net, const char *name,
|
||||
struct xt_table_info *oldinfo;
|
||||
struct xt_counters *counters;
|
||||
void *loc_cpu_old_entry;
|
||||
struct arpt_entry *iter;
|
||||
|
||||
ret = 0;
|
||||
counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
|
||||
@ -1053,8 +1049,8 @@ static int __do_replace(struct net *net, const char *name,
|
||||
|
||||
/* Decrease module usage counts and free resource */
|
||||
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
|
||||
ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
|
||||
NULL);
|
||||
xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
|
||||
cleanup_entry(iter);
|
||||
|
||||
xt_free_table_info(oldinfo);
|
||||
if (copy_to_user(counters_ptr, counters,
|
||||
@ -1073,12 +1069,14 @@ static int __do_replace(struct net *net, const char *name,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
static int do_replace(struct net *net, const void __user *user,
|
||||
unsigned int len)
|
||||
{
|
||||
int ret;
|
||||
struct arpt_replace tmp;
|
||||
struct xt_table_info *newinfo;
|
||||
void *loc_cpu_entry;
|
||||
struct arpt_entry *iter;
|
||||
|
||||
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
|
||||
return -EFAULT;
|
||||
@ -1086,6 +1084,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
/* overflow check */
|
||||
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
|
||||
return -ENOMEM;
|
||||
tmp.name[sizeof(tmp.name)-1] = 0;
|
||||
|
||||
newinfo = xt_alloc_table_info(tmp.size);
|
||||
if (!newinfo)
|
||||
@ -1099,9 +1098,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
goto free_newinfo;
|
||||
}
|
||||
|
||||
ret = translate_table(tmp.name, tmp.valid_hooks,
|
||||
newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
|
||||
tmp.hook_entry, tmp.underflow);
|
||||
ret = translate_table(newinfo, loc_cpu_entry, &tmp);
|
||||
if (ret != 0)
|
||||
goto free_newinfo;
|
||||
|
||||
@ -1114,27 +1111,15 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
return 0;
|
||||
|
||||
free_newinfo_untrans:
|
||||
ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
|
||||
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
|
||||
cleanup_entry(iter);
|
||||
free_newinfo:
|
||||
xt_free_table_info(newinfo);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* We're lazy, and add to the first CPU; overflow works its fey magic
|
||||
* and everything is OK. */
|
||||
static int
|
||||
add_counter_to_entry(struct arpt_entry *e,
|
||||
const struct xt_counters addme[],
|
||||
unsigned int *i)
|
||||
{
|
||||
ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
|
||||
|
||||
(*i)++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int do_add_counters(struct net *net, void __user *user, unsigned int len,
|
||||
int compat)
|
||||
static int do_add_counters(struct net *net, const void __user *user,
|
||||
unsigned int len, int compat)
|
||||
{
|
||||
unsigned int i, curcpu;
|
||||
struct xt_counters_info tmp;
|
||||
@ -1147,6 +1132,7 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
|
||||
const struct xt_table_info *private;
|
||||
int ret = 0;
|
||||
void *loc_cpu_entry;
|
||||
struct arpt_entry *iter;
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct compat_xt_counters_info compat_tmp;
|
||||
|
||||
@ -1204,11 +1190,10 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
|
||||
curcpu = smp_processor_id();
|
||||
loc_cpu_entry = private->entries[curcpu];
|
||||
xt_info_wrlock(curcpu);
|
||||
ARPT_ENTRY_ITERATE(loc_cpu_entry,
|
||||
private->size,
|
||||
add_counter_to_entry,
|
||||
paddc,
|
||||
&i);
|
||||
xt_entry_foreach(iter, loc_cpu_entry, private->size) {
|
||||
ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
|
||||
++i;
|
||||
}
|
||||
xt_info_wrunlock(curcpu);
|
||||
unlock_up_free:
|
||||
local_bh_enable();
|
||||
@ -1221,28 +1206,22 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
static inline int
|
||||
compat_release_entry(struct compat_arpt_entry *e, unsigned int *i)
|
||||
static inline void compat_release_entry(struct compat_arpt_entry *e)
|
||||
{
|
||||
struct arpt_entry_target *t;
|
||||
|
||||
if (i && (*i)-- == 0)
|
||||
return 1;
|
||||
|
||||
t = compat_arpt_get_target(e);
|
||||
module_put(t->u.kernel.target->me);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
|
||||
struct xt_table_info *newinfo,
|
||||
unsigned int *size,
|
||||
unsigned char *base,
|
||||
unsigned char *limit,
|
||||
unsigned int *hook_entries,
|
||||
unsigned int *underflows,
|
||||
unsigned int *i,
|
||||
const unsigned char *base,
|
||||
const unsigned char *limit,
|
||||
const unsigned int *hook_entries,
|
||||
const unsigned int *underflows,
|
||||
const char *name)
|
||||
{
|
||||
struct arpt_entry_target *t;
|
||||
@ -1273,14 +1252,12 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
|
||||
entry_offset = (void *)e - (void *)base;
|
||||
|
||||
t = compat_arpt_get_target(e);
|
||||
target = try_then_request_module(xt_find_target(NFPROTO_ARP,
|
||||
t->u.user.name,
|
||||
t->u.user.revision),
|
||||
"arpt_%s", t->u.user.name);
|
||||
if (IS_ERR(target) || !target) {
|
||||
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
|
||||
t->u.user.revision);
|
||||
if (IS_ERR(target)) {
|
||||
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
|
||||
t->u.user.name);
|
||||
ret = target ? PTR_ERR(target) : -ENOENT;
|
||||
ret = PTR_ERR(target);
|
||||
goto out;
|
||||
}
|
||||
t->u.kernel.target = target;
|
||||
@ -1302,8 +1279,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
|
||||
/* Clear counters and comefrom */
|
||||
memset(&e->counters, 0, sizeof(e->counters));
|
||||
e->comefrom = 0;
|
||||
|
||||
(*i)++;
|
||||
return 0;
|
||||
|
||||
release_target:
|
||||
@ -1347,19 +1322,6 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int compat_check_entry(struct arpt_entry *e, const char *name,
|
||||
unsigned int *i)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = check_target(e, name);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
(*i)++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int translate_compat_table(const char *name,
|
||||
unsigned int valid_hooks,
|
||||
struct xt_table_info **pinfo,
|
||||
@ -1372,8 +1334,10 @@ static int translate_compat_table(const char *name,
|
||||
unsigned int i, j;
|
||||
struct xt_table_info *newinfo, *info;
|
||||
void *pos, *entry0, *entry1;
|
||||
struct compat_arpt_entry *iter0;
|
||||
struct arpt_entry *iter1;
|
||||
unsigned int size;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
info = *pinfo;
|
||||
entry0 = *pentry0;
|
||||
@ -1390,13 +1354,14 @@ static int translate_compat_table(const char *name,
|
||||
j = 0;
|
||||
xt_compat_lock(NFPROTO_ARP);
|
||||
/* Walk through entries, checking offsets. */
|
||||
ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
|
||||
check_compat_entry_size_and_hooks,
|
||||
info, &size, entry0,
|
||||
entry0 + total_size,
|
||||
hook_entries, underflows, &j, name);
|
||||
if (ret != 0)
|
||||
goto out_unlock;
|
||||
xt_entry_foreach(iter0, entry0, total_size) {
|
||||
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
|
||||
entry0, entry0 + total_size, hook_entries, underflows,
|
||||
name);
|
||||
if (ret != 0)
|
||||
goto out_unlock;
|
||||
++j;
|
||||
}
|
||||
|
||||
ret = -EINVAL;
|
||||
if (j != number) {
|
||||
@ -1435,9 +1400,12 @@ static int translate_compat_table(const char *name,
|
||||
entry1 = newinfo->entries[raw_smp_processor_id()];
|
||||
pos = entry1;
|
||||
size = total_size;
|
||||
ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
|
||||
compat_copy_entry_from_user,
|
||||
&pos, &size, name, newinfo, entry1);
|
||||
xt_entry_foreach(iter0, entry0, total_size) {
|
||||
ret = compat_copy_entry_from_user(iter0, &pos,
|
||||
&size, name, newinfo, entry1);
|
||||
if (ret != 0)
|
||||
break;
|
||||
}
|
||||
xt_compat_flush_offsets(NFPROTO_ARP);
|
||||
xt_compat_unlock(NFPROTO_ARP);
|
||||
if (ret)
|
||||
@ -1448,13 +1416,32 @@ static int translate_compat_table(const char *name,
|
||||
goto free_newinfo;
|
||||
|
||||
i = 0;
|
||||
ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
|
||||
name, &i);
|
||||
xt_entry_foreach(iter1, entry1, newinfo->size) {
|
||||
ret = check_target(iter1, name);
|
||||
if (ret != 0)
|
||||
break;
|
||||
++i;
|
||||
}
|
||||
if (ret) {
|
||||
/*
|
||||
* The first i matches need cleanup_entry (calls ->destroy)
|
||||
* because they had called ->check already. The other j-i
|
||||
* entries need only release.
|
||||
*/
|
||||
int skip = i;
|
||||
j -= i;
|
||||
COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
|
||||
compat_release_entry, &j);
|
||||
ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
|
||||
xt_entry_foreach(iter0, entry0, newinfo->size) {
|
||||
if (skip-- > 0)
|
||||
continue;
|
||||
if (j-- == 0)
|
||||
break;
|
||||
compat_release_entry(iter0);
|
||||
}
|
||||
xt_entry_foreach(iter1, entry1, newinfo->size) {
|
||||
if (i-- == 0)
|
||||
break;
|
||||
cleanup_entry(iter1);
|
||||
}
|
||||
xt_free_table_info(newinfo);
|
||||
return ret;
|
||||
}
|
||||
@ -1472,7 +1459,11 @@ static int translate_compat_table(const char *name,
|
||||
free_newinfo:
|
||||
xt_free_table_info(newinfo);
|
||||
out:
|
||||
COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
|
||||
xt_entry_foreach(iter0, entry0, total_size) {
|
||||
if (j-- == 0)
|
||||
break;
|
||||
compat_release_entry(iter0);
|
||||
}
|
||||
return ret;
|
||||
out_unlock:
|
||||
xt_compat_flush_offsets(NFPROTO_ARP);
|
||||
@ -1499,6 +1490,7 @@ static int compat_do_replace(struct net *net, void __user *user,
|
||||
struct compat_arpt_replace tmp;
|
||||
struct xt_table_info *newinfo;
|
||||
void *loc_cpu_entry;
|
||||
struct arpt_entry *iter;
|
||||
|
||||
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
|
||||
return -EFAULT;
|
||||
@ -1508,6 +1500,7 @@ static int compat_do_replace(struct net *net, void __user *user,
|
||||
return -ENOMEM;
|
||||
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
|
||||
return -ENOMEM;
|
||||
tmp.name[sizeof(tmp.name)-1] = 0;
|
||||
|
||||
newinfo = xt_alloc_table_info(tmp.size);
|
||||
if (!newinfo)
|
||||
@ -1536,7 +1529,8 @@ static int compat_do_replace(struct net *net, void __user *user,
|
||||
return 0;
|
||||
|
||||
free_newinfo_untrans:
|
||||
ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
|
||||
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
|
||||
cleanup_entry(iter);
|
||||
free_newinfo:
|
||||
xt_free_table_info(newinfo);
|
||||
return ret;
|
||||
@ -1570,7 +1564,7 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
|
||||
static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
|
||||
compat_uint_t *size,
|
||||
struct xt_counters *counters,
|
||||
unsigned int *i)
|
||||
unsigned int i)
|
||||
{
|
||||
struct arpt_entry_target *t;
|
||||
struct compat_arpt_entry __user *ce;
|
||||
@ -1578,14 +1572,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
|
||||
compat_uint_t origsize;
|
||||
int ret;
|
||||
|
||||
ret = -EFAULT;
|
||||
origsize = *size;
|
||||
ce = (struct compat_arpt_entry __user *)*dstptr;
|
||||
if (copy_to_user(ce, e, sizeof(struct arpt_entry)))
|
||||
goto out;
|
||||
|
||||
if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
|
||||
goto out;
|
||||
if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
|
||||
copy_to_user(&ce->counters, &counters[i],
|
||||
sizeof(counters[i])) != 0)
|
||||
return -EFAULT;
|
||||
|
||||
*dstptr += sizeof(struct compat_arpt_entry);
|
||||
*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
|
||||
@ -1595,18 +1587,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
|
||||
t = arpt_get_target(e);
|
||||
ret = xt_compat_target_to_user(t, dstptr, size);
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = -EFAULT;
|
||||
return ret;
|
||||
next_offset = e->next_offset - (origsize - *size);
|
||||
if (put_user(target_offset, &ce->target_offset))
|
||||
goto out;
|
||||
if (put_user(next_offset, &ce->next_offset))
|
||||
goto out;
|
||||
|
||||
(*i)++;
|
||||
if (put_user(target_offset, &ce->target_offset) != 0 ||
|
||||
put_user(next_offset, &ce->next_offset) != 0)
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int compat_copy_entries_to_user(unsigned int total_size,
|
||||
@ -1620,6 +1606,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
|
||||
int ret = 0;
|
||||
void *loc_cpu_entry;
|
||||
unsigned int i = 0;
|
||||
struct arpt_entry *iter;
|
||||
|
||||
counters = alloc_counters(table);
|
||||
if (IS_ERR(counters))
|
||||
@ -1629,9 +1616,12 @@ static int compat_copy_entries_to_user(unsigned int total_size,
|
||||
loc_cpu_entry = private->entries[raw_smp_processor_id()];
|
||||
pos = userptr;
|
||||
size = total_size;
|
||||
ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
|
||||
compat_copy_entry_to_user,
|
||||
&pos, &size, counters, &i);
|
||||
xt_entry_foreach(iter, loc_cpu_entry, total_size) {
|
||||
ret = compat_copy_entry_to_user(iter, &pos,
|
||||
&size, counters, i++);
|
||||
if (ret != 0)
|
||||
break;
|
||||
}
|
||||
vfree(counters);
|
||||
return ret;
|
||||
}
|
||||
@ -1763,6 +1753,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
rev.name[sizeof(rev.name)-1] = 0;
|
||||
|
||||
try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
|
||||
rev.revision, 1, &ret),
|
||||
@ -1784,8 +1775,7 @@ struct xt_table *arpt_register_table(struct net *net,
|
||||
{
|
||||
int ret;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table_info bootstrap
|
||||
= { 0, 0, 0, { 0 }, { 0 }, { } };
|
||||
struct xt_table_info bootstrap = {0};
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table *new_table;
|
||||
|
||||
@ -1799,12 +1789,7 @@ struct xt_table *arpt_register_table(struct net *net,
|
||||
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
|
||||
memcpy(loc_cpu_entry, repl->entries, repl->size);
|
||||
|
||||
ret = translate_table(table->name, table->valid_hooks,
|
||||
newinfo, loc_cpu_entry, repl->size,
|
||||
repl->num_entries,
|
||||
repl->hook_entry,
|
||||
repl->underflow);
|
||||
|
||||
ret = translate_table(newinfo, loc_cpu_entry, repl);
|
||||
duprintf("arpt_register_table: translate table gives %d\n", ret);
|
||||
if (ret != 0)
|
||||
goto out_free;
|
||||
@ -1827,13 +1812,14 @@ void arpt_unregister_table(struct xt_table *table)
|
||||
struct xt_table_info *private;
|
||||
void *loc_cpu_entry;
|
||||
struct module *table_owner = table->me;
|
||||
struct arpt_entry *iter;
|
||||
|
||||
private = xt_unregister_table(table);
|
||||
|
||||
/* Decrease module usage counts and free resources */
|
||||
loc_cpu_entry = private->entries[raw_smp_processor_id()];
|
||||
ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
|
||||
cleanup_entry, NULL);
|
||||
xt_entry_foreach(iter, loc_cpu_entry, private->size)
|
||||
cleanup_entry(iter);
|
||||
if (private->number > private->initial_entries)
|
||||
module_put(table_owner);
|
||||
xt_free_table_info(private);
|
||||
|
@ -9,7 +9,7 @@ MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
|
||||
MODULE_DESCRIPTION("arptables arp payload mangle target");
|
||||
|
||||
static unsigned int
|
||||
target(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
target(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
const struct arpt_mangle *mangle = par->targinfo;
|
||||
const struct arphdr *arp;
|
||||
|
@ -6,6 +6,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/netfilter/x_tables.h>
|
||||
#include <linux/netfilter_arp/arp_tables.h>
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
@ -15,36 +16,6 @@ MODULE_DESCRIPTION("arptables filter table");
|
||||
#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
|
||||
(1 << NF_ARP_FORWARD))
|
||||
|
||||
static const struct
|
||||
{
|
||||
struct arpt_replace repl;
|
||||
struct arpt_standard entries[3];
|
||||
struct arpt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "filter",
|
||||
.valid_hooks = FILTER_VALID_HOOKS,
|
||||
.num_entries = 4,
|
||||
.size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
|
||||
.hook_entry = {
|
||||
[NF_ARP_IN] = 0,
|
||||
[NF_ARP_OUT] = sizeof(struct arpt_standard),
|
||||
[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
|
||||
},
|
||||
.underflow = {
|
||||
[NF_ARP_IN] = 0,
|
||||
[NF_ARP_OUT] = sizeof(struct arpt_standard),
|
||||
[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
|
||||
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
|
||||
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
|
||||
},
|
||||
.term = ARPT_ERROR_INIT,
|
||||
};
|
||||
|
||||
static const struct xt_table packet_filter = {
|
||||
.name = "filter",
|
||||
.valid_hooks = FILTER_VALID_HOOKS,
|
||||
@ -99,9 +70,14 @@ static struct nf_hook_ops arpt_ops[] __read_mostly = {
|
||||
|
||||
static int __net_init arptable_filter_net_init(struct net *net)
|
||||
{
|
||||
/* Register table */
|
||||
struct arpt_replace *repl;
|
||||
|
||||
repl = arpt_alloc_initial_table(&packet_filter);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
net->ipv4.arptable_filter =
|
||||
arpt_register_table(net, &packet_filter, &initial_table.repl);
|
||||
arpt_register_table(net, &packet_filter, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.arptable_filter))
|
||||
return PTR_ERR(net->ipv4.arptable_filter);
|
||||
return 0;
|
||||
|
File diff suppressed because it is too large
@ -281,7 +281,7 @@ clusterip_responsible(const struct clusterip_config *config, u_int32_t hash)
|
||||
***********************************************************************/
|
||||
|
||||
static unsigned int
|
||||
clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
|
||||
struct nf_conn *ct;
|
||||
@ -319,12 +319,12 @@ clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
ct->mark = hash;
|
||||
break;
|
||||
case IP_CT_RELATED:
|
||||
case IP_CT_RELATED+IP_CT_IS_REPLY:
|
||||
case IP_CT_RELATED_REPLY:
|
||||
/* FIXME: we don't handle expectations at the
|
||||
* moment. they can arrive on a different node than
|
||||
* the master connection (e.g. FTP passive mode) */
|
||||
case IP_CT_ESTABLISHED:
|
||||
case IP_CT_ESTABLISHED+IP_CT_IS_REPLY:
|
||||
case IP_CT_ESTABLISHED_REPLY:
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
@@ -666,8 +666,11 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
struct clusterip_config *c = pde->data;
unsigned long nodenum;

if (copy_from_user(buffer, input, PROC_WRITELEN))
if (size > PROC_WRITELEN)
return -EIO;
if (copy_from_user(buffer, input, size))
return -EFAULT;
buffer[size] = 0;

if (*buffer == '+') {
nodenum = simple_strtoul(buffer+1, NULL, 10);
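Aside (hedged mock, not from the patch): the clusterip_proc_write() hunk above fixes an unbounded copy -- the original always copied PROC_WRITELEN bytes regardless of the write size, while the fix rejects oversized writes, copies only `size` bytes, and NUL-terminates before parsing. The same pattern in plain C:

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define WRITELEN 10

/* `buffer` must hold WRITELEN + 1 bytes; `input` stands in for the
 * __user pointer that the kernel reads with copy_from_user(). */
static int bounded_write(char *buffer, const char *input, size_t size)
{
	if (size > WRITELEN)
		return -EIO;
	memcpy(buffer, input, size);
	buffer[size] = 0;	/* now safe to hand to simple_strtoul() */
	return 0;
}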
@ -50,7 +50,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
|
||||
struct tcphdr _tcph, *tcph;
|
||||
__be16 oldval;
|
||||
|
||||
/* Not enought header? */
|
||||
/* Not enough header? */
|
||||
tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
|
||||
if (!tcph)
|
||||
return false;
|
||||
@ -77,7 +77,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
ecn_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
ecn_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
const struct ipt_ECN_info *einfo = par->targinfo;
|
||||
|
||||
|
@ -425,7 +425,7 @@ ipt_log_packet(u_int8_t pf,
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
log_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
log_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
const struct ipt_log_info *loginfo = par->targinfo;
|
||||
struct nf_loginfo li;
|
||||
|
@ -44,7 +44,7 @@ static bool masquerade_tg_check(const struct xt_tgchk_param *par)
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
struct nf_conn *ct;
|
||||
struct nf_conn_nat *nat;
|
||||
@ -60,7 +60,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
nat = nfct_nat(ct);
|
||||
|
||||
NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
|
||||
|| ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
|
||||
|| ctinfo == IP_CT_RELATED_REPLY));
|
||||
|
||||
/* Source address is 0.0.0.0 - locally generated packet that is
|
||||
* probably not supposed to be masqueraded.
|
||||
|
@ -38,7 +38,7 @@ static bool netmap_tg_check(const struct xt_tgchk_param *par)
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
netmap_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
struct nf_conn *ct;
|
||||
enum ip_conntrack_info ctinfo;
|
||||
|
@ -42,7 +42,7 @@ static bool redirect_tg_check(const struct xt_tgchk_param *par)
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
redirect_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
struct nf_conn *ct;
|
||||
enum ip_conntrack_info ctinfo;
|
||||
|
@ -135,13 +135,10 @@ static inline void send_unreach(struct sk_buff *skb_in, int code)
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
reject_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
const struct ipt_reject_info *reject = par->targinfo;
|
||||
|
||||
/* WARNING: This code causes reentry within iptables.
|
||||
This means that the iptables jump stack is now crap. We
|
||||
must return an absolute verdict. --RR */
|
||||
switch (reject->with) {
|
||||
case IPT_ICMP_NET_UNREACHABLE:
|
||||
send_unreach(skb, ICMP_NET_UNREACH);
|
||||
|
@ -281,7 +281,7 @@ alloc_failure:
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
ulog_tg(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
ipt_ulog_packet(par->hooknum, skb, par->in, par->out,
|
||||
par->targinfo, NULL);
|
||||
|
@ -30,7 +30,7 @@ static inline bool match_type(struct net *net, const struct net_device *dev,
|
||||
}
|
||||
|
||||
static bool
|
||||
addrtype_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
addrtype_mt_v0(const struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
struct net *net = dev_net(par->in ? par->in : par->out);
|
||||
const struct ipt_addrtype_info *info = par->matchinfo;
|
||||
@ -48,7 +48,7 @@ addrtype_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
}
|
||||
|
||||
static bool
|
||||
addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
addrtype_mt_v1(const struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
struct net *net = dev_net(par->in ? par->in : par->out);
|
||||
const struct ipt_addrtype_info_v1 *info = par->matchinfo;
|
||||
@ -70,7 +70,7 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
|
||||
static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
|
||||
{
|
||||
struct ipt_addrtype_info_v1 *info = par->matchinfo;
|
||||
|
||||
|
@ -5,7 +5,7 @@
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
#include <linux/in.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/skbuff.h>
|
||||
@@ -18,25 +18,19 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
MODULE_DESCRIPTION("Xtables: IPv4 IPsec-AH SPI match");

#ifdef DEBUG_CONNTRACK
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
min,spi,max);
pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, spi, max);
r=(spi >= min && spi <= max) ^ invert;
duprintf(" result %s\n",r? "PASS" : "FAILED");
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}

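Aside (illustration only): spi_match() above also shows the branch-free negation idiom used across these matches -- XOR the in-range result with the rule's invert flag:

#include <stdbool.h>
#include <stdint.h>

static bool spi_in_range(uint32_t min, uint32_t max, uint32_t spi,
			 bool invert)
{
	/* invert flips the verdict for "! --ahspi ..." style rules */
	return ((spi >= min && spi <= max) ^ invert);
}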
static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
static bool ah_mt(const struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
struct ip_auth_hdr _ahdr;
|
||||
const struct ip_auth_hdr *ah;
|
||||
@ -51,7 +45,7 @@ static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
/* We've been asked to examine this packet, and we
|
||||
* can't. Hence, no choice but to drop.
|
||||
*/
|
||||
duprintf("Dropping evil AH tinygram.\n");
|
||||
pr_debug("Dropping evil AH tinygram.\n");
|
||||
*par->hotdrop = true;
|
||||
return 0;
|
||||
}
|
||||
@ -61,13 +55,13 @@ static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
!!(ahinfo->invflags & IPT_AH_INV_SPI));
|
||||
}
|
||||
|
||||
static bool ah_mt_check(const struct xt_mtchk_param *par)
|
||||
static int ah_mt_check(const struct xt_mtchk_param *par)
|
||||
{
|
||||
const struct ipt_ah *ahinfo = par->matchinfo;
|
||||
|
||||
/* Must specify no unknown invflags */
|
||||
if (ahinfo->invflags & ~IPT_AH_INV_MASK) {
|
||||
duprintf("ipt_ah: unknown flags %X\n", ahinfo->invflags);
|
||||
pr_debug("unknown flags %X\n", ahinfo->invflags);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
@ -67,7 +67,8 @@ static inline bool match_tcp(const struct sk_buff *skb,
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool ecn_mt(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
static bool ecn_mt(const struct sk_buff *skb,
|
||||
const struct xt_action_param *par)
|
||||
{
|
||||
const struct ipt_ecn_info *info = par->matchinfo;
|
||||
|
||||
@ -85,7 +86,7 @@ static bool ecn_mt(const struct sk_buff *skb, const struct xt_match_param *par)
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool ecn_mt_check(const struct xt_mtchk_param *par)
|
||||
static int ecn_mt_check(const struct xt_mtchk_param *par)
|
||||
{
|
||||
const struct ipt_ecn_info *info = par->matchinfo;
|
||||
const struct ipt_ip *ip = par->entryinfo;
|
||||
|
@ -23,36 +23,6 @@ MODULE_DESCRIPTION("iptables filter table");
|
||||
(1 << NF_INET_FORWARD) | \
|
||||
(1 << NF_INET_LOCAL_OUT))
|
||||
|
||||
static struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[3];
|
||||
struct ipt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "filter",
|
||||
.valid_hooks = FILTER_VALID_HOOKS,
|
||||
.num_entries = 4,
|
||||
.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_LOCAL_IN] = 0,
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_LOCAL_IN] = 0,
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static const struct xt_table packet_filter = {
|
||||
.name = "filter",
|
||||
.valid_hooks = FILTER_VALID_HOOKS,
|
||||
@ -128,9 +98,18 @@ module_param(forward, bool, 0000);
|
||||
|
||||
static int __net_init iptable_filter_net_init(struct net *net)
|
||||
{
|
||||
/* Register table */
|
||||
struct ipt_replace *repl;
|
||||
|
||||
repl = ipt_alloc_initial_table(&packet_filter);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
/* Entry 1 is the FORWARD hook */
|
||||
((struct ipt_standard *)repl->entries)[1].target.verdict =
|
||||
-forward - 1;
|
||||
|
||||
net->ipv4.iptable_filter =
|
||||
ipt_register_table(net, &packet_filter, &initial_table.repl);
|
||||
ipt_register_table(net, &packet_filter, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.iptable_filter))
|
||||
return PTR_ERR(net->ipv4.iptable_filter);
|
||||
return 0;
|
||||
@ -138,7 +117,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_filter_net_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table(net->ipv4.iptable_filter);
|
||||
ipt_unregister_table(net, net->ipv4.iptable_filter);
|
||||
}
|
||||
|
||||
static struct pernet_operations iptable_filter_net_ops = {
|
||||
@ -155,9 +134,6 @@ static int __init iptable_filter_init(void)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Entry 1 is the FORWARD hook */
|
||||
initial_table.entries[1].target.verdict = -forward - 1;
|
||||
|
||||
ret = register_pernet_subsys(&iptable_filter_net_ops);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@ -27,43 +27,6 @@ MODULE_DESCRIPTION("iptables mangle table");
|
||||
(1 << NF_INET_LOCAL_OUT) | \
|
||||
(1 << NF_INET_POST_ROUTING))
|
||||
|
||||
/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
|
||||
static const struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[5];
|
||||
struct ipt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "mangle",
|
||||
.valid_hooks = MANGLE_VALID_HOOKS,
|
||||
.num_entries = 6,
|
||||
.size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
|
||||
[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
|
||||
[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static const struct xt_table packet_mangler = {
|
||||
.name = "mangle",
|
||||
.valid_hooks = MANGLE_VALID_HOOKS,
|
||||
@ -198,9 +161,14 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
|
||||
|
||||
static int __net_init iptable_mangle_net_init(struct net *net)
|
||||
{
|
||||
/* Register table */
|
||||
struct ipt_replace *repl;
|
||||
|
||||
repl = ipt_alloc_initial_table(&packet_mangler);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
net->ipv4.iptable_mangle =
|
||||
ipt_register_table(net, &packet_mangler, &initial_table.repl);
|
||||
ipt_register_table(net, &packet_mangler, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.iptable_mangle))
|
||||
return PTR_ERR(net->ipv4.iptable_mangle);
|
||||
return 0;
|
||||
@ -208,7 +176,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_mangle_net_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table(net->ipv4.iptable_mangle);
|
||||
ipt_unregister_table(net, net->ipv4.iptable_mangle);
|
||||
}
|
||||
|
||||
static struct pernet_operations iptable_mangle_net_ops = {
|
||||
|
@ -9,33 +9,6 @@
|
||||
|
||||
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
|
||||
|
||||
static const struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[2];
|
||||
struct ipt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "raw",
|
||||
.valid_hooks = RAW_VALID_HOOKS,
|
||||
.num_entries = 3,
|
||||
.size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static const struct xt_table packet_raw = {
|
||||
.name = "raw",
|
||||
.valid_hooks = RAW_VALID_HOOKS,
|
||||
@ -90,9 +63,14 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
|
||||
|
||||
static int __net_init iptable_raw_net_init(struct net *net)
|
||||
{
|
||||
/* Register table */
|
||||
struct ipt_replace *repl;
|
||||
|
||||
repl = ipt_alloc_initial_table(&packet_raw);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
net->ipv4.iptable_raw =
|
||||
ipt_register_table(net, &packet_raw, &initial_table.repl);
|
||||
ipt_register_table(net, &packet_raw, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.iptable_raw))
|
||||
return PTR_ERR(net->ipv4.iptable_raw);
|
||||
return 0;
|
||||
@ -100,7 +78,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_raw_net_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table(net->ipv4.iptable_raw);
|
||||
ipt_unregister_table(net, net->ipv4.iptable_raw);
|
||||
}
|
||||
|
||||
static struct pernet_operations iptable_raw_net_ops = {
|
||||
|
@ -27,36 +27,6 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules");
|
||||
(1 << NF_INET_FORWARD) | \
|
||||
(1 << NF_INET_LOCAL_OUT)
|
||||
|
||||
static const struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[3];
|
||||
struct ipt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "security",
|
||||
.valid_hooks = SECURITY_VALID_HOOKS,
|
||||
.num_entries = 4,
|
||||
.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_LOCAL_IN] = 0,
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_LOCAL_IN] = 0,
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static const struct xt_table security_table = {
|
||||
.name = "security",
|
||||
.valid_hooks = SECURITY_VALID_HOOKS,
|
||||
@ -127,9 +97,14 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
|
||||
|
||||
static int __net_init iptable_security_net_init(struct net *net)
|
||||
{
|
||||
net->ipv4.iptable_security =
|
||||
ipt_register_table(net, &security_table, &initial_table.repl);
|
||||
struct ipt_replace *repl;
|
||||
|
||||
repl = ipt_alloc_initial_table(&security_table);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
net->ipv4.iptable_security =
|
||||
ipt_register_table(net, &security_table, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.iptable_security))
|
||||
return PTR_ERR(net->ipv4.iptable_security);
|
||||
|
||||
@ -138,7 +113,7 @@ static int __net_init iptable_security_net_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_security_net_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table(net->ipv4.iptable_security);
|
||||
ipt_unregister_table(net, net->ipv4.iptable_security);
|
||||
}
|
||||
|
||||
static struct pernet_operations iptable_security_net_ops = {
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <net/netfilter/nf_conntrack_helper.h>
|
||||
#include <net/netfilter/nf_conntrack_l4proto.h>
|
||||
#include <net/netfilter/nf_conntrack_l3proto.h>
|
||||
#include <net/netfilter/nf_conntrack_zones.h>
|
||||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
|
||||
#include <net/netfilter/nf_nat_helper.h>
|
||||
@ -100,7 +101,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
|
||||
|
||||
/* This is where we call the helper: as the packet goes out. */
|
||||
ct = nf_ct_get(skb, &ctinfo);
|
||||
if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
|
||||
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
|
||||
goto out;
|
||||
|
||||
help = nfct_help(ct);
|
||||
@@ -274,7 +275,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 		return -EINVAL;
 	}

-	h = nf_conntrack_find_get(sock_net(sk), &tuple);
+	h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
 	if (h) {
 		struct sockaddr_in sin;
 		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

@@ -18,6 +18,7 @@
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_log.h>

 static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
@@ -114,13 +115,14 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,

 /* Returns conntrack if it dealt with ICMP, and filled in skb fields */
 static int
-icmp_error_message(struct net *net, struct sk_buff *skb,
+icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 		   enum ip_conntrack_info *ctinfo,
 		   unsigned int hooknum)
 {
 	struct nf_conntrack_tuple innertuple, origtuple;
 	const struct nf_conntrack_l4proto *innerproto;
 	const struct nf_conntrack_tuple_hash *h;
+	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

 	NF_CT_ASSERT(skb->nfct == NULL);

@@ -146,7 +148,7 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
 	*ctinfo = IP_CT_RELATED;

-	h = nf_conntrack_find_get(net, &innertuple);
+	h = nf_conntrack_find_get(net, zone, &innertuple);
 	if (!h) {
 		pr_debug("icmp_error_message: no match\n");
 		return -NF_ACCEPT;
@@ -163,7 +165,8 @@ icmp_error_message(struct net *net, struct sk_buff *skb,

 /* Small and modified version of icmp_rcv */
 static int
-icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
+icmp_error(struct net *net, struct nf_conn *tmpl,
+	   struct sk_buff *skb, unsigned int dataoff,
 	   enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
 {
 	const struct icmphdr *icmph;
@@ -208,7 +211,7 @@ icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
 	    && icmph->type != ICMP_REDIRECT)
 		return NF_ACCEPT;

-	return icmp_error_message(net, skb, ctinfo, hooknum);
+	return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
 }

 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
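The extra `tmpl` argument threaded through `icmp_error()` and `icmp_error_message()` is the template conntrack that a CT target may have attached to the skb; its only use here is to pick the zone for the embedded-packet lookup. A sketch of the derivation (hypothetical helper name; `nf_ct_zone()` returns `NF_CT_DEFAULT_ZONE`, i.e. 0, when no zone extension is present):

    /* which zone should an ICMP-error inner lookup search? */
    static inline u16 icmp_lookup_zone(const struct nf_conn *tmpl)
    {
        return tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
    }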
@@ -16,6 +16,7 @@

 #include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>

 /* Returns new sk_buff, or NULL */
@@ -38,15 +39,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 					      struct sk_buff *skb)
 {
+	u16 zone = NF_CT_DEFAULT_ZONE;
+
+	if (skb->nfct)
+		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+
 #ifdef CONFIG_BRIDGE_NETFILTER
 	if (skb->nf_bridge &&
 	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
-		return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
+		return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
 #endif
 	if (hooknum == NF_INET_PRE_ROUTING)
-		return IP_DEFRAG_CONNTRACK_IN;
+		return IP_DEFRAG_CONNTRACK_IN + zone;
 	else
-		return IP_DEFRAG_CONNTRACK_OUT;
+		return IP_DEFRAG_CONNTRACK_OUT + zone;
 }

 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
@@ -59,7 +65,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 #if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
 	/* Previously seen (loopback)? Ignore. Do this before
 	   fragment check. */
-	if (skb->nfct)
+	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
 		return NF_ACCEPT;
 #endif
 #endif
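Offsetting the `IP_DEFRAG_CONNTRACK_*` identifiers by the zone gives every zone a private defragmentation context, so fragments carrying identical tuples in different zones can never be reassembled into one packet. This relies on the enum reserving a full 16-bit band per conntrack user, approximately (a sketch of upstream's `ip_defrag_users`; consult this tree's own header for the authoritative layout):

    enum ip_defrag_users {
        IP_DEFRAG_LOCAL_DELIVER,
        IP_DEFRAG_CALL_RA_CHAIN,
        IP_DEFRAG_CONNTRACK_IN,
        __IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
        IP_DEFRAG_CONNTRACK_OUT,
        __IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
        IP_DEFRAG_CONNTRACK_BRIDGE_IN,
        __IP_DEFRAG_CONNTRACK_BRIDGE_IN_END =
            IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
        IP_DEFRAG_VS_IN,
        IP_DEFRAG_VS_OUT,
        IP_DEFRAG_VS_FWD,
    };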
@@ -30,6 +30,7 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_zones.h>

 static DEFINE_SPINLOCK(nf_nat_lock);

@@ -69,13 +70,14 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);

 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, u16 zone,
+	    const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int hash;

 	/* Original src, to ensure we map it consistently if poss. */
 	hash = jhash_3words((__force u32)tuple->src.u3.ip,
-			    (__force u32)tuple->src.u.all,
+			    (__force u32)tuple->src.u.all ^ zone,
 			    tuple->dst.protonum, 0);
 	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }
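XORing the zone into one of the hash words keeps equal source tuples from different zones out of the same deterministic bucket sequence without touching the bucket arithmetic. The idea in isolation (hypothetical function name, same `jhash_3words()` primitive as the hunk):

    #include <linux/jhash.h>

    /* zone only perturbs the hash input; plain XOR suffices because
     * the goal is separating zones, not cryptographic mixing */
    static unsigned int nat_src_hash(u32 src_ip, u16 src_port, u8 proto,
                                     u16 zone, unsigned int htable_size)
    {
        u32 hash = jhash_3words(src_ip, (u32)src_port ^ zone, proto, 0);

        return ((u64)hash * htable_size) >> 32;
    }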
@@ -139,12 +141,12 @@ same_src(const struct nf_conn *ct,

 /* Only called for SRC manip */
 static int
-find_appropriate_src(struct net *net,
+find_appropriate_src(struct net *net, u16 zone,
 		     const struct nf_conntrack_tuple *tuple,
 		     struct nf_conntrack_tuple *result,
 		     const struct nf_nat_range *range)
 {
-	unsigned int h = hash_by_src(net, tuple);
+	unsigned int h = hash_by_src(net, zone, tuple);
 	const struct nf_conn_nat *nat;
 	const struct nf_conn *ct;
 	const struct hlist_node *n;
@@ -152,7 +154,7 @@ find_appropriate_src(struct net *net,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
 		ct = nat->ct;
-		if (same_src(ct, tuple)) {
+		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
 			/* Copy source part from reply tuple. */
 			nf_ct_invert_tuplepr(result,
 				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -175,7 +177,7 @@ find_appropriate_src(struct net *net,
    the ip with the lowest src-ip/dst-ip/proto usage.
 */
 static void
-find_best_ips_proto(struct nf_conntrack_tuple *tuple,
+find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
 		    const struct nf_nat_range *range,
 		    const struct nf_conn *ct,
 		    enum nf_nat_manip_type maniptype)
@@ -209,7 +211,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
 	maxip = ntohl(range->max_ip);
 	j = jhash_2words((__force u32)tuple->src.u3.ip,
 			 range->flags & IP_NAT_RANGE_PERSISTENT ?
-				0 : (__force u32)tuple->dst.u3.ip, 0);
+				0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
 	j = ((u64)j * (maxip - minip + 1)) >> 32;
 	*var_ipp = htonl(minip + j);
 }
@@ -229,6 +231,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
 {
 	struct net *net = nf_ct_net(ct);
 	const struct nf_nat_protocol *proto;
+	u16 zone = nf_ct_zone(ct);

 	/* 1) If this srcip/proto/src-proto-part is currently mapped,
 	   and that same mapping gives a unique tuple within the given
@@ -239,7 +242,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
 	   manips not an issue. */
 	if (maniptype == IP_NAT_MANIP_SRC &&
 	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
-		if (find_appropriate_src(net, orig_tuple, tuple, range)) {
+		if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
 			pr_debug("get_unique_tuple: Found current src map\n");
 			if (!nf_nat_used_tuple(tuple, ct))
 				return;
@@ -249,7 +252,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
 	/* 2) Select the least-used IP/proto combination in the given
 	   range. */
 	*tuple = *orig_tuple;
-	find_best_ips_proto(tuple, range, ct, maniptype);
+	find_best_ips_proto(zone, tuple, range, ct, maniptype);

 	/* 3) The per-protocol part of the manip is made to map into
 	   the range to make a unique tuple. */
@@ -327,7 +330,8 @@ nf_nat_setup_info(struct nf_conn *ct,
 	if (have_to_hash) {
 		unsigned int srchash;

-		srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		srchash = hash_by_src(net, nf_ct_zone(ct),
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		spin_lock_bh(&nf_nat_lock);
 		/* nf_conntrack_alter_reply might re-allocate exntension aera */
 		nat = nfct_nat(ct);
@@ -444,7 +448,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,

 	/* Must be RELATED */
 	NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
-		     skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
+		     skb->nfctinfo == IP_CT_RELATED_REPLY);

 	/* Redirects on non-null nats must be dropped, else they'll
 	   start talking to each other without our translation, and be
@@ -737,7 +741,7 @@ static int __init nf_nat_init(void)
 	spin_unlock_bh(&nf_nat_lock);

 	/* Initialize fake conntrack so that NAT will skip it */
-	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
+	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

 	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
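`nf_ct_untracked_status_or()` here and `nf_ct_is_untracked()` below come from the upstream change that stopped callers from poking the global `nf_conntrack_untracked` directly, in preparation for making it per-cpu. The accessors behave roughly like this (a sketch; the later per-cpu variant ORs the bits into every cpu's instance):

    static inline bool nf_ct_is_untracked(const struct nf_conn *ct)
    {
        return test_bit(IPS_UNTRACKED_BIT, &ct->status);
    }

    void nf_ct_untracked_status_or(unsigned long bits)
    {
        nf_conntrack_untracked.status |= bits;
    }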
@@ -25,6 +25,7 @@
 #include <net/netfilter/nf_nat_rule.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 #include <linux/netfilter/nf_conntrack_proto_gre.h>
 #include <linux/netfilter/nf_conntrack_pptp.h>

@@ -74,7 +75,7 @@ static void pptp_nat_expected(struct nf_conn *ct,

 	pr_debug("trying to unexpect other dir: ");
 	nf_ct_dump_tuple_ip(&t);
-	other_exp = nf_ct_expect_find_get(net, &t);
+	other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
 	if (other_exp) {
 		nf_ct_unexpect_related(other_exp);
 		nf_ct_expect_put(other_exp);

@@ -12,6 +12,7 @@
 #include <linux/ip.h>

 #include <linux/netfilter.h>
+#include <net/secure_seq.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_rule.h>
@@ -28,36 +28,6 @@
 			    (1 << NF_INET_POST_ROUTING) | \
 			    (1 << NF_INET_LOCAL_OUT))

-static const struct
-{
-	struct ipt_replace repl;
-	struct ipt_standard entries[3];
-	struct ipt_error term;
-} nat_initial_table __net_initdata = {
-	.repl = {
-		.name = "nat",
-		.valid_hooks = NAT_VALID_HOOKS,
-		.num_entries = 4,
-		.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
-		.hook_entry = {
-			[NF_INET_PRE_ROUTING] = 0,
-			[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
-			[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
-		},
-		.underflow = {
-			[NF_INET_PRE_ROUTING] = 0,
-			[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
-			[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
-		},
-	},
-	.entries = {
-		IPT_STANDARD_INIT(NF_ACCEPT),	/* PRE_ROUTING */
-		IPT_STANDARD_INIT(NF_ACCEPT),	/* POST_ROUTING */
-		IPT_STANDARD_INIT(NF_ACCEPT),	/* LOCAL_OUT */
-	},
-	.term = IPT_ERROR_INIT,			/* ERROR */
-};
-
 static const struct xt_table nat_table = {
 	.name		= "nat",
 	.valid_hooks	= NAT_VALID_HOOKS,
@@ -67,7 +37,7 @@ static const struct xt_table nat_table = {

 /* Source NAT */
 static unsigned int
-ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par)
+ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
@@ -79,14 +49,14 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par)

 	/* Connection must be valid and new. */
 	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
-			    ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
+			    ctinfo == IP_CT_RELATED_REPLY));
 	NF_CT_ASSERT(par->out != NULL);

 	return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
 }

 static unsigned int
-ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par)
+ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
@@ -186,8 +156,13 @@ static struct xt_target ipt_dnat_reg __read_mostly = {

 static int __net_init nf_nat_rule_net_init(struct net *net)
 {
-	net->ipv4.nat_table = ipt_register_table(net, &nat_table,
-						 &nat_initial_table.repl);
+	struct ipt_replace *repl;
+
+	repl = ipt_alloc_initial_table(&nat_table);
+	if (repl == NULL)
+		return -ENOMEM;
+	net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
+	kfree(repl);
 	if (IS_ERR(net->ipv4.nat_table))
 		return PTR_ERR(net->ipv4.nat_table);
 	return 0;
@@ -195,7 +170,7 @@ static int __net_init nf_nat_rule_net_init(struct net *net)

 static void __net_exit nf_nat_rule_net_exit(struct net *net)
 {
-	ipt_unregister_table(net->ipv4.nat_table);
+	ipt_unregister_table(net, net->ipv4.nat_table);
 }

 static struct pernet_operations nf_nat_rule_net_ops = {
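`xt_target_param` and `xt_match_param` were folded into the single `xt_action_param` upstream; the members used by these targets (`par->out`, `par->targinfo`) keep their names, so only the signatures change. A skeletal target against the new type (illustrative name, registration omitted):

    static unsigned int
    example_tg(struct sk_buff *skb, const struct xt_action_param *par)
    {
        /* same accessors as with the old xt_target_param */
        const struct nf_nat_multi_range_compat *mr = par->targinfo;

        (void)mr;
        return XT_CONTINUE;
    }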
@@ -97,7 +97,7 @@ nf_nat_fn(unsigned int hooknum,
 		return NF_ACCEPT;

 	/* Don't try to NAT if this packet is not conntracked */
-	if (ct == &nf_conntrack_untracked)
+	if (nf_ct_is_untracked(ct))
 		return NF_ACCEPT;

 	nat = nfct_nat(ct);
@@ -114,7 +114,7 @@ nf_nat_fn(unsigned int hooknum,

 	switch (ctinfo) {
 	case IP_CT_RELATED:
-	case IP_CT_RELATED+IP_CT_IS_REPLY:
+	case IP_CT_RELATED_REPLY:
 		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
 			if (!nf_nat_icmp_reply_translation(ct, ctinfo,
 							   hooknum, skb))
@@ -149,7 +149,7 @@ nf_nat_fn(unsigned int hooknum,
 	default:
 		/* ESTABLISHED */
 		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
-			     ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
+			     ctinfo == IP_CT_ESTABLISHED_REPLY);
 	}

 	return nf_nat_packet(ct, ctinfo, hooknum, skb);
@@ -107,6 +107,7 @@
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
+#include <net/secure_seq.h>

 #define RT_FL_TOS(oldflp) \
     ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
@@ -287,12 +288,12 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
 		if (!rt_hash_table[st->bucket].chain)
 			continue;
 		rcu_read_lock_bh();
-		r = rcu_dereference(rt_hash_table[st->bucket].chain);
+		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 		while (r) {
 			if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
 			    r->rt_genid == st->genid)
 				return r;
-			r = rcu_dereference(r->u.dst.rt_next);
+			r = rcu_dereference_bh(r->u.dst.rt_next);
 		}
 		rcu_read_unlock_bh();
 	}
@@ -314,7 +315,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 		rcu_read_lock_bh();
 		r = rt_hash_table[st->bucket].chain;
 	}
-	return rcu_dereference(r);
+	return rcu_dereference_bh(r);
 }

 static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -2685,8 +2686,8 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
 	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));

 	rcu_read_lock_bh();
-	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.dst.rt_next)) {
+	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
+	     rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
 		    rth->fl.fl4_src == flp->fl4_src &&
 		    rth->fl.iif == 0 &&
@@ -2712,6 +2713,11 @@ slow_output:

 EXPORT_SYMBOL_GPL(__ip_route_output_key);

+static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+{
+	return NULL;
+}
+
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -2720,7 +2726,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 	.family			= AF_INET,
 	.protocol		= cpu_to_be16(ETH_P_IP),
 	.destroy		= ipv4_dst_destroy,
-	.check			= ipv4_dst_check,
+	.check			= ipv4_blackhole_dst_check,
 	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
 	.entries		= ATOMIC_INIT(0),
 };
@@ -3004,8 +3010,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		if (!rt_hash_table[h].chain)
 			continue;
 		rcu_read_lock_bh();
-		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
+		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
+		     rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
 			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
 				continue;
 			if (rt_is_expired(rt))
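These route-cache walks run under `rcu_read_lock_bh()`, and lockdep-RCU flags a plain `rcu_dereference()` there because it asserts the non-BH read side; `rcu_dereference_bh()` is the matching accessor. The invariant pattern the hunks converge on:

    rcu_read_lock_bh();
    for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
         rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
        /* inspect rth inside the BH read-side critical section */
    }
    rcu_read_unlock_bh();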
@@ -265,6 +265,10 @@
 #include <linux/err.h>
 #include <linux/crypto.h>

+#ifdef CONFIG_UID_STAT
+#include <linux/uid_stat.h>
+#endif
+
 #include <net/icmp.h>
 #include <net/tcp.h>
 #include <net/xfrm.h>
@@ -386,8 +390,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	 */

 	mask = 0;
-	if (sk->sk_err)
-		mask = POLLERR;

 	/*
 	 * POLLHUP is certainly not done right. But poll() doesn't
@@ -451,11 +453,17 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
 					mask |= POLLOUT | POLLWRNORM;
 				}
 			}
 		} else
 			mask |= POLLOUT | POLLWRNORM;

 		if (tp->urg_data & TCP_URG_VALID)
 			mask |= POLLPRI;
 	}
+	/* This barrier is coupled with smp_wmb() in tcp_reset() */
+	smp_rmb();
+	if (sk->sk_err)
+		mask |= POLLERR;
+
 	return mask;
 }
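Moving the `sk_err` test to the end and inserting `smp_rmb()` pairs with the `smp_wmb()` that this same commit adds to `tcp_reset()` (see the tcp_input.c hunk further down): a poller that observes the post-reset socket state is then guaranteed to also observe the error code. Schematically (a sketch, not the full functions):

    static void reset_side(struct sock *sk)          /* tcp_reset() */
    {
        sk->sk_err = ECONNRESET;
        smp_wmb();                  /* publish sk_err before waking */
        sk->sk_error_report(sk);
    }

    static unsigned int poll_side(struct sock *sk, unsigned int mask)
    {                                                /* tcp_poll() */
        smp_rmb();                  /* if we saw the wakeup, see sk_err */
        if (sk->sk_err)
            mask |= POLLERR;
        return mask;
    }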
@@ -934,7 +942,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		goto out_err;

 	while (--iovlen >= 0) {
-		int seglen = iov->iov_len;
+		size_t seglen = iov->iov_len;
 		unsigned char __user *from = iov->iov_base;

 		iov++;
@@ -1098,6 +1106,11 @@ out:
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
+
+#ifdef CONFIG_UID_STAT
+	if (copied > 0)
+		update_tcp_snd(current_uid(), copied);
+#endif
 	return copied;

 do_fault:
@@ -1334,14 +1347,19 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 		sk_eat_skb(sk, skb, 0);
 		if (!desc->count)
 			break;
+		tp->copied_seq = seq;
 	}
 	tp->copied_seq = seq;

 	tcp_rcv_space_adjust(sk);

 	/* Clean up data we have read: This will do ACK frames. */
-	if (copied > 0)
+	if (copied > 0) {
 		tcp_cleanup_rbuf(sk, copied);
+#ifdef CONFIG_UID_STAT
+		update_tcp_rcv(current_uid(), copied);
+#endif
+	}
 	return copied;
 }
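The `CONFIG_UID_STAT` blocks are the hooks this backport feeds: each successful send or receive path credits the calling UID's byte counters. The interface assumed by the call sites (prototypes inferred from the hunks; Android keeps the implementation in its uid_stat driver):

    /* <linux/uid_stat.h>, as used above */
    int update_tcp_snd(uid_t uid, int size);   /* bytes sent by uid */
    int update_tcp_rcv(uid_t uid, int size);   /* bytes received by uid */

Note the guards: only positive `copied`/`err` values are accounted, so error returns never show up as traffic.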
@@ -1737,6 +1755,10 @@ skip_copy:

 	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
+#ifdef CONFIG_UID_STAT
+	if (copied > 0)
+		update_tcp_rcv(current_uid(), copied);
+#endif
 	return copied;

 out:
@@ -1746,6 +1768,10 @@ out:

 recv_urg:
 	err = tcp_recv_urg(sk, msg, len, flags);
+#ifdef CONFIG_UID_STAT
+	if (err > 0)
+		update_tcp_rcv(current_uid(), err);
+#endif
 	goto out;
 }

@@ -1975,11 +2001,8 @@ adjudge_to_death:
 		}
 	}
 	if (sk->sk_state != TCP_CLOSE) {
-		int orphan_count = percpu_counter_read_positive(
-			sk->sk_prot->orphan_count);
-
 		sk_mem_reclaim(sk);
-		if (tcp_too_many_orphans(sk, orphan_count)) {
+		if (tcp_too_many_orphans(sk, 0)) {
 			if (net_ratelimit())
 				printk(KERN_INFO "TCP: too many of orphaned "
 				       "sockets\n");
@@ -2115,7 +2138,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		/* Values greater than interface MTU won't take effect. However
 		 * at the point when this call is done we typically don't yet
 		 * know which interface is going to be used */
-		if (val < 8 || val > MAX_TCP_WINDOW) {
+		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
 			err = -EINVAL;
 			break;
 		}
@@ -2880,7 +2903,7 @@ void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long nr_pages, limit;
-	int order, i, max_share;
+	int i, max_share, cnt;

 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

@@ -2929,31 +2952,23 @@ void __init tcp_init(void)
 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
 	}

-	/* Try to be a bit smarter and adjust defaults depending
-	 * on available memory.
-	 */
-	for (order = 0; ((1 << order) << PAGE_SHIFT) <
-			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
-			order++)
-		;
-	if (order >= 4) {
-		tcp_death_row.sysctl_max_tw_buckets = 180000;
-		sysctl_tcp_max_orphans = 4096 << (order - 4);
-		sysctl_max_syn_backlog = 1024;
-	} else if (order < 3) {
-		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
-		sysctl_tcp_max_orphans >>= (3 - order);
-		sysctl_max_syn_backlog = 128;
-	}
+	cnt = tcp_hashinfo.ehash_size;
+
+	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+	sysctl_tcp_max_orphans = cnt / 2;
+	sysctl_max_syn_backlog = max(128, cnt / 256);

 	/* Set the pressure threshold to be a fraction of global memory that
 	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-	 * memory, with a floor of 128 pages.
+	 * memory, with a floor of 128 pages, and a ceiling that prevents an
+	 * integer overflow.
 	 */
 	nr_pages = totalram_pages - totalhigh_pages;
 	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
 	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
 	limit = max(limit, 128UL);
+	limit = min(limit, INT_MAX * 4UL / 3 / 2);
 	sysctl_tcp_mem[0] = limit / 4 * 3;
 	sysctl_tcp_mem[1] = limit;
 	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
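The new sizing derives `tcp_mem` from physical memory rather than from the bind-hash allocation order, and the added `min()` keeps `limit * 2` from overflowing the `int`-typed sysctl on very large machines. A worked example, assuming 4 KB pages (`PAGE_SHIFT == 12`) and 256 MB of low memory, i.e. `nr_pages == 65536`:

    /*
     * limit = min(65536, 1 << 16) >> 8          =   256
     * limit = (256 * (65536 >> 8)) >> 1         = 32768 pages (128 MB)
     * limit = max(32768, 128)                   = 32768
     * limit = min(32768, INT_MAX * 4 / 3 / 2)   = 32768
     *
     * sysctl_tcp_mem[0] = 24576 pages  ( 96 MB, start reclaiming)
     * sysctl_tcp_mem[1] = 32768 pages  (128 MB, pressure)
     * sysctl_tcp_mem[2] = 49152 pages  (192 MB, hard limit)
     */

Pressure lands at half of RAM, matching the "1/2 at 256 MB" promised by the comment; the ceiling only matters on machines with far more memory.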
@@ -3969,6 +3969,8 @@ static void tcp_reset(struct sock *sk)
 	default:
 		sk->sk_err = ECONNRESET;
 	}
+	/* This barrier is coupled with smp_rmb() in tcp_poll() */
+	smp_wmb();

 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_error_report(sk);

@@ -71,6 +71,7 @@
 #include <net/timewait_sock.h>
 #include <net/xfrm.h>
 #include <net/netdma.h>
+#include <net/secure_seq.h>

 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -231,8 +231,6 @@ void tcp_select_initial_window(int __space, __u32 mss,
 		if (*rcv_wnd > init_cwnd * mss)
 			*rcv_wnd = init_cwnd * mss;
 	}
-	/* Lock the initial TCP window size to 64K*/
-	*rcv_wnd = 64240;

 	/* Set the clamp no higher than max representable value */
 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
@@ -2039,6 +2037,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	int mib_idx;
 	int fwd_rexmitting = 0;

+	if (!tp->packets_out)
+		return;
+
 	if (!tp->lost_out)
 		tp->retransmit_high = tp->snd_una;

@@ -65,18 +65,18 @@ static void tcp_write_err(struct sock *sk)
 static int tcp_out_of_resources(struct sock *sk, int do_reset)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int orphans = percpu_counter_read_positive(&tcp_orphan_count);
+	int shift = 0;

 	/* If peer does not open window for long time, or did not transmit
 	 * anything for long time, penalize it. */
 	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
-		orphans <<= 1;
+		shift++;

 	/* If some dubious ICMP arrived, penalize even more. */
 	if (sk->sk_err_soft)
-		orphans <<= 1;
+		shift++;

-	if (tcp_too_many_orphans(sk, orphans)) {
+	if (tcp_too_many_orphans(sk, shift)) {
 		if (net_ratelimit())
 			printk(KERN_INFO "Out of socket memory\n");
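With both call sites converted, `tcp_too_many_orphans()` takes a shift (0, 1 or 2) instead of a pre-read counter value, reads the percpu orphan counter itself, and only pays for an exact `percpu_counter_sum_positive()` when the cheap approximate read is already over the limit. Upstream's helper is approximately (a hedged sketch):

    static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
    {
        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
        int orphans = percpu_counter_read_positive(ocp);

        if (orphans << shift > sysctl_tcp_max_orphans) {
            /* fast read over the limit: take the exact sum
             * before declaring resource exhaustion */
            orphans = percpu_counter_sum_positive(ocp);
            if (orphans << shift > sysctl_tcp_max_orphans)
                return true;
        }

        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
            atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
            return true;
        return false;
    }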