remove 2.6.21 patches and config, too

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@19579 3c298f89-4303-0410-b956-a3cf2f4a3e73

branch: master
parent: 98d6f0382b
commit: 3d047f889a
[Diffs for two files suppressed because they are too large.]
@@ -1,788 +0,0 @@
--- /dev/null
+++ b/include/linux/LzmaDecode.h
@@ -0,0 +1,100 @@
+/*
+ LzmaDecode.h
+ LZMA Decoder interface
+
+ LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
+ http://www.7-zip.org/
+
+ LZMA SDK is licensed under two licenses:
+ 1) GNU Lesser General Public License (GNU LGPL)
+ 2) Common Public License (CPL)
+ It means that you can select one of these two licenses and
+ follow rules of that license.
+
+ SPECIAL EXCEPTION:
+ Igor Pavlov, as the author of this code, expressly permits you to
+ statically or dynamically link your code (or bind by name) to the
+ interfaces of this file without subjecting your linked code to the
+ terms of the CPL or GNU LGPL. Any modifications or additions
+ to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#ifndef __LZMADECODE_H
+#define __LZMADECODE_H
+
+/* #define _LZMA_IN_CB */
+/* Use callback for input data */
+
+/* #define _LZMA_OUT_READ */
+/* Use read function for output data */
+
+/* #define _LZMA_PROB32 */
+/* It can increase speed on some 32-bit CPUs,
+ but memory usage will be doubled in that case */
+
+/* #define _LZMA_LOC_OPT */
+/* Enable local speed optimizations inside code */
+
+#ifndef UInt32
+#ifdef _LZMA_UINT32_IS_ULONG
+#define UInt32 unsigned long
+#else
+#define UInt32 unsigned int
+#endif
+#endif
+
+#ifdef _LZMA_PROB32
+#define CProb UInt32
+#else
+#define CProb unsigned short
+#endif
+
+#define LZMA_RESULT_OK 0
+#define LZMA_RESULT_DATA_ERROR 1
+#define LZMA_RESULT_NOT_ENOUGH_MEM 2
+
+#ifdef _LZMA_IN_CB
+typedef struct _ILzmaInCallback
+{
+ int (*Read)(void *object, unsigned char **buffer, UInt32 *bufferSize);
+} ILzmaInCallback;
+#endif
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+/*
+bufferSize = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)))* sizeof(CProb)
+bufferSize += 100 in case of _LZMA_OUT_READ
+by default CProb is unsigned short,
+but if specify _LZMA_PROB_32, CProb will be UInt32(unsigned int)
+*/
+
+#ifdef _LZMA_OUT_READ
+int LzmaDecoderInit(
+ unsigned char *buffer, UInt32 bufferSize,
+ int lc, int lp, int pb,
+ unsigned char *dictionary, UInt32 dictionarySize,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback
+ #else
+ unsigned char *inStream, UInt32 inSize
+ #endif
+);
+#endif
+
+int LzmaDecode(
+ unsigned char *buffer,
+ #ifndef _LZMA_OUT_READ
+ UInt32 bufferSize,
+ int lc, int lp, int pb,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback,
+ #else
+ unsigned char *inStream, UInt32 inSize,
+ #endif
+ #endif
+ unsigned char *outStream, UInt32 outSize,
+ UInt32 *outSizeProcessed);
+
+#endif
--- /dev/null
|
||||
+++ b/lib/LzmaDecode.c
|
||||
@@ -0,0 +1,663 @@
|
||||
+/*
|
||||
+ LzmaDecode.c
|
||||
+ LZMA Decoder
|
||||
+
|
||||
+ LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
|
||||
+ http://www.7-zip.org/
|
||||
+
|
||||
+ LZMA SDK is licensed under two licenses:
|
||||
+ 1) GNU Lesser General Public License (GNU LGPL)
|
||||
+ 2) Common Public License (CPL)
|
||||
+ It means that you can select one of these two licenses and
|
||||
+ follow rules of that license.
|
||||
+
|
||||
+ SPECIAL EXCEPTION:
|
||||
+ Igor Pavlov, as the author of this code, expressly permits you to
|
||||
+ statically or dynamically link your code (or bind by name) to the
|
||||
+ interfaces of this file without subjecting your linked code to the
|
||||
+ terms of the CPL or GNU LGPL. Any modifications or additions
|
||||
+ to this file, however, are subject to the LGPL or CPL terms.
|
||||
+*/
|
||||
+
|
||||
+#include <linux/LzmaDecode.h>
|
||||
+
|
||||
+#ifndef Byte
|
||||
+#define Byte unsigned char
|
||||
+#endif
|
||||
+
|
||||
+#define kNumTopBits 24
|
||||
+#define kTopValue ((UInt32)1 << kNumTopBits)
|
||||
+
|
||||
+#define kNumBitModelTotalBits 11
|
||||
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
|
||||
+#define kNumMoveBits 5
|
||||
+
|
||||
+typedef struct _CRangeDecoder
|
||||
+{
|
||||
+ Byte *Buffer;
|
||||
+ Byte *BufferLim;
|
||||
+ UInt32 Range;
|
||||
+ UInt32 Code;
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ ILzmaInCallback *InCallback;
|
||||
+ int Result;
|
||||
+ #endif
|
||||
+ int ExtraBytes;
|
||||
+} CRangeDecoder;
|
||||
+
|
||||
+Byte RangeDecoderReadByte(CRangeDecoder *rd)
|
||||
+{
|
||||
+ if (rd->Buffer == rd->BufferLim)
|
||||
+ {
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ UInt32 size;
|
||||
+ rd->Result = rd->InCallback->Read(rd->InCallback, &rd->Buffer, &size);
|
||||
+ rd->BufferLim = rd->Buffer + size;
|
||||
+ if (size == 0)
|
||||
+ #endif
|
||||
+ {
|
||||
+ rd->ExtraBytes = 1;
|
||||
+ return 0xFF;
|
||||
+ }
|
||||
+ }
|
||||
+ return (*rd->Buffer++);
|
||||
+}
|
||||
+
|
||||
+/* #define ReadByte (*rd->Buffer++) */
|
||||
+#define ReadByte (RangeDecoderReadByte(rd))
|
||||
+
|
||||
+void RangeDecoderInit(CRangeDecoder *rd,
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ ILzmaInCallback *inCallback
|
||||
+ #else
|
||||
+ Byte *stream, UInt32 bufferSize
|
||||
+ #endif
|
||||
+ )
|
||||
+{
|
||||
+ int i;
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ rd->InCallback = inCallback;
|
||||
+ rd->Buffer = rd->BufferLim = 0;
|
||||
+ #else
|
||||
+ rd->Buffer = stream;
|
||||
+ rd->BufferLim = stream + bufferSize;
|
||||
+ #endif
|
||||
+ rd->ExtraBytes = 0;
|
||||
+ rd->Code = 0;
|
||||
+ rd->Range = (0xFFFFFFFF);
|
||||
+ for(i = 0; i < 5; i++)
|
||||
+ rd->Code = (rd->Code << 8) | ReadByte;
|
||||
+}
|
||||
+
|
||||
+#define RC_INIT_VAR UInt32 range = rd->Range; UInt32 code = rd->Code;
|
||||
+#define RC_FLUSH_VAR rd->Range = range; rd->Code = code;
|
||||
+#define RC_NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | ReadByte; }
|
||||
+
|
||||
+UInt32 RangeDecoderDecodeDirectBits(CRangeDecoder *rd, int numTotalBits)
|
||||
+{
|
||||
+ RC_INIT_VAR
|
||||
+ UInt32 result = 0;
|
||||
+ int i;
|
||||
+ for (i = numTotalBits; i > 0; i--)
|
||||
+ {
|
||||
+ /* UInt32 t; */
|
||||
+ range >>= 1;
|
||||
+
|
||||
+ result <<= 1;
|
||||
+ if (code >= range)
|
||||
+ {
|
||||
+ code -= range;
|
||||
+ result |= 1;
|
||||
+ }
|
||||
+ /*
|
||||
+ t = (code - range) >> 31;
|
||||
+ t &= 1;
|
||||
+ code -= range & (t - 1);
|
||||
+ result = (result + result) | (1 - t);
|
||||
+ */
|
||||
+ RC_NORMALIZE
|
||||
+ }
|
||||
+ RC_FLUSH_VAR
|
||||
+ return result;
|
||||
+}
|
||||
+
|
||||
+int RangeDecoderBitDecode(CProb *prob, CRangeDecoder *rd)
|
||||
+{
|
||||
+ UInt32 bound = (rd->Range >> kNumBitModelTotalBits) * *prob;
|
||||
+ if (rd->Code < bound)
|
||||
+ {
|
||||
+ rd->Range = bound;
|
||||
+ *prob += (kBitModelTotal - *prob) >> kNumMoveBits;
|
||||
+ if (rd->Range < kTopValue)
|
||||
+ {
|
||||
+ rd->Code = (rd->Code << 8) | ReadByte;
|
||||
+ rd->Range <<= 8;
|
||||
+ }
|
||||
+ return 0;
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ rd->Range -= bound;
|
||||
+ rd->Code -= bound;
|
||||
+ *prob -= (*prob) >> kNumMoveBits;
|
||||
+ if (rd->Range < kTopValue)
|
||||
+ {
|
||||
+ rd->Code = (rd->Code << 8) | ReadByte;
|
||||
+ rd->Range <<= 8;
|
||||
+ }
|
||||
+ return 1;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+#define RC_GET_BIT2(prob, mi, A0, A1) \
|
||||
+ UInt32 bound = (range >> kNumBitModelTotalBits) * *prob; \
|
||||
+ if (code < bound) \
|
||||
+ { A0; range = bound; *prob += (kBitModelTotal - *prob) >> kNumMoveBits; mi <<= 1; } \
|
||||
+ else \
|
||||
+ { A1; range -= bound; code -= bound; *prob -= (*prob) >> kNumMoveBits; mi = (mi + mi) + 1; } \
|
||||
+ RC_NORMALIZE
|
||||
+
|
||||
+#define RC_GET_BIT(prob, mi) RC_GET_BIT2(prob, mi, ; , ;)
|
||||
+
|
||||
+int RangeDecoderBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
|
||||
+{
|
||||
+ int mi = 1;
|
||||
+ int i;
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ RC_INIT_VAR
|
||||
+ #endif
|
||||
+ for(i = numLevels; i > 0; i--)
|
||||
+ {
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ CProb *prob = probs + mi;
|
||||
+ RC_GET_BIT(prob, mi)
|
||||
+ #else
|
||||
+ mi = (mi + mi) + RangeDecoderBitDecode(probs + mi, rd);
|
||||
+ #endif
|
||||
+ }
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ RC_FLUSH_VAR
|
||||
+ #endif
|
||||
+ return mi - (1 << numLevels);
|
||||
+}
|
||||
+
|
||||
+int RangeDecoderReverseBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
|
||||
+{
|
||||
+ int mi = 1;
|
||||
+ int i;
|
||||
+ int symbol = 0;
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ RC_INIT_VAR
|
||||
+ #endif
|
||||
+ for(i = 0; i < numLevels; i++)
|
||||
+ {
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ CProb *prob = probs + mi;
|
||||
+ RC_GET_BIT2(prob, mi, ; , symbol |= (1 << i))
|
||||
+ #else
|
||||
+ int bit = RangeDecoderBitDecode(probs + mi, rd);
|
||||
+ mi = mi + mi + bit;
|
||||
+ symbol |= (bit << i);
|
||||
+ #endif
|
||||
+ }
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ RC_FLUSH_VAR
|
||||
+ #endif
|
||||
+ return symbol;
|
||||
+}
|
||||
+
|
||||
+Byte LzmaLiteralDecode(CProb *probs, CRangeDecoder *rd)
|
||||
+{
|
||||
+ int symbol = 1;
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ RC_INIT_VAR
|
||||
+ #endif
|
||||
+ do
|
||||
+ {
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ CProb *prob = probs + symbol;
|
||||
+ RC_GET_BIT(prob, symbol)
|
||||
+ #else
|
||||
+ symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
|
||||
+ #endif
|
||||
+ }
|
||||
+ while (symbol < 0x100);
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ RC_FLUSH_VAR
|
||||
+ #endif
|
||||
+ return symbol;
|
||||
+}
|
||||
+
|
||||
+Byte LzmaLiteralDecodeMatch(CProb *probs, CRangeDecoder *rd, Byte matchByte)
|
||||
+{
|
||||
+ int symbol = 1;
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ RC_INIT_VAR
|
||||
+ #endif
|
||||
+ do
|
||||
+ {
|
||||
+ int bit;
|
||||
+ int matchBit = (matchByte >> 7) & 1;
|
||||
+ matchByte <<= 1;
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ {
|
||||
+ CProb *prob = probs + ((1 + matchBit) << 8) + symbol;
|
||||
+ RC_GET_BIT2(prob, symbol, bit = 0, bit = 1)
|
||||
+ }
|
||||
+ #else
|
||||
+ bit = RangeDecoderBitDecode(probs + ((1 + matchBit) << 8) + symbol, rd);
|
||||
+ symbol = (symbol << 1) | bit;
|
||||
+ #endif
|
||||
+ if (matchBit != bit)
|
||||
+ {
|
||||
+ while (symbol < 0x100)
|
||||
+ {
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ CProb *prob = probs + symbol;
|
||||
+ RC_GET_BIT(prob, symbol)
|
||||
+ #else
|
||||
+ symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
|
||||
+ #endif
|
||||
+ }
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ while (symbol < 0x100);
|
||||
+ #ifdef _LZMA_LOC_OPT
|
||||
+ RC_FLUSH_VAR
|
||||
+ #endif
|
||||
+ return symbol;
|
||||
+}
|
||||
+
|
||||
+#define kNumPosBitsMax 4
|
||||
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
|
||||
+
|
||||
+#define kLenNumLowBits 3
|
||||
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
|
||||
+#define kLenNumMidBits 3
|
||||
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
|
||||
+#define kLenNumHighBits 8
|
||||
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
|
||||
+
|
||||
+#define LenChoice 0
|
||||
+#define LenChoice2 (LenChoice + 1)
|
||||
+#define LenLow (LenChoice2 + 1)
|
||||
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
|
||||
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
|
||||
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
|
||||
+
|
||||
+int LzmaLenDecode(CProb *p, CRangeDecoder *rd, int posState)
|
||||
+{
|
||||
+ if(RangeDecoderBitDecode(p + LenChoice, rd) == 0)
|
||||
+ return RangeDecoderBitTreeDecode(p + LenLow +
|
||||
+ (posState << kLenNumLowBits), kLenNumLowBits, rd);
|
||||
+ if(RangeDecoderBitDecode(p + LenChoice2, rd) == 0)
|
||||
+ return kLenNumLowSymbols + RangeDecoderBitTreeDecode(p + LenMid +
|
||||
+ (posState << kLenNumMidBits), kLenNumMidBits, rd);
|
||||
+ return kLenNumLowSymbols + kLenNumMidSymbols +
|
||||
+ RangeDecoderBitTreeDecode(p + LenHigh, kLenNumHighBits, rd);
|
||||
+}
|
||||
+
|
||||
+#define kNumStates 12
|
||||
+
|
||||
+#define kStartPosModelIndex 4
|
||||
+#define kEndPosModelIndex 14
|
||||
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
|
||||
+
|
||||
+#define kNumPosSlotBits 6
|
||||
+#define kNumLenToPosStates 4
|
||||
+
|
||||
+#define kNumAlignBits 4
|
||||
+#define kAlignTableSize (1 << kNumAlignBits)
|
||||
+
|
||||
+#define kMatchMinLen 2
|
||||
+
|
||||
+#define IsMatch 0
|
||||
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
|
||||
+#define IsRepG0 (IsRep + kNumStates)
|
||||
+#define IsRepG1 (IsRepG0 + kNumStates)
|
||||
+#define IsRepG2 (IsRepG1 + kNumStates)
|
||||
+#define IsRep0Long (IsRepG2 + kNumStates)
|
||||
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
|
||||
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
|
||||
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
|
||||
+#define LenCoder (Align + kAlignTableSize)
|
||||
+#define RepLenCoder (LenCoder + kNumLenProbs)
|
||||
+#define Literal (RepLenCoder + kNumLenProbs)
|
||||
+
|
||||
+#if Literal != LZMA_BASE_SIZE
|
||||
+StopCompilingDueBUG
|
||||
+#endif
|
||||
+
|
||||
+#ifdef _LZMA_OUT_READ
|
||||
+
|
||||
+typedef struct _LzmaVarState
|
||||
+{
|
||||
+ CRangeDecoder RangeDecoder;
|
||||
+ Byte *Dictionary;
|
||||
+ UInt32 DictionarySize;
|
||||
+ UInt32 DictionaryPos;
|
||||
+ UInt32 GlobalPos;
|
||||
+ UInt32 Reps[4];
|
||||
+ int lc;
|
||||
+ int lp;
|
||||
+ int pb;
|
||||
+ int State;
|
||||
+ int PreviousIsMatch;
|
||||
+ int RemainLen;
|
||||
+} LzmaVarState;
|
||||
+
|
||||
+int LzmaDecoderInit(
|
||||
+ unsigned char *buffer, UInt32 bufferSize,
|
||||
+ int lc, int lp, int pb,
|
||||
+ unsigned char *dictionary, UInt32 dictionarySize,
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ ILzmaInCallback *inCallback
|
||||
+ #else
|
||||
+ unsigned char *inStream, UInt32 inSize
|
||||
+ #endif
|
||||
+ )
|
||||
+{
|
||||
+ LzmaVarState *vs = (LzmaVarState *)buffer;
|
||||
+ CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
|
||||
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
|
||||
+ UInt32 i;
|
||||
+ if (bufferSize < numProbs * sizeof(CProb) + sizeof(LzmaVarState))
|
||||
+ return LZMA_RESULT_NOT_ENOUGH_MEM;
|
||||
+ vs->Dictionary = dictionary;
|
||||
+ vs->DictionarySize = dictionarySize;
|
||||
+ vs->DictionaryPos = 0;
|
||||
+ vs->GlobalPos = 0;
|
||||
+ vs->Reps[0] = vs->Reps[1] = vs->Reps[2] = vs->Reps[3] = 1;
|
||||
+ vs->lc = lc;
|
||||
+ vs->lp = lp;
|
||||
+ vs->pb = pb;
|
||||
+ vs->State = 0;
|
||||
+ vs->PreviousIsMatch = 0;
|
||||
+ vs->RemainLen = 0;
|
||||
+ dictionary[dictionarySize - 1] = 0;
|
||||
+ for (i = 0; i < numProbs; i++)
|
||||
+ p[i] = kBitModelTotal >> 1;
|
||||
+ RangeDecoderInit(&vs->RangeDecoder,
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ inCallback
|
||||
+ #else
|
||||
+ inStream, inSize
|
||||
+ #endif
|
||||
+ );
|
||||
+ return LZMA_RESULT_OK;
|
||||
+}
|
||||
+
|
||||
+int LzmaDecode(unsigned char *buffer,
|
||||
+ unsigned char *outStream, UInt32 outSize,
|
||||
+ UInt32 *outSizeProcessed)
|
||||
+{
|
||||
+ LzmaVarState *vs = (LzmaVarState *)buffer;
|
||||
+ CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
|
||||
+ CRangeDecoder rd = vs->RangeDecoder;
|
||||
+ int state = vs->State;
|
||||
+ int previousIsMatch = vs->PreviousIsMatch;
|
||||
+ Byte previousByte;
|
||||
+ UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
|
||||
+ UInt32 nowPos = 0;
|
||||
+ UInt32 posStateMask = (1 << (vs->pb)) - 1;
|
||||
+ UInt32 literalPosMask = (1 << (vs->lp)) - 1;
|
||||
+ int lc = vs->lc;
|
||||
+ int len = vs->RemainLen;
|
||||
+ UInt32 globalPos = vs->GlobalPos;
|
||||
+
|
||||
+ Byte *dictionary = vs->Dictionary;
|
||||
+ UInt32 dictionarySize = vs->DictionarySize;
|
||||
+ UInt32 dictionaryPos = vs->DictionaryPos;
|
||||
+
|
||||
+ if (len == -1)
|
||||
+ {
|
||||
+ *outSizeProcessed = 0;
|
||||
+ return LZMA_RESULT_OK;
|
||||
+ }
|
||||
+
|
||||
+ while(len > 0 && nowPos < outSize)
|
||||
+ {
|
||||
+ UInt32 pos = dictionaryPos - rep0;
|
||||
+ if (pos >= dictionarySize)
|
||||
+ pos += dictionarySize;
|
||||
+ outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
|
||||
+ if (++dictionaryPos == dictionarySize)
|
||||
+ dictionaryPos = 0;
|
||||
+ len--;
|
||||
+ }
|
||||
+ if (dictionaryPos == 0)
|
||||
+ previousByte = dictionary[dictionarySize - 1];
|
||||
+ else
|
||||
+ previousByte = dictionary[dictionaryPos - 1];
|
||||
+#else
|
||||
+
|
||||
+int LzmaDecode(
|
||||
+ Byte *buffer, UInt32 bufferSize,
|
||||
+ int lc, int lp, int pb,
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ ILzmaInCallback *inCallback,
|
||||
+ #else
|
||||
+ unsigned char *inStream, UInt32 inSize,
|
||||
+ #endif
|
||||
+ unsigned char *outStream, UInt32 outSize,
|
||||
+ UInt32 *outSizeProcessed)
|
||||
+{
|
||||
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
|
||||
+ CProb *p = (CProb *)buffer;
|
||||
+ CRangeDecoder rd;
|
||||
+ UInt32 i;
|
||||
+ int state = 0;
|
||||
+ int previousIsMatch = 0;
|
||||
+ Byte previousByte = 0;
|
||||
+ UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
|
||||
+ UInt32 nowPos = 0;
|
||||
+ UInt32 posStateMask = (1 << pb) - 1;
|
||||
+ UInt32 literalPosMask = (1 << lp) - 1;
|
||||
+ int len = 0;
|
||||
+ if (bufferSize < numProbs * sizeof(CProb))
|
||||
+ return LZMA_RESULT_NOT_ENOUGH_MEM;
|
||||
+ for (i = 0; i < numProbs; i++)
|
||||
+ p[i] = kBitModelTotal >> 1;
|
||||
+ RangeDecoderInit(&rd,
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ inCallback
|
||||
+ #else
|
||||
+ inStream, inSize
|
||||
+ #endif
|
||||
+ );
|
||||
+#endif
|
||||
+
|
||||
+ *outSizeProcessed = 0;
|
||||
+ while(nowPos < outSize)
|
||||
+ {
|
||||
+ int posState = (int)(
|
||||
+ (nowPos
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ + globalPos
|
||||
+ #endif
|
||||
+ )
|
||||
+ & posStateMask);
|
||||
+ #ifdef _LZMA_IN_CB
|
||||
+ if (rd.Result != LZMA_RESULT_OK)
|
||||
+ return rd.Result;
|
||||
+ #endif
|
||||
+ if (rd.ExtraBytes != 0)
|
||||
+ return LZMA_RESULT_DATA_ERROR;
|
||||
+ if (RangeDecoderBitDecode(p + IsMatch + (state << kNumPosBitsMax) + posState, &rd) == 0)
|
||||
+ {
|
||||
+ CProb *probs = p + Literal + (LZMA_LIT_SIZE *
|
||||
+ (((
|
||||
+ (nowPos
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ + globalPos
|
||||
+ #endif
|
||||
+ )
|
||||
+ & literalPosMask) << lc) + (previousByte >> (8 - lc))));
|
||||
+
|
||||
+ if (state < 4) state = 0;
|
||||
+ else if (state < 10) state -= 3;
|
||||
+ else state -= 6;
|
||||
+ if (previousIsMatch)
|
||||
+ {
|
||||
+ Byte matchByte;
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ UInt32 pos = dictionaryPos - rep0;
|
||||
+ if (pos >= dictionarySize)
|
||||
+ pos += dictionarySize;
|
||||
+ matchByte = dictionary[pos];
|
||||
+ #else
|
||||
+ matchByte = outStream[nowPos - rep0];
|
||||
+ #endif
|
||||
+ previousByte = LzmaLiteralDecodeMatch(probs, &rd, matchByte);
|
||||
+ previousIsMatch = 0;
|
||||
+ }
|
||||
+ else
|
||||
+ previousByte = LzmaLiteralDecode(probs, &rd);
|
||||
+ outStream[nowPos++] = previousByte;
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ dictionary[dictionaryPos] = previousByte;
|
||||
+ if (++dictionaryPos == dictionarySize)
|
||||
+ dictionaryPos = 0;
|
||||
+ #endif
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ previousIsMatch = 1;
|
||||
+ if (RangeDecoderBitDecode(p + IsRep + state, &rd) == 1)
|
||||
+ {
|
||||
+ if (RangeDecoderBitDecode(p + IsRepG0 + state, &rd) == 0)
|
||||
+ {
|
||||
+ if (RangeDecoderBitDecode(p + IsRep0Long + (state << kNumPosBitsMax) + posState, &rd) == 0)
|
||||
+ {
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ UInt32 pos;
|
||||
+ #endif
|
||||
+ if (
|
||||
+ (nowPos
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ + globalPos
|
||||
+ #endif
|
||||
+ )
|
||||
+ == 0)
|
||||
+ return LZMA_RESULT_DATA_ERROR;
|
||||
+ state = state < 7 ? 9 : 11;
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ pos = dictionaryPos - rep0;
|
||||
+ if (pos >= dictionarySize)
|
||||
+ pos += dictionarySize;
|
||||
+ previousByte = dictionary[pos];
|
||||
+ dictionary[dictionaryPos] = previousByte;
|
||||
+ if (++dictionaryPos == dictionarySize)
|
||||
+ dictionaryPos = 0;
|
||||
+ #else
|
||||
+ previousByte = outStream[nowPos - rep0];
|
||||
+ #endif
|
||||
+ outStream[nowPos++] = previousByte;
|
||||
+ continue;
|
||||
+ }
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ UInt32 distance;
|
||||
+ if(RangeDecoderBitDecode(p + IsRepG1 + state, &rd) == 0)
|
||||
+ distance = rep1;
|
||||
+ else
|
||||
+ {
|
||||
+ if(RangeDecoderBitDecode(p + IsRepG2 + state, &rd) == 0)
|
||||
+ distance = rep2;
|
||||
+ else
|
||||
+ {
|
||||
+ distance = rep3;
|
||||
+ rep3 = rep2;
|
||||
+ }
|
||||
+ rep2 = rep1;
|
||||
+ }
|
||||
+ rep1 = rep0;
|
||||
+ rep0 = distance;
|
||||
+ }
|
||||
+ len = LzmaLenDecode(p + RepLenCoder, &rd, posState);
|
||||
+ state = state < 7 ? 8 : 11;
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ int posSlot;
|
||||
+ rep3 = rep2;
|
||||
+ rep2 = rep1;
|
||||
+ rep1 = rep0;
|
||||
+ state = state < 7 ? 7 : 10;
|
||||
+ len = LzmaLenDecode(p + LenCoder, &rd, posState);
|
||||
+ posSlot = RangeDecoderBitTreeDecode(p + PosSlot +
|
||||
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
|
||||
+ kNumPosSlotBits), kNumPosSlotBits, &rd);
|
||||
+ if (posSlot >= kStartPosModelIndex)
|
||||
+ {
|
||||
+ int numDirectBits = ((posSlot >> 1) - 1);
|
||||
+ rep0 = ((2 | ((UInt32)posSlot & 1)) << numDirectBits);
|
||||
+ if (posSlot < kEndPosModelIndex)
|
||||
+ {
|
||||
+ rep0 += RangeDecoderReverseBitTreeDecode(
|
||||
+ p + SpecPos + rep0 - posSlot - 1, numDirectBits, &rd);
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ rep0 += RangeDecoderDecodeDirectBits(&rd,
|
||||
+ numDirectBits - kNumAlignBits) << kNumAlignBits;
|
||||
+ rep0 += RangeDecoderReverseBitTreeDecode(p + Align, kNumAlignBits, &rd);
|
||||
+ }
|
||||
+ }
|
||||
+ else
|
||||
+ rep0 = posSlot;
|
||||
+ rep0++;
|
||||
+ }
|
||||
+ if (rep0 == (UInt32)(0))
|
||||
+ {
|
||||
+ /* it's for stream version */
|
||||
+ len = -1;
|
||||
+ break;
|
||||
+ }
|
||||
+ if (rep0 > nowPos
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ + globalPos
|
||||
+ #endif
|
||||
+ )
|
||||
+ {
|
||||
+ return LZMA_RESULT_DATA_ERROR;
|
||||
+ }
|
||||
+ len += kMatchMinLen;
|
||||
+ do
|
||||
+ {
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ UInt32 pos = dictionaryPos - rep0;
|
||||
+ if (pos >= dictionarySize)
|
||||
+ pos += dictionarySize;
|
||||
+ previousByte = dictionary[pos];
|
||||
+ dictionary[dictionaryPos] = previousByte;
|
||||
+ if (++dictionaryPos == dictionarySize)
|
||||
+ dictionaryPos = 0;
|
||||
+ #else
|
||||
+ previousByte = outStream[nowPos - rep0];
|
||||
+ #endif
|
||||
+ outStream[nowPos++] = previousByte;
|
||||
+ len--;
|
||||
+ }
|
||||
+ while(len > 0 && nowPos < outSize);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ #ifdef _LZMA_OUT_READ
|
||||
+ vs->RangeDecoder = rd;
|
||||
+ vs->DictionaryPos = dictionaryPos;
|
||||
+ vs->GlobalPos = globalPos + nowPos;
|
||||
+ vs->Reps[0] = rep0;
|
||||
+ vs->Reps[1] = rep1;
|
||||
+ vs->Reps[2] = rep2;
|
||||
+ vs->Reps[3] = rep3;
|
||||
+ vs->State = state;
|
||||
+ vs->PreviousIsMatch = previousIsMatch;
|
||||
+ vs->RemainLen = len;
|
||||
+ #endif
|
||||
+
|
||||
+ *outSizeProcessed = nowPos;
|
||||
+ return LZMA_RESULT_OK;
|
||||
+}
|
||||
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-$(CONFIG_SMP) += cpumask.o

lib-y += kobject.o kref.o kobject_uevent.o klist.o

-obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o
+obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o LzmaDecode.o

ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -56,6 +56,7 @@ obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o

obj-$(CONFIG_SWIOTLB) += swiotlb.o
+
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o

lib-$(CONFIG_GENERIC_BUG) += bug.o
@@ -1,107 +0,0 @@
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -4,6 +4,9 @@
* Copyright (c) 2002, 2003, 2004, 2005, 2006
* Phillip Lougher <phillip@lougher.org.uk>
*
+ * LZMA decompressor support added by Oleg I. Vdovikin
+ * Copyright (c) 2005 Oleg I.Vdovikin <oleg@cs.msu.su>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
@@ -21,6 +24,7 @@
* inode.c
*/

+#define SQUASHFS_LZMA
#include <linux/types.h>
#include <linux/squashfs_fs.h>
#include <linux/module.h>
@@ -44,6 +48,19 @@

#include "squashfs.h"

+#ifdef SQUASHFS_LZMA
+#include <linux/LzmaDecode.h>
+
+/* default LZMA settings, should be in sync with mksquashfs */
+#define LZMA_LC 3
+#define LZMA_LP 0
+#define LZMA_PB 2
+
+#define LZMA_WORKSPACE_SIZE ((LZMA_BASE_SIZE + \
+ (LZMA_LIT_SIZE << (LZMA_LC + LZMA_LP))) * sizeof(CProb))
+
+#endif
+
static void squashfs_put_super(struct super_block *);
static int squashfs_statfs(struct dentry *, struct kstatfs *);
static int squashfs_symlink_readpage(struct file *file, struct page *page);
@@ -64,7 +81,11 @@ static int squashfs_get_sb(struct file_s
const char *, void *, struct vfsmount *);


+#ifdef SQUASHFS_LZMA
+static unsigned char lzma_workspace[LZMA_WORKSPACE_SIZE];
+#else
static z_stream stream;
+#endif

static struct file_system_type squashfs_fs_type = {
.owner = THIS_MODULE,
@@ -249,6 +270,15 @@ SQSH_EXTERN unsigned int squashfs_read_d
if (compressed) {
int zlib_err;

+#ifdef SQUASHFS_LZMA
+ if ((zlib_err = LzmaDecode(lzma_workspace,
+ LZMA_WORKSPACE_SIZE, LZMA_LC, LZMA_LP, LZMA_PB,
+ c_buffer, c_byte, buffer, msblk->read_size, &bytes)) != LZMA_RESULT_OK)
+ {
+ ERROR("lzma returned unexpected result 0x%x\n", zlib_err);
+ bytes = 0;
+ }
+#else
stream.next_in = c_buffer;
stream.avail_in = c_byte;
stream.next_out = buffer;
@@ -263,7 +293,7 @@ SQSH_EXTERN unsigned int squashfs_read_d
bytes = 0;
} else
bytes = stream.total_out;
-
+#endif
up(&msblk->read_data_mutex);
}

@@ -2045,15 +2075,19 @@ static int __init init_squashfs_fs(void)
printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) "
"Phillip Lougher\n");

+#ifndef SQUASHFS_LZMA
if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
ERROR("Failed to allocate zlib workspace\n");
destroy_inodecache();
err = -ENOMEM;
goto out;
}
+#endif

if ((err = register_filesystem(&squashfs_fs_type))) {
+#ifndef SQUASHFS_LZMA
vfree(stream.workspace);
+#endif
destroy_inodecache();
}

@@ -2064,7 +2098,9 @@ out:

static void __exit exit_squashfs_fs(void)
{
+#ifndef SQUASHFS_LZMA
vfree(stream.workspace);
+#endif
unregister_filesystem(&squashfs_fs_type);
destroy_inodecache();
}
@@ -1,12 +0,0 @@
--- a/Makefile
+++ b/Makefile
@@ -507,6 +507,9 @@ CFLAGS += $(call cc-option, -fn
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
CHECKFLAGS += $(NOSTDINC_FLAGS)

+# improve gcc optimization
+CFLAGS += $(call cc-option,-funit-at-a-time,)
+
# warn about C99 declaration after statement
CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)

@@ -1,11 +0,0 @@
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -188,7 +188,7 @@ extern __u64 __xchg_u64_unsupported_on_3
if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);

-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static __always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:
@@ -1,36 +0,0 @@
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -51,6 +51,7 @@
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6
+#define MANUFACTURER_SAMSUNG 0x00ec

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -294,12 +295,19 @@ struct mtd_info *cfi_cmdset_0002(struct

if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
- printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
- "version %c.%c.\n", extp->MajorVersion,
- extp->MinorVersion);
- kfree(extp);
- kfree(mtd);
- return NULL;
+ if (cfi->mfr == MANUFACTURER_SAMSUNG &&
+ (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+ printk(KERN_NOTICE " Newer Samsung flash detected, "
+ "should be compatibile with Amd/Fujitsu.\n");
+ }
+ else {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
}

/* Install our own private info structure */
@@ -1,169 +0,0 @@
|
|||
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
|
||||
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
|
||||
@@ -919,7 +919,7 @@ static void __xipram xip_enable(struct m
|
||||
|
||||
static int __xipram xip_wait_for_operation(
|
||||
struct map_info *map, struct flchip *chip,
|
||||
- unsigned long adr, unsigned int chip_op_time )
|
||||
+ unsigned long adr, int *chip_op_time )
|
||||
{
|
||||
struct cfi_private *cfi = map->fldrv_priv;
|
||||
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
|
||||
@@ -928,7 +928,7 @@ static int __xipram xip_wait_for_operati
|
||||
flstate_t oldstate, newstate;
|
||||
|
||||
start = xip_currtime();
|
||||
- usec = chip_op_time * 8;
|
||||
+ usec = *chip_op_time * 8;
|
||||
if (usec == 0)
|
||||
usec = 500000;
|
||||
done = 0;
|
||||
@@ -1038,8 +1038,8 @@ static int __xipram xip_wait_for_operati
|
||||
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
|
||||
INVALIDATE_CACHED_RANGE(map, from, size)
|
||||
|
||||
-#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
|
||||
- xip_wait_for_operation(map, chip, cmd_adr, usec)
|
||||
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
|
||||
+ xip_wait_for_operation(map, chip, cmd_adr, p_usec)
|
||||
|
||||
#else
|
||||
|
||||
@@ -1051,65 +1051,65 @@ static int __xipram xip_wait_for_operati
|
||||
static int inval_cache_and_wait_for_operation(
|
||||
struct map_info *map, struct flchip *chip,
|
||||
unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
|
||||
- unsigned int chip_op_time)
|
||||
+ int *chip_op_time )
|
||||
{
|
||||
struct cfi_private *cfi = map->fldrv_priv;
|
||||
map_word status, status_OK = CMD(0x80);
|
||||
- int chip_state = chip->state;
|
||||
- unsigned int timeo, sleep_time;
|
||||
+ int z, chip_state = chip->state;
|
||||
+ unsigned long timeo;
|
||||
|
||||
spin_unlock(chip->mutex);
|
||||
if (inval_len)
|
||||
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
|
||||
+ if (*chip_op_time)
|
||||
+ cfi_udelay(*chip_op_time);
|
||||
spin_lock(chip->mutex);
|
||||
|
||||
- /* set our timeout to 8 times the expected delay */
|
||||
- timeo = chip_op_time * 8;
|
||||
- if (!timeo)
|
||||
- timeo = 500000;
|
||||
- sleep_time = chip_op_time / 2;
|
||||
+ timeo = *chip_op_time * 8 * HZ / 1000000;
|
||||
+ if (timeo < HZ/2)
|
||||
+ timeo = HZ/2;
|
||||
+ timeo += jiffies;
|
||||
|
||||
+ z = 0;
|
||||
for (;;) {
|
||||
+ if (chip->state != chip_state) {
|
||||
+ /* Someone's suspended the operation: sleep */
|
||||
+ DECLARE_WAITQUEUE(wait, current);
|
||||
+
|
||||
+ set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
+ add_wait_queue(&chip->wq, &wait);
|
||||
+ spin_unlock(chip->mutex);
|
||||
+ schedule();
|
||||
+ remove_wait_queue(&chip->wq, &wait);
|
||||
+ timeo = jiffies + (HZ / 2); /* FIXME */
|
||||
+ spin_lock(chip->mutex);
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
status = map_read(map, cmd_adr);
|
||||
if (map_word_andequal(map, status, status_OK, status_OK))
|
||||
break;
|
||||
|
||||
- if (!timeo) {
|
||||
+ /* OK Still waiting */
|
||||
+ if (time_after(jiffies, timeo)) {
|
||||
map_write(map, CMD(0x70), cmd_adr);
|
||||
chip->state = FL_STATUS;
|
||||
return -ETIME;
|
||||
}
|
||||
|
||||
- /* OK Still waiting. Drop the lock, wait a while and retry. */
|
||||
+ /* Latency issues. Drop the lock, wait a while and retry */
|
||||
+ z++;
|
||||
spin_unlock(chip->mutex);
|
||||
- if (sleep_time >= 1000000/HZ) {
|
||||
- /*
|
||||
- * Half of the normal delay still remaining
|
||||
- * can be performed with a sleeping delay instead
|
||||
- * of busy waiting.
|
||||
- */
|
||||
- msleep(sleep_time/1000);
|
||||
- timeo -= sleep_time;
|
||||
- sleep_time = 1000000/HZ;
|
||||
- } else {
|
||||
- udelay(1);
|
||||
- cond_resched();
|
||||
- timeo--;
|
||||
- }
|
||||
+ cfi_udelay(1);
|
||||
spin_lock(chip->mutex);
|
||||
-
|
||||
- while (chip->state != chip_state) {
|
||||
- /* Someone's suspended the operation: sleep */
|
||||
- DECLARE_WAITQUEUE(wait, current);
|
||||
- set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
- add_wait_queue(&chip->wq, &wait);
|
||||
- spin_unlock(chip->mutex);
|
||||
- schedule();
|
||||
- remove_wait_queue(&chip->wq, &wait);
|
||||
- spin_lock(chip->mutex);
|
||||
- }
|
||||
}
|
||||
|
||||
+ if (!z) {
|
||||
+ if (!--(*chip_op_time))
|
||||
+ *chip_op_time = 1;
|
||||
+ } else if (z > 1)
|
||||
+ ++(*chip_op_time);
|
||||
+
|
||||
/* Done and happy. */
|
||||
chip->state = FL_STATUS;
|
||||
return 0;
|
||||
@@ -1118,7 +1118,8 @@ static int inval_cache_and_wait_for_oper
|
||||
#endif
|
||||
|
||||
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
|
||||
- INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
|
||||
+ ({ int __udelay = (udelay); \
|
||||
+ INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
|
||||
|
||||
|
||||
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
|
||||
@@ -1342,7 +1343,7 @@ static int __xipram do_write_oneword(str
|
||||
|
||||
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
|
||||
adr, map_bankwidth(map),
|
||||
- chip->word_write_time);
|
||||
+ &chip->word_write_time);
|
||||
if (ret) {
|
||||
xip_enable(map, chip, adr);
|
||||
printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
|
||||
@@ -1579,7 +1580,7 @@ static int __xipram do_write_buffer(stru
|
||||
|
||||
ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
|
||||
adr, len,
|
||||
- chip->buffer_write_time);
|
||||
+ &chip->buffer_write_time);
|
||||
if (ret) {
|
||||
map_write(map, CMD(0x70), cmd_adr);
|
||||
chip->state = FL_STATUS;
|
||||
@@ -1714,7 +1715,7 @@ static int __xipram do_erase_oneblock(st
|
||||
|
||||
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
|
||||
adr, len,
|
||||
- chip->erase_time);
|
||||
+ &chip->erase_time);
|
||||
if (ret) {
|
||||
map_write(map, CMD(0x70), adr);
|
||||
chip->state = FL_STATUS;
|
|
@@ -1,19 +0,0 @@
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -4,4 +4,3 @@

obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += inode.o
-squashfs-y += squashfs2_0.o
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -24,6 +24,9 @@
#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
#undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
#endif
+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+#undef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+#endif

#ifdef SQUASHFS_TRACE
#define TRACE(s, args...) printk(KERN_NOTICE "SQUASHFS: "s, ## args)
@@ -1,19 +0,0 @@
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -129,11 +129,15 @@
#endif
.endm

+
+ j kernel_entry
+ nop
+
/*
* Reserved space for exception handlers.
* Necessary for machines which link their kernels at KSEG0.
*/
- .fill 0x400
+ .align 10

EXPORT(stext) # used for profiling
EXPORT(_stext)
@@ -1,18 +0,0 @@
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -887,7 +887,6 @@ static __init void build_tlb_write_entry
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
- case CPU_4KC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
@@ -915,6 +914,7 @@ static __init void build_tlb_write_entry
tlbw(p);
break;

+ case CPU_4KC:
case CPU_4KEC:
case CPU_24K:
case CPU_34K:
@@ -1,32 +0,0 @@
--- a/arch/mips/defconfig
+++ b/arch/mips/defconfig
@@ -69,6 +69,7 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_GPIO=n
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
CONFIG_ARC=y
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -869,6 +869,10 @@ config GENERIC_TIME
bool
default y

+config GENERIC_GPIO
+ bool
+ default n
+
config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
default y
--- /dev/null
+++ b/include/asm-mips/gpio.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_MIPS_GPIO_H
+#define _ASM_MIPS_GPIO_H
+
+#include <gpio.h>
+
+#endif /* _ASM_MIPS_GPIO_H */
@@ -1,150 +0,0 @@
MIPS: allow disabling the kernel FPU emulator

This patch allows turning off the in-kernel Algorithmics
FPU emulator support, which allows one to save a couple of
precious blocks on an embedded system.

Signed-off-by: Florian Fainelli <florian@openwrt.org>

--- a/arch/mips/Kconfig
|
||||
+++ b/arch/mips/Kconfig
|
||||
@@ -938,6 +938,17 @@ config LIMITED_DMA
|
||||
config MIPS_BONITO64
|
||||
bool
|
||||
|
||||
+config MIPS_FPU_EMU
|
||||
+ bool "Enable FPU emulation"
|
||||
+ default y
|
||||
+ help
|
||||
+ This option allows building a kernel with or without the Algorithmics
|
||||
+ FPU emulator enabled. Turning off this option results in a kernel which
|
||||
+ does not catch floating operations exceptions. Make sure that your toolchain
|
||||
+ is configured to enable software floating point emulation in that case.
|
||||
+
|
||||
+ If unsure say Y here.
|
||||
+
|
||||
config MIPS_MSC
|
||||
bool
|
||||
|
||||
--- a/arch/mips/math-emu/Makefile
|
||||
+++ b/arch/mips/math-emu/Makefile
|
||||
@@ -1,11 +1,12 @@
|
||||
#
|
||||
# Makefile for the Linux/MIPS kernel FPU emulation.
|
||||
#
|
||||
+obj-y := kernel_linkage.o dsemul.o cp1emu.o
|
||||
|
||||
-obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
|
||||
+obj-$(CONFIG_MIPS_FPU_EMU) += ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
|
||||
ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \
|
||||
dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \
|
||||
dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \
|
||||
sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
|
||||
sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
|
||||
- dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
|
||||
+ dp_sqrt.o sp_sqrt.o
|
||||
--- a/arch/mips/math-emu/cp1emu.c
|
||||
+++ b/arch/mips/math-emu/cp1emu.c
|
||||
@@ -56,6 +56,12 @@
|
||||
#endif
|
||||
#define __mips 4
|
||||
|
||||
+/* Further private data for which no space exists in mips_fpu_struct */
|
||||
+
|
||||
+struct mips_fpu_emulator_stats fpuemustats;
|
||||
+
|
||||
+#ifdef CONFIG_MIPS_FPU_EMU
|
||||
+
|
||||
/* Function which emulates a floating point instruction. */
|
||||
|
||||
static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
|
||||
@@ -66,10 +72,6 @@ static int fpux_emu(struct pt_regs *,
|
||||
struct mips_fpu_struct *, mips_instruction);
|
||||
#endif
|
||||
|
||||
-/* Further private data for which no space exists in mips_fpu_struct */
|
||||
-
|
||||
-struct mips_fpu_emulator_stats fpuemustats;
|
||||
-
|
||||
/* Control registers */
|
||||
|
||||
#define FPCREG_RID 0 /* $0 = revision id */
|
||||
@@ -1277,3 +1279,10 @@ int fpu_emulator_cop1Handler(struct pt_r
|
||||
|
||||
return sig;
|
||||
}
|
||||
+#else
|
||||
+int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
|
||||
+ int has_fpu)
|
||||
+{
|
||||
+ return 0;
|
||||
+}
|
||||
+#endif /* CONFIG_MIPS_FPU_EMU */
|
||||
--- a/arch/mips/math-emu/dsemul.c
|
||||
+++ b/arch/mips/math-emu/dsemul.c
|
||||
@@ -112,6 +112,7 @@ int mips_dsemul(struct pt_regs *regs, mi
|
||||
return SIGILL; /* force out of emulation loop */
|
||||
}
|
||||
|
||||
+#ifdef CONFIG_MIPS_FPU_EMU
|
||||
int do_dsemulret(struct pt_regs *xcp)
|
||||
{
|
||||
struct emuframe *fr;
|
||||
@@ -167,3 +168,9 @@ int do_dsemulret(struct pt_regs *xcp)
|
||||
|
||||
return 1;
|
||||
}
|
||||
+#else
|
||||
+int do_dsemulret(struct pt_regs *xcp)
|
||||
+{
|
||||
+ return 0;
|
||||
+}
|
||||
+#endif /* CONFIG_MIPS_FPU_EMU */
|
||||
--- a/arch/mips/math-emu/kernel_linkage.c
|
||||
+++ b/arch/mips/math-emu/kernel_linkage.c
|
||||
@@ -28,6 +28,7 @@
|
||||
|
||||
#define SIGNALLING_NAN 0x7ff800007ff80000LL
|
||||
|
||||
+#ifdef CONFIG_MIPS_FPU_EMU
|
||||
void fpu_emulator_init_fpu(void)
|
||||
{
|
||||
static int first = 1;
|
||||
@@ -111,4 +112,36 @@ int fpu_emulator_restore_context32(struc
|
||||
|
||||
return err;
|
||||
}
|
||||
-#endif
|
||||
+#endif /* CONFIG_64BIT */
|
||||
+#else
|
||||
+
|
||||
+void fpu_emulator_init_fpu(void)
|
||||
+{
|
||||
+ printk(KERN_INFO "FPU emulator disabled, make sure your toolchain"
|
||||
+ "was compiled with software floating point support (soft-float)\n");
|
||||
+ return;
|
||||
+}
|
||||
+
|
||||
+int fpu_emulator_save_context(struct sigcontext __user *sc)
|
||||
+{
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+int fpu_emulator_restore_context(struct sigcontext __user *sc)
|
||||
+{
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+#ifdef CONFIG_64BIT
|
||||
+int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
|
||||
+{
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
|
||||
+{
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+#endif /* CONFIG_64BIT */
|
||||
+#endif /* CONFIG_MIPS_FPU_EMU */
|
|
@@ -1,488 +0,0 @@
|
|||
--- a/drivers/mtd/Kconfig
|
||||
+++ b/drivers/mtd/Kconfig
|
||||
@@ -49,6 +49,16 @@ config MTD_PARTITIONS
|
||||
devices. Partitioning on NFTL 'devices' is a different - that's the
|
||||
'normal' form of partitioning used on a block device.
|
||||
|
||||
+config MTD_ROOTFS_ROOT_DEV
|
||||
+ bool "Automatically set 'rootfs' partition to be root filesystem"
|
||||
+ depends on MTD_PARTITIONS
|
||||
+ default y
|
||||
+
|
||||
+config MTD_ROOTFS_SPLIT
|
||||
+ bool "Automatically split 'rootfs' partition for squashfs"
|
||||
+ depends on MTD_PARTITIONS
|
||||
+ default y
|
||||
+
|
||||
config MTD_REDBOOT_PARTS
|
||||
tristate "RedBoot partition table parsing"
|
||||
depends on MTD_PARTITIONS
|
||||
--- a/drivers/mtd/mtdpart.c
|
||||
+++ b/drivers/mtd/mtdpart.c
|
||||
@@ -20,6 +20,8 @@
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/mtd/compatmac.h>
|
||||
+#include <linux/squashfs_fs.h>
|
||||
+#include <linux/root_dev.h>
|
||||
|
||||
/* Our partition linked list */
|
||||
static LIST_HEAD(mtd_partitions);
|
||||
@@ -308,6 +310,266 @@ int del_mtd_partitions(struct mtd_info *
|
||||
return 0;
|
||||
}
|
||||
|
||||
+static u_int32_t cur_offset = 0;
|
||||
+static int add_one_partition(struct mtd_info *master, const struct mtd_partition *part,
|
||||
+ int i, struct mtd_part **slp)
|
||||
+{
|
||||
+ struct mtd_part *slave;
|
||||
+
|
||||
+ /* allocate the partition structure */
|
||||
+ slave = kzalloc (sizeof(*slave), GFP_KERNEL);
|
||||
+ if (!slave) {
|
||||
+ printk ("memory allocation error while creating partitions for \"%s\"\n",
|
||||
+ master->name);
|
||||
+ del_mtd_partitions(master);
|
||||
+ return -ENOMEM;
|
||||
+ }
|
||||
+ list_add(&slave->list, &mtd_partitions);
|
||||
+
|
||||
+ /* set up the MTD object for this partition */
|
||||
+ slave->mtd.type = master->type;
|
||||
+ slave->mtd.flags = master->flags & ~part->mask_flags;
|
||||
+ slave->mtd.size = part->size;
|
||||
+ slave->mtd.writesize = master->writesize;
|
||||
+ slave->mtd.oobsize = master->oobsize;
|
||||
+ slave->mtd.oobavail = master->oobavail;
|
||||
+ slave->mtd.subpage_sft = master->subpage_sft;
|
||||
+
|
||||
+ slave->mtd.name = part->name;
|
||||
+ slave->mtd.bank_size = master->bank_size;
|
||||
+ slave->mtd.owner = master->owner;
|
||||
+
|
||||
+ slave->mtd.read = part_read;
|
||||
+ slave->mtd.write = part_write;
|
||||
+
|
||||
+ if(master->point && master->unpoint){
|
||||
+ slave->mtd.point = part_point;
|
||||
+ slave->mtd.unpoint = part_unpoint;
|
||||
+ }
|
||||
+
|
||||
+ if (master->read_oob)
|
||||
+ slave->mtd.read_oob = part_read_oob;
|
||||
+ if (master->write_oob)
|
||||
+ slave->mtd.write_oob = part_write_oob;
|
||||
+ if(master->read_user_prot_reg)
|
||||
+ slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
|
||||
+ if(master->read_fact_prot_reg)
|
||||
+ slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
|
||||
+ if(master->write_user_prot_reg)
|
||||
+ slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
|
||||
+ if(master->lock_user_prot_reg)
|
||||
+ slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
|
||||
+ if(master->get_user_prot_info)
|
||||
+ slave->mtd.get_user_prot_info = part_get_user_prot_info;
|
||||
+ if(master->get_fact_prot_info)
|
||||
+ slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
|
||||
+ if (master->sync)
|
||||
+ slave->mtd.sync = part_sync;
|
||||
+ if (!i && master->suspend && master->resume) {
|
||||
+ slave->mtd.suspend = part_suspend;
|
||||
+ slave->mtd.resume = part_resume;
|
||||
+ }
|
||||
+ if (master->writev)
|
||||
+ slave->mtd.writev = part_writev;
|
||||
+ if (master->lock)
|
||||
+ slave->mtd.lock = part_lock;
|
||||
+ if (master->unlock)
|
||||
+ slave->mtd.unlock = part_unlock;
|
||||
+ if (master->block_isbad)
|
||||
+ slave->mtd.block_isbad = part_block_isbad;
|
||||
+ if (master->block_markbad)
|
||||
+ slave->mtd.block_markbad = part_block_markbad;
|
||||
+ slave->mtd.erase = part_erase;
|
||||
+ slave->master = master;
|
||||
+ slave->offset = part->offset;
|
||||
+ slave->index = i;
|
||||
+
|
||||
+ if (slave->offset == MTDPART_OFS_APPEND)
|
||||
+ slave->offset = cur_offset;
|
||||
+ if (slave->offset == MTDPART_OFS_NXTBLK) {
|
||||
+ slave->offset = cur_offset;
|
||||
+ if ((cur_offset % master->erasesize) != 0) {
|
||||
+ /* Round up to next erasesize */
|
||||
+ slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
|
||||
+ printk(KERN_NOTICE "Moving partition %d: "
|
||||
+ "0x%08x -> 0x%08x\n", i,
|
||||
+ cur_offset, slave->offset);
|
||||
+ }
|
||||
+ }
|
||||
+ if (slave->mtd.size == MTDPART_SIZ_FULL)
|
||||
+ slave->mtd.size = master->size - slave->offset;
|
||||
+ cur_offset = slave->offset + slave->mtd.size;
|
||||
+
|
||||
+ printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
|
||||
+ slave->offset + slave->mtd.size, slave->mtd.name);
|
||||
+
|
||||
+ /* let's do some sanity checks */
|
||||
+ if (slave->offset >= master->size) {
|
||||
+ /* let's register it anyway to preserve ordering */
|
||||
+ slave->offset = 0;
|
||||
+ slave->mtd.size = 0;
|
||||
+ printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
|
||||
+ part->name);
|
||||
+ }
|
||||
+ if (slave->offset + slave->mtd.size > master->size) {
|
||||
+ slave->mtd.size = master->size - slave->offset;
|
||||
+ printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
|
||||
+ part->name, master->name, slave->mtd.size);
|
||||
+ }
|
||||
+ if (master->numeraseregions>1) {
|
||||
+ /* Deal with variable erase size stuff */
|
||||
+ int i;
|
||||
+ struct mtd_erase_region_info *regions = master->eraseregions;
|
||||
+
|
||||
+ /* Find the first erase regions which is part of this partition. */
|
||||
+ for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
|
||||
+ ;
|
||||
+
|
||||
+ for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
|
||||
+ if (slave->mtd.erasesize < regions[i].erasesize) {
|
||||
+ slave->mtd.erasesize = regions[i].erasesize;
|
||||
+ }
|
||||
+ }
|
||||
+ } else {
|
||||
+ /* Single erase size */
|
||||
+ slave->mtd.erasesize = master->erasesize;
|
||||
+ }
|
||||
+
|
||||
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
|
||||
+ (slave->offset % slave->mtd.erasesize)) {
|
||||
+ /* Doesn't start on a boundary of major erase size */
|
||||
+ /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
|
||||
+ slave->mtd.flags &= ~MTD_WRITEABLE;
|
||||
+ printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
|
||||
+ part->name);
|
||||
+ }
|
||||
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
|
||||
+ (slave->mtd.size % slave->mtd.erasesize)) {
|
||||
+ slave->mtd.flags &= ~MTD_WRITEABLE;
|
||||
+ printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
|
||||
+ part->name);
|
||||
+ }
|
||||
+
|
||||
+ slave->mtd.ecclayout = master->ecclayout;
|
||||
+ if (master->block_isbad) {
|
||||
+ uint32_t offs = 0;
|
||||
+
|
||||
+ while(offs < slave->mtd.size) {
|
||||
+ if (master->block_isbad(master,
|
||||
+ offs + slave->offset))
|
||||
+ slave->mtd.ecc_stats.badblocks++;
|
||||
+ offs += slave->mtd.erasesize;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if(part->mtdp)
|
||||
+ { /* store the object pointer (caller may or may not register it */
|
||||
+ *part->mtdp = &slave->mtd;
|
||||
+ slave->registered = 0;
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ /* register our partition */
|
||||
+ add_mtd_device(&slave->mtd);
|
||||
+ slave->registered = 1;
|
||||
+ }
|
||||
+
|
||||
+ if (slp)
|
||||
+ *slp = slave;
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
|
||||
+#define ROOTFS_SPLIT_NAME "rootfs_data"
|
||||
+static int split_squashfs(struct mtd_info *master, struct mtd_partition *old,
|
||||
+ struct mtd_partition **new)
|
||||
+{
|
||||
+ struct mtd_partition *part = NULL;
|
||||
+ int len;
|
||||
+ char buf[512];
|
||||
+ struct squashfs_super_block *sb = (struct squashfs_super_block *) buf;
|
||||
+ int ret;
|
||||
+
|
||||
+ ret = master->read(master, old->offset, sizeof(*sb), &len, buf);
|
||||
+ if (ret) {
|
||||
+ printk(KERN_ALERT "split_squashfs: error occured while reading "
|
||||
+ "from \"%s\"\n", master->name);
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
+ if (len != sizeof(*sb)) {
|
||||
+ printk(KERN_ALERT "split_squashfs: unable to read superblock "
|
||||
+ "from \"%s\"\n", master->name);
|
||||
+ ret=-1;
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
+ if (*((u32 *) buf) != SQUASHFS_MAGIC) {
|
||||
+ printk(KERN_ALERT "split_squasfs: no squashfs found in \"%s\"\n",
|
||||
+ master->name);
|
||||
+ ret=0;
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
+ if (sb->bytes_used <= 0) {
|
||||
+ printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
|
||||
+ master->name);
|
||||
+ ret=0;
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
+ part = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
|
||||
+ if (part == NULL) {
|
||||
+ printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
|
||||
+ ROOTFS_SPLIT_NAME);
|
||||
+ ret = -ENOMEM;
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
+ memcpy(part, old, sizeof(*part));
|
||||
+ part->name = (unsigned char *)&part[1];
|
||||
+ strcpy(part->name, ROOTFS_SPLIT_NAME);
|
||||
+
|
||||
+ len = (u32) sb->bytes_used;
|
||||
+ len += (part->offset & 0x000fffff);
|
||||
+ len += (master->erasesize - 1);
|
||||
+ len &= ~(master->erasesize - 1);
|
||||
+ len -= (part->offset & 0x000fffff);
|
||||
+ part->offset += len;
|
||||
+ part->size -= len;
|
||||
+
|
||||
+ ret = 0;
|
||||
+
|
||||
+out:
|
||||
+ *new = part;
|
||||
+ return ret;
|
||||
+}
|
||||
+
|
||||
+static int split_rootfs_data(struct mtd_info *master, struct mtd_partition *part,
|
||||
+ int index)
|
||||
+{
|
||||
+ struct mtd_partition *dpart;
|
||||
+ int ret;
|
||||
+
|
||||
+ ret = split_squashfs(master, part, &dpart);
|
||||
+ if (ret)
|
||||
+ return ret;
|
||||
+
|
||||
+ if (dpart == NULL)
|
||||
+ return 1;
|
||||
+
|
||||
+ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%X, len=%X \n",
|
||||
+ ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
|
||||
+
|
||||
+ ret = add_one_partition(master, dpart, index, NULL);
|
||||
+ if (ret)
|
||||
+ kfree(dpart);
|
||||
+
|
||||
+ return ret;
|
||||
+}
|
||||
+#endif /* CONFIG_MTD_ROOTFS_SPLIT */
|
||||
+
|
||||
/*
|
||||
* This function, given a master MTD object and a partition table, creates
|
||||
* and registers slave MTD objects which are bound to the master according to
|
||||
@@ -320,169 +582,31 @@ int add_mtd_partitions(struct mtd_info *
|
||||
int nbparts)
|
||||
{
|
||||
struct mtd_part *slave;
|
||||
- u_int32_t cur_offset = 0;
|
||||
- int i;
|
||||
+ struct mtd_partition *part;
|
||||
+ int i, j, ret = 0;
|
||||
|
||||
printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
|
||||
|
||||
- for (i = 0; i < nbparts; i++) {
|
||||
-
|
||||
- /* allocate the partition structure */
|
||||
- slave = kzalloc (sizeof(*slave), GFP_KERNEL);
|
||||
- if (!slave) {
|
||||
- printk ("memory allocation error while creating partitions for \"%s\"\n",
|
||||
- master->name);
|
||||
- del_mtd_partitions(master);
|
||||
- return -ENOMEM;
|
||||
- }
|
||||
- list_add(&slave->list, &mtd_partitions);
|
||||
-
|
||||
- /* set up the MTD object for this partition */
|
||||
- slave->mtd.type = master->type;
|
||||
- slave->mtd.flags = master->flags & ~parts[i].mask_flags;
|
||||
- slave->mtd.size = parts[i].size;
|
||||
- slave->mtd.writesize = master->writesize;
|
||||
- slave->mtd.oobsize = master->oobsize;
|
||||
- slave->mtd.oobavail = master->oobavail;
|
||||
- slave->mtd.subpage_sft = master->subpage_sft;
|
||||
-
|
||||
- slave->mtd.name = parts[i].name;
|
||||
- slave->mtd.bank_size = master->bank_size;
|
||||
- slave->mtd.owner = master->owner;
|
||||
-
|
||||
- slave->mtd.read = part_read;
|
||||
- slave->mtd.write = part_write;
|
||||
-
|
||||
- if(master->point && master->unpoint){
|
||||
- slave->mtd.point = part_point;
|
||||
- slave->mtd.unpoint = part_unpoint;
|
||||
- }
|
||||
-
|
||||
- if (master->read_oob)
|
||||
- slave->mtd.read_oob = part_read_oob;
|
||||
- if (master->write_oob)
|
||||
- slave->mtd.write_oob = part_write_oob;
|
||||
- if(master->read_user_prot_reg)
|
||||
- slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
|
||||
- if(master->read_fact_prot_reg)
|
||||
- slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
|
||||
- if(master->write_user_prot_reg)
|
||||
- slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
|
||||
- if(master->lock_user_prot_reg)
|
||||
- slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
|
||||
- if(master->get_user_prot_info)
|
||||
- slave->mtd.get_user_prot_info = part_get_user_prot_info;
|
||||
- if(master->get_fact_prot_info)
|
||||
- slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
|
||||
- if (master->sync)
|
||||
- slave->mtd.sync = part_sync;
|
||||
- if (!i && master->suspend && master->resume) {
|
||||
- slave->mtd.suspend = part_suspend;
|
||||
- slave->mtd.resume = part_resume;
|
||||
- }
|
||||
- if (master->writev)
|
||||
- slave->mtd.writev = part_writev;
|
||||
- if (master->lock)
|
||||
- slave->mtd.lock = part_lock;
|
||||
- if (master->unlock)
|
||||
- slave->mtd.unlock = part_unlock;
|
||||
- if (master->block_isbad)
|
||||
- slave->mtd.block_isbad = part_block_isbad;
|
||||
- if (master->block_markbad)
|
||||
- slave->mtd.block_markbad = part_block_markbad;
|
||||
- slave->mtd.erase = part_erase;
|
||||
- slave->master = master;
|
||||
- slave->offset = parts[i].offset;
|
||||
- slave->index = i;
|
||||
-
|
||||
- if (slave->offset == MTDPART_OFS_APPEND)
|
||||
- slave->offset = cur_offset;
|
||||
- if (slave->offset == MTDPART_OFS_NXTBLK) {
|
||||
- slave->offset = cur_offset;
|
||||
- if ((cur_offset % master->erasesize) != 0) {
|
||||
- /* Round up to next erasesize */
|
||||
- slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
|
||||
- printk(KERN_NOTICE "Moving partition %d: "
|
||||
- "0x%08x -> 0x%08x\n", i,
|
||||
- cur_offset, slave->offset);
|
||||
- }
|
||||
- }
|
||||
- if (slave->mtd.size == MTDPART_SIZ_FULL)
|
||||
- slave->mtd.size = master->size - slave->offset;
|
||||
- cur_offset = slave->offset + slave->mtd.size;
|
||||
-
|
||||
- printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
|
||||
- slave->offset + slave->mtd.size, slave->mtd.name);
|
||||
-
|
||||
- /* let's do some sanity checks */
|
||||
- if (slave->offset >= master->size) {
|
||||
- /* let's register it anyway to preserve ordering */
|
||||
- slave->offset = 0;
|
||||
- slave->mtd.size = 0;
|
||||
- printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
|
||||
- parts[i].name);
|
||||
- }
|
||||
- if (slave->offset + slave->mtd.size > master->size) {
|
||||
- slave->mtd.size = master->size - slave->offset;
|
||||
- printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
|
||||
- parts[i].name, master->name, slave->mtd.size);
|
||||
- }
|
||||
- if (master->numeraseregions>1) {
|
||||
- /* Deal with variable erase size stuff */
|
||||
- int i;
|
||||
- struct mtd_erase_region_info *regions = master->eraseregions;
|
||||
-
|
||||
- /* Find the first erase regions which is part of this partition. */
|
||||
- for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
|
||||
- ;
|
||||
-
|
||||
- for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
|
||||
- if (slave->mtd.erasesize < regions[i].erasesize) {
|
||||
- slave->mtd.erasesize = regions[i].erasesize;
|
||||
- }
|
||||
- }
|
||||
- } else {
|
||||
- /* Single erase size */
|
||||
- slave->mtd.erasesize = master->erasesize;
|
||||
- }
|
||||
-
|
||||
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
|
||||
- (slave->offset % slave->mtd.erasesize)) {
|
||||
- /* Doesn't start on a boundary of major erase size */
|
||||
- /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
|
||||
- slave->mtd.flags &= ~MTD_WRITEABLE;
|
||||
- printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
|
||||
- parts[i].name);
|
||||
- }
|
||||
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
|
||||
- (slave->mtd.size % slave->mtd.erasesize)) {
|
||||
- slave->mtd.flags &= ~MTD_WRITEABLE;
|
||||
- printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
|
||||
- parts[i].name);
|
||||
- }
|
||||
-
|
||||
- slave->mtd.ecclayout = master->ecclayout;
|
||||
- if (master->block_isbad) {
|
||||
- uint32_t offs = 0;
|
||||
-
|
||||
- while(offs < slave->mtd.size) {
|
||||
- if (master->block_isbad(master,
|
||||
- offs + slave->offset))
|
||||
- slave->mtd.ecc_stats.badblocks++;
|
||||
- offs += slave->mtd.erasesize;
|
||||
+ for (i = 0, j = 0; i < nbparts; i++) {
|
||||
+ part = (struct mtd_partition *) &parts[i];
|
||||
+ ret = add_one_partition(master, part, j, &slave);
|
||||
+ if (ret)
|
||||
+ return ret;
|
||||
+ j++;
|
||||
+
|
||||
+ if (strcmp(part->name, "rootfs") == 0 && slave->registered) {
|
||||
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
|
||||
+ if (ROOT_DEV == 0) {
|
||||
+ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
|
||||
+ "set to be root filesystem\n");
|
||||
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
|
||||
}
|
||||
- }
|
||||
-
|
||||
- if(parts[i].mtdp)
|
||||
- { /* store the object pointer (caller may or may not register it */
|
||||
- *parts[i].mtdp = &slave->mtd;
|
||||
- slave->registered = 0;
|
||||
- }
|
||||
- else
|
||||
- {
|
||||
- /* register our partition */
|
||||
- add_mtd_device(&slave->mtd);
|
||||
- slave->registered = 1;
|
||||
+#endif
|
||||
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
|
||||
+ ret = split_rootfs_data(master, part, j);
|
||||
+ if (ret == 0)
|
||||
+ j++;
|
||||
+#endif
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,112 +0,0 @@
|
|||
--- a/drivers/mtd/devices/block2mtd.c
|
||||
+++ b/drivers/mtd/devices/block2mtd.c
|
||||
@@ -16,6 +16,7 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
+#include <linux/mtd/partitions.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/mount.h>
|
||||
@@ -288,10 +289,11 @@ static void block2mtd_free_device(struct
|
||||
|
||||
|
||||
/* FIXME: ensure that mtd->size % erase_size == 0 */
|
||||
-static struct block2mtd_dev *add_device(char *devname, int erase_size)
|
||||
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
|
||||
{
|
||||
struct block_device *bdev;
|
||||
struct block2mtd_dev *dev;
|
||||
+ struct mtd_partition *part;
|
||||
|
||||
if (!devname)
|
||||
return NULL;
|
||||
@@ -330,14 +332,18 @@ static struct block2mtd_dev *add_device(
|
||||
|
||||
/* Setup the MTD structure */
|
||||
/* make the name contain the block device in */
|
||||
- dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
|
||||
- GFP_KERNEL);
|
||||
+
|
||||
+ if (!mtdname)
|
||||
+ mtdname = devname;
|
||||
+
|
||||
+ dev->mtd.name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
|
||||
+
|
||||
if (!dev->mtd.name)
|
||||
goto devinit_err;
|
||||
+
|
||||
+ strcpy(dev->mtd.name, mtdname);
|
||||
|
||||
- sprintf(dev->mtd.name, "block2mtd: %s", devname);
|
||||
-
|
||||
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
|
||||
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
|
||||
dev->mtd.erasesize = erase_size;
|
||||
dev->mtd.writesize = 1;
|
||||
dev->mtd.type = MTD_RAM;
|
||||
@@ -349,15 +355,18 @@ static struct block2mtd_dev *add_device(
|
||||
dev->mtd.read = block2mtd_read;
|
||||
dev->mtd.priv = dev;
|
||||
dev->mtd.owner = THIS_MODULE;
|
||||
-
|
||||
- if (add_mtd_device(&dev->mtd)) {
|
||||
+
|
||||
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
|
||||
+ part->name = dev->mtd.name;
|
||||
+ part->offset = 0;
|
||||
+ part->size = dev->mtd.size;
|
||||
+ if (add_mtd_partitions(&dev->mtd, part, 1)) {
|
||||
/* Device didnt get added, so free the entry */
|
||||
goto devinit_err;
|
||||
}
|
||||
list_add(&dev->list, &blkmtd_device_list);
|
||||
INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
|
||||
- dev->mtd.name + strlen("blkmtd: "),
|
||||
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
|
||||
+ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
|
||||
return dev;
|
||||
|
||||
devinit_err:
|
||||
@@ -430,9 +439,9 @@ static __initdata char block2mtd_paramli
|
||||
|
||||
static int block2mtd_setup2(const char *val)
|
||||
{
|
||||
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
|
||||
+ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
|
||||
char *str = buf;
|
||||
- char *token[2];
|
||||
+ char *token[3];
|
||||
char *name;
|
||||
size_t erase_size = PAGE_SIZE;
|
||||
int i, ret;
|
||||
@@ -443,7 +452,7 @@ static int block2mtd_setup2(const char *
|
||||
strcpy(str, val);
|
||||
kill_final_newline(str);
|
||||
|
||||
- for (i = 0; i < 2; i++)
|
||||
+ for (i = 0; i < 3; i++)
|
||||
token[i] = strsep(&str, ",");
|
||||
|
||||
if (str)
|
||||
@@ -463,8 +472,10 @@ static int block2mtd_setup2(const char *
|
||||
parse_err("illegal erase size");
|
||||
}
|
||||
}
|
||||
+ if (token[2] && (strlen(token[2]) + 1 > 80))
|
||||
+ parse_err("mtd device name too long");
|
||||
|
||||
- add_device(name, erase_size);
|
||||
+ add_device(name, erase_size, token[2]);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -498,7 +509,7 @@ static int block2mtd_setup(const char *v
|
||||
|
||||
|
||||
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
|
||||
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
|
||||
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
|
||||
|
||||
static int __init block2mtd_init(void)
|
||||
{
|
|
@@ -1,109 +0,0 @@
|
|||
--- a/include/linux/netfilter/xt_layer7.h
|
||||
+++ b/include/linux/netfilter/xt_layer7.h
|
||||
@@ -8,6 +8,7 @@ struct xt_layer7_info {
|
||||
char protocol[MAX_PROTOCOL_LEN];
|
||||
char invert:1;
|
||||
char pattern[MAX_PATTERN_LEN];
|
||||
+ u_int8_t pkt;
|
||||
};
|
||||
|
||||
#endif /* _XT_LAYER7_H */
|
||||
--- a/net/netfilter/xt_layer7.c
|
||||
+++ b/net/netfilter/xt_layer7.c
|
||||
@@ -296,34 +296,36 @@ static int match_no_append(struct nf_con
|
||||
}
|
||||
|
||||
/* add the new app data to the conntrack. Return number of bytes added. */
|
||||
-static int add_data(struct nf_conn * master_conntrack,
|
||||
- char * app_data, int appdatalen)
|
||||
+static int add_datastr(char *target, int offset, char *app_data, int len)
|
||||
{
|
||||
int length = 0, i;
|
||||
- int oldlength = master_conntrack->layer7.app_data_len;
|
||||
-
|
||||
- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
|
||||
- clear on whether the race condition exists or whether this really
|
||||
- fixes it. I might just be being dense... Anyway, if it's not really
|
||||
- a fix, all it does is waste a very small amount of time. */
|
||||
- if(!master_conntrack->layer7.app_data) return 0;
|
||||
+
|
||||
+ if (!target) return 0;
|
||||
|
||||
/* Strip nulls. Make everything lower case (our regex lib doesn't
|
||||
do case insensitivity). Add it to the end of the current data. */
|
||||
- for(i = 0; i < maxdatalen-oldlength-1 &&
|
||||
- i < appdatalen; i++) {
|
||||
+ for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
|
||||
if(app_data[i] != '\0') {
|
||||
/* the kernel version of tolower mungs 'upper ascii' */
|
||||
- master_conntrack->layer7.app_data[length+oldlength] =
|
||||
+ target[length+offset] =
|
||||
isascii(app_data[i])?
|
||||
tolower(app_data[i]) : app_data[i];
|
||||
length++;
|
||||
}
|
||||
}
|
||||
+ target[length+offset] = '\0';
|
||||
+
|
||||
+ return length;
|
||||
+}
|
||||
|
||||
- master_conntrack->layer7.app_data[length+oldlength] = '\0';
|
||||
- master_conntrack->layer7.app_data_len = length + oldlength;
|
||||
+/* add the new app data to the conntrack. Return number of bytes added. */
|
||||
+static int add_data(struct nf_conn * master_conntrack,
|
||||
+ char * app_data, int appdatalen)
|
||||
+{
|
||||
+ int length;
|
||||
|
||||
+ length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
|
||||
+ master_conntrack->layer7.app_data_len += length;
|
||||
return length;
|
||||
}
|
||||
|
||||
@@ -410,7 +412,7 @@ match(const struct sk_buff *skbin,
|
||||
struct xt_layer7_info * info = (struct xt_layer7_info *)matchinfo;
|
||||
enum ip_conntrack_info master_ctinfo, ctinfo;
|
||||
struct nf_conn *master_conntrack, *conntrack;
|
||||
- unsigned char * app_data;
|
||||
+ unsigned char *app_data, *tmp_data;
|
||||
unsigned int pattern_result, appdatalen;
|
||||
regexp * comppattern;
|
||||
|
||||
@@ -438,8 +440,8 @@ match(const struct sk_buff *skbin,
|
||||
master_conntrack = master_ct(master_conntrack);
|
||||
|
||||
/* if we've classified it or seen too many packets */
|
||||
- if(TOTAL_PACKETS > num_packets ||
|
||||
- master_conntrack->layer7.app_proto) {
|
||||
+ if(!info->pkt && (TOTAL_PACKETS > num_packets ||
|
||||
+ master_conntrack->layer7.app_proto)) {
|
||||
|
||||
pattern_result = match_no_append(conntrack, master_conntrack,
|
||||
ctinfo, master_ctinfo, info);
|
||||
@@ -472,6 +474,25 @@ match(const struct sk_buff *skbin,
|
||||
/* the return value gets checked later, when we're ready to use it */
|
||||
comppattern = compile_and_cache(info->pattern, info->protocol);
|
||||
|
||||
+ if (info->pkt) {
|
||||
+ tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
|
||||
+ if(!tmp_data){
|
||||
+ if (net_ratelimit())
|
||||
+ printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
|
||||
+ return info->invert;
|
||||
+ }
|
||||
+
|
||||
+ tmp_data[0] = '\0';
|
||||
+ add_datastr(tmp_data, 0, app_data, appdatalen);
|
||||
+ pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
|
||||
+
|
||||
+ kfree(tmp_data);
|
||||
+ tmp_data = NULL;
|
||||
+ spin_unlock_bh(&l7_lock);
|
||||
+
|
||||
+ return (pattern_result ^ info->invert);
|
||||
+ }
|
||||
+
|
||||
/* On the first packet of a connection, allocate space for app data */
|
||||
if(TOTAL_PACKETS == 1 && !skb->cb[0] &&
|
||||
!master_conntrack->layer7.app_data){
|
|
@@ -1,166 +0,0 @@
|
|||
--- /dev/null
|
||||
+++ b/include/net/xfrmudp.h
|
||||
@@ -0,0 +1,10 @@
|
||||
+/*
|
||||
+ * pointer to function for type that xfrm4_input wants, to permit
|
||||
+ * decoupling of XFRM from udp.c
|
||||
+ */
|
||||
+#define HAVE_XFRM4_UDP_REGISTER
|
||||
+
|
||||
+typedef int (*xfrm4_rcv_encap_t)(struct sk_buff *skb, __u16 encap_type);
|
||||
+extern int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
|
||||
+ , xfrm4_rcv_encap_t *oldfunc);
|
||||
+extern int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func);
|
||||
--- a/net/ipv4/Kconfig
|
||||
+++ b/net/ipv4/Kconfig
|
||||
@@ -266,6 +266,12 @@ config NET_IPGRE_BROADCAST
|
||||
Network), but can be distributed all over the Internet. If you want
|
||||
to do that, say Y here and to "IP multicast routing" below.
|
||||
|
||||
+config IPSEC_NAT_TRAVERSAL
|
||||
+ bool "IPSEC NAT-Traversal (KLIPS compatible)"
|
||||
+ depends on INET
|
||||
+ ---help---
|
||||
+ Includes support for RFC3947/RFC3948 NAT-Traversal of ESP over UDP.
|
||||
+
|
||||
config IP_MROUTE
|
||||
bool "IP: multicast routing"
|
||||
depends on IP_MULTICAST
|
||||
--- a/net/ipv4/udp.c
|
||||
+++ b/net/ipv4/udp.c
|
||||
@@ -101,12 +101,15 @@
|
||||
#include <net/route.h>
|
||||
#include <net/checksum.h>
|
||||
#include <net/xfrm.h>
|
||||
+#include <net/xfrmudp.h>
|
||||
#include "udp_impl.h"
|
||||
|
||||
/*
|
||||
* Snmp MIB for the UDP layer
|
||||
*/
|
||||
|
||||
+static xfrm4_rcv_encap_t xfrm4_rcv_encap_func;
|
||||
+
|
||||
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
|
||||
|
||||
struct hlist_head udp_hash[UDP_HTABLE_SIZE];
|
||||
@@ -915,6 +918,42 @@ int udp_disconnect(struct sock *sk, int
|
||||
return 0;
|
||||
}
|
||||
|
||||
+#if defined(CONFIG_XFRM) || defined(CONFIG_IPSEC_NAT_TRAVERSAL)
|
||||
+
|
||||
+/* if XFRM isn't a module, then register it directly. */
|
||||
+#if 0 && !defined(CONFIG_XFRM_MODULE) && !defined(CONFIG_IPSEC_NAT_TRAVERSAL)
|
||||
+static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = xfrm4_rcv_encap;
|
||||
+#else
|
||||
+static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = NULL;
|
||||
+#endif
|
||||
+
|
||||
+int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
|
||||
+ , xfrm4_rcv_encap_t *oldfunc)
|
||||
+{
|
||||
+ if(oldfunc != NULL) {
|
||||
+ *oldfunc = xfrm4_rcv_encap_func;
|
||||
+ }
|
||||
+
|
||||
+#if 0
|
||||
+ if(xfrm4_rcv_encap_func != NULL)
|
||||
+ return -1;
|
||||
+#endif
|
||||
+
|
||||
+ xfrm4_rcv_encap_func = func;
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func)
|
||||
+{
|
||||
+ if(xfrm4_rcv_encap_func != func)
|
||||
+ return -1;
|
||||
+
|
||||
+ xfrm4_rcv_encap_func = NULL;
|
||||
+ return 0;
|
||||
+}
|
||||
+#endif /* CONFIG_XFRM_MODULE || CONFIG_IPSEC_NAT_TRAVERSAL */
|
||||
+
|
||||
+
|
||||
/* return:
|
||||
* 1 if the the UDP system should process it
|
||||
* 0 if we should drop this packet
|
||||
@@ -922,7 +961,7 @@ int udp_disconnect(struct sock *sk, int
|
||||
*/
|
||||
static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
|
||||
{
|
||||
-#ifndef CONFIG_XFRM
|
||||
+#if !defined(CONFIG_XFRM) && !defined(CONFIG_IPSEC_NAT_TRAVERSAL)
|
||||
return 1;
|
||||
#else
|
||||
struct udp_sock *up = udp_sk(sk);
|
||||
@@ -937,11 +976,11 @@ static int udp_encap_rcv(struct sock * s
|
||||
/* if we're overly short, let UDP handle it */
|
||||
len = skb->len - sizeof(struct udphdr);
|
||||
if (len <= 0)
|
||||
- return 1;
|
||||
+ return 2;
|
||||
|
||||
/* if this is not encapsulated socket, then just return now */
|
||||
if (!encap_type)
|
||||
- return 1;
|
||||
+ return 3;
|
||||
|
||||
/* If this is a paged skb, make sure we pull up
|
||||
* whatever data we need to look at. */
|
||||
@@ -964,7 +1003,7 @@ static int udp_encap_rcv(struct sock * s
|
||||
len = sizeof(struct udphdr);
|
||||
} else
|
||||
/* Must be an IKE packet.. pass it through */
|
||||
- return 1;
|
||||
+ return 4;
|
||||
break;
|
||||
case UDP_ENCAP_ESPINUDP_NON_IKE:
|
||||
/* Check if this is a keepalive packet. If so, eat it. */
|
||||
@@ -977,7 +1016,7 @@ static int udp_encap_rcv(struct sock * s
|
||||
len = sizeof(struct udphdr) + 2 * sizeof(u32);
|
||||
} else
|
||||
/* Must be an IKE packet.. pass it through */
|
||||
- return 1;
|
||||
+ return 5;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -988,6 +1027,8 @@ static int udp_encap_rcv(struct sock * s
|
||||
*/
|
||||
if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
|
||||
return 0;
|
||||
+ if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
|
||||
+ return 0;
|
||||
|
||||
/* Now we can update and verify the packet length... */
|
||||
iph = skb->nh.iph;
|
||||
@@ -1051,9 +1092,13 @@ int udp_queue_rcv_skb(struct sock * sk,
|
||||
return 0;
|
||||
}
|
||||
if (ret < 0) {
|
||||
- /* process the ESP packet */
|
||||
- ret = xfrm4_rcv_encap(skb, up->encap_type);
|
||||
- UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
|
||||
+ if(xfrm4_rcv_encap_func != NULL) {
|
||||
+ ret = (*xfrm4_rcv_encap_func)(skb, up->encap_type);
|
||||
+ UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
|
||||
+ } else {
|
||||
+ UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
|
||||
+ ret = 1;
|
||||
+ }
|
||||
return -ret;
|
||||
}
|
||||
/* FALLTHROUGH -- it's a UDP Packet */
|
||||
@@ -1733,3 +1778,9 @@ EXPORT_SYMBOL(udp_poll);
|
||||
EXPORT_SYMBOL(udp_proc_register);
|
||||
EXPORT_SYMBOL(udp_proc_unregister);
|
||||
#endif
|
||||
+
|
||||
+#if defined(CONFIG_IPSEC_NAT_TRAVERSAL)
|
||||
+EXPORT_SYMBOL(udp4_register_esp_rcvencap);
|
||||
+EXPORT_SYMBOL(udp4_unregister_esp_rcvencap);
|
||||
+#endif
|
||||
+
|
|
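The udp.c hook above only forwards decapsulated ESP-in-UDP packets to whatever handler has been registered through the interface declared in include/net/xfrmudp.h. As a rough sketch of how that interface is meant to be consumed (not part of the removed patch; the module and function names below are made up), an out-of-tree IPsec stack would plug in roughly like this:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/xfrmudp.h>

static xfrm4_rcv_encap_t old_handler;	/* previously registered handler, if any */

/* Decapsulate an ESP-in-UDP packet; udp_queue_rcv_skb() negates this
 * return value, so returning 0 here signals that the skb was consumed. */
static int esp4_in_udp_rcv(struct sk_buff *skb, __u16 encap_type)
{
	/* ... ESP processing would go here ... */
	return 0;
}

static int __init esp4_in_udp_init(void)
{
	/* remember any earlier handler so it could be restored on unload */
	return udp4_register_esp_rcvencap(esp4_in_udp_rcv, &old_handler);
}

static void __exit esp4_in_udp_exit(void)
{
	udp4_unregister_esp_rcvencap(esp4_in_udp_rcv);
}

module_init(esp4_in_udp_init);
module_exit(esp4_in_udp_exit);
MODULE_LICENSE("GPL");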
@@ -1,237 +0,0 @@
|
|||
--- /dev/null
|
||||
+++ b/include/linux/netfilter_ipv4/ipt_time.h
|
||||
@@ -0,0 +1,18 @@
|
||||
+#ifndef __ipt_time_h_included__
|
||||
+#define __ipt_time_h_included__
|
||||
+
|
||||
+
|
||||
+struct ipt_time_info {
|
||||
+ u_int8_t days_match; /* 1 bit per day. -SMTWTFS */
|
||||
+ u_int16_t time_start; /* 0 < time_start < 23*60+59 = 1439 */
|
||||
+ u_int16_t time_stop; /* 0:0 < time_stat < 23:59 */
|
||||
+
|
||||
+ /* FIXME: Keep this one for userspace iptables binary compability: */
|
||||
+ u_int8_t kerneltime; /* ignore skb time (and use kerneltime) or not. */
|
||||
+
|
||||
+ time_t date_start;
|
||||
+ time_t date_stop;
|
||||
+};
|
||||
+
|
||||
+
|
||||
+#endif /* __ipt_time_h_included__ */
|
||||
--- /dev/null
|
||||
+++ b/net/ipv4/netfilter/ipt_time.c
|
||||
@@ -0,0 +1,178 @@
|
||||
+/*
|
||||
+ This is a module which is used for time matching
|
||||
+ It is using some modified code from dietlibc (localtime() function)
|
||||
+ that you can find at http://www.fefe.de/dietlibc/
|
||||
+ This file is distributed under the terms of the GNU General Public
|
||||
+ License (GPL). Copies of the GPL can be obtained from: ftp://prep.ai.mit.edu/pub/gnu/GPL
|
||||
+ 2001-05-04 Fabrice MARIE <fabrice@netfilter.org> : initial development.
|
||||
+ 2001-21-05 Fabrice MARIE <fabrice@netfilter.org> : bug fix in the match code,
|
||||
+ thanks to "Zeng Yu" <zengy@capitel.com.cn> for bug report.
|
||||
+ 2001-26-09 Fabrice MARIE <fabrice@netfilter.org> : force the match to be in LOCAL_IN or PRE_ROUTING only.
|
||||
+ 2001-30-11 Fabrice : added the possibility to use the match in FORWARD/OUTPUT with a little hack,
|
||||
+ added Nguyen Dang Phuoc Dong <dongnd@tlnet.com.vn> patch to support timezones.
|
||||
+ 2004-05-02 Fabrice : added support for date matching, from an idea of Fabien COELHO.
|
||||
+*/
|
||||
+
|
||||
+#include <linux/module.h>
|
||||
+#include <linux/skbuff.h>
|
||||
+#include <linux/netfilter_ipv4/ip_tables.h>
|
||||
+#include <linux/netfilter_ipv4/ipt_time.h>
|
||||
+#include <linux/time.h>
|
||||
+
|
||||
+MODULE_AUTHOR("Fabrice MARIE <fabrice@netfilter.org>");
|
||||
+MODULE_DESCRIPTION("Match arrival timestamp/date");
|
||||
+MODULE_LICENSE("GPL");
|
||||
+
|
||||
+struct tm
|
||||
+{
|
||||
+ int tm_sec; /* Seconds. [0-60] (1 leap second) */
|
||||
+ int tm_min; /* Minutes. [0-59] */
|
||||
+ int tm_hour; /* Hours. [0-23] */
|
||||
+ int tm_mday; /* Day. [1-31] */
|
||||
+ int tm_mon; /* Month. [0-11] */
|
||||
+ int tm_year; /* Year - 1900. */
|
||||
+ int tm_wday; /* Day of week. [0-6] */
|
||||
+ int tm_yday; /* Days in year.[0-365] */
|
||||
+ int tm_isdst; /* DST. [-1/0/1]*/
|
||||
+
|
||||
+ long int tm_gmtoff; /* we don't care, we count from GMT */
|
||||
+ const char *tm_zone; /* we don't care, we count from GMT */
|
||||
+};
|
||||
+
|
||||
+void
|
||||
+localtime(const u32 time, struct tm *r);
|
||||
+
|
||||
+static int
|
||||
+match(const struct sk_buff *skb,
|
||||
+ const struct net_device *in,
|
||||
+ const struct net_device *out,
|
||||
+ const struct xt_match *match,
|
||||
+ const void *matchinfo,
|
||||
+ int offset,
|
||||
+ unsigned int protoff,
|
||||
+ int *hotdrop)
|
||||
+{
|
||||
+ const struct ipt_time_info *info = matchinfo; /* match info for rule */
|
||||
+ struct tm currenttime; /* time human readable */
|
||||
+ u_int8_t days_of_week[7] = {64, 32, 16, 8, 4, 2, 1};
|
||||
+ u_int16_t packet_time;
|
||||
+
|
||||
+ /* We might not have a timestamp, get one */
|
||||
+ if (skb->tstamp.off_sec == 0)
|
||||
+ __net_timestamp((struct sk_buff *)skb);
|
||||
+
|
||||
+ /* First we make sure we are in the date start-stop boundaries */
|
||||
+ if ((skb->tstamp.off_sec < info->date_start) || (skb->tstamp.off_sec > info->date_stop))
|
||||
+ return 0; /* We are outside the date boundaries */
|
||||
+
|
||||
+ /* Transform the timestamp of the packet, in a human readable form */
|
||||
+ localtime(skb->tstamp.off_sec, &currenttime);
|
||||
+
|
||||
+ /* check if we match this timestamp, we start by the days... */
|
||||
+ if ((days_of_week[currenttime.tm_wday] & info->days_match) != days_of_week[currenttime.tm_wday])
|
||||
+ return 0; /* the day doesn't match */
|
||||
+
|
||||
+ /* ... check the time now */
|
||||
+ packet_time = (currenttime.tm_hour * 60) + currenttime.tm_min;
|
||||
+ if ((packet_time < info->time_start) || (packet_time > info->time_stop))
|
||||
+ return 0;
|
||||
+
|
||||
+ /* here we match ! */
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+checkentry(const char *tablename,
|
||||
+ const void *ip,
|
||||
+ const struct xt_match *match,
|
||||
+ void *matchinfo,
|
||||
+ unsigned int hook_mask)
|
||||
+{
|
||||
+ struct ipt_time_info *info = matchinfo; /* match info for rule */
|
||||
+
|
||||
+ /* First, check that we are in the correct hooks */
|
||||
+ if (hook_mask
|
||||
+ & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD) | (1 << NF_IP_LOCAL_OUT)))
|
||||
+ {
|
||||
+ printk("ipt_time: error, only valid for PRE_ROUTING, LOCAL_IN, FORWARD and OUTPUT)\n");
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ /* Now check the coherence of the data ... */
|
||||
+ if ((info->time_start > 1439) || /* 23*60+59 = 1439*/
|
||||
+ (info->time_stop > 1439))
|
||||
+ {
|
||||
+ printk(KERN_WARNING "ipt_time: invalid argument\n");
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+static struct ipt_match time_match = {
|
||||
+ .name = "time",
|
||||
+ .match = &match,
|
||||
+ .matchsize = sizeof(struct ipt_time_info),
|
||||
+ .checkentry = &checkentry,
|
||||
+ .me = THIS_MODULE
|
||||
+};
|
||||
+
|
||||
+static int __init init(void)
|
||||
+{
|
||||
+ printk("ipt_time loading\n");
|
||||
+ return xt_register_match(&time_match);
|
||||
+}
|
||||
+
|
||||
+static void __exit fini(void)
|
||||
+{
|
||||
+ xt_unregister_match(&time_match);
|
||||
+ printk("ipt_time unloaded\n");
|
||||
+}
|
||||
+
|
||||
+module_init(init);
|
||||
+module_exit(fini);
|
||||
+
|
||||
+
|
||||
+/* The part below is borowed and modified from dietlibc */
|
||||
+
|
||||
+/* seconds per day */
|
||||
+#define SPD 24*60*60
|
||||
+
|
||||
+void
|
||||
+localtime(const u32 time, struct tm *r) {
|
||||
+ u32 i, timep;
|
||||
+ extern struct timezone sys_tz;
|
||||
+ const unsigned int __spm[12] =
|
||||
+ { 0,
|
||||
+ (31),
|
||||
+ (31+28),
|
||||
+ (31+28+31),
|
||||
+ (31+28+31+30),
|
||||
+ (31+28+31+30+31),
|
||||
+ (31+28+31+30+31+30),
|
||||
+ (31+28+31+30+31+30+31),
|
||||
+ (31+28+31+30+31+30+31+31),
|
||||
+ (31+28+31+30+31+30+31+31+30),
|
||||
+ (31+28+31+30+31+30+31+31+30+31),
|
||||
+ (31+28+31+30+31+30+31+31+30+31+30),
|
||||
+ };
|
||||
+ register u32 work;
|
||||
+
|
||||
+ timep = time - (sys_tz.tz_minuteswest * 60);
|
||||
+ work=timep%(SPD);
|
||||
+ r->tm_sec=work%60; work/=60;
|
||||
+ r->tm_min=work%60; r->tm_hour=work/60;
|
||||
+ work=timep/(SPD);
|
||||
+ r->tm_wday=(4+work)%7;
|
||||
+ for (i=1970; ; ++i) {
|
||||
+ register time_t k= (!(i%4) && ((i%100) || !(i%400)))?366:365;
|
||||
+ if (work>k)
|
||||
+ work-=k;
|
||||
+ else
|
||||
+ break;
|
||||
+ }
|
||||
+ r->tm_year=i-1900;
|
||||
+ for (i=11; i && __spm[i]>work; --i) ;
|
||||
+ r->tm_mon=i;
|
||||
+ r->tm_mday=work-__spm[i]+1;
|
||||
+}
|
||||
--- a/net/ipv4/netfilter/Kconfig
|
||||
+++ b/net/ipv4/netfilter/Kconfig
|
||||
@@ -254,6 +254,22 @@ config IP_NF_MATCH_TOS
|
||||
|
||||
To compile it as a module, choose M here. If unsure, say N.
|
||||
|
||||
+
|
||||
+config IP_NF_MATCH_TIME
|
||||
+ tristate 'TIME match support'
|
||||
+ depends on IP_NF_IPTABLES
|
||||
+ help
|
||||
+ This option adds a `time' match, which allows you
|
||||
+ to match based on the packet arrival time/date
|
||||
+ (arrival time/date at the machine which netfilter is running on) or
|
||||
+ departure time/date (for locally generated packets).
|
||||
+
|
||||
+ If you say Y here, try iptables -m time --help for more information.
|
||||
+ If you want to compile it as a module, say M here and read
|
||||
+
|
||||
+ Documentation/modules.txt. If unsure, say `N'.
|
||||
+
|
||||
+
|
||||
config IP_NF_MATCH_RECENT
|
||||
tristate "recent match support"
|
||||
depends on IP_NF_IPTABLES
|
||||
--- a/net/ipv4/netfilter/Makefile
|
||||
+++ b/net/ipv4/netfilter/Makefile
|
||||
@@ -86,6 +86,7 @@ obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
|
||||
obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
|
||||
obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
|
||||
obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
|
||||
+obj-$(CONFIG_IP_NF_MATCH_TIME) += ipt_time.o
|
||||
obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
|
||||
obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
|
||||
obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
|
|
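For reference, ipt_time_info above encodes days as one bit per day (bit 6 = Sunday down to bit 0 = Saturday, matching the days_of_week[] table in the match) and times as minutes since midnight. A hypothetical rule covering Monday to Friday, 08:00-17:30, would therefore be filled in roughly as follows (illustrative values only, not part of the removed patch):

#include <linux/netfilter_ipv4/ipt_time.h>

static const struct ipt_time_info weekday_office_hours = {
	.days_match = 0x3e,		/* 0111110b = Mon,Tue,Wed,Thu,Fri */
	.time_start = 8 * 60,		/* 480  -> 08:00 */
	.time_stop  = 17 * 60 + 30,	/* 1050 -> 17:30 (max is 1439) */
	.date_start = 0,		/* no lower date bound */
	.date_stop  = 0x7fffffff,	/* effectively no upper date bound */
};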
@@ -1,872 +0,0 @@
|
|||
--- /dev/null
|
||||
+++ b/drivers/net/imq.c
|
||||
@@ -0,0 +1,402 @@
|
||||
+/*
|
||||
+ * Pseudo-driver for the intermediate queue device.
|
||||
+ *
|
||||
+ * This program is free software; you can redistribute it and/or
|
||||
+ * modify it under the terms of the GNU General Public License
|
||||
+ * as published by the Free Software Foundation; either version
|
||||
+ * 2 of the License, or (at your option) any later version.
|
||||
+ *
|
||||
+ * Authors: Patrick McHardy, <kaber@trash.net>
|
||||
+ *
|
||||
+ * The first version was written by Martin Devera, <devik@cdi.cz>
|
||||
+ *
|
||||
+ * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
|
||||
+ * - Update patch to 2.4.21
|
||||
+ * Sebastian Strollo <sstrollo@nortelnetworks.com>
|
||||
+ * - Fix "Dead-loop on netdevice imq"-issue
|
||||
+ * Marcel Sebek <sebek64@post.cz>
|
||||
+ * - Update to 2.6.2-rc1
|
||||
+ *
|
||||
+ * After some time of inactivity there is a group taking care
|
||||
+ * of IMQ again: http://www.linuximq.net
|
||||
+ *
|
||||
+ *
|
||||
+ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7 including
|
||||
+ * the following changes:
|
||||
+ *
|
||||
+ * - Correction of ipv6 support "+"s issue (Hasso Tepper)
|
||||
+ * - Correction of imq_init_devs() issue that resulted in
|
||||
+ * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
|
||||
+ * - Addition of functionality to choose number of IMQ devices
|
||||
+ * during kernel config (Andre Correa)
|
||||
+ * - Addition of functionality to choose how IMQ hooks on
|
||||
+ * PRE and POSTROUTING (after or before NAT) (Andre Correa)
|
||||
+ * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
|
||||
+ *
|
||||
+ *
|
||||
+ * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
|
||||
+ * released with almost no problems. 2.6.14-x was released
|
||||
+ * with some important changes: nfcache was removed; After
|
||||
+ * some weeks of trouble we figured out that some IMQ fields
|
||||
+ * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
|
||||
+ * These functions are correctly patched by this new patch version.
|
||||
+ *
|
||||
+ * Thanks for all who helped to figure out all the problems with
|
||||
+ * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
|
||||
+ * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
|
||||
+ * I didn't forget anybody). I apologize again for my lack of time.
|
||||
+ *
|
||||
+ * More info at: http://www.linuximq.net/ (Andre Correa)
|
||||
+ */
|
||||
+
|
||||
+#include <linux/module.h>
|
||||
+#include <linux/kernel.h>
|
||||
+#include <linux/moduleparam.h>
|
||||
+#include <linux/skbuff.h>
|
||||
+#include <linux/netdevice.h>
|
||||
+#include <linux/rtnetlink.h>
|
||||
+#include <linux/if_arp.h>
|
||||
+#include <linux/netfilter.h>
|
||||
+#include <linux/netfilter_ipv4.h>
|
||||
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
|
||||
+ #include <linux/netfilter_ipv6.h>
|
||||
+#endif
|
||||
+#include <linux/imq.h>
|
||||
+#include <net/pkt_sched.h>
|
||||
+
|
||||
+extern int qdisc_restart1(struct net_device *dev);
|
||||
+
|
||||
+static nf_hookfn imq_nf_hook;
|
||||
+
|
||||
+static struct nf_hook_ops imq_ingress_ipv4 = {
|
||||
+ .hook = imq_nf_hook,
|
||||
+ .owner = THIS_MODULE,
|
||||
+ .pf = PF_INET,
|
||||
+ .hooknum = NF_IP_PRE_ROUTING,
|
||||
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
|
||||
+ .priority = NF_IP_PRI_MANGLE + 1
|
||||
+#else
|
||||
+ .priority = NF_IP_PRI_NAT_DST + 1
|
||||
+#endif
|
||||
+};
|
||||
+
|
||||
+static struct nf_hook_ops imq_egress_ipv4 = {
|
||||
+ .hook = imq_nf_hook,
|
||||
+ .owner = THIS_MODULE,
|
||||
+ .pf = PF_INET,
|
||||
+ .hooknum = NF_IP_POST_ROUTING,
|
||||
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
|
||||
+ .priority = NF_IP_PRI_LAST
|
||||
+#else
|
||||
+ .priority = NF_IP_PRI_NAT_SRC - 1
|
||||
+#endif
|
||||
+};
|
||||
+
|
||||
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
|
||||
+static struct nf_hook_ops imq_ingress_ipv6 = {
|
||||
+ .hook = imq_nf_hook,
|
||||
+ .owner = THIS_MODULE,
|
||||
+ .pf = PF_INET6,
|
||||
+ .hooknum = NF_IP6_PRE_ROUTING,
|
||||
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
|
||||
+ .priority = NF_IP6_PRI_MANGLE + 1
|
||||
+#else
|
||||
+ .priority = NF_IP6_PRI_NAT_DST + 1
|
||||
+#endif
|
||||
+};
|
||||
+
|
||||
+static struct nf_hook_ops imq_egress_ipv6 = {
|
||||
+ .hook = imq_nf_hook,
|
||||
+ .owner = THIS_MODULE,
|
||||
+ .pf = PF_INET6,
|
||||
+ .hooknum = NF_IP6_POST_ROUTING,
|
||||
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
|
||||
+ .priority = NF_IP6_PRI_LAST
|
||||
+#else
|
||||
+ .priority = NF_IP6_PRI_NAT_SRC - 1
|
||||
+#endif
|
||||
+};
|
||||
+#endif
|
||||
+
|
||||
+#if defined(CONFIG_IMQ_NUM_DEVS)
|
||||
+static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
|
||||
+#else
|
||||
+static unsigned int numdevs = 2;
|
||||
+#endif
|
||||
+
|
||||
+static struct net_device *imq_devs;
|
||||
+
|
||||
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
|
||||
+{
|
||||
+ return (struct net_device_stats *)dev->priv;
|
||||
+}
|
||||
+
|
||||
+/* called for packets kfree'd in qdiscs at places other than enqueue */
|
||||
+static void imq_skb_destructor(struct sk_buff *skb)
|
||||
+{
|
||||
+ struct nf_info *info = skb->nf_info;
|
||||
+
|
||||
+ if (info) {
|
||||
+ if (info->indev)
|
||||
+ dev_put(info->indev);
|
||||
+ if (info->outdev)
|
||||
+ dev_put(info->outdev);
|
||||
+ kfree(info);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
+{
|
||||
+ struct net_device_stats *stats = (struct net_device_stats*) dev->priv;
|
||||
+
|
||||
+ stats->tx_bytes += skb->len;
|
||||
+ stats->tx_packets++;
|
||||
+
|
||||
+ skb->imq_flags = 0;
|
||||
+ skb->destructor = NULL;
|
||||
+
|
||||
+ dev->trans_start = jiffies;
|
||||
+ nf_reinject(skb, skb->nf_info, NF_ACCEPT);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info, unsigned queue_num, void *data)
|
||||
+{
|
||||
+ struct net_device *dev;
|
||||
+ struct net_device_stats *stats;
|
||||
+ struct sk_buff *skb2 = NULL;
|
||||
+ struct Qdisc *q;
|
||||
+ unsigned int index = skb->imq_flags&IMQ_F_IFMASK;
|
||||
+ int ret = -1;
|
||||
+
|
||||
+ if (index > numdevs)
|
||||
+ return -1;
|
||||
+
|
||||
+ dev = imq_devs + index;
|
||||
+ if (!(dev->flags & IFF_UP)) {
|
||||
+ skb->imq_flags = 0;
|
||||
+ nf_reinject(skb, info, NF_ACCEPT);
|
||||
+ return 0;
|
||||
+ }
|
||||
+ dev->last_rx = jiffies;
|
||||
+
|
||||
+ if (skb->destructor) {
|
||||
+ skb2 = skb;
|
||||
+ skb = skb_clone(skb, GFP_ATOMIC);
|
||||
+ if (!skb)
|
||||
+ return -1;
|
||||
+ }
|
||||
+ skb->nf_info = info;
|
||||
+
|
||||
+ stats = (struct net_device_stats *)dev->priv;
|
||||
+ stats->rx_bytes+= skb->len;
|
||||
+ stats->rx_packets++;
|
||||
+
|
||||
+ spin_lock_bh(&dev->queue_lock);
|
||||
+ q = dev->qdisc;
|
||||
+ if (q->enqueue) {
|
||||
+ q->enqueue(skb_get(skb), q);
|
||||
+ if (skb_shared(skb)) {
|
||||
+ skb->destructor = imq_skb_destructor;
|
||||
+ kfree_skb(skb);
|
||||
+ ret = 0;
|
||||
+ }
|
||||
+ }
|
||||
+ if (spin_is_locked(&dev->_xmit_lock))
|
||||
+ netif_schedule(dev);
|
||||
+ else
|
||||
+ while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
|
||||
+ /* NOTHING */;
|
||||
+
|
||||
+ spin_unlock_bh(&dev->queue_lock);
|
||||
+
|
||||
+ if (skb2)
|
||||
+ kfree_skb(ret ? skb : skb2);
|
||||
+
|
||||
+ return ret;
|
||||
+}
|
||||
+
|
||||
+static struct nf_queue_handler nfqh = {
|
||||
+ .name = "imq",
|
||||
+ .outfn = imq_nf_queue,
|
||||
+};
|
||||
+
|
||||
+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff **pskb,
|
||||
+ const struct net_device *indev,
|
||||
+ const struct net_device *outdev,
|
||||
+ int (*okfn)(struct sk_buff *))
|
||||
+{
|
||||
+ if ((*pskb)->imq_flags & IMQ_F_ENQUEUE)
|
||||
+ return NF_QUEUE;
|
||||
+
|
||||
+ return NF_ACCEPT;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static int __init imq_init_hooks(void)
|
||||
+{
|
||||
+ int err;
|
||||
+
|
||||
+ err = nf_register_queue_handler(PF_INET, &nfqh);
|
||||
+ if (err > 0)
|
||||
+ goto err1;
|
||||
+ if ((err = nf_register_hook(&imq_ingress_ipv4)))
|
||||
+ goto err2;
|
||||
+ if ((err = nf_register_hook(&imq_egress_ipv4)))
|
||||
+ goto err3;
|
||||
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
|
||||
+ if ((err = nf_register_queue_handler(PF_INET6, &nfqh)))
|
||||
+ goto err4;
|
||||
+ if ((err = nf_register_hook(&imq_ingress_ipv6)))
|
||||
+ goto err5;
|
||||
+ if ((err = nf_register_hook(&imq_egress_ipv6)))
|
||||
+ goto err6;
|
||||
+#endif
|
||||
+
|
||||
+ return 0;
|
||||
+
|
||||
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
|
||||
+err6:
|
||||
+ nf_unregister_hook(&imq_ingress_ipv6);
|
||||
+err5:
|
||||
+ nf_unregister_queue_handler(PF_INET6);
|
||||
+err4:
|
||||
+ nf_unregister_hook(&imq_egress_ipv4);
|
||||
+#endif
|
||||
+err3:
|
||||
+ nf_unregister_hook(&imq_ingress_ipv4);
|
||||
+err2:
|
||||
+ nf_unregister_queue_handler(PF_INET);
|
||||
+err1:
|
||||
+ return err;
|
||||
+}
|
||||
+
|
||||
+static void __exit imq_unhook(void)
|
||||
+{
|
||||
+ nf_unregister_hook(&imq_ingress_ipv4);
|
||||
+ nf_unregister_hook(&imq_egress_ipv4);
|
||||
+ nf_unregister_queue_handler(PF_INET);
|
||||
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
|
||||
+ nf_unregister_hook(&imq_ingress_ipv6);
|
||||
+ nf_unregister_hook(&imq_egress_ipv6);
|
||||
+ nf_unregister_queue_handler(PF_INET6);
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
+static int __init imq_dev_init(struct net_device *dev)
|
||||
+{
|
||||
+ dev->hard_start_xmit = imq_dev_xmit;
|
||||
+ dev->type = ARPHRD_VOID;
|
||||
+ dev->mtu = 1500;
|
||||
+ dev->tx_queue_len = 30;
|
||||
+ dev->flags = IFF_NOARP;
|
||||
+ dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL);
|
||||
+ if (dev->priv == NULL)
|
||||
+ return -ENOMEM;
|
||||
+ memset(dev->priv, 0, sizeof(struct net_device_stats));
|
||||
+ dev->get_stats = imq_get_stats;
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+static void imq_dev_uninit(struct net_device *dev)
|
||||
+{
|
||||
+ kfree(dev->priv);
|
||||
+}
|
||||
+
|
||||
+static int __init imq_init_devs(void)
|
||||
+{
|
||||
+ struct net_device *dev;
|
||||
+ int i,j;
|
||||
+ j = numdevs;
|
||||
+
|
||||
+ if (!numdevs || numdevs > IMQ_MAX_DEVS) {
|
||||
+ printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
|
||||
+ IMQ_MAX_DEVS);
|
||||
+ return -EINVAL;
|
||||
+ }
|
||||
+
|
||||
+ imq_devs = kmalloc(sizeof(struct net_device) * numdevs, GFP_KERNEL);
|
||||
+ if (!imq_devs)
|
||||
+ return -ENOMEM;
|
||||
+ memset(imq_devs, 0, sizeof(struct net_device) * numdevs);
|
||||
+
|
||||
+ /* we start counting at zero */
|
||||
+ numdevs--;
|
||||
+
|
||||
+ for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
|
||||
+ SET_MODULE_OWNER(dev);
|
||||
+ strcpy(dev->name, "imq%d");
|
||||
+ dev->init = imq_dev_init;
|
||||
+ dev->uninit = imq_dev_uninit;
|
||||
+
|
||||
+ if (register_netdev(dev) < 0)
|
||||
+ goto err_register;
|
||||
+ }
|
||||
+ printk(KERN_INFO "IMQ starting with %u devices...\n", j);
|
||||
+ return 0;
|
||||
+
|
||||
+err_register:
|
||||
+ for (; i; i--)
|
||||
+ unregister_netdev(--dev);
|
||||
+ kfree(imq_devs);
|
||||
+ return -EIO;
|
||||
+}
|
||||
+
|
||||
+static void imq_cleanup_devs(void)
|
||||
+{
|
||||
+ int i;
|
||||
+ struct net_device *dev = imq_devs;
|
||||
+
|
||||
+ for (i = 0; i <= numdevs; i++)
|
||||
+ unregister_netdev(dev++);
|
||||
+
|
||||
+ kfree(imq_devs);
|
||||
+}
|
||||
+
|
||||
+static int __init imq_init_module(void)
|
||||
+{
|
||||
+ int err;
|
||||
+
|
||||
+ if ((err = imq_init_devs())) {
|
||||
+ printk(KERN_ERR "IMQ: Error trying imq_init_devs()\n");
|
||||
+ return err;
|
||||
+ }
|
||||
+ if ((err = imq_init_hooks())) {
|
||||
+ printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
|
||||
+ imq_cleanup_devs();
|
||||
+ return err;
|
||||
+ }
|
||||
+
|
||||
+ printk(KERN_INFO "IMQ driver loaded successfully.\n");
|
||||
+
|
||||
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
|
||||
+ printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
|
||||
+#else
|
||||
+ printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
|
||||
+#endif
|
||||
+#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
|
||||
+ printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
|
||||
+#else
|
||||
+ printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
|
||||
+#endif
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+static void __exit imq_cleanup_module(void)
|
||||
+{
|
||||
+ imq_unhook();
|
||||
+ imq_cleanup_devs();
|
||||
+ printk(KERN_INFO "IMQ driver unloaded successfully.\n");
|
||||
+}
|
||||
+
|
||||
+
|
||||
+module_init(imq_init_module);
|
||||
+module_exit(imq_cleanup_module);
|
||||
+
|
||||
+module_param(numdevs, int, 0);
|
||||
+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
|
||||
+MODULE_AUTHOR("http://www.linuximq.net");
|
||||
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
|
||||
+MODULE_LICENSE("GPL");
|
||||
--- a/drivers/net/Kconfig
|
||||
+++ b/drivers/net/Kconfig
|
||||
@@ -96,6 +96,129 @@ config EQUALIZER
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called eql. If unsure, say N.
|
||||
|
||||
+config IMQ
|
||||
+ tristate "IMQ (intermediate queueing device) support"
|
||||
+ depends on NETDEVICES && NETFILTER
|
||||
+ ---help---
|
||||
+ The IMQ device(s) is used as placeholder for QoS queueing
|
||||
+ disciplines. Every packet entering/leaving the IP stack can be
|
||||
+ directed through the IMQ device where it's enqueued/dequeued to the
|
||||
+ attached qdisc. This allows you to treat network devices as classes
|
||||
+ and distribute bandwidth among them. Iptables is used to specify
|
||||
+ through which IMQ device, if any, packets travel.
|
||||
+
|
||||
+ More information at: http://www.linuximq.net/
|
||||
+
|
||||
+ To compile this driver as a module, choose M here: the module
|
||||
+ will be called imq. If unsure, say N.
|
||||
+
|
||||
+choice
|
||||
+ prompt "IMQ behavior (PRE/POSTROUTING)"
|
||||
+ depends on IMQ
|
||||
+ default IMQ_BEHAVIOR_BA
|
||||
+ help
|
||||
+
|
||||
+ This settings defines how IMQ behaves in respect to its
|
||||
+ hooking in PREROUTING and POSTROUTING.
|
||||
+
|
||||
+ IMQ can work in any of the following ways:
|
||||
+
|
||||
+ PREROUTING | POSTROUTING
|
||||
+ -----------------|-------------------
|
||||
+ #1 After NAT | After NAT
|
||||
+ #2 After NAT | Before NAT
|
||||
+ #3 Before NAT | After NAT
|
||||
+ #4 Before NAT | Before NAT
|
||||
+
|
||||
+ The default behavior is to hook before NAT on PREROUTING
|
||||
+ and after NAT on POSTROUTING (#3).
|
||||
+
|
||||
+ This settings are specially usefull when trying to use IMQ
|
||||
+ to shape NATed clients.
|
||||
+
|
||||
+ More information can be found at: www.linuximq.net
|
||||
+
|
||||
+ If not sure leave the default settings alone.
|
||||
+
|
||||
+config IMQ_BEHAVIOR_AA
|
||||
+ bool "IMQ AA"
|
||||
+ help
|
||||
+ This settings defines how IMQ behaves in respect to its
|
||||
+ hooking in PREROUTING and POSTROUTING.
|
||||
+
|
||||
+ Choosing this option will make IMQ hook like this:
|
||||
+
|
||||
+ PREROUTING: After NAT
|
||||
+ POSTROUTING: After NAT
|
||||
+
|
||||
+ More information can be found at: www.linuximq.net
|
||||
+
|
||||
+ If not sure leave the default settings alone.
|
||||
+
|
||||
+config IMQ_BEHAVIOR_AB
|
||||
+ bool "IMQ AB"
|
||||
+ help
|
||||
+ This settings defines how IMQ behaves in respect to its
|
||||
+ hooking in PREROUTING and POSTROUTING.
|
||||
+
|
||||
+ Choosing this option will make IMQ hook like this:
|
||||
+
|
||||
+ PREROUTING: After NAT
|
||||
+ POSTROUTING: Before NAT
|
||||
+
|
||||
+ More information can be found at: www.linuximq.net
|
||||
+
|
||||
+ If not sure leave the default settings alone.
|
||||
+
|
||||
+config IMQ_BEHAVIOR_BA
|
||||
+ bool "IMQ BA"
|
||||
+ help
|
||||
+ This settings defines how IMQ behaves in respect to its
|
||||
+ hooking in PREROUTING and POSTROUTING.
|
||||
+
|
||||
+ Choosing this option will make IMQ hook like this:
|
||||
+
|
||||
+ PREROUTING: Before NAT
|
||||
+ POSTROUTING: After NAT
|
||||
+
|
||||
+ More information can be found at: www.linuximq.net
|
||||
+
|
||||
+ If not sure leave the default settings alone.
|
||||
+
|
||||
+config IMQ_BEHAVIOR_BB
|
||||
+ bool "IMQ BB"
|
||||
+ help
|
||||
+ This settings defines how IMQ behaves in respect to its
|
||||
+ hooking in PREROUTING and POSTROUTING.
|
||||
+
|
||||
+ Choosing this option will make IMQ hook like this:
|
||||
+
|
||||
+ PREROUTING: Before NAT
|
||||
+ POSTROUTING: Before NAT
|
||||
+
|
||||
+ More information can be found at: www.linuximq.net
|
||||
+
|
||||
+ If not sure leave the default settings alone.
|
||||
+
|
||||
+endchoice
|
||||
+
|
||||
+config IMQ_NUM_DEVS
|
||||
+
|
||||
+ int "Number of IMQ devices"
|
||||
+ range 2 8
|
||||
+ depends on IMQ
|
||||
+ default "2"
|
||||
+ help
|
||||
+
|
||||
+ This settings defines how many IMQ devices will be
|
||||
+ created.
|
||||
+
|
||||
+ The default value is 2.
|
||||
+
|
||||
+ More information can be found at: www.linuximq.net
|
||||
+
|
||||
+ If not sure leave the default settings alone.
|
||||
+
|
||||
config TUN
|
||||
tristate "Universal TUN/TAP device driver support"
|
||||
select CRC32
|
||||
--- a/drivers/net/Makefile
|
||||
+++ b/drivers/net/Makefile
|
||||
@@ -124,6 +124,7 @@ obj-$(CONFIG_SLIP) += slip.o
|
||||
obj-$(CONFIG_SLHC) += slhc.o
|
||||
|
||||
obj-$(CONFIG_DUMMY) += dummy.o
|
||||
+obj-$(CONFIG_IMQ) += imq.o
|
||||
obj-$(CONFIG_IFB) += ifb.o
|
||||
obj-$(CONFIG_DE600) += de600.o
|
||||
obj-$(CONFIG_DE620) += de620.o
|
||||
--- /dev/null
|
||||
+++ b/include/linux/imq.h
|
||||
@@ -0,0 +1,9 @@
|
||||
+#ifndef _IMQ_H
|
||||
+#define _IMQ_H
|
||||
+
|
||||
+#define IMQ_MAX_DEVS 16
|
||||
+
|
||||
+#define IMQ_F_IFMASK 0x7f
|
||||
+#define IMQ_F_ENQUEUE 0x80
|
||||
+
|
||||
+#endif /* _IMQ_H */
|
||||
--- /dev/null
|
||||
+++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
|
||||
@@ -0,0 +1,8 @@
|
||||
+#ifndef _IPT_IMQ_H
|
||||
+#define _IPT_IMQ_H
|
||||
+
|
||||
+struct ipt_imq_info {
|
||||
+ unsigned int todev; /* target imq device */
|
||||
+};
|
||||
+
|
||||
+#endif /* _IPT_IMQ_H */
|
||||
--- /dev/null
|
||||
+++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
|
||||
@@ -0,0 +1,8 @@
|
||||
+#ifndef _IP6T_IMQ_H
|
||||
+#define _IP6T_IMQ_H
|
||||
+
|
||||
+struct ip6t_imq_info {
|
||||
+ unsigned int todev; /* target imq device */
|
||||
+};
|
||||
+
|
||||
+#endif /* _IP6T_IMQ_H */
|
||||
--- a/include/linux/skbuff.h
|
||||
+++ b/include/linux/skbuff.h
|
||||
@@ -294,6 +294,10 @@ struct sk_buff {
|
||||
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
|
||||
struct sk_buff *nfct_reasm;
|
||||
#endif
|
||||
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
|
||||
+ unsigned char imq_flags;
|
||||
+ struct nf_info *nf_info;
|
||||
+#endif
|
||||
#ifdef CONFIG_BRIDGE_NETFILTER
|
||||
struct nf_bridge_info *nf_bridge;
|
||||
#endif
|
||||
--- a/net/core/dev.c
|
||||
+++ b/net/core/dev.c
|
||||
@@ -94,6 +94,9 @@
|
||||
#include <linux/skbuff.h>
|
||||
#include <net/sock.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
|
||||
+#include <linux/imq.h>
|
||||
+#endif
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/stat.h>
|
||||
@@ -1340,7 +1343,11 @@ static int dev_gso_segment(struct sk_buf
|
||||
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
if (likely(!skb->next)) {
|
||||
- if (netdev_nit)
|
||||
+ if (netdev_nit
|
||||
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
|
||||
+ && !(skb->imq_flags & IMQ_F_ENQUEUE)
|
||||
+#endif
|
||||
+ )
|
||||
dev_queue_xmit_nit(skb, dev);
|
||||
|
||||
if (netif_needs_gso(dev, skb)) {
|
||||
--- a/net/core/skbuff.c
|
||||
+++ b/net/core/skbuff.c
|
||||
@@ -430,6 +430,10 @@ struct sk_buff *skb_clone(struct sk_buff
|
||||
C(nfct_reasm);
|
||||
nf_conntrack_get_reasm(skb->nfct_reasm);
|
||||
#endif
|
||||
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
|
||||
+ C(imq_flags);
|
||||
+ C(nf_info);
|
||||
+#endif /*CONFIG_IMQ*/
|
||||
#ifdef CONFIG_BRIDGE_NETFILTER
|
||||
C(nf_bridge);
|
||||
nf_bridge_get(skb->nf_bridge);
|
||||
@@ -494,6 +498,10 @@ static void copy_skb_header(struct sk_bu
|
||||
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
|
||||
new->ipvs_property = old->ipvs_property;
|
||||
#endif
|
||||
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
|
||||
+ new->imq_flags = old->imq_flags;
|
||||
+ new->nf_info = old->nf_info;
|
||||
+#endif /*CONFIG_IMQ*/
|
||||
#ifdef CONFIG_BRIDGE_NETFILTER
|
||||
new->nf_bridge = old->nf_bridge;
|
||||
nf_bridge_get(old->nf_bridge);
|
||||
--- /dev/null
|
||||
+++ b/net/ipv4/netfilter/ipt_IMQ.c
|
||||
@@ -0,0 +1,69 @@
|
||||
+/*
|
||||
+ * This target marks packets to be enqueued to an imq device
|
||||
+ */
|
||||
+#include <linux/module.h>
|
||||
+#include <linux/skbuff.h>
|
||||
+#include <linux/netfilter_ipv4/ip_tables.h>
|
||||
+#include <linux/netfilter_ipv4/ipt_IMQ.h>
|
||||
+#include <linux/imq.h>
|
||||
+
|
||||
+static unsigned int imq_target(struct sk_buff **pskb,
|
||||
+ const struct net_device *in,
|
||||
+ const struct net_device *out,
|
||||
+ unsigned int hooknum,
|
||||
+ const struct xt_target *target,
|
||||
+ const void *targinfo)
|
||||
+{
|
||||
+ struct ipt_imq_info *mr = (struct ipt_imq_info*)targinfo;
|
||||
+
|
||||
+ (*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
|
||||
+
|
||||
+ return XT_CONTINUE;
|
||||
+}
|
||||
+
|
||||
+static int imq_checkentry(const char *tablename,
|
||||
+ const void *e,
|
||||
+ const struct xt_target *target,
|
||||
+ void *targinfo,
|
||||
+ unsigned int hook_mask)
|
||||
+{
|
||||
+ struct ipt_imq_info *mr;
|
||||
+
|
||||
+ mr = (struct ipt_imq_info*)targinfo;
|
||||
+
|
||||
+ if (mr->todev > IMQ_MAX_DEVS) {
|
||||
+ printk(KERN_WARNING
|
||||
+ "IMQ: invalid device specified, highest is %u\n",
|
||||
+ IMQ_MAX_DEVS);
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+static struct xt_target ipt_imq_reg = {
|
||||
+ .name = "IMQ",
|
||||
+ .family = AF_INET,
|
||||
+ .target = imq_target,
|
||||
+ .targetsize = sizeof(struct ipt_imq_info),
|
||||
+ .checkentry = imq_checkentry,
|
||||
+ .me = THIS_MODULE,
|
||||
+ .table = "mangle"
|
||||
+};
|
||||
+
|
||||
+static int __init init(void)
|
||||
+{
|
||||
+ return xt_register_target(&ipt_imq_reg);
|
||||
+}
|
||||
+
|
||||
+static void __exit fini(void)
|
||||
+{
|
||||
+ xt_unregister_target(&ipt_imq_reg);
|
||||
+}
|
||||
+
|
||||
+module_init(init);
|
||||
+module_exit(fini);
|
||||
+
|
||||
+MODULE_AUTHOR("http://www.linuximq.net");
|
||||
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
|
||||
+MODULE_LICENSE("GPL");
|
||||
--- a/net/ipv4/netfilter/Kconfig
|
||||
+++ b/net/ipv4/netfilter/Kconfig
|
||||
@@ -581,6 +581,17 @@ config IP_NF_MANGLE
|
||||
|
||||
To compile it as a module, choose M here. If unsure, say N.
|
||||
|
||||
+config IP_NF_TARGET_IMQ
|
||||
+ tristate "IMQ target support"
|
||||
+ depends on IP_NF_MANGLE
|
||||
+ help
|
||||
+ This option adds a `IMQ' target which is used to specify if and
|
||||
+ to which IMQ device packets should get enqueued/dequeued.
|
||||
+
|
||||
+ For more information visit: http://www.linuximq.net/
|
||||
+
|
||||
+ To compile it as a module, choose M here. If unsure, say N.
|
||||
+
|
||||
config IP_NF_TARGET_TOS
|
||||
tristate "TOS target support"
|
||||
depends on IP_NF_MANGLE
|
||||
--- a/net/ipv4/netfilter/Makefile
|
||||
+++ b/net/ipv4/netfilter/Makefile
|
||||
@@ -97,6 +97,7 @@ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ip
|
||||
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
|
||||
+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
|
||||
--- /dev/null
|
||||
+++ b/net/ipv6/netfilter/ip6t_IMQ.c
|
||||
@@ -0,0 +1,69 @@
|
||||
+/*
|
||||
+ * This target marks packets to be enqueued to an imq device
|
||||
+ */
|
||||
+#include <linux/module.h>
|
||||
+#include <linux/skbuff.h>
|
||||
+#include <linux/netfilter_ipv6/ip6_tables.h>
|
||||
+#include <linux/netfilter_ipv6/ip6t_IMQ.h>
|
||||
+#include <linux/imq.h>
|
||||
+
|
||||
+static unsigned int imq_target(struct sk_buff **pskb,
|
||||
+ const struct net_device *in,
|
||||
+ const struct net_device *out,
|
||||
+ unsigned int hooknum,
|
||||
+ const struct xt_target *target,
|
||||
+ const void *targinfo)
|
||||
+{
|
||||
+ struct ip6t_imq_info *mr = (struct ip6t_imq_info*)targinfo;
|
||||
+
|
||||
+ (*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
|
||||
+
|
||||
+ return XT_CONTINUE;
|
||||
+}
|
||||
+
|
||||
+static int imq_checkentry(const char *tablename,
|
||||
+ const void *entry,
|
||||
+ const struct xt_target *target,
|
||||
+ void *targinfo,
|
||||
+ unsigned int hook_mask)
|
||||
+{
|
||||
+ struct ip6t_imq_info *mr;
|
||||
+
|
||||
+ mr = (struct ip6t_imq_info*)targinfo;
|
||||
+
|
||||
+ if (mr->todev > IMQ_MAX_DEVS) {
|
||||
+ printk(KERN_WARNING
|
||||
+ "IMQ: invalid device specified, highest is %u\n",
|
||||
+ IMQ_MAX_DEVS);
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+static struct xt_target ip6t_imq_reg = {
|
||||
+ .name = "IMQ",
|
||||
+ .family = AF_INET6,
|
||||
+ .target = imq_target,
|
||||
+ .targetsize = sizeof(struct ip6t_imq_info),
|
||||
+ .table = "mangle",
|
||||
+ .checkentry = imq_checkentry,
|
||||
+ .me = THIS_MODULE
|
||||
+};
|
||||
+
|
||||
+static int __init init(void)
|
||||
+{
|
||||
+ return xt_register_target(&ip6t_imq_reg);
|
||||
+}
|
||||
+
|
||||
+static void __exit fini(void)
|
||||
+{
|
||||
+ xt_unregister_target(&ip6t_imq_reg);
|
||||
+}
|
||||
+
|
||||
+module_init(init);
|
||||
+module_exit(fini);
|
||||
+
|
||||
+MODULE_AUTHOR("http://www.linuximq.net");
|
||||
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
|
||||
+MODULE_LICENSE("GPL");
|
||||
--- a/net/ipv6/netfilter/Kconfig
|
||||
+++ b/net/ipv6/netfilter/Kconfig
|
||||
@@ -173,6 +173,15 @@ config IP6_NF_MANGLE
|
||||
|
||||
To compile it as a module, choose M here. If unsure, say N.
|
||||
|
||||
+config IP6_NF_TARGET_IMQ
|
||||
+ tristate "IMQ target support"
|
||||
+ depends on IP6_NF_MANGLE
|
||||
+ help
|
||||
+ This option adds a `IMQ' target which is used to specify if and
|
||||
+ to which imq device packets should get enqueued/dequeued.
|
||||
+
|
||||
+ To compile it as a module, choose M here. If unsure, say N.
|
||||
+
|
||||
config IP6_NF_TARGET_HL
|
||||
tristate 'HL (hoplimit) target support'
|
||||
depends on IP6_NF_MANGLE
|
||||
--- a/net/ipv6/netfilter/Makefile
|
||||
+++ b/net/ipv6/netfilter/Makefile
|
||||
@@ -13,6 +13,7 @@ obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t
|
||||
obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
|
||||
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
|
||||
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
|
||||
+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o
|
||||
obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
|
||||
obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
|
||||
obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
|
||||
--- a/net/sched/sch_generic.c
|
||||
+++ b/net/sched/sch_generic.c
|
||||
@@ -87,7 +87,6 @@ void qdisc_unlock_tree(struct net_device
|
||||
|
||||
NOTE: Called under dev->queue_lock with locally disabled BH.
|
||||
*/
|
||||
-
|
||||
static inline int qdisc_restart(struct net_device *dev)
|
||||
{
|
||||
struct Qdisc *q = dev->qdisc;
|
||||
@@ -181,6 +180,11 @@ requeue:
|
||||
return q->q.qlen;
|
||||
}
|
||||
|
||||
+int qdisc_restart1(struct net_device *dev)
|
||||
+{
|
||||
+ return qdisc_restart(dev);
|
||||
+}
|
||||
+
|
||||
void __qdisc_run(struct net_device *dev)
|
||||
{
|
||||
if (unlikely(dev->qdisc == &noop_qdisc))
|
||||
@@ -617,3 +621,4 @@ EXPORT_SYMBOL(qdisc_destroy);
|
||||
EXPORT_SYMBOL(qdisc_reset);
|
||||
EXPORT_SYMBOL(qdisc_lock_tree);
|
||||
EXPORT_SYMBOL(qdisc_unlock_tree);
|
||||
+EXPORT_SYMBOL(qdisc_restart1);
|
|
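A condensed view of what the ipt_IMQ/ip6t_IMQ targets above do with skb->imq_flags, and how imq_nf_queue() later recovers the device index: the low seven bits select the imq device, the top bit requests enqueueing. The helper below is made up for illustration and assumes CONFIG_IMQ is enabled; it is not part of the removed patch.

#include <linux/skbuff.h>
#include <linux/imq.h>

static inline void mark_for_imq(struct sk_buff *skb, unsigned int devnum)
{
	/* devnum is assumed to be below the number of registered imq
	 * devices (at most IMQ_MAX_DEVS, 16 in this patch) */
	skb->imq_flags = (devnum & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
}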
@@ -1,947 +0,0 @@
|
|||
--- /dev/null
|
||||
+++ b/include/linux/netfilter_ipv4/ipt_ROUTE.h
|
||||
@@ -0,0 +1,23 @@
|
||||
+/* Header file for iptables ipt_ROUTE target
|
||||
+ *
|
||||
+ * (C) 2002 by Cédric de Launois <delaunois@info.ucl.ac.be>
|
||||
+ *
|
||||
+ * This software is distributed under GNU GPL v2, 1991
|
||||
+ */
|
||||
+#ifndef _IPT_ROUTE_H_target
|
||||
+#define _IPT_ROUTE_H_target
|
||||
+
|
||||
+#define IPT_ROUTE_IFNAMSIZ 16
|
||||
+
|
||||
+struct ipt_route_target_info {
|
||||
+ char oif[IPT_ROUTE_IFNAMSIZ]; /* Output Interface Name */
|
||||
+ char iif[IPT_ROUTE_IFNAMSIZ]; /* Input Interface Name */
|
||||
+ u_int32_t gw; /* IP address of gateway */
|
||||
+ u_int8_t flags;
|
||||
+};
|
||||
+
|
||||
+/* Values for "flags" field */
|
||||
+#define IPT_ROUTE_CONTINUE 0x01
|
||||
+#define IPT_ROUTE_TEE 0x02
|
||||
+
|
||||
+#endif /*_IPT_ROUTE_H_target*/
|
||||
--- /dev/null
|
||||
+++ b/include/linux/netfilter_ipv6/ip6t_ROUTE.h
|
||||
@@ -0,0 +1,23 @@
|
||||
+/* Header file for iptables ip6t_ROUTE target
|
||||
+ *
|
||||
+ * (C) 2003 by Cédric de Launois <delaunois@info.ucl.ac.be>
|
||||
+ *
|
||||
+ * This software is distributed under GNU GPL v2, 1991
|
||||
+ */
|
||||
+#ifndef _IPT_ROUTE_H_target
|
||||
+#define _IPT_ROUTE_H_target
|
||||
+
|
||||
+#define IP6T_ROUTE_IFNAMSIZ 16
|
||||
+
|
||||
+struct ip6t_route_target_info {
|
||||
+ char oif[IP6T_ROUTE_IFNAMSIZ]; /* Output Interface Name */
|
||||
+ char iif[IP6T_ROUTE_IFNAMSIZ]; /* Input Interface Name */
|
||||
+ u_int32_t gw[4]; /* IPv6 address of gateway */
|
||||
+ u_int8_t flags;
|
||||
+};
|
||||
+
|
||||
+/* Values for "flags" field */
|
||||
+#define IP6T_ROUTE_CONTINUE 0x01
|
||||
+#define IP6T_ROUTE_TEE 0x02
|
||||
+
|
||||
+#endif /*_IP6T_ROUTE_H_target*/
|
||||
--- /dev/null
|
||||
+++ b/net/ipv4/netfilter/ipt_ROUTE.c
|
||||
@@ -0,0 +1,483 @@
|
||||
+/*
|
||||
+ * This implements the ROUTE target, which enables you to setup unusual
|
||||
+ * routes not supported by the standard kernel routing table.
|
||||
+ *
|
||||
+ * Copyright (C) 2002 Cedric de Launois <delaunois@info.ucl.ac.be>
|
||||
+ *
|
||||
+ * v 1.11 2004/11/23
|
||||
+ *
|
||||
+ * This software is distributed under GNU GPL v2, 1991
|
||||
+ */
|
||||
+
|
||||
+#include <linux/module.h>
|
||||
+#include <linux/skbuff.h>
|
||||
+#include <linux/ip.h>
|
||||
+#include <linux/netfilter_ipv4/ip_tables.h>
|
||||
+#include <linux/netfilter_ipv4/ip_conntrack.h>
|
||||
+#include <linux/netfilter_ipv4/ipt_ROUTE.h>
|
||||
+#include <linux/netdevice.h>
|
||||
+#include <linux/route.h>
|
||||
+#include <linux/version.h>
|
||||
+#include <linux/if_arp.h>
|
||||
+#include <net/ip.h>
|
||||
+#include <net/route.h>
|
||||
+#include <net/icmp.h>
|
||||
+#include <net/checksum.h>
|
||||
+
|
||||
+#if 0
|
||||
+#define DEBUGP printk
|
||||
+#else
|
||||
+#define DEBUGP(format, args...)
|
||||
+#endif
|
||||
+
|
||||
+MODULE_LICENSE("GPL");
|
||||
+MODULE_AUTHOR("Cedric de Launois <delaunois@info.ucl.ac.be>");
|
||||
+MODULE_DESCRIPTION("iptables ROUTE target module");
|
||||
+
|
||||
+/* Try to route the packet according to the routing keys specified in
|
||||
+ * route_info. Keys are :
|
||||
+ * - ifindex :
|
||||
+ * 0 if no oif preferred,
|
||||
+ * otherwise set to the index of the desired oif
|
||||
+ * - route_info->gw :
|
||||
+ * 0 if no gateway specified,
|
||||
+ * otherwise set to the next host to which the pkt must be routed
|
||||
+ * If success, skb->dev is the output device to which the packet must
|
||||
+ * be sent and skb->dst is not NULL
|
||||
+ *
|
||||
+ * RETURN: -1 if an error occured
|
||||
+ * 1 if the packet was succesfully routed to the
|
||||
+ * destination desired
|
||||
+ * 0 if the kernel routing table could not route the packet
|
||||
+ * according to the keys specified
|
||||
+ */
|
||||
+static int route(struct sk_buff *skb,
|
||||
+ unsigned int ifindex,
|
||||
+ const struct ipt_route_target_info *route_info)
|
||||
+{
|
||||
+ int err;
|
||||
+ struct rtable *rt;
|
||||
+ struct iphdr *iph = skb->nh.iph;
|
||||
+ struct flowi fl = {
|
||||
+ .oif = ifindex,
|
||||
+ .nl_u = {
|
||||
+ .ip4_u = {
|
||||
+ .daddr = iph->daddr,
|
||||
+ .saddr = 0,
|
||||
+ .tos = RT_TOS(iph->tos),
|
||||
+ .scope = RT_SCOPE_UNIVERSE,
|
||||
+ }
|
||||
+ }
|
||||
+ };
|
||||
+
|
||||
+ /* The destination address may be overloaded by the target */
|
||||
+ if (route_info->gw)
|
||||
+ fl.fl4_dst = route_info->gw;
|
||||
+
|
||||
+ /* Trying to route the packet using the standard routing table. */
|
||||
+ if ((err = ip_route_output_key(&rt, &fl))) {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP("ipt_ROUTE: couldn't route pkt (err: %i)",err);
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
+ /* Drop old route. */
|
||||
+ dst_release(skb->dst);
|
||||
+ skb->dst = NULL;
|
||||
+
|
||||
+ /* Success if no oif specified or if the oif correspond to the
|
||||
+ * one desired */
|
||||
+ if (!ifindex || rt->u.dst.dev->ifindex == ifindex) {
|
||||
+ skb->dst = &rt->u.dst;
|
||||
+ skb->dev = skb->dst->dev;
|
||||
+ skb->protocol = htons(ETH_P_IP);
|
||||
+ return 1;
|
||||
+ }
|
||||
+
|
||||
+ /* The interface selected by the routing table is not the one
|
||||
+ * specified by the user. This may happen because the dst address
|
||||
+ * is one of our own addresses.
|
||||
+ */
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP("ipt_ROUTE: failed to route as desired gw=%u.%u.%u.%u oif=%i (got oif=%i)\n",
|
||||
+ NIPQUAD(route_info->gw), ifindex, rt->u.dst.dev->ifindex);
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+/* Stolen from ip_finish_output2
|
||||
+ * PRE : skb->dev is set to the device we are leaving by
|
||||
+ * skb->dst is not NULL
|
||||
+ * POST: the packet is sent with the link layer header pushed
|
||||
+ * the packet is destroyed
|
||||
+ */
|
||||
+static void ip_direct_send(struct sk_buff *skb)
|
||||
+{
|
||||
+ struct dst_entry *dst = skb->dst;
|
||||
+ struct hh_cache *hh = dst->hh;
|
||||
+ struct net_device *dev = dst->dev;
|
||||
+ int hh_len = LL_RESERVED_SPACE(dev);
|
||||
+
|
||||
+ /* Be paranoid, rather than too clever. */
|
||||
+ if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
|
||||
+ struct sk_buff *skb2;
|
||||
+
|
||||
+ skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
|
||||
+ if (skb2 == NULL) {
|
||||
+ kfree_skb(skb);
|
||||
+ return;
|
||||
+ }
|
||||
+ if (skb->sk)
|
||||
+ skb_set_owner_w(skb2, skb->sk);
|
||||
+ kfree_skb(skb);
|
||||
+ skb = skb2;
|
||||
+ }
|
||||
+
|
||||
+ if (hh) {
|
||||
+ int hh_alen;
|
||||
+
|
||||
+ read_lock_bh(&hh->hh_lock);
|
||||
+ hh_alen = HH_DATA_ALIGN(hh->hh_len);
|
||||
+ memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
|
||||
+ read_unlock_bh(&hh->hh_lock);
|
||||
+ skb_push(skb, hh->hh_len);
|
||||
+ hh->hh_output(skb);
|
||||
+ } else if (dst->neighbour)
|
||||
+ dst->neighbour->output(skb);
|
||||
+ else {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
|
||||
+ kfree_skb(skb);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+
|
||||
+/* PRE : skb->dev is set to the device we are leaving by
|
||||
+ * POST: - the packet is directly sent to the skb->dev device, without
|
||||
+ * pushing the link layer header.
|
||||
+ * - the packet is destroyed
|
||||
+ */
|
||||
+static inline int dev_direct_send(struct sk_buff *skb)
|
||||
+{
|
||||
+ return dev_queue_xmit(skb);
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static unsigned int route_oif(const struct ipt_route_target_info *route_info,
|
||||
+ struct sk_buff *skb)
|
||||
+{
|
||||
+ unsigned int ifindex = 0;
|
||||
+ struct net_device *dev_out = NULL;
|
||||
+
|
||||
+ /* The user set the interface name to use.
|
||||
+ * Getting the current interface index.
|
||||
+ */
|
||||
+ if ((dev_out = dev_get_by_name(route_info->oif))) {
|
||||
+ ifindex = dev_out->ifindex;
|
||||
+ } else {
|
||||
+ /* Unknown interface name : packet dropped */
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP("ipt_ROUTE: oif interface %s not found\n", route_info->oif);
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+
|
||||
+ /* Trying the standard way of routing packets */
|
||||
+ switch (route(skb, ifindex, route_info)) {
|
||||
+ case 1:
|
||||
+ dev_put(dev_out);
|
||||
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
|
||||
+ return IPT_CONTINUE;
|
||||
+
|
||||
+ ip_direct_send(skb);
|
||||
+ return NF_STOLEN;
|
||||
+
|
||||
+ case 0:
|
||||
+ /* Failed to send to oif. Trying the hard way */
|
||||
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
|
||||
+ return NF_DROP;
|
||||
+
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP("ipt_ROUTE: forcing the use of %i\n",
|
||||
+ ifindex);
|
||||
+
|
||||
+ /* We have to force the use of an interface.
|
||||
+ * This interface must be a tunnel interface since
|
||||
+ * otherwise we can't guess the hw address for
|
||||
+ * the packet. For a tunnel interface, no hw address
|
||||
+ * is needed.
|
||||
+ */
|
||||
+ if ((dev_out->type != ARPHRD_TUNNEL)
|
||||
+ && (dev_out->type != ARPHRD_IPGRE)) {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP("ipt_ROUTE: can't guess the hw addr !\n");
|
||||
+ dev_put(dev_out);
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+
|
||||
+ /* Send the packet. This will also free skb
|
||||
+ * Do not go through the POST_ROUTING hook because
|
||||
+ * skb->dst is not set and because it will probably
|
||||
+ * get confused by the destination IP address.
|
||||
+ */
|
||||
+ skb->dev = dev_out;
|
||||
+ dev_direct_send(skb);
|
||||
+ dev_put(dev_out);
|
||||
+ return NF_STOLEN;
|
||||
+
|
||||
+ default:
|
||||
+ /* Unexpected error */
|
||||
+ dev_put(dev_out);
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static unsigned int route_iif(const struct ipt_route_target_info *route_info,
|
||||
+ struct sk_buff *skb)
|
||||
+{
|
||||
+ struct net_device *dev_in = NULL;
|
||||
+
|
||||
+ /* Getting the current interface index. */
|
||||
+ if (!(dev_in = dev_get_by_name(route_info->iif))) {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP("ipt_ROUTE: iif interface %s not found\n", route_info->iif);
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+
|
||||
+ skb->dev = dev_in;
|
||||
+ dst_release(skb->dst);
|
||||
+ skb->dst = NULL;
|
||||
+
|
||||
+ netif_rx(skb);
|
||||
+ dev_put(dev_in);
|
||||
+ return NF_STOLEN;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static unsigned int route_gw(const struct ipt_route_target_info *route_info,
|
||||
+ struct sk_buff *skb)
|
||||
+{
|
||||
+ if (route(skb, 0, route_info)!=1)
|
||||
+ return NF_DROP;
|
||||
+
|
||||
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
|
||||
+ return IPT_CONTINUE;
|
||||
+
|
||||
+ ip_direct_send(skb);
|
||||
+ return NF_STOLEN;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+/* To detect and deter routed packet loopback when using the --tee option,
|
||||
+ * we take a page out of the raw.patch book: on the copied skb, we set up
|
||||
+ * a fake ->nfct entry, pointing to the local &route_tee_track. We skip
|
||||
+ * routing packets when we see they already have that ->nfct.
|
||||
+ */
|
||||
+
|
||||
+static struct ip_conntrack route_tee_track;
|
||||
+
|
||||
+static unsigned int ipt_route_target(struct sk_buff **pskb,
|
||||
+ const struct net_device *in,
|
||||
+ const struct net_device *out,
|
||||
+ unsigned int hooknum,
|
||||
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
|
||||
+ const struct xt_target *target,
|
||||
+#endif
|
||||
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
|
||||
+ const void *targinfo,
|
||||
+ void *userinfo)
|
||||
+#else
|
||||
+ const void *targinfo)
|
||||
+#endif
|
||||
+{
|
||||
+ const struct ipt_route_target_info *route_info = targinfo;
|
||||
+ struct sk_buff *skb = *pskb;
|
||||
+ unsigned int res;
|
||||
+
|
||||
+ if (skb->nfct == &route_tee_track.ct_general) {
|
||||
+ /* Loopback - a packet we already routed, is to be
|
||||
+ * routed another time. Avoid that, now.
|
||||
+ */
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: loopback - DROP!\n");
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+
|
||||
+ /* If we are at PREROUTING or INPUT hook
|
||||
+ * the TTL isn't decreased by the IP stack
|
||||
+ */
|
||||
+ if (hooknum == NF_IP_PRE_ROUTING ||
|
||||
+ hooknum == NF_IP_LOCAL_IN) {
|
||||
+
|
||||
+ struct iphdr *iph = skb->nh.iph;
|
||||
+
|
||||
+ if (iph->ttl <= 1) {
|
||||
+ struct rtable *rt;
|
||||
+ struct flowi fl = {
|
||||
+ .oif = 0,
|
||||
+ .nl_u = {
|
||||
+ .ip4_u = {
|
||||
+ .daddr = iph->daddr,
|
||||
+ .saddr = iph->saddr,
|
||||
+ .tos = RT_TOS(iph->tos),
|
||||
+ .scope = ((iph->tos & RTO_ONLINK) ?
|
||||
+ RT_SCOPE_LINK :
|
||||
+ RT_SCOPE_UNIVERSE)
|
||||
+ }
|
||||
+ }
|
||||
+ };
|
||||
+
|
||||
+ if (ip_route_output_key(&rt, &fl)) {
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+
|
||||
+ if (skb->dev == rt->u.dst.dev) {
|
||||
+ /* Drop old route. */
|
||||
+ dst_release(skb->dst);
|
||||
+ skb->dst = &rt->u.dst;
|
||||
+
|
||||
+ /* this will traverse normal stack, and
|
||||
+ * thus call conntrack on the icmp packet */
|
||||
+ icmp_send(skb, ICMP_TIME_EXCEEDED,
|
||||
+ ICMP_EXC_TTL, 0);
|
||||
+ }
|
||||
+
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * If we are at INPUT the checksum must be recalculated since
|
||||
+ * the length could change as the result of a defragmentation.
|
||||
+ */
|
||||
+ if(hooknum == NF_IP_LOCAL_IN) {
|
||||
+ iph->ttl = iph->ttl - 1;
|
||||
+ iph->check = 0;
|
||||
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
|
||||
+ } else {
|
||||
+ ip_decrease_ttl(iph);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if ((route_info->flags & IPT_ROUTE_TEE)) {
|
||||
+ /*
|
||||
+ * Copy the *pskb, and route the copy. Will later return
|
||||
+ * IPT_CONTINUE for the original skb, which should continue
|
||||
+ * on its way as if nothing happened. The copy should be
|
||||
+ * independantly delivered to the ROUTE --gw.
|
||||
+ */
|
||||
+ skb = skb_copy(*pskb, GFP_ATOMIC);
|
||||
+ if (!skb) {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: copy failed!\n");
|
||||
+ return IPT_CONTINUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /* Tell conntrack to forget this packet since it may get confused
|
||||
+ * when a packet is leaving with dst address == our address.
|
||||
+ * Good idea ? Dunno. Need advice.
|
||||
+ *
|
||||
+ * NEW: mark the skb with our &route_tee_track, so we avoid looping
|
||||
+ * on any already routed packet.
|
||||
+ */
|
||||
+ if (!(route_info->flags & IPT_ROUTE_CONTINUE)) {
|
||||
+ nf_conntrack_put(skb->nfct);
|
||||
+ skb->nfct = &route_tee_track.ct_general;
|
||||
+ skb->nfctinfo = IP_CT_NEW;
|
||||
+ nf_conntrack_get(skb->nfct);
|
||||
+ }
|
||||
+
|
||||
+ if (route_info->oif[0] != '\0') {
|
||||
+ res = route_oif(route_info, skb);
|
||||
+ } else if (route_info->iif[0] != '\0') {
|
||||
+ res = route_iif(route_info, skb);
|
||||
+ } else if (route_info->gw) {
|
||||
+ res = route_gw(route_info, skb);
|
||||
+ } else {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: no parameter !\n");
|
||||
+ res = IPT_CONTINUE;
|
||||
+ }
|
||||
+
|
||||
+ if ((route_info->flags & IPT_ROUTE_TEE))
|
||||
+ res = IPT_CONTINUE;
|
||||
+
|
||||
+ return res;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static int ipt_route_checkentry(const char *tablename,
|
||||
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
|
||||
+ const void *e,
|
||||
+#else
|
||||
+ const struct ipt_ip *ip,
|
||||
+#endif
|
||||
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
|
||||
+ const struct xt_target *target,
|
||||
+#endif
|
||||
+ void *targinfo,
|
||||
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
|
||||
+ unsigned int targinfosize,
|
||||
+#endif
|
||||
+ unsigned int hook_mask)
|
||||
+{
|
||||
+ if (strcmp(tablename, "mangle") != 0) {
|
||||
+ printk("ipt_ROUTE: bad table `%s', use the `mangle' table.\n",
|
||||
+ tablename);
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ if (hook_mask & ~( (1 << NF_IP_PRE_ROUTING)
|
||||
+ | (1 << NF_IP_LOCAL_IN)
|
||||
+ | (1 << NF_IP_FORWARD)
|
||||
+ | (1 << NF_IP_LOCAL_OUT)
|
||||
+ | (1 << NF_IP_POST_ROUTING))) {
|
||||
+ printk("ipt_ROUTE: bad hook\n");
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
|
||||
+ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_route_target_info))) {
|
||||
+ printk(KERN_WARNING "ipt_ROUTE: targinfosize %u != %Zu\n",
|
||||
+ targinfosize,
|
||||
+ IPT_ALIGN(sizeof(struct ipt_route_target_info)));
|
||||
+ return 0;
|
||||
+ }
|
||||
+#endif
|
||||
+
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static struct ipt_target ipt_route_reg = {
|
||||
+ .name = "ROUTE",
|
||||
+ .target = ipt_route_target,
|
||||
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
|
||||
+ .targetsize = sizeof(struct ipt_route_target_info),
|
||||
+#endif
|
||||
+ .checkentry = ipt_route_checkentry,
|
||||
+ .me = THIS_MODULE,
|
||||
+};
|
||||
+
|
||||
+static int __init init(void)
|
||||
+{
|
||||
+ /* Set up fake conntrack (stolen from raw.patch):
|
||||
+ - to never be deleted, not in any hashes */
|
||||
+ atomic_set(&route_tee_track.ct_general.use, 1);
|
||||
+ /* - and look it like as a confirmed connection */
|
||||
+ set_bit(IPS_CONFIRMED_BIT, &route_tee_track.status);
|
||||
+ /* Initialize fake conntrack so that NAT will skip it */
|
||||
+ route_tee_track.status |= IPS_NAT_DONE_MASK;
|
||||
+
|
||||
+ return xt_register_target(&ipt_route_reg);
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static void __exit fini(void)
|
||||
+{
|
||||
+ xt_unregister_target(&ipt_route_reg);
|
||||
+}
|
||||
+
|
||||
+module_init(init);
|
||||
+module_exit(fini);
|
||||
--- a/net/ipv4/netfilter/Kconfig
|
||||
+++ b/net/ipv4/netfilter/Kconfig
|
||||
@@ -684,5 +684,22 @@ config IP_NF_ARP_MANGLE
|
||||
Allows altering the ARP packet payload: source and destination
|
||||
hardware and network addresses.
|
||||
|
||||
+config IP_NF_TARGET_ROUTE
|
||||
+ tristate 'ROUTE target support'
|
||||
+ depends on IP_NF_MANGLE
|
||||
+ help
|
||||
+ This option adds a `ROUTE' target, which enables you to setup unusual
|
||||
+ routes. For example, the ROUTE lets you route a received packet through
|
||||
+ an interface or towards a host, even if the regular destination of the
|
||||
+ packet is the router itself. The ROUTE target is also able to change the
|
||||
+ incoming interface of a packet.
|
||||
+
|
||||
+ The target can be or not a final target. It has to be used inside the
|
||||
+ mangle table.
|
||||
+
|
||||
+ If you want to compile it as a module, say M here and read
|
||||
+ Documentation/modules.txt. The module will be called ipt_ROUTE.o.
|
||||
+ If unsure, say `N'.
|
||||
+
|
||||
endmenu
|
||||
|
||||
--- a/net/ipv4/netfilter/Makefile
|
||||
+++ b/net/ipv4/netfilter/Makefile
|
||||
@@ -100,6 +100,7 @@ obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_EC
|
||||
obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
|
||||
+obj-$(CONFIG_IP_NF_TARGET_ROUTE) += ipt_ROUTE.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
|
||||
obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
|
||||
obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
|
||||
--- a/net/ipv6/ipv6_syms.c
|
||||
+++ b/net/ipv6/ipv6_syms.c
|
||||
@@ -10,6 +10,7 @@ EXPORT_SYMBOL(icmpv6_send);
|
||||
EXPORT_SYMBOL(icmpv6_statistics);
|
||||
EXPORT_SYMBOL(icmpv6_err_convert);
|
||||
EXPORT_SYMBOL(ndisc_mc_map);
|
||||
+EXPORT_SYMBOL(nd_tbl);
|
||||
EXPORT_SYMBOL(register_inet6addr_notifier);
|
||||
EXPORT_SYMBOL(unregister_inet6addr_notifier);
|
||||
EXPORT_SYMBOL(ip6_route_output);
|
||||
--- /dev/null
|
||||
+++ b/net/ipv6/netfilter/ip6t_ROUTE.c
|
||||
@@ -0,0 +1,330 @@
|
||||
+/*
|
||||
+ * This implements the ROUTE v6 target, which enables you to setup unusual
|
||||
+ * routes not supported by the standard kernel routing table.
|
||||
+ *
|
||||
+ * Copyright (C) 2003 Cedric de Launois <delaunois@info.ucl.ac.be>
|
||||
+ *
|
||||
+ * v 1.1 2004/11/23
|
||||
+ *
|
||||
+ * This software is distributed under GNU GPL v2, 1991
|
||||
+ */
|
||||
+
|
||||
+#include <linux/module.h>
|
||||
+#include <linux/skbuff.h>
|
||||
+#include <linux/ipv6.h>
|
||||
+#include <linux/netfilter_ipv6/ip6_tables.h>
|
||||
+#include <linux/netfilter_ipv6/ip6t_ROUTE.h>
|
||||
+#include <linux/netdevice.h>
|
||||
+#include <linux/version.h>
|
||||
+#include <net/ipv6.h>
|
||||
+#include <net/ndisc.h>
|
||||
+#include <net/ip6_route.h>
|
||||
+#include <linux/icmpv6.h>
|
||||
+
|
||||
+#if 1
|
||||
+#define DEBUGP printk
|
||||
+#else
|
||||
+#define DEBUGP(format, args...)
|
||||
+#endif
|
||||
+
|
||||
+#define NIP6(addr) \
|
||||
+ ntohs((addr).s6_addr16[0]), \
|
||||
+ ntohs((addr).s6_addr16[1]), \
|
||||
+ ntohs((addr).s6_addr16[2]), \
|
||||
+ ntohs((addr).s6_addr16[3]), \
|
||||
+ ntohs((addr).s6_addr16[4]), \
|
||||
+ ntohs((addr).s6_addr16[5]), \
|
||||
+ ntohs((addr).s6_addr16[6]), \
|
||||
+ ntohs((addr).s6_addr16[7])
|
||||
+
|
||||
+/* Route the packet according to the routing keys specified in
|
||||
+ * route_info. Keys are :
|
||||
+ * - ifindex :
|
||||
+ * 0 if no oif preferred,
|
||||
+ * otherwise set to the index of the desired oif
|
||||
+ * - route_info->gw :
|
||||
+ * 0 if no gateway specified,
|
||||
+ * otherwise set to the next host to which the pkt must be routed
|
||||
+ * If success, skb->dev is the output device to which the packet must
|
||||
+ * be sent and skb->dst is not NULL
|
||||
+ *
|
||||
+ * RETURN: 1 if the packet was succesfully routed to the
|
||||
+ * destination desired
|
||||
+ * 0 if the kernel routing table could not route the packet
|
||||
+ * according to the keys specified
|
||||
+ */
|
||||
+static int
|
||||
+route6(struct sk_buff *skb,
|
||||
+ unsigned int ifindex,
|
||||
+ const struct ip6t_route_target_info *route_info)
|
||||
+{
|
||||
+ struct rt6_info *rt = NULL;
|
||||
+ struct ipv6hdr *ipv6h = skb->nh.ipv6h;
|
||||
+ struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
|
||||
+
|
||||
+ DEBUGP("ip6t_ROUTE: called with: ");
|
||||
+ DEBUGP("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ipv6h->daddr));
|
||||
+ DEBUGP("GATEWAY=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(*gw));
|
||||
+ DEBUGP("OUT=%s\n", route_info->oif);
|
||||
+
|
||||
+ if (ipv6_addr_any(gw))
|
||||
+ rt = rt6_lookup(&ipv6h->daddr, &ipv6h->saddr, ifindex, 1);
|
||||
+ else
|
||||
+ rt = rt6_lookup(gw, &ipv6h->saddr, ifindex, 1);
|
||||
+
|
||||
+ if (!rt)
|
||||
+ goto no_route;
|
||||
+
|
||||
+ DEBUGP("ip6t_ROUTE: routing gives: ");
|
||||
+ DEBUGP("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(rt->rt6i_dst.addr));
|
||||
+ DEBUGP("GATEWAY=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(rt->rt6i_gateway));
|
||||
+ DEBUGP("OUT=%s\n", rt->rt6i_dev->name);
|
||||
+
|
||||
+ if (ifindex && rt->rt6i_dev->ifindex!=ifindex)
|
||||
+ goto wrong_route;
|
||||
+
|
||||
+ if (!rt->rt6i_nexthop) {
|
||||
+ DEBUGP("ip6t_ROUTE: discovering neighbour\n");
|
||||
+ rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_dst.addr);
|
||||
+ }
|
||||
+
|
||||
+ /* Drop old route. */
|
||||
+ dst_release(skb->dst);
|
||||
+ skb->dst = &rt->u.dst;
|
||||
+ skb->dev = rt->rt6i_dev;
|
||||
+ return 1;
|
||||
+
|
||||
+ wrong_route:
|
||||
+ dst_release(&rt->u.dst);
|
||||
+ no_route:
|
||||
+ if (!net_ratelimit())
|
||||
+ return 0;
|
||||
+
|
||||
+ printk("ip6t_ROUTE: no explicit route found ");
|
||||
+ if (ifindex)
|
||||
+ printk("via interface %s ", route_info->oif);
|
||||
+ if (!ipv6_addr_any(gw))
|
||||
+ printk("via gateway %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", NIP6(*gw));
|
||||
+ printk("\n");
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+/* Stolen from ip6_output_finish
|
||||
+ * PRE : skb->dev is set to the device we are leaving by
|
||||
+ * skb->dst is not NULL
|
||||
+ * POST: the packet is sent with the link layer header pushed
|
||||
+ * the packet is destroyed
|
||||
+ */
|
||||
+static void ip_direct_send(struct sk_buff *skb)
|
||||
+{
|
||||
+ struct dst_entry *dst = skb->dst;
|
||||
+ struct hh_cache *hh = dst->hh;
|
||||
+
|
||||
+ if (hh) {
|
||||
+ read_lock_bh(&hh->hh_lock);
|
||||
+ memcpy(skb->data - 16, hh->hh_data, 16);
|
||||
+ read_unlock_bh(&hh->hh_lock);
|
||||
+ skb_push(skb, hh->hh_len);
|
||||
+ hh->hh_output(skb);
|
||||
+ } else if (dst->neighbour)
|
||||
+ dst->neighbour->output(skb);
|
||||
+ else {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: no hdr & no neighbour cache!\n");
|
||||
+ kfree_skb(skb);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static unsigned int
|
||||
+route6_oif(const struct ip6t_route_target_info *route_info,
|
||||
+ struct sk_buff *skb)
|
||||
+{
|
||||
+ unsigned int ifindex = 0;
|
||||
+ struct net_device *dev_out = NULL;
|
||||
+
|
||||
+ /* The user set the interface name to use.
|
||||
+ * Getting the current interface index.
|
||||
+ */
|
||||
+ if ((dev_out = dev_get_by_name(route_info->oif))) {
|
||||
+ ifindex = dev_out->ifindex;
|
||||
+ } else {
|
||||
+ /* Unknown interface name : packet dropped */
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP("ip6t_ROUTE: oif interface %s not found\n", route_info->oif);
|
||||
+
|
||||
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
|
||||
+ return IP6T_CONTINUE;
|
||||
+ else
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+
|
||||
+ /* Trying the standard way of routing packets */
|
||||
+ if (route6(skb, ifindex, route_info)) {
|
||||
+ dev_put(dev_out);
|
||||
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
|
||||
+ return IP6T_CONTINUE;
|
||||
+
|
||||
+ ip_direct_send(skb);
|
||||
+ return NF_STOLEN;
|
||||
+ } else
|
||||
+ return NF_DROP;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static unsigned int
|
||||
+route6_gw(const struct ip6t_route_target_info *route_info,
|
||||
+ struct sk_buff *skb)
|
||||
+{
|
||||
+ if (route6(skb, 0, route_info)) {
|
||||
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
|
||||
+ return IP6T_CONTINUE;
|
||||
+
|
||||
+ ip_direct_send(skb);
|
||||
+ return NF_STOLEN;
|
||||
+ } else
|
||||
+ return NF_DROP;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static unsigned int
|
||||
+ip6t_route_target(struct sk_buff **pskb,
|
||||
+ const struct net_device *in,
|
||||
+ const struct net_device *out,
|
||||
+ unsigned int hooknum,
|
||||
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
|
||||
+ const struct xt_target *target,
|
||||
+#endif
|
||||
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
|
||||
+ const void *targinfo,
|
||||
+ void *userinfo)
|
||||
+#else
|
||||
+ const void *targinfo)
|
||||
+#endif
|
||||
+{
|
||||
+ const struct ip6t_route_target_info *route_info = targinfo;
|
||||
+ struct sk_buff *skb = *pskb;
|
||||
+ struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
|
||||
+ unsigned int res;
|
||||
+
|
||||
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
|
||||
+ goto do_it;
|
||||
+
|
||||
+ /* If we are at PREROUTING or INPUT hook
|
||||
+ * the TTL isn't decreased by the IP stack
|
||||
+ */
|
||||
+ if (hooknum == NF_IP6_PRE_ROUTING ||
|
||||
+ hooknum == NF_IP6_LOCAL_IN) {
|
||||
+
|
||||
+ struct ipv6hdr *ipv6h = skb->nh.ipv6h;
|
||||
+
|
||||
+ if (ipv6h->hop_limit <= 1) {
|
||||
+ /* Force OUTPUT device used as source address */
|
||||
+ skb->dev = skb->dst->dev;
|
||||
+
|
||||
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED,
|
||||
+ ICMPV6_EXC_HOPLIMIT, 0, skb->dev);
|
||||
+
|
||||
+ return NF_DROP;
|
||||
+ }
|
||||
+
|
||||
+ ipv6h->hop_limit--;
|
||||
+ }
|
||||
+
|
||||
+ if ((route_info->flags & IP6T_ROUTE_TEE)) {
|
||||
+ /*
|
||||
+ * Copy the *pskb, and route the copy. Will later return
|
||||
+ * IP6T_CONTINUE for the original skb, which should continue
|
||||
+ * on its way as if nothing happened. The copy should be
|
||||
+ * independantly delivered to the ROUTE --gw.
|
||||
+ */
|
||||
+ skb = skb_copy(*pskb, GFP_ATOMIC);
|
||||
+ if (!skb) {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: copy failed!\n");
|
||||
+ return IP6T_CONTINUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+do_it:
|
||||
+ if (route_info->oif[0]) {
|
||||
+ res = route6_oif(route_info, skb);
|
||||
+ } else if (!ipv6_addr_any(gw)) {
|
||||
+ res = route6_gw(route_info, skb);
|
||||
+ } else {
|
||||
+ if (net_ratelimit())
|
||||
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: no parameter !\n");
|
||||
+ res = IP6T_CONTINUE;
|
||||
+ }
|
||||
+
|
||||
+ if ((route_info->flags & IP6T_ROUTE_TEE))
|
||||
+ res = IP6T_CONTINUE;
|
||||
+
|
||||
+ return res;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static int
|
||||
+ip6t_route_checkentry(const char *tablename,
|
||||
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
|
||||
+ const void *entry,
|
||||
+#else
|
||||
+ const struct ip6t_entry *entry
|
||||
+#endif
|
||||
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
|
||||
+ const struct xt_target *target,
|
||||
+#endif
|
||||
+ void *targinfo,
|
||||
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
|
||||
+ unsigned int targinfosize,
|
||||
+#endif
|
||||
+ unsigned int hook_mask)
|
||||
+{
|
||||
+ if (strcmp(tablename, "mangle") != 0) {
|
||||
+ printk("ip6t_ROUTE: can only be called from \"mangle\" table.\n");
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
|
||||
+ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_route_target_info))) {
|
||||
+ printk(KERN_WARNING "ip6t_ROUTE: targinfosize %u != %Zu\n",
|
||||
+ targinfosize,
|
||||
+ IP6T_ALIGN(sizeof(struct ip6t_route_target_info)));
|
||||
+ return 0;
|
||||
+ }
|
||||
+#endif
|
||||
+
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static struct ip6t_target ip6t_route_reg = {
|
||||
+ .name = "ROUTE",
|
||||
+ .target = ip6t_route_target,
|
||||
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
|
||||
+ .targetsize = sizeof(struct ip6t_route_target_info),
|
||||
+#endif
|
||||
+ .checkentry = ip6t_route_checkentry,
|
||||
+ .me = THIS_MODULE
|
||||
+};
|
||||
+
|
||||
+
|
||||
+static int __init init(void)
|
||||
+{
|
||||
+ printk(KERN_DEBUG "registering ipv6 ROUTE target\n");
|
||||
+ if (xt_register_target(&ip6t_route_reg))
|
||||
+ return -EINVAL;
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static void __exit fini(void)
|
||||
+{
|
||||
+ xt_unregister_target(&ip6t_route_reg);
|
||||
+}
|
||||
+
|
||||
+module_init(init);
|
||||
+module_exit(fini);
|
||||
+MODULE_LICENSE("GPL");
|
||||
--- a/net/ipv6/netfilter/Kconfig
|
||||
+++ b/net/ipv6/netfilter/Kconfig
|
||||
@@ -209,5 +209,18 @@ config IP6_NF_RAW
|
||||
If you want to compile it as a module, say M here and read
|
||||
<file:Documentation/modules.txt>. If unsure, say `N'.
|
||||
|
||||
+config IP6_NF_TARGET_ROUTE
|
||||
+ tristate 'ROUTE target support'
|
||||
+ depends on IP6_NF_MANGLE
|
||||
+ help
|
||||
+ This option adds a `ROUTE' target, which enables you to setup unusual
|
||||
+ routes. The ROUTE target is also able to change the incoming interface
|
||||
+ of a packet.
|
||||
+
|
||||
+ The target can be or not a final target. It has to be used inside the
|
||||
+ mangle table.
|
||||
+
|
||||
+ Not working as a module.
|
||||
+
|
||||
endmenu
|
||||
|
||||
--- a/net/ipv6/netfilter/Makefile
|
||||
+++ b/net/ipv6/netfilter/Makefile
|
||||
@@ -20,6 +20,7 @@ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_
|
||||
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
|
||||
obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
|
||||
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
|
||||
+obj-$(CONFIG_IP6_NF_TARGET_ROUTE) += ip6t_ROUTE.o
|
||||
obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
|
||||
|
||||
# objects for l3 independent conntrack
|
|
@ -1,20 +0,0 @@
|
|||
--- a/net/netfilter/Kconfig
|
||||
+++ b/net/netfilter/Kconfig
|
||||
@@ -165,7 +165,7 @@ config NF_CONNTRACK_FTP
|
||||
|
||||
config NF_CONNTRACK_H323
|
||||
tristate "H.323 protocol support (EXPERIMENTAL)"
|
||||
- depends on EXPERIMENTAL && NF_CONNTRACK && (IPV6 || IPV6=n)
|
||||
+ depends on EXPERIMENTAL && NF_CONNTRACK
|
||||
help
|
||||
H.323 is a VoIP signalling protocol from ITU-T. As one of the most
|
||||
important VoIP protocols, it is widely used by voice hardware and
|
||||
@@ -400,7 +400,7 @@ config NETFILTER_XT_TARGET_CONNSECMARK
|
||||
|
||||
config NETFILTER_XT_TARGET_TCPMSS
|
||||
tristate '"TCPMSS" target support'
|
||||
- depends on NETFILTER_XTABLES && (IPV6 || IPV6=n)
|
||||
+ depends on NETFILTER_XTABLES
|
||||
---help---
|
||||
This option adds a `TCPMSS' target, which allows you to alter the
|
||||
MSS value of TCP SYN packets, to control the maximum size for that
|
|
@ -1,789 +0,0 @@
|
|||
--- a/include/linux/pkt_sched.h
|
||||
+++ b/include/linux/pkt_sched.h
|
||||
@@ -146,8 +146,40 @@ struct tc_sfq_qopt
|
||||
*
|
||||
* The only reason for this is efficiency, it is possible
|
||||
* to change these parameters in compile time.
|
||||
+ *
|
||||
+ * If you need to play with these values use esfq instead.
|
||||
*/
|
||||
|
||||
+/* ESFQ section */
|
||||
+
|
||||
+enum
|
||||
+{
|
||||
+ /* traditional */
|
||||
+ TCA_SFQ_HASH_CLASSIC,
|
||||
+ TCA_SFQ_HASH_DST,
|
||||
+ TCA_SFQ_HASH_SRC,
|
||||
+ TCA_SFQ_HASH_FWMARK,
|
||||
+ /* direct */
|
||||
+ TCA_SFQ_HASH_DSTDIR,
|
||||
+ TCA_SFQ_HASH_SRCDIR,
|
||||
+ TCA_SFQ_HASH_FWMARKDIR,
|
||||
+ /* conntrack */
|
||||
+ TCA_SFQ_HASH_CTORIGDST,
|
||||
+ TCA_SFQ_HASH_CTORIGSRC,
|
||||
+ TCA_SFQ_HASH_CTREPLDST,
|
||||
+ TCA_SFQ_HASH_CTREPLSRC,
|
||||
+};
|
||||
+
|
||||
+struct tc_esfq_qopt
|
||||
+{
|
||||
+ unsigned quantum; /* Bytes per round allocated to flow */
|
||||
+ int perturb_period; /* Period of hash perturbation */
|
||||
+ __u32 limit; /* Maximal packets in queue */
|
||||
+ unsigned divisor; /* Hash divisor */
|
||||
+ unsigned flows; /* Maximal number of flows */
|
||||
+ unsigned hash_kind; /* Hash function to use for flow identification */
|
||||
+};
|
||||
+
|
||||
/* RED section */
|
||||
|
||||
enum
|
||||
--- a/net/sched/Kconfig
|
||||
+++ b/net/sched/Kconfig
|
||||
@@ -189,6 +189,26 @@ config NET_SCH_SFQ
|
||||
To compile this code as a module, choose M here: the
|
||||
module will be called sch_sfq.
|
||||
|
||||
+config NET_SCH_ESFQ
|
||||
+ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
|
||||
+ ---help---
|
||||
+ Say Y here if you want to use the Enhanced Stochastic Fairness
|
||||
+ Queueing (ESFQ) packet scheduling algorithm for some of your network
|
||||
+ devices or as a leaf discipline for a classful qdisc such as HTB or
|
||||
+ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
|
||||
+ references to the SFQ algorithm).
|
||||
+
|
||||
+ This is an enchanced SFQ version which allows you to control some
|
||||
+ hardcoded values in the SFQ scheduler.
|
||||
+
|
||||
+ ESFQ also adds control of the hash function used to identify packet
|
||||
+ flows. The original SFQ discipline hashes by connection; ESFQ add
|
||||
+ several other hashing methods, such as by src IP or by dst IP, which
|
||||
+ can be more fair to users in some networking situations.
|
||||
+
|
||||
+ To compile this code as a module, choose M here: the
|
||||
+ module will be called sch_esfq.
|
||||
+
|
||||
config NET_SCH_TEQL
|
||||
tristate "True Link Equalizer (TEQL)"
|
||||
---help---
|
||||
--- a/net/sched/Makefile
|
||||
+++ b/net/sched/Makefile
|
||||
@@ -23,6 +23,7 @@ obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o
|
||||
obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
|
||||
obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
|
||||
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
|
||||
+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
|
||||
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
|
||||
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
|
||||
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
|
||||
--- /dev/null
|
||||
+++ b/net/sched/sch_esfq.c
|
||||
@@ -0,0 +1,704 @@
|
||||
+/*
|
||||
+ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
|
||||
+ *
|
||||
+ * This program is free software; you can redistribute it and/or
|
||||
+ * modify it under the terms of the GNU General Public License
|
||||
+ * as published by the Free Software Foundation; either version
|
||||
+ * 2 of the License, or (at your option) any later version.
|
||||
+ *
|
||||
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
|
||||
+ *
|
||||
+ * Changes: Alexander Atanasov, <alex@ssi.bg>
|
||||
+ * Added dynamic depth,limit,divisor,hash_kind options.
|
||||
+ * Added dst and src hashes.
|
||||
+ *
|
||||
+ * Alexander Clouter, <alex@digriz.org.uk>
|
||||
+ * Ported ESFQ to Linux 2.6.
|
||||
+ *
|
||||
+ * Corey Hickey, <bugfood-c@fatooh.org>
|
||||
+ * Maintenance of the Linux 2.6 port.
|
||||
+ * Added fwmark hash (thanks to Robert Kurjata).
|
||||
+ * Added direct hashing for src, dst, and fwmark.
|
||||
+ * Added usage of jhash.
|
||||
+ *
|
||||
+ */
|
||||
+
|
||||
+#include <linux/module.h>
|
||||
+#include <asm/uaccess.h>
|
||||
+#include <asm/system.h>
|
||||
+#include <linux/bitops.h>
|
||||
+#include <linux/types.h>
|
||||
+#include <linux/kernel.h>
|
||||
+#include <linux/jiffies.h>
|
||||
+#include <linux/string.h>
|
||||
+#include <linux/mm.h>
|
||||
+#include <linux/socket.h>
|
||||
+#include <linux/sockios.h>
|
||||
+#include <linux/in.h>
|
||||
+#include <linux/errno.h>
|
||||
+#include <linux/interrupt.h>
|
||||
+#include <linux/if_ether.h>
|
||||
+#include <linux/inet.h>
|
||||
+#include <linux/netdevice.h>
|
||||
+#include <linux/etherdevice.h>
|
||||
+#include <linux/notifier.h>
|
||||
+#include <linux/init.h>
|
||||
+#include <net/ip.h>
|
||||
+#include <linux/ipv6.h>
|
||||
+#include <net/route.h>
|
||||
+#include <linux/skbuff.h>
|
||||
+#include <net/sock.h>
|
||||
+#include <net/pkt_sched.h>
|
||||
+#include <linux/jhash.h>
|
||||
+
|
||||
+#ifdef CONFIG_NF_CONNTRACK_ENABLED
|
||||
+#include <net/netfilter/nf_conntrack.h>
|
||||
+#endif
|
||||
+
|
||||
+/* Stochastic Fairness Queuing algorithm.
|
||||
+ For more comments look at sch_sfq.c.
|
||||
+ The difference is that you can change limit, depth,
|
||||
+ hash table size and choose alternate hash types.
|
||||
+
|
||||
+ classic: same as in sch_sfq.c
|
||||
+ dst: destination IP address
|
||||
+ src: source IP address
|
||||
+ fwmark: netfilter mark value
|
||||
+ dst_direct:
|
||||
+ src_direct:
|
||||
+ fwmark_direct: direct hashing of the above sources
|
||||
+ ctorigdst: original destination IP address
|
||||
+ ctorigsrc: original source IP address
|
||||
+ ctrepldst: reply destination IP address
|
||||
+ ctreplsrc: reply source IP
|
||||
+
|
||||
+*/
|
||||
+
|
||||
+
|
||||
+/* This type should contain at least SFQ_DEPTH*2 values */
|
||||
+typedef unsigned int esfq_index;
|
||||
+
|
||||
+struct esfq_head
|
||||
+{
|
||||
+ esfq_index next;
|
||||
+ esfq_index prev;
|
||||
+};
|
||||
+
|
||||
+struct esfq_sched_data
|
||||
+{
|
||||
+/* Parameters */
|
||||
+ int perturb_period;
|
||||
+ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
|
||||
+ int limit;
|
||||
+ unsigned depth;
|
||||
+ unsigned hash_divisor;
|
||||
+ unsigned hash_kind;
|
||||
+/* Variables */
|
||||
+ struct timer_list perturb_timer;
|
||||
+ int perturbation;
|
||||
+ esfq_index tail; /* Index of current slot in round */
|
||||
+ esfq_index max_depth; /* Maximal depth */
|
||||
+
|
||||
+ esfq_index *ht; /* Hash table */
|
||||
+ esfq_index *next; /* Active slots link */
|
||||
+ short *allot; /* Current allotment per slot */
|
||||
+ unsigned short *hash; /* Hash value indexed by slots */
|
||||
+ struct sk_buff_head *qs; /* Slot queue */
|
||||
+ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
|
||||
+ unsigned dyn_min; /* For dynamic divisor adjustment; minimum value seen */
|
||||
+ unsigned dyn_max; /* maximum value seen */
|
||||
+ unsigned dyn_range; /* saved range */
|
||||
+};
|
||||
+
|
||||
+/* This contains the info we will hash. */
|
||||
+struct esfq_packet_info
|
||||
+{
|
||||
+ u32 proto; /* protocol or port */
|
||||
+ u32 src; /* source from packet header */
|
||||
+ u32 dst; /* destination from packet header */
|
||||
+ u32 ctorigsrc; /* original source from conntrack */
|
||||
+ u32 ctorigdst; /* original destination from conntrack */
|
||||
+ u32 ctreplsrc; /* reply source from conntrack */
|
||||
+ u32 ctrepldst; /* reply destination from conntrack */
|
||||
+ u32 mark; /* netfilter mark (fwmark) */
|
||||
+};
|
||||
+
|
||||
+/* Hash input values directly into the "nearest" slot, taking into account the
|
||||
+ * range of input values seen. This is most useful when the hash table is at
|
||||
+ * least as large as the range of possible values.
|
||||
+ * Note: this functionality was added before the change to using jhash, and may
|
||||
+ * no longer be useful. */
|
||||
+static __inline__ unsigned esfq_hash_direct(struct esfq_sched_data *q, u32 h)
|
||||
+{
|
||||
+ /* adjust minimum and maximum */
|
||||
+ if (h < q->dyn_min || h > q->dyn_max) {
|
||||
+ q->dyn_min = h < q->dyn_min ? h : q->dyn_min;
|
||||
+ q->dyn_max = h > q->dyn_max ? h : q->dyn_max;
|
||||
+
|
||||
+ /* find new range */
|
||||
+ if ((q->dyn_range = q->dyn_max - q->dyn_min) >= q->hash_divisor)
|
||||
+ printk(KERN_WARNING "ESFQ: (direct hash) Input range %u is larger than hash "
|
||||
+ "table. See ESFQ README for details.\n", q->dyn_range);
|
||||
+ }
|
||||
+
|
||||
+ /* hash input values into slot numbers */
|
||||
+ if (q->dyn_min == q->dyn_max)
|
||||
+ return 0; /* only one value seen; avoid division by 0 */
|
||||
+ else
|
||||
+ return (h - q->dyn_min) * (q->hash_divisor - 1) / q->dyn_range;
|
||||
+}
|
||||
+
|
||||
+static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
|
||||
+{
|
||||
+ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
|
||||
+}
|
||||
+
|
||||
+static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
|
||||
+{
|
||||
+ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
|
||||
+}
|
||||
+
|
||||
+static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
|
||||
+{
|
||||
+ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
|
||||
+{
|
||||
+ struct esfq_packet_info info;
|
||||
+#ifdef CONFIG_NF_CONNTRACK_ENABLED
|
||||
+ enum ip_conntrack_info ctinfo;
|
||||
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
|
||||
+#endif
|
||||
+
|
||||
+ switch (skb->protocol) {
|
||||
+ case __constant_htons(ETH_P_IP):
|
||||
+ {
|
||||
+ struct iphdr *iph = skb->nh.iph;
|
||||
+ info.dst = iph->daddr;
|
||||
+ info.src = iph->saddr;
|
||||
+ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
|
||||
+ (iph->protocol == IPPROTO_TCP ||
|
||||
+ iph->protocol == IPPROTO_UDP ||
|
||||
+ iph->protocol == IPPROTO_SCTP ||
|
||||
+ iph->protocol == IPPROTO_DCCP ||
|
||||
+ iph->protocol == IPPROTO_ESP))
|
||||
+ info.proto = *(((u32*)iph) + iph->ihl);
|
||||
+ else
|
||||
+ info.proto = iph->protocol;
|
||||
+ break;
|
||||
+ }
|
||||
+ case __constant_htons(ETH_P_IPV6):
|
||||
+ {
|
||||
+ struct ipv6hdr *iph = skb->nh.ipv6h;
|
||||
+ /* Hash ipv6 addresses into a u32. This isn't ideal,
|
||||
+ * but the code is simple. */
|
||||
+ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
|
||||
+ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
|
||||
+ if (iph->nexthdr == IPPROTO_TCP ||
|
||||
+ iph->nexthdr == IPPROTO_UDP ||
|
||||
+ iph->nexthdr == IPPROTO_SCTP ||
|
||||
+ iph->nexthdr == IPPROTO_DCCP ||
|
||||
+ iph->nexthdr == IPPROTO_ESP)
|
||||
+ info.proto = *(u32*)&iph[1];
|
||||
+ else
|
||||
+ info.proto = iph->nexthdr;
|
||||
+ break;
|
||||
+ }
|
||||
+ default:
|
||||
+ info.dst = (u32)(unsigned long)skb->dst;
|
||||
+ info.src = (u32)(unsigned long)skb->sk;
|
||||
+ info.proto = skb->protocol;
|
||||
+ }
|
||||
+
|
||||
+ info.mark = skb->mark;
|
||||
+
|
||||
+#ifdef CONFIG_NF_CONNTRACK_ENABLED
|
||||
+ /* defaults if there is no conntrack info */
|
||||
+ info.ctorigsrc = info.src;
|
||||
+ info.ctorigdst = info.dst;
|
||||
+ info.ctreplsrc = info.dst;
|
||||
+ info.ctrepldst = info.src;
|
||||
+ /* collect conntrack info */
|
||||
+ if (ct && ct != &nf_conntrack_untracked) {
|
||||
+ if (skb->protocol == __constant_htons(ETH_P_IP)) {
|
||||
+ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
|
||||
+ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
|
||||
+ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
|
||||
+ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
|
||||
+ }
|
||||
+ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
|
||||
+ /* Again, hash ipv6 addresses into a single u32. */
|
||||
+ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
|
||||
+ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
|
||||
+ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
|
||||
+ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
|
||||
+ }
|
||||
+
|
||||
+ }
|
||||
+#endif
|
||||
+
|
||||
+ switch(q->hash_kind)
|
||||
+ {
|
||||
+ case TCA_SFQ_HASH_CLASSIC:
|
||||
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
|
||||
+ case TCA_SFQ_HASH_DST:
|
||||
+ return esfq_jhash_1word(q, info.dst);
|
||||
+ case TCA_SFQ_HASH_DSTDIR:
|
||||
+ return esfq_hash_direct(q, ntohl(info.dst));
|
||||
+ case TCA_SFQ_HASH_SRC:
|
||||
+ return esfq_jhash_1word(q, info.src);
|
||||
+ case TCA_SFQ_HASH_SRCDIR:
|
||||
+ return esfq_hash_direct(q, ntohl(info.src));
|
||||
+ case TCA_SFQ_HASH_FWMARK:
|
||||
+ return esfq_jhash_1word(q, info.mark);
|
||||
+ case TCA_SFQ_HASH_FWMARKDIR:
|
||||
+ return esfq_hash_direct(q, info.mark);
|
||||
+#ifdef CONFIG_NF_CONNTRACK_ENABLED
|
||||
+ case TCA_SFQ_HASH_CTORIGDST:
|
||||
+ return esfq_jhash_1word(q, info.ctorigdst);
|
||||
+ case TCA_SFQ_HASH_CTORIGSRC:
|
||||
+ return esfq_jhash_1word(q, info.ctorigsrc);
|
||||
+ case TCA_SFQ_HASH_CTREPLDST:
|
||||
+ return esfq_jhash_1word(q, info.ctrepldst);
|
||||
+ case TCA_SFQ_HASH_CTREPLSRC:
|
||||
+ return esfq_jhash_1word(q, info.ctreplsrc);
|
||||
+#endif
|
||||
+ default:
|
||||
+ if (net_ratelimit())
|
||||
+ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
|
||||
+ }
|
||||
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
|
||||
+}
|
||||
+
|
||||
+static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
|
||||
+{
|
||||
+ esfq_index p, n;
|
||||
+ int d = q->qs[x].qlen + q->depth;
|
||||
+
|
||||
+ p = d;
|
||||
+ n = q->dep[d].next;
|
||||
+ q->dep[x].next = n;
|
||||
+ q->dep[x].prev = p;
|
||||
+ q->dep[p].next = q->dep[n].prev = x;
|
||||
+}
|
||||
+
|
||||
+static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
|
||||
+{
|
||||
+ esfq_index p, n;
|
||||
+
|
||||
+ n = q->dep[x].next;
|
||||
+ p = q->dep[x].prev;
|
||||
+ q->dep[p].next = n;
|
||||
+ q->dep[n].prev = p;
|
||||
+
|
||||
+ if (n == p && q->max_depth == q->qs[x].qlen + 1)
|
||||
+ q->max_depth--;
|
||||
+
|
||||
+ esfq_link(q, x);
|
||||
+}
|
||||
+
|
||||
+static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
|
||||
+{
|
||||
+ esfq_index p, n;
|
||||
+ int d;
|
||||
+
|
||||
+ n = q->dep[x].next;
|
||||
+ p = q->dep[x].prev;
|
||||
+ q->dep[p].next = n;
|
||||
+ q->dep[n].prev = p;
|
||||
+ d = q->qs[x].qlen;
|
||||
+ if (q->max_depth < d)
|
||||
+ q->max_depth = d;
|
||||
+
|
||||
+ esfq_link(q, x);
|
||||
+}
|
||||
+
|
||||
+static unsigned int esfq_drop(struct Qdisc *sch)
|
||||
+{
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+ esfq_index d = q->max_depth;
|
||||
+ struct sk_buff *skb;
|
||||
+ unsigned int len;
|
||||
+
|
||||
+ /* Queue is full! Find the longest slot and
|
||||
+ drop a packet from it */
|
||||
+
|
||||
+ if (d > 1) {
|
||||
+ esfq_index x = q->dep[d+q->depth].next;
|
||||
+ skb = q->qs[x].prev;
|
||||
+ len = skb->len;
|
||||
+ __skb_unlink(skb, &q->qs[x]);
|
||||
+ kfree_skb(skb);
|
||||
+ esfq_dec(q, x);
|
||||
+ sch->q.qlen--;
|
||||
+ sch->qstats.drops++;
|
||||
+ sch->qstats.backlog -= len;
|
||||
+ return len;
|
||||
+ }
|
||||
+
|
||||
+ if (d == 1) {
|
||||
+ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
|
||||
+ d = q->next[q->tail];
|
||||
+ q->next[q->tail] = q->next[d];
|
||||
+ q->allot[q->next[d]] += q->quantum;
|
||||
+ skb = q->qs[d].prev;
|
||||
+ len = skb->len;
|
||||
+ __skb_unlink(skb, &q->qs[d]);
|
||||
+ kfree_skb(skb);
|
||||
+ esfq_dec(q, d);
|
||||
+ sch->q.qlen--;
|
||||
+ q->ht[q->hash[d]] = q->depth;
|
||||
+ sch->qstats.drops++;
|
||||
+ sch->qstats.backlog -= len;
|
||||
+ return len;
|
||||
+ }
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
|
||||
+{
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+ unsigned hash = esfq_hash(q, skb);
|
||||
+ unsigned depth = q->depth;
|
||||
+ esfq_index x;
|
||||
+
|
||||
+ x = q->ht[hash];
|
||||
+ if (x == depth) {
|
||||
+ q->ht[hash] = x = q->dep[depth].next;
|
||||
+ q->hash[x] = hash;
|
||||
+ }
|
||||
+ sch->qstats.backlog += skb->len;
|
||||
+ __skb_queue_tail(&q->qs[x], skb);
|
||||
+ esfq_inc(q, x);
|
||||
+ if (q->qs[x].qlen == 1) { /* The flow is new */
|
||||
+ if (q->tail == depth) { /* It is the first flow */
|
||||
+ q->tail = x;
|
||||
+ q->next[x] = x;
|
||||
+ q->allot[x] = q->quantum;
|
||||
+ } else {
|
||||
+ q->next[x] = q->next[q->tail];
|
||||
+ q->next[q->tail] = x;
|
||||
+ q->tail = x;
|
||||
+ }
|
||||
+ }
|
||||
+ if (++sch->q.qlen < q->limit-1) {
|
||||
+ sch->bstats.bytes += skb->len;
|
||||
+ sch->bstats.packets++;
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ esfq_drop(sch);
|
||||
+ return NET_XMIT_CN;
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+esfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
|
||||
+{
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+ unsigned hash = esfq_hash(q, skb);
|
||||
+ unsigned depth = q->depth;
|
||||
+ esfq_index x;
|
||||
+
|
||||
+ x = q->ht[hash];
|
||||
+ if (x == depth) {
|
||||
+ q->ht[hash] = x = q->dep[depth].next;
|
||||
+ q->hash[x] = hash;
|
||||
+ }
|
||||
+ sch->qstats.backlog += skb->len;
|
||||
+ __skb_queue_head(&q->qs[x], skb);
|
||||
+ esfq_inc(q, x);
|
||||
+ if (q->qs[x].qlen == 1) { /* The flow is new */
|
||||
+ if (q->tail == depth) { /* It is the first flow */
|
||||
+ q->tail = x;
|
||||
+ q->next[x] = x;
|
||||
+ q->allot[x] = q->quantum;
|
||||
+ } else {
|
||||
+ q->next[x] = q->next[q->tail];
|
||||
+ q->next[q->tail] = x;
|
||||
+ q->tail = x;
|
||||
+ }
|
||||
+ }
|
||||
+ if (++sch->q.qlen < q->limit - 1) {
|
||||
+ sch->qstats.requeues++;
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ sch->qstats.drops++;
|
||||
+ esfq_drop(sch);
|
||||
+ return NET_XMIT_CN;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
+static struct sk_buff *
|
||||
+esfq_dequeue(struct Qdisc* sch)
|
||||
+{
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+ struct sk_buff *skb;
|
||||
+ unsigned depth = q->depth;
|
||||
+ esfq_index a, old_a;
|
||||
+
|
||||
+ /* No active slots */
|
||||
+ if (q->tail == depth)
|
||||
+ return NULL;
|
||||
+
|
||||
+ a = old_a = q->next[q->tail];
|
||||
+
|
||||
+ /* Grab packet */
|
||||
+ skb = __skb_dequeue(&q->qs[a]);
|
||||
+ esfq_dec(q, a);
|
||||
+ sch->q.qlen--;
|
||||
+ sch->qstats.backlog -= skb->len;
|
||||
+
|
||||
+ /* Is the slot empty? */
|
||||
+ if (q->qs[a].qlen == 0) {
|
||||
+ q->ht[q->hash[a]] = depth;
|
||||
+ a = q->next[a];
|
||||
+ if (a == old_a) {
|
||||
+ q->tail = depth;
|
||||
+ return skb;
|
||||
+ }
|
||||
+ q->next[q->tail] = a;
|
||||
+ q->allot[a] += q->quantum;
|
||||
+ } else if ((q->allot[a] -= skb->len) <= 0) {
|
||||
+ q->tail = a;
|
||||
+ a = q->next[a];
|
||||
+ q->allot[a] += q->quantum;
|
||||
+ }
|
||||
+
|
||||
+ return skb;
|
||||
+}
|
||||
+
|
||||
+static void
|
||||
+esfq_reset(struct Qdisc* sch)
|
||||
+{
|
||||
+ struct sk_buff *skb;
|
||||
+
|
||||
+ while ((skb = esfq_dequeue(sch)) != NULL)
|
||||
+ kfree_skb(skb);
|
||||
+}
|
||||
+
|
||||
+static void esfq_perturbation(unsigned long arg)
|
||||
+{
|
||||
+ struct Qdisc *sch = (struct Qdisc*)arg;
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+
|
||||
+ q->perturbation = net_random()&0x1F;
|
||||
+
|
||||
+ if (q->perturb_period) {
|
||||
+ q->perturb_timer.expires = jiffies + q->perturb_period;
|
||||
+ add_timer(&q->perturb_timer);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static int esfq_change(struct Qdisc *sch, struct rtattr *opt)
|
||||
+{
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+ struct tc_esfq_qopt *ctl = RTA_DATA(opt);
|
||||
+ int old_perturb = q->perturb_period;
|
||||
+
|
||||
+ if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
|
||||
+ return -EINVAL;
|
||||
+
|
||||
+ sch_tree_lock(sch);
|
||||
+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
|
||||
+ q->perturb_period = ctl->perturb_period*HZ;
|
||||
+// q->hash_divisor = ctl->divisor;
|
||||
+// q->tail = q->limit = q->depth = ctl->flows;
|
||||
+
|
||||
+ if (ctl->limit)
|
||||
+ q->limit = min_t(u32, ctl->limit, q->depth);
|
||||
+
|
||||
+ if (ctl->hash_kind) {
|
||||
+ q->hash_kind = ctl->hash_kind;
|
||||
+ if (q->hash_kind != TCA_SFQ_HASH_CLASSIC)
|
||||
+ q->perturb_period = 0;
|
||||
+ }
|
||||
+
|
||||
+ // is sch_tree_lock enough to do this ?
|
||||
+ while (sch->q.qlen >= q->limit-1)
|
||||
+ esfq_drop(sch);
|
||||
+
|
||||
+ if (old_perturb)
|
||||
+ del_timer(&q->perturb_timer);
|
||||
+ if (q->perturb_period) {
|
||||
+ q->perturb_timer.expires = jiffies + q->perturb_period;
|
||||
+ add_timer(&q->perturb_timer);
|
||||
+ } else {
|
||||
+ q->perturbation = 0;
|
||||
+ }
|
||||
+ sch_tree_unlock(sch);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+static int esfq_init(struct Qdisc *sch, struct rtattr *opt)
|
||||
+{
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+ struct tc_esfq_qopt *ctl;
|
||||
+ esfq_index p = ~0U/2;
|
||||
+ int i;
|
||||
+
|
||||
+ if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
|
||||
+ return -EINVAL;
|
||||
+
|
||||
+ init_timer(&q->perturb_timer);
|
||||
+ q->perturb_timer.data = (unsigned long)sch;
|
||||
+ q->perturb_timer.function = esfq_perturbation;
|
||||
+ q->perturbation = 0;
|
||||
+ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
|
||||
+ q->max_depth = 0;
|
||||
+ q->dyn_min = ~0U; /* maximum value for this type */
|
||||
+ q->dyn_max = 0; /* dyn_min/dyn_max will be set properly upon first packet */
|
||||
+ if (opt == NULL) {
|
||||
+ q->quantum = psched_mtu(sch->dev);
|
||||
+ q->perturb_period = 0;
|
||||
+ q->hash_divisor = 1024;
|
||||
+ q->tail = q->limit = q->depth = 128;
|
||||
+
|
||||
+ } else {
|
||||
+ ctl = RTA_DATA(opt);
|
||||
+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
|
||||
+ q->perturb_period = ctl->perturb_period*HZ;
|
||||
+ q->hash_divisor = ctl->divisor ? : 1024;
|
||||
+ q->tail = q->limit = q->depth = ctl->flows ? : 128;
|
||||
+
|
||||
+ if ( q->depth > p - 1 )
|
||||
+ return -EINVAL;
|
||||
+
|
||||
+ if (ctl->limit)
|
||||
+ q->limit = min_t(u32, ctl->limit, q->depth);
|
||||
+
|
||||
+ if (ctl->hash_kind) {
|
||||
+ q->hash_kind = ctl->hash_kind;
|
||||
+ }
|
||||
+
|
||||
+ if (q->perturb_period) {
|
||||
+ q->perturb_timer.expires = jiffies + q->perturb_period;
|
||||
+ add_timer(&q->perturb_timer);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
|
||||
+ if (!q->ht)
|
||||
+ goto err_case;
|
||||
+
|
||||
+ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
|
||||
+ if (!q->dep)
|
||||
+ goto err_case;
|
||||
+ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
|
||||
+ if (!q->next)
|
||||
+ goto err_case;
|
||||
+
|
||||
+ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
|
||||
+ if (!q->allot)
|
||||
+ goto err_case;
|
||||
+ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
|
||||
+ if (!q->hash)
|
||||
+ goto err_case;
|
||||
+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
|
||||
+ if (!q->qs)
|
||||
+ goto err_case;
|
||||
+
|
||||
+ for (i=0; i< q->hash_divisor; i++)
|
||||
+ q->ht[i] = q->depth;
|
||||
+ for (i=0; i<q->depth; i++) {
|
||||
+ skb_queue_head_init(&q->qs[i]);
|
||||
+ q->dep[i+q->depth].next = i+q->depth;
|
||||
+ q->dep[i+q->depth].prev = i+q->depth;
|
||||
+ }
|
||||
+
|
||||
+ for (i=0; i<q->depth; i++)
|
||||
+ esfq_link(q, i);
|
||||
+ return 0;
|
||||
+err_case:
|
||||
+ del_timer(&q->perturb_timer);
|
||||
+ if (q->ht)
|
||||
+ kfree(q->ht);
|
||||
+ if (q->dep)
|
||||
+ kfree(q->dep);
|
||||
+ if (q->next)
|
||||
+ kfree(q->next);
|
||||
+ if (q->allot)
|
||||
+ kfree(q->allot);
|
||||
+ if (q->hash)
|
||||
+ kfree(q->hash);
|
||||
+ if (q->qs)
|
||||
+ kfree(q->qs);
|
||||
+ return -ENOBUFS;
|
||||
+}
|
||||
+
|
||||
+static void esfq_destroy(struct Qdisc *sch)
|
||||
+{
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+ del_timer(&q->perturb_timer);
|
||||
+ if(q->ht)
|
||||
+ kfree(q->ht);
|
||||
+ if(q->dep)
|
||||
+ kfree(q->dep);
|
||||
+ if(q->next)
|
||||
+ kfree(q->next);
|
||||
+ if(q->allot)
|
||||
+ kfree(q->allot);
|
||||
+ if(q->hash)
|
||||
+ kfree(q->hash);
|
||||
+ if(q->qs)
|
||||
+ kfree(q->qs);
|
||||
+}
|
||||
+
|
||||
+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
|
||||
+{
|
||||
+ struct esfq_sched_data *q = qdisc_priv(sch);
|
||||
+ unsigned char *b = skb->tail;
|
||||
+ struct tc_esfq_qopt opt;
|
||||
+
|
||||
+ opt.quantum = q->quantum;
|
||||
+ opt.perturb_period = q->perturb_period/HZ;
|
||||
+
|
||||
+ opt.limit = q->limit;
|
||||
+ opt.divisor = q->hash_divisor;
|
||||
+ opt.flows = q->depth;
|
||||
+ opt.hash_kind = q->hash_kind;
|
||||
+
|
||||
+ RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
|
||||
+
|
||||
+ return skb->len;
|
||||
+
|
||||
+rtattr_failure:
|
||||
+ skb_trim(skb, b - skb->data);
|
||||
+ return -1;
|
||||
+}
|
||||
+
|
||||
+static struct Qdisc_ops esfq_qdisc_ops =
|
||||
+{
|
||||
+ .next = NULL,
|
||||
+ .cl_ops = NULL,
|
||||
+ .id = "esfq",
|
||||
+ .priv_size = sizeof(struct esfq_sched_data),
|
||||
+ .enqueue = esfq_enqueue,
|
||||
+ .dequeue = esfq_dequeue,
|
||||
+ .requeue = esfq_requeue,
|
||||
+ .drop = esfq_drop,
|
||||
+ .init = esfq_init,
|
||||
+ .reset = esfq_reset,
|
||||
+ .destroy = esfq_destroy,
|
||||
+ .change = NULL, /* esfq_change - needs more work */
|
||||
+ .dump = esfq_dump,
|
||||
+ .owner = THIS_MODULE,
|
||||
+};
|
||||
+
|
||||
+static int __init esfq_module_init(void)
|
||||
+{
|
||||
+ return register_qdisc(&esfq_qdisc_ops);
|
||||
+}
|
||||
+static void __exit esfq_module_exit(void)
|
||||
+{
|
||||
+ unregister_qdisc(&esfq_qdisc_ops);
|
||||
+}
|
||||
+module_init(esfq_module_init)
|
||||
+module_exit(esfq_module_exit)
|
||||
+MODULE_LICENSE("GPL");
|
|
@ -1,352 +0,0 @@
|
|||
--- a/include/linux/netfilter_ipv4/ip_nat.h
|
||||
+++ b/include/linux/netfilter_ipv4/ip_nat.h
|
||||
@@ -64,6 +64,13 @@ struct ip_nat_info
|
||||
|
||||
struct ip_conntrack;
|
||||
|
||||
+/* Call input routing for SNAT-ed traffic */
|
||||
+extern unsigned int ip_nat_route_input(unsigned int hooknum,
|
||||
+ struct sk_buff **pskb,
|
||||
+ const struct net_device *in,
|
||||
+ const struct net_device *out,
|
||||
+ int (*okfn)(struct sk_buff *));
|
||||
+
|
||||
/* Set up the info structure to map into this range. */
|
||||
extern unsigned int ip_nat_setup_info(struct ip_conntrack *conntrack,
|
||||
const struct ip_nat_range *range,
|
||||
--- a/include/linux/rtnetlink.h
|
||||
+++ b/include/linux/rtnetlink.h
|
||||
@@ -293,6 +293,8 @@ struct rtnexthop
|
||||
#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
|
||||
#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
|
||||
#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
|
||||
+#define RTNH_F_SUSPECT 8 /* We don't know the real state */
|
||||
+#define RTNH_F_BADSTATE (RTNH_F_DEAD | RTNH_F_SUSPECT)
|
||||
|
||||
/* Macros to handle hexthops */
|
||||
|
||||
--- a/include/net/flow.h
|
||||
+++ b/include/net/flow.h
|
||||
@@ -19,6 +19,8 @@ struct flowi {
|
||||
struct {
|
||||
__be32 daddr;
|
||||
__be32 saddr;
|
||||
+ __u32 lsrc;
|
||||
+ __u32 gw;
|
||||
__u8 tos;
|
||||
__u8 scope;
|
||||
} ip4_u;
|
||||
@@ -43,6 +45,8 @@ struct flowi {
|
||||
#define fl6_flowlabel nl_u.ip6_u.flowlabel
|
||||
#define fl4_dst nl_u.ip4_u.daddr
|
||||
#define fl4_src nl_u.ip4_u.saddr
|
||||
+#define fl4_lsrc nl_u.ip4_u.lsrc
|
||||
+#define fl4_gw nl_u.ip4_u.gw
|
||||
#define fl4_tos nl_u.ip4_u.tos
|
||||
#define fl4_scope nl_u.ip4_u.scope
|
||||
|
||||
--- a/net/ipv4/route.c
|
||||
+++ b/net/ipv4/route.c
|
||||
@@ -1208,6 +1208,7 @@ void ip_rt_redirect(__be32 old_gw, __be3
|
||||
|
||||
/* Gateway is different ... */
|
||||
rt->rt_gateway = new_gw;
|
||||
+ if (rt->fl.fl4_gw) rt->fl.fl4_gw = new_gw;
|
||||
|
||||
/* Redirect received -> path was valid */
|
||||
dst_confirm(&rth->u.dst);
|
||||
@@ -1643,6 +1644,7 @@ static int ip_route_input_mc(struct sk_b
|
||||
rth->fl.fl4_tos = tos;
|
||||
rth->fl.mark = skb->mark;
|
||||
rth->fl.fl4_src = saddr;
|
||||
+ rth->fl.fl4_lsrc = 0;
|
||||
rth->rt_src = saddr;
|
||||
#ifdef CONFIG_NET_CLS_ROUTE
|
||||
rth->u.dst.tclassid = itag;
|
||||
@@ -1653,6 +1655,7 @@ static int ip_route_input_mc(struct sk_b
|
||||
dev_hold(rth->u.dst.dev);
|
||||
rth->idev = in_dev_get(rth->u.dst.dev);
|
||||
rth->fl.oif = 0;
|
||||
+ rth->fl.fl4_gw = 0;
|
||||
rth->rt_gateway = daddr;
|
||||
rth->rt_spec_dst= spec_dst;
|
||||
rth->rt_type = RTN_MULTICAST;
|
||||
@@ -1716,7 +1719,7 @@ static void ip_handle_martian_source(str
|
||||
static inline int __mkroute_input(struct sk_buff *skb,
|
||||
struct fib_result* res,
|
||||
struct in_device *in_dev,
|
||||
- __be32 daddr, __be32 saddr, u32 tos,
|
||||
+ __be32 daddr, __be32 saddr, u32 tos, u32 lsrc,
|
||||
struct rtable **result)
|
||||
{
|
||||
|
||||
@@ -1751,6 +1754,7 @@ static inline int __mkroute_input(struct
|
||||
flags |= RTCF_DIRECTSRC;
|
||||
|
||||
if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
|
||||
+ !lsrc &&
|
||||
(IN_DEV_SHARED_MEDIA(out_dev) ||
|
||||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
|
||||
flags |= RTCF_DOREDIRECT;
|
||||
@@ -1788,6 +1792,7 @@ static inline int __mkroute_input(struct
|
||||
rth->fl.mark = skb->mark;
|
||||
rth->fl.fl4_src = saddr;
|
||||
rth->rt_src = saddr;
|
||||
+ rth->fl.fl4_lsrc = lsrc;
|
||||
rth->rt_gateway = daddr;
|
||||
rth->rt_iif =
|
||||
rth->fl.iif = in_dev->dev->ifindex;
|
||||
@@ -1795,6 +1800,7 @@ static inline int __mkroute_input(struct
|
||||
dev_hold(rth->u.dst.dev);
|
||||
rth->idev = in_dev_get(rth->u.dst.dev);
|
||||
rth->fl.oif = 0;
|
||||
+ rth->fl.fl4_gw = 0;
|
||||
rth->rt_spec_dst= spec_dst;
|
||||
|
||||
rth->u.dst.input = ip_forward;
|
||||
@@ -1816,19 +1822,21 @@ static inline int ip_mkroute_input_def(s
|
||||
struct fib_result* res,
|
||||
const struct flowi *fl,
|
||||
struct in_device *in_dev,
|
||||
- __be32 daddr, __be32 saddr, u32 tos)
|
||||
+ __be32 daddr, __be32 saddr, u32 tos,
|
||||
+ u32 lsrc)
|
||||
{
|
||||
struct rtable* rth = NULL;
|
||||
int err;
|
||||
unsigned hash;
|
||||
|
||||
+ fib_select_default(fl, res);
|
||||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
|
||||
- if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
|
||||
+ if (res->fi && res->fi->fib_nhs > 1)
|
||||
fib_select_multipath(fl, res);
|
||||
#endif
|
||||
|
||||
/* create a routing cache entry */
|
||||
- err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
|
||||
+ err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, lsrc, &rth);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -1841,7 +1849,8 @@ static inline int ip_mkroute_input(struc
|
||||
struct fib_result* res,
|
||||
const struct flowi *fl,
|
||||
struct in_device *in_dev,
|
||||
- __be32 daddr, __be32 saddr, u32 tos)
|
||||
+ __be32 daddr, __be32 saddr, u32 tos,
|
||||
+ u32 lsrc)
|
||||
{
|
||||
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
|
||||
struct rtable* rth = NULL, *rtres;
|
||||
@@ -1857,7 +1866,7 @@ static inline int ip_mkroute_input(struc
|
||||
/* distinguish between multipath and singlepath */
|
||||
if (hopcount < 2)
|
||||
return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
|
||||
- saddr, tos);
|
||||
+ saddr, tos, 0);
|
||||
|
||||
/* add all alternatives to the routing cache */
|
||||
for (hop = 0; hop < hopcount; hop++) {
|
||||
@@ -1869,7 +1878,7 @@ static inline int ip_mkroute_input(struc
|
||||
|
||||
/* create a routing cache entry */
|
||||
err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
|
||||
- &rth);
|
||||
+ 0, &rth);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -1889,7 +1898,7 @@ static inline int ip_mkroute_input(struc
|
||||
skb->dst = &rtres->u.dst;
|
||||
return err;
|
||||
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
|
||||
- return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
|
||||
+ return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos, lsrc);
|
||||
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
|
||||
}
|
||||
|
||||
@@ -1905,18 +1914,18 @@ static inline int ip_mkroute_input(struc
|
||||
*/
|
||||
|
||||
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
|
||||
- u8 tos, struct net_device *dev)
|
||||
+ u8 tos, struct net_device *dev, u32 lsrc)
|
||||
{
|
||||
struct fib_result res;
|
||||
struct in_device *in_dev = in_dev_get(dev);
|
||||
struct flowi fl = { .nl_u = { .ip4_u =
|
||||
{ .daddr = daddr,
|
||||
- .saddr = saddr,
|
||||
+ .saddr = lsrc ? : saddr,
|
||||
.tos = tos,
|
||||
.scope = RT_SCOPE_UNIVERSE,
|
||||
} },
|
||||
.mark = skb->mark,
|
||||
- .iif = dev->ifindex };
|
||||
+ .iif = lsrc? loopback_dev.ifindex : dev->ifindex };
|
||||
unsigned flags = 0;
|
||||
u32 itag = 0;
|
||||
struct rtable * rth;
|
||||
@@ -1949,6 +1958,12 @@ static int ip_route_input_slow(struct sk
|
||||
if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
|
||||
goto martian_destination;
|
||||
|
||||
+ if (lsrc) {
|
||||
+ if (MULTICAST(lsrc) || BADCLASS(lsrc) ||
|
||||
+ ZERONET(lsrc) || LOOPBACK(lsrc))
|
||||
+ goto e_inval;
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* Now we are ready to route packet.
|
||||
*/
|
||||
@@ -1958,6 +1973,10 @@ static int ip_route_input_slow(struct sk
|
||||
goto no_route;
|
||||
}
|
||||
free_res = 1;
|
||||
+ if (lsrc && res.type != RTN_UNICAST && res.type != RTN_NAT)
|
||||
+ goto e_inval;
|
||||
+ fl.iif = dev->ifindex;
|
||||
+ fl.fl4_src = saddr;
|
||||
|
||||
RT_CACHE_STAT_INC(in_slow_tot);
|
||||
|
||||
@@ -1982,7 +2001,7 @@ static int ip_route_input_slow(struct sk
|
||||
if (res.type != RTN_UNICAST)
|
||||
goto martian_destination;
|
||||
|
||||
- err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
|
||||
+ err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos, lsrc);
|
||||
if (err == -ENOBUFS)
|
||||
goto e_nobufs;
|
||||
if (err == -EINVAL)
|
||||
@@ -1997,6 +2016,8 @@ out: return err;
|
||||
brd_input:
|
||||
if (skb->protocol != htons(ETH_P_IP))
|
||||
goto e_inval;
|
||||
+ if (lsrc)
|
||||
+ goto e_inval;
|
||||
|
||||
if (ZERONET(saddr))
|
||||
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
|
||||
@@ -2037,6 +2058,7 @@ local_input:
|
||||
rth->u.dst.dev = &loopback_dev;
|
||||
dev_hold(rth->u.dst.dev);
|
||||
rth->idev = in_dev_get(rth->u.dst.dev);
|
||||
+ rth->fl.fl4_gw = 0;
|
||||
rth->rt_gateway = daddr;
|
||||
rth->rt_spec_dst= spec_dst;
|
||||
rth->u.dst.input= ip_local_deliver;
|
||||
@@ -2086,8 +2108,9 @@ martian_source:
|
||||
goto e_inval;
|
||||
}
|
||||
|
||||
-int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
|
||||
- u8 tos, struct net_device *dev)
|
||||
+static inline int
|
||||
+ip_route_input_cached(struct sk_buff *skb, __be32 daddr, __be32 saddr,
|
||||
+ u8 tos, struct net_device *dev, u32 lsrc)
|
||||
{
|
||||
struct rtable * rth;
|
||||
unsigned hash;
|
||||
@@ -2102,6 +2125,7 @@ int ip_route_input(struct sk_buff *skb,
|
||||
if (rth->fl.fl4_dst == daddr &&
|
||||
rth->fl.fl4_src == saddr &&
|
||||
rth->fl.iif == iif &&
|
||||
+ rth->fl.fl4_lsrc == lsrc &&
|
||||
rth->fl.oif == 0 &&
|
||||
rth->fl.mark == skb->mark &&
|
||||
rth->fl.fl4_tos == tos) {
|
||||
@@ -2148,7 +2172,19 @@ int ip_route_input(struct sk_buff *skb,
|
||||
rcu_read_unlock();
|
||||
return -EINVAL;
|
||||
}
|
||||
- return ip_route_input_slow(skb, daddr, saddr, tos, dev);
|
||||
+ return ip_route_input_slow(skb, daddr, saddr, tos, dev, lsrc);
|
||||
+}
|
||||
+
|
||||
+int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
|
||||
+ u8 tos, struct net_device *dev)
|
||||
+{
|
||||
+ return ip_route_input_cached(skb, daddr, saddr, tos, dev, 0);
|
||||
+}
|
||||
+
|
||||
+int ip_route_input_lookup(struct sk_buff *skb, u32 daddr, u32 saddr,
|
||||
+ u8 tos, struct net_device *dev, u32 lsrc)
|
||||
+{
|
||||
+ return ip_route_input_cached(skb, daddr, saddr, tos, dev, lsrc);
|
||||
}
|
||||
|
||||
static inline int __mkroute_output(struct rtable **result,
|
||||
@@ -2227,6 +2263,7 @@ static inline int __mkroute_output(struc
|
||||
rth->fl.fl4_tos = tos;
|
||||
rth->fl.fl4_src = oldflp->fl4_src;
|
||||
rth->fl.oif = oldflp->oif;
|
||||
+ rth->fl.fl4_gw = oldflp->fl4_gw;
|
||||
rth->fl.mark = oldflp->mark;
|
||||
rth->rt_dst = fl->fl4_dst;
|
||||
rth->rt_src = fl->fl4_src;
|
||||
@@ -2367,6 +2404,7 @@ static int ip_route_output_slow(struct r
|
||||
struct flowi fl = { .nl_u = { .ip4_u =
|
||||
{ .daddr = oldflp->fl4_dst,
|
||||
.saddr = oldflp->fl4_src,
|
||||
+ .gw = oldflp->fl4_gw,
|
||||
.tos = tos & IPTOS_RT_MASK,
|
||||
.scope = ((tos & RTO_ONLINK) ?
|
||||
RT_SCOPE_LINK :
|
||||
@@ -2470,6 +2508,7 @@ static int ip_route_output_slow(struct r
|
||||
dev_out = &loopback_dev;
|
||||
dev_hold(dev_out);
|
||||
fl.oif = loopback_dev.ifindex;
|
||||
+ fl.fl4_gw = 0;
|
||||
res.type = RTN_LOCAL;
|
||||
flags |= RTCF_LOCAL;
|
||||
goto make_route;
|
||||
@@ -2477,7 +2516,7 @@ static int ip_route_output_slow(struct r
|
||||
|
||||
if (fib_lookup(&fl, &res)) {
|
||||
res.fi = NULL;
|
||||
- if (oldflp->oif) {
|
||||
+ if (oldflp->oif && dev_out->flags & IFF_UP) {
|
||||
/* Apparently, routing tables are wrong. Assume,
|
||||
that the destination is on link.
|
||||
|
||||
@@ -2517,6 +2556,7 @@ static int ip_route_output_slow(struct r
|
||||
dev_out = &loopback_dev;
|
||||
dev_hold(dev_out);
|
||||
fl.oif = dev_out->ifindex;
|
||||
+ fl.fl4_gw = 0;
|
||||
if (res.fi)
|
||||
fib_info_put(res.fi);
|
||||
res.fi = NULL;
|
||||
@@ -2524,13 +2564,12 @@ static int ip_route_output_slow(struct r
|
||||
goto make_route;
|
||||
}
|
||||
|
||||
+ if (res.type == RTN_UNICAST)
|
||||
+ fib_select_default(&fl, &res);
|
||||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
|
||||
- if (res.fi->fib_nhs > 1 && fl.oif == 0)
|
||||
+ if (res.fi->fib_nhs > 1)
|
||||
fib_select_multipath(&fl, &res);
|
||||
- else
|
||||
#endif
|
||||
- if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
|
||||
- fib_select_default(&fl, &res);
|
||||
|
||||
if (!fl.fl4_src)
|
||||
fl.fl4_src = FIB_RES_PREFSRC(res);
|
||||
@@ -2567,6 +2606,7 @@ int __ip_route_output_key(struct rtable
|
||||
rth->fl.fl4_src == flp->fl4_src &&
|
||||
rth->fl.iif == 0 &&
|
||||
rth->fl.oif == flp->oif &&
|
||||
+ rth->fl.fl4_gw == flp->fl4_gw &&
|
||||
rth->fl.mark == flp->mark &&
|
||||
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
|
||||
(IPTOS_RT_MASK | RTO_ONLINK))) {
|
||||
@@ -3199,3 +3239,4 @@ int __init ip_rt_init(void)
|
||||
EXPORT_SYMBOL(__ip_select_ident);
|
||||
EXPORT_SYMBOL(ip_route_input);
|
||||
EXPORT_SYMBOL(ip_route_output_key);
|
||||
+EXPORT_SYMBOL(ip_route_input_lookup);
|
|
@ -1,12 +0,0 @@
|
|||
--- a/arch/mips/Makefile
|
||||
+++ b/arch/mips/Makefile
|
||||
@@ -605,6 +605,9 @@ core-$(CONFIG_TOSHIBA_RBTX4938) += arch/
|
||||
core-$(CONFIG_TOSHIBA_RBTX4938) += arch/mips/tx4938/common/
|
||||
load-$(CONFIG_TOSHIBA_RBTX4938) += 0xffffffff80100000
|
||||
|
||||
+# temporary until string.h is fixed
|
||||
+cflags-y += -ffreestanding
|
||||
+
|
||||
cflags-y += -Iinclude/asm-mips/mach-generic
|
||||
drivers-$(CONFIG_PCI) += arch/mips/pci/
|
||||
|
|
@ -1,56 +0,0 @@
|
|||
--- a/fs/jffs2/build.c
|
||||
+++ b/fs/jffs2/build.c
|
||||
@@ -107,6 +107,17 @@ static int jffs2_build_filesystem(struct
|
||||
dbg_fsbuild("scanned flash completely\n");
|
||||
jffs2_dbg_dump_block_lists_nolock(c);
|
||||
|
||||
+ if (c->flags & (1 << 7)) {
|
||||
+ printk("%s(): unlocking the mtd device... ", __func__);
|
||||
+ if (c->mtd->unlock)
|
||||
+ c->mtd->unlock(c->mtd, 0, c->mtd->size);
|
||||
+ printk("done.\n");
|
||||
+
|
||||
+ printk("%s(): erasing all blocks after the end marker... ", __func__);
|
||||
+ jffs2_erase_pending_blocks(c, -1);
|
||||
+ printk("done.\n");
|
||||
+ }
|
||||
+
|
||||
dbg_fsbuild("pass 1 starting\n");
|
||||
c->flags |= JFFS2_SB_FLAG_BUILDING;
|
||||
/* Now scan the directory tree, increasing nlink according to every dirent found. */
|
||||
--- a/fs/jffs2/scan.c
|
||||
+++ b/fs/jffs2/scan.c
|
||||
@@ -143,9 +143,12 @@ int jffs2_scan_medium(struct jffs2_sb_in
|
||||
|
||||
/* reset summary info for next eraseblock scan */
|
||||
jffs2_sum_reset_collected(s);
|
||||
-
|
||||
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
|
||||
- buf_size, s);
|
||||
+
|
||||
+ if (c->flags & (1 << 7))
|
||||
+ ret = BLK_STATE_ALLFF;
|
||||
+ else
|
||||
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
|
||||
+ buf_size, s);
|
||||
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@@ -546,6 +549,17 @@ static int jffs2_scan_eraseblock (struct
|
||||
return err;
|
||||
}
|
||||
|
||||
+ if ((buf[0] == 0xde) &&
|
||||
+ (buf[1] == 0xad) &&
|
||||
+ (buf[2] == 0xc0) &&
|
||||
+ (buf[3] == 0xde)) {
|
||||
+ /* end of filesystem. erase everything after this point */
|
||||
+ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
|
||||
+ c->flags |= (1 << 7);
|
||||
+
|
||||
+ return BLK_STATE_ALLFF;
|
||||
+ }
|
||||
+
|
||||
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
|
||||
ofs = 0;
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
--- /dev/null
|
||||
+++ b/include/asm-powerpc/segment.h
|
||||
@@ -0,0 +1,6 @@
|
||||
+#ifndef _ASM_SEGMENT_H
|
||||
+#define _ASM_SEGMENT_H
|
||||
+
|
||||
+/* Only here because we have some old header files that expect it.. */
|
||||
+
|
||||
+#endif /* _ASM_SEGMENT_H */
|
|
@ -1,24 +0,0 @@
|
|||
--- a/drivers/net/r8169.c
|
||||
+++ b/drivers/net/r8169.c
|
||||
@@ -494,7 +494,7 @@ static int rtl8169_poll(struct net_devic
|
||||
#endif
|
||||
|
||||
static const u16 rtl8169_intr_mask =
|
||||
- SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
|
||||
+ LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
|
||||
static const u16 rtl8169_napi_event =
|
||||
RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
|
||||
static const unsigned int rtl8169_rx_config =
|
||||
@@ -2652,10 +2652,12 @@ rtl8169_interrupt(int irq, void *dev_ins
|
||||
if (!(status & rtl8169_intr_mask))
|
||||
break;
|
||||
|
||||
+#if 0
|
||||
if (unlikely(status & SYSErr)) {
|
||||
rtl8169_pcierr_interrupt(dev);
|
||||
break;
|
||||
}
|
||||
+#endif
|
||||
|
||||
if (status & LinkChg)
|
||||
rtl8169_check_link_status(dev, tp, ioaddr);
|
File diff suppressed because it is too large
Load Diff
|
@ -1,11 +0,0 @@
|
|||
--- a/include/linux/netdevice.h
|
||||
+++ b/include/linux/netdevice.h
|
||||
@@ -532,6 +532,8 @@ struct net_device
|
||||
struct device dev;
|
||||
/* space for optional statistics and wireless sysfs groups */
|
||||
struct attribute_group *sysfs_groups[3];
|
||||
+
|
||||
+ void *ieee80211_ptr;
|
||||
};
|
||||
#define to_net_dev(d) container_of(d, struct net_device, dev)
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
--- a/drivers/mtd/devices/block2mtd.c
|
||||
+++ b/drivers/mtd/devices/block2mtd.c
|
||||
@@ -41,7 +41,7 @@ struct block2mtd_dev {
|
||||
static LIST_HEAD(blkmtd_device_list);
|
||||
|
||||
|
||||
-#define PAGE_READAHEAD 64
|
||||
+#define PAGE_READAHEAD 0
|
||||
static void cache_readahead(struct address_space *mapping, int index)
|
||||
{
|
||||
filler_t *filler = (filler_t*)mapping->a_ops->readpage;
|
|
@ -1,11 +0,0 @@
|
|||
--- a/drivers/mtd/devices/block2mtd.c
|
||||
+++ b/drivers/mtd/devices/block2mtd.c
|
||||
@@ -112,7 +112,7 @@ static int _block2mtd_erase(struct block
|
||||
if (IS_ERR(page))
|
||||
return PTR_ERR(page);
|
||||
|
||||
- max = (u_long*)page_address(page) + PAGE_SIZE;
|
||||
+ max = (u_long*) ((u8 *) page_address(page) + PAGE_SIZE);
|
||||
for (p=(u_long*)page_address(page); p<max; p++)
|
||||
if (*p != -1UL) {
|
||||
lock_page(page);
|
|
@ -1,37 +0,0 @@
|
|||
--- a/lib/kobject_uevent.c
|
||||
+++ b/lib/kobject_uevent.c
|
||||
@@ -30,9 +30,22 @@ u64 uevent_seqnum;
|
||||
char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug";
|
||||
static DEFINE_SPINLOCK(sequence_lock);
|
||||
#if defined(CONFIG_NET)
|
||||
-static struct sock *uevent_sock;
|
||||
+struct sock *uevent_sock = NULL;
|
||||
+EXPORT_SYMBOL_GPL(uevent_sock);
|
||||
#endif
|
||||
|
||||
+u64 uevent_next_seqnum(void)
|
||||
+{
|
||||
+ u64 seq;
|
||||
+
|
||||
+ spin_lock(&sequence_lock);
|
||||
+ seq = ++uevent_seqnum;
|
||||
+ spin_unlock(&sequence_lock);
|
||||
+
|
||||
+ return seq;
|
||||
+}
|
||||
+EXPORT_SYMBOL_GPL(uevent_next_seqnum);
|
||||
+
|
||||
static char *action_to_string(enum kobject_action action)
|
||||
{
|
||||
switch (action) {
|
||||
@@ -171,9 +184,7 @@ int kobject_uevent_env(struct kobject *k
|
||||
}
|
||||
|
||||
/* we will send an event, request a new sequence number */
|
||||
- spin_lock(&sequence_lock);
|
||||
- seq = ++uevent_seqnum;
|
||||
- spin_unlock(&sequence_lock);
|
||||
+ seq = uevent_next_seqnum();
|
||||
sprintf(seq_buff, "SEQNUM=%llu", (unsigned long long)seq);
|
||||
|
||||
#if defined(CONFIG_NET)
|
|
@ -1,29 +0,0 @@
|
|||
--- a/scripts/unifdef.c
|
||||
+++ b/scripts/unifdef.c
|
||||
@@ -206,7 +206,7 @@ static void done(void);
|
||||
static void error(const char *);
|
||||
static int findsym(const char *);
|
||||
static void flushline(bool);
|
||||
-static Linetype getline(void);
|
||||
+static Linetype get_line(void);
|
||||
static Linetype ifeval(const char **);
|
||||
static void ignoreoff(void);
|
||||
static void ignoreon(void);
|
||||
@@ -512,7 +512,7 @@ process(void)
|
||||
|
||||
for (;;) {
|
||||
linenum++;
|
||||
- lineval = getline();
|
||||
+ lineval = get_line();
|
||||
trans_table[ifstate[depth]][lineval]();
|
||||
debug("process %s -> %s depth %d",
|
||||
linetype_name[lineval],
|
||||
@@ -526,7 +526,7 @@ process(void)
|
||||
* help from skipcomment().
|
||||
*/
|
||||
static Linetype
|
||||
-getline(void)
|
||||
+get_line(void)
|
||||
{
|
||||
const char *cp;
|
||||
int cursym;
|
|
@ -1,21 +0,0 @@
|
|||
[MIPS] Fix computation of {PGD,PMD,PTE}_T_LOG2.
|
||||
|
||||
For the generation of asm-offset.h to work these need to be evaulatable
|
||||
by gcc as a constant expression. This issue did exist for a while but
|
||||
didn't bite because they're only in asm-offset.h for debugging purposes.
|
||||
|
||||
--- a/include/asm-mips/pgtable.h
|
||||
+++ b/include/asm-mips/pgtable.h
|
||||
@@ -168,9 +168,9 @@ static inline void pte_clear(struct mm_s
|
||||
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
|
||||
#endif
|
||||
|
||||
-#define PGD_T_LOG2 ffz(~sizeof(pgd_t))
|
||||
-#define PMD_T_LOG2 ffz(~sizeof(pmd_t))
|
||||
-#define PTE_T_LOG2 ffz(~sizeof(pte_t))
|
||||
+#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
|
||||
+#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
|
||||
+#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
|
||||
|
||||
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
time: prevent the loop in timespec_add_ns() from being optimised away
|
||||
|
||||
Since some architectures don't support __udivdi3().
|
||||
|
||||
--- a/include/linux/time.h
|
||||
+++ b/include/linux/time.h
|
||||
@@ -170,6 +170,10 @@ static inline void timespec_add_ns(struc
|
||||
{
|
||||
ns += a->tv_nsec;
|
||||
while(unlikely(ns >= NSEC_PER_SEC)) {
|
||||
+ /* The following asm() prevents the compiler from
|
||||
+ * optimising this loop into a modulo operation. */
|
||||
+ asm("" : "+r"(ns));
|
||||
+
|
||||
ns -= NSEC_PER_SEC;
|
||||
a->tv_sec++;
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
--- a/drivers/char/Kconfig
|
||||
+++ b/drivers/char/Kconfig
|
||||
@@ -984,6 +984,13 @@ config CS5535_GPIO
|
||||
|
||||
If compiled as a module, it will be called cs5535_gpio.
|
||||
|
||||
+config GPIO_DEVICE
|
||||
+ tristate "GPIO device support"
|
||||
+ depends on GENERIC_GPIO
|
||||
+ help
|
||||
+ Say Y to enable Linux GPIO device support. This allows control of
|
||||
+ GPIO pins using a character device
|
||||
+
|
||||
config GPIO_VR41XX
|
||||
tristate "NEC VR4100 series General-purpose I/O Unit support"
|
||||
depends on CPU_VR41XX
|
||||
--- a/drivers/char/Makefile
|
||||
+++ b/drivers/char/Makefile
|
||||
@@ -90,6 +90,7 @@ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio
|
||||
obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
|
||||
obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
|
||||
obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
|
||||
+obj-$(CONFIG_GPIO_DEVICE) += gpio_dev.o
|
||||
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
|
||||
obj-$(CONFIG_TANBAC_TB0219) += tb0219.o
|
||||
obj-$(CONFIG_TELCLOCK) += tlclk.o
|
|
@ -1,17 +0,0 @@
|
|||
--- a/fs/Kconfig
|
||||
+++ b/fs/Kconfig
|
||||
@@ -419,6 +419,7 @@ config FS_POSIX_ACL
|
||||
|
||||
source "fs/xfs/Kconfig"
|
||||
source "fs/gfs2/Kconfig"
|
||||
+source "fs/yaffs2/Kconfig"
|
||||
|
||||
config OCFS2_FS
|
||||
tristate "OCFS2 file system support"
|
||||
--- a/fs/Makefile
|
||||
+++ b/fs/Makefile
|
||||
@@ -116,3 +116,4 @@ obj-$(CONFIG_HPPFS) += hppfs/
|
||||
obj-$(CONFIG_DEBUG_FS) += debugfs/
|
||||
obj-$(CONFIG_OCFS2_FS) += ocfs2/
|
||||
obj-$(CONFIG_GFS2_FS) += gfs2/
|
||||
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
|
|
@ -1,80 +0,0 @@
|
|||
--- a/fs/yaffs2/yaffs_fs.c
|
||||
+++ b/fs/yaffs2/yaffs_fs.c
|
||||
@@ -965,7 +965,7 @@ static int yaffs_readdir(struct file *f,
|
||||
f->f_version = inode->i_version;
|
||||
}
|
||||
|
||||
- list_for_each(i, &obj->variant.directoryVariant.children) {
|
||||
+ list_for_each(i, (struct list_head *)&obj->variant.directoryVariant.children) {
|
||||
curoffs++;
|
||||
if (curoffs >= offset) {
|
||||
l = list_entry(i, yaffs_Object, siblings);
|
||||
@@ -1269,7 +1269,7 @@ static int yaffs_rename(struct inode *ol
|
||||
|
||||
if (target &&
|
||||
target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
|
||||
- !list_empty(&target->variant.directoryVariant.children)) {
|
||||
+ !list_empty((struct list_head *)&target->variant.directoryVariant.children)) {
|
||||
|
||||
T(YAFFS_TRACE_OS, (KERN_DEBUG "target is non-empty dir\n"));
|
||||
|
||||
@@ -1503,7 +1503,7 @@ static void yaffs_put_super(struct super
|
||||
yaffs_GrossUnlock(dev);
|
||||
|
||||
/* we assume this is protected by lock_kernel() in mount/umount */
|
||||
- list_del(&dev->devList);
|
||||
+ list_del((struct list_head *)&dev->devList);
|
||||
|
||||
if(dev->spareBuffer){
|
||||
YFREE(dev->spareBuffer);
|
||||
@@ -1847,7 +1847,7 @@ static struct super_block *yaffs_interna
|
||||
dev->skipCheckpointWrite = options.skip_checkpoint_write;
|
||||
|
||||
/* we assume this is protected by lock_kernel() in mount/umount */
|
||||
- list_add_tail(&dev->devList, &yaffs_dev_list);
|
||||
+ list_add_tail((struct list_head *)&dev->devList, &yaffs_dev_list);
|
||||
|
||||
init_MUTEX(&dev->grossLock);
|
||||
|
||||
--- a/fs/yaffs2/yaffs_mtdif1.c
|
||||
+++ b/fs/yaffs2/yaffs_mtdif1.c
|
||||
@@ -323,7 +323,7 @@ static int nandmtd1_TestPrerequists(stru
|
||||
* Always returns YAFFS_OK.
|
||||
*/
|
||||
int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
|
||||
- yaffs_BlockState * pState, int *pSequenceNumber)
|
||||
+ yaffs_BlockState * pState, __u32 *pSequenceNumber)
|
||||
{
|
||||
struct mtd_info * mtd = dev->genericDevice;
|
||||
int chunkNo = blockNo * dev->nChunksPerBlock;
|
||||
--- a/fs/yaffs2/yaffs_mtdif1.h
|
||||
+++ b/fs/yaffs2/yaffs_mtdif1.h
|
||||
@@ -23,6 +23,6 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
|
||||
int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
|
||||
|
||||
int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
|
||||
- yaffs_BlockState * state, int *sequenceNumber);
|
||||
+ yaffs_BlockState * state, __u32 *sequenceNumber);
|
||||
|
||||
#endif
|
||||
--- a/fs/yaffs2/yaffs_mtdif2.c
|
||||
+++ b/fs/yaffs2/yaffs_mtdif2.c
|
||||
@@ -188,7 +188,7 @@ int nandmtd2_MarkNANDBlockBad(struct yaf
|
||||
}
|
||||
|
||||
int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
|
||||
- yaffs_BlockState * state, int *sequenceNumber)
|
||||
+ yaffs_BlockState * state, __u32 *sequenceNumber)
|
||||
{
|
||||
struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
|
||||
int retval;
|
||||
--- a/fs/yaffs2/yaffs_mtdif2.h
|
||||
+++ b/fs/yaffs2/yaffs_mtdif2.h
|
||||
@@ -24,6 +24,6 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
|
||||
__u8 * data, yaffs_ExtendedTags * tags);
|
||||
int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
|
||||
int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
|
||||
- yaffs_BlockState * state, int *sequenceNumber);
|
||||
+ yaffs_BlockState * state, __u32 *sequenceNumber);
|
||||
|
||||
#endif
|
|
@ -1,46 +0,0 @@
|
|||
--- a/include/linux/time.h
|
||||
+++ b/include/linux/time.h
|
||||
@@ -1,6 +1,10 @@
|
||||
#ifndef _LINUX_TIME_H
|
||||
#define _LINUX_TIME_H
|
||||
|
||||
+#ifndef __KERNEL__
|
||||
+#include <time.h>
|
||||
+#else
|
||||
+
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
@@ -228,4 +232,6 @@ struct itimerval {
|
||||
*/
|
||||
#define TIMER_ABSTIME 0x01
|
||||
|
||||
+#endif /* __KERNEL__ DEBIAN */
|
||||
+
|
||||
#endif
|
||||
--- a/include/linux/types.h
|
||||
+++ b/include/linux/types.h
|
||||
@@ -1,6 +1,14 @@
|
||||
#ifndef _LINUX_TYPES_H
|
||||
#define _LINUX_TYPES_H
|
||||
|
||||
+/* Debian: Use userland types instead. */
|
||||
+#ifndef __KERNEL__
|
||||
+# include <sys/types.h>
|
||||
+/* For other kernel headers. */
|
||||
+# include <linux/posix_types.h>
|
||||
+# include <asm/types.h>
|
||||
+#else
|
||||
+
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#define BITS_TO_LONGS(bits) \
|
||||
@@ -162,6 +170,8 @@ typedef unsigned long blkcnt_t;
|
||||
|
||||
#endif /* __KERNEL_STRICT_NAMES */
|
||||
|
||||
+#endif /* __KERNEL__ DEBIAN */
|
||||
+
|
||||
/*
|
||||
* Below are truly Linux-specific types that should never collide with
|
||||
* any application/library that wants linux/types.h.
|
|
@ -1,138 +0,0 @@
|
|||
--- a/scripts/genksyms/parse.c_shipped
|
||||
+++ b/scripts/genksyms/parse.c_shipped
|
||||
@@ -144,7 +144,9 @@
|
||||
|
||||
|
||||
#include <assert.h>
|
||||
+#ifndef __APPLE__
|
||||
#include <malloc.h>
|
||||
+#endif
|
||||
#include "genksyms.h"
|
||||
|
||||
static int is_typedef;
|
||||
--- a/scripts/genksyms/parse.y
|
||||
+++ b/scripts/genksyms/parse.y
|
||||
@@ -24,7 +24,9 @@
|
||||
%{
|
||||
|
||||
#include <assert.h>
|
||||
+#ifndef __APPLE__
|
||||
#include <malloc.h>
|
||||
+#endif
|
||||
#include "genksyms.h"
|
||||
|
||||
static int is_typedef;
|
||||
--- a/scripts/kallsyms.c
|
||||
+++ b/scripts/kallsyms.c
|
||||
@@ -30,6 +30,35 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <ctype.h>
|
||||
+#ifdef __APPLE__
|
||||
+/* Darwin has no memmem implementation, this one is ripped of the uClibc-0.9.28 source */
|
||||
+void *memmem (const void *haystack, size_t haystack_len,
|
||||
+ const void *needle, size_t needle_len)
|
||||
+{
|
||||
+ const char *begin;
|
||||
+ const char *const last_possible
|
||||
+ = (const char *) haystack + haystack_len - needle_len;
|
||||
+
|
||||
+ if (needle_len == 0)
|
||||
+ /* The first occurrence of the empty string is deemed to occur at
|
||||
+ the beginning of the string. */
|
||||
+ return (void *) haystack;
|
||||
+
|
||||
+ /* Sanity check, otherwise the loop might search through the whole
|
||||
+ memory. */
|
||||
+ if (__builtin_expect (haystack_len < needle_len, 0))
|
||||
+ return NULL;
|
||||
+
|
||||
+ for (begin = (const char *) haystack; begin <= last_possible; ++begin)
|
||||
+ if (begin[0] == ((const char *) needle)[0] &&
|
||||
+ !memcmp ((const void *) &begin[1],
|
||||
+ (const void *) ((const char *) needle + 1),
|
||||
+ needle_len - 1))
|
||||
+ return (void *) begin;
|
||||
+
|
||||
+ return NULL;
|
||||
+}
|
||||
+#endif
|
||||
|
||||
#define KSYM_NAME_LEN 127
|
||||
|
||||
--- a/scripts/kconfig/Makefile
|
||||
+++ b/scripts/kconfig/Makefile
|
||||
@@ -87,6 +87,9 @@ check-lxdialog := $(srctree)/$(src)/lxd
|
||||
# we really need to do so. (Do not call gcc as part of make mrproper)
|
||||
HOST_EXTRACFLAGS = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags)
|
||||
HOST_LOADLIBES = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ldflags $(HOSTCC))
|
||||
+ifeq ($(shell uname -s),Darwin)
|
||||
+HOST_LOADLIBES += -lncurses
|
||||
+endif
|
||||
|
||||
HOST_EXTRACFLAGS += -DLOCALE
|
||||
|
||||
--- a/scripts/mod/file2alias.c
|
||||
+++ b/scripts/mod/file2alias.c
|
||||
@@ -37,7 +37,21 @@ typedef unsigned char __u8;
|
||||
* even potentially has different endianness and word sizes, since
|
||||
* we handle those differences explicitly below */
|
||||
#include "../../include/linux/mod_devicetable.h"
|
||||
+#ifndef __APPLE__
|
||||
#include "../../include/linux/input.h"
|
||||
+#else
|
||||
+#define EV_MAX 0x1f
|
||||
+#define KEY_MUTE 113
|
||||
+#define KEY_MIN_INTERESTING KEY_MUTE
|
||||
+#define KEY_MAX 0x1ff
|
||||
+#define REL_MAX 0x0f
|
||||
+#define ABS_MAX 0x3f
|
||||
+#define MSC_MAX 0x07
|
||||
+#define LED_MAX 0x0f
|
||||
+#define SND_MAX 0x07
|
||||
+#define FF_MAX 0x7f
|
||||
+#define SW_MAX 0x0f
|
||||
+#endif
|
||||
|
||||
#define ADD(str, sep, cond, field) \
|
||||
do { \
|
||||
--- a/scripts/mod/mk_elfconfig.c
|
||||
+++ b/scripts/mod/mk_elfconfig.c
|
||||
@@ -1,7 +1,11 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
+#ifndef __APPLE__
|
||||
#include <elf.h>
|
||||
+#else
|
||||
+#include "../../../../../tools/sstrip/include/elf.h"
|
||||
+#endif
|
||||
|
||||
int
|
||||
main(int argc, char **argv)
|
||||
--- a/scripts/mod/modpost.h
|
||||
+++ b/scripts/mod/modpost.h
|
||||
@@ -7,7 +7,11 @@
|
||||
#include <sys/mman.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
+#if !(defined(__APPLE__) || defined(__CYGWIN__))
|
||||
#include <elf.h>
|
||||
+#else
|
||||
+#include "../../../../../tools/sstrip/include/elf.h"
|
||||
+#endif
|
||||
|
||||
#include "elfconfig.h"
|
||||
|
||||
--- a/scripts/mod/sumversion.c
|
||||
+++ b/scripts/mod/sumversion.c
|
||||
@@ -8,6 +8,9 @@
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include "modpost.h"
|
||||
+#ifdef __APPLE__
|
||||
+#include <limits.h>
|
||||
+#endif
|
||||
|
||||
/*
|
||||
* Stolen form Cryptographic API.
|
|
@ -1,154 +0,0 @@
|
|||
--- a/drivers/net/wireless/hostap/hostap_ap.c
|
||||
+++ b/drivers/net/wireless/hostap/hostap_ap.c
|
||||
@@ -2346,13 +2346,13 @@ int prism2_ap_get_sta_qual(local_info_t
|
||||
addr[count].sa_family = ARPHRD_ETHER;
|
||||
memcpy(addr[count].sa_data, sta->addr, ETH_ALEN);
|
||||
if (sta->last_rx_silence == 0)
|
||||
- qual[count].qual = sta->last_rx_signal < 27 ?
|
||||
- 0 : (sta->last_rx_signal - 27) * 92 / 127;
|
||||
+ qual[count].qual = (sta->last_rx_signal - 156) == 0 ?
|
||||
+ 0 : (sta->last_rx_signal - 156) * 92 / 64;
|
||||
else
|
||||
- qual[count].qual = sta->last_rx_signal -
|
||||
- sta->last_rx_silence - 35;
|
||||
- qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
|
||||
- qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
|
||||
+ qual[count].qual = (sta->last_rx_signal -
|
||||
+ sta->last_rx_silence) * 92 / 64;
|
||||
+ qual[count].level = sta->last_rx_signal;
|
||||
+ qual[count].noise = sta->last_rx_silence;
|
||||
qual[count].updated = sta->last_rx_updated;
|
||||
|
||||
sta->last_rx_updated = IW_QUAL_DBM;
|
||||
@@ -2417,13 +2417,13 @@ int prism2_ap_translate_scan(struct net_
|
||||
memset(&iwe, 0, sizeof(iwe));
|
||||
iwe.cmd = IWEVQUAL;
|
||||
if (sta->last_rx_silence == 0)
|
||||
- iwe.u.qual.qual = sta->last_rx_signal < 27 ?
|
||||
- 0 : (sta->last_rx_signal - 27) * 92 / 127;
|
||||
+ iwe.u.qual.qual = (sta->last_rx_signal -156) == 0 ?
|
||||
+ 0 : (sta->last_rx_signal - 156) * 92 / 64;
|
||||
else
|
||||
- iwe.u.qual.qual = sta->last_rx_signal -
|
||||
- sta->last_rx_silence - 35;
|
||||
- iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
|
||||
- iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
|
||||
+ iwe.u.qual.qual = (sta->last_rx_signal -
|
||||
+ sta->last_rx_silence) * 92 / 64;
|
||||
+ iwe.u.qual.level = sta->last_rx_signal;
|
||||
+ iwe.u.qual.noise = sta->last_rx_silence;
|
||||
iwe.u.qual.updated = sta->last_rx_updated;
|
||||
iwe.len = IW_EV_QUAL_LEN;
|
||||
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
|
||||
--- a/drivers/net/wireless/hostap/hostap_config.h
|
||||
+++ b/drivers/net/wireless/hostap/hostap_config.h
|
||||
@@ -47,4 +47,9 @@
|
||||
*/
|
||||
/* #define PRISM2_NO_STATION_MODES */
|
||||
|
||||
+/* Enable TX power Setting functions
|
||||
+ * (min att = -128 , max att = 127)
|
||||
+ */
|
||||
+#define RAW_TXPOWER_SETTING
|
||||
+
|
||||
#endif /* HOSTAP_CONFIG_H */
|
||||
--- a/drivers/net/wireless/hostap/hostap.h
|
||||
+++ b/drivers/net/wireless/hostap/hostap.h
|
||||
@@ -90,6 +90,7 @@ extern const struct iw_handler_def hosta
|
||||
extern const struct ethtool_ops prism2_ethtool_ops;
|
||||
|
||||
int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
|
||||
+int hostap_restore_power(struct net_device *dev);
|
||||
|
||||
|
||||
#endif /* HOSTAP_H */
|
||||
--- a/drivers/net/wireless/hostap/hostap_hw.c
|
||||
+++ b/drivers/net/wireless/hostap/hostap_hw.c
|
||||
@@ -933,6 +933,7 @@ static int hfa384x_set_rid(struct net_de
|
||||
prism2_hw_reset(dev);
|
||||
}
|
||||
|
||||
+ hostap_restore_power(dev);
|
||||
return res;
|
||||
}
|
||||
|
||||
--- a/drivers/net/wireless/hostap/hostap_info.c
|
||||
+++ b/drivers/net/wireless/hostap/hostap_info.c
|
||||
@@ -428,6 +428,11 @@ static void handle_info_queue_linkstatus
|
||||
}
|
||||
|
||||
/* Get BSSID if we have a valid AP address */
|
||||
+
|
||||
+ if ( val == HFA384X_LINKSTATUS_CONNECTED ||
|
||||
+ val == HFA384X_LINKSTATUS_DISCONNECTED )
|
||||
+ hostap_restore_power(local->dev);
|
||||
+
|
||||
if (connected) {
|
||||
netif_carrier_on(local->dev);
|
||||
netif_carrier_on(local->ddev);
|
||||
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
|
||||
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
|
||||
@@ -1504,23 +1504,20 @@ static int prism2_txpower_hfa386x_to_dBm
|
||||
val = 255;
|
||||
|
||||
tmp = val;
|
||||
- tmp >>= 2;
|
||||
|
||||
- return -12 - tmp;
|
||||
+ return tmp;
|
||||
}
|
||||
|
||||
static u16 prism2_txpower_dBm_to_hfa386x(int val)
|
||||
{
|
||||
signed char tmp;
|
||||
|
||||
- if (val > 20)
|
||||
- return 128;
|
||||
- else if (val < -43)
|
||||
+ if (val > 127)
|
||||
return 127;
|
||||
+ else if (val < -128)
|
||||
+ return 128;
|
||||
|
||||
tmp = val;
|
||||
- tmp = -12 - tmp;
|
||||
- tmp <<= 2;
|
||||
|
||||
return (unsigned char) tmp;
|
||||
}
|
||||
@@ -4080,3 +4077,35 @@ int hostap_ioctl(struct net_device *dev,
|
||||
|
||||
return ret;
|
||||
}
|
||||
+
|
||||
+/* BUG FIX: Restore power setting value when lost due to F/W bug */
|
||||
+
|
||||
+int hostap_restore_power(struct net_device *dev)
|
||||
+{
|
||||
+ struct hostap_interface *iface = dev->priv;
|
||||
+ local_info_t *local = iface->local;
|
||||
+
|
||||
+ u16 val;
|
||||
+ int ret = 0;
|
||||
+
|
||||
+ if (local->txpower_type == PRISM2_TXPOWER_OFF) {
|
||||
+ val = 0xff; /* use all standby and sleep modes */
|
||||
+ ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
|
||||
+ HFA386X_CR_A_D_TEST_MODES2,
|
||||
+ &val, NULL);
|
||||
+ }
|
||||
+
|
||||
+#ifdef RAW_TXPOWER_SETTING
|
||||
+ if (local->txpower_type == PRISM2_TXPOWER_FIXED) {
|
||||
+ val = HFA384X_TEST_CFG_BIT_ALC;
|
||||
+ local->func->cmd(dev, HFA384X_CMDCODE_TEST |
|
||||
+ (HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL);
|
||||
+ val = prism2_txpower_dBm_to_hfa386x(local->txpower);
|
||||
+ ret = (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
|
||||
+ HFA386X_CR_MANUAL_TX_POWER, &val, NULL));
|
||||
+ }
|
||||
+#endif /* RAW_TXPOWER_SETTING */
|
||||
+ return (ret ? -EOPNOTSUPP : 0);
|
||||
+}
|
||||
+
|
||||
+EXPORT_SYMBOL(hostap_restore_power);
|
|
@ -1,17 +0,0 @@
|
|||
--- a/include/linux/stddef.h
|
||||
+++ b/include/linux/stddef.h
|
||||
@@ -16,6 +16,7 @@ enum {
|
||||
false = 0,
|
||||
true = 1
|
||||
};
|
||||
+#endif /* __KERNEL__ */
|
||||
|
||||
#undef offsetof
|
||||
#ifdef __compiler_offsetof
|
||||
@@ -23,6 +24,5 @@ enum {
|
||||
#else
|
||||
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
|
||||
#endif
|
||||
-#endif /* __KERNEL__ */
|
||||
|
||||
#endif
|
|
@ -1,20 +0,0 @@
|
|||
--- a/scripts/gen_initramfs_list.sh
|
||||
+++ b/scripts/gen_initramfs_list.sh
|
||||
@@ -125,7 +125,7 @@ parse() {
|
||||
str="${ftype} ${name} ${location} ${str}"
|
||||
;;
|
||||
"nod")
|
||||
- local dev=`LC_ALL=C ls -l "${location}"`
|
||||
+ local dev=`LC_ALL=C ls -l --time-style=locale "${location}"`
|
||||
local maj=`field 5 ${dev}`
|
||||
local min=`field 6 ${dev}`
|
||||
maj=${maj%,}
|
||||
@@ -135,7 +135,7 @@ parse() {
|
||||
str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
|
||||
;;
|
||||
"slink")
|
||||
- local target=`field 11 $(LC_ALL=C ls -l "${location}")`
|
||||
+ local target=`field 11 $(LC_ALL=C ls -l --time-style=locale "${location}")`
|
||||
str="${ftype} ${name} ${target} ${str}"
|
||||
;;
|
||||
*)
|
|
@ -1,7 +0,0 @@
|
|||
--- a/scripts/mod/sumversion.c
|
||||
+++ b/scripts/mod/sumversion.c
|
||||
@@ -1,3 +1,4 @@
|
||||
+#include <linux/limits.h>
|
||||
#include <netinet/in.h>
|
||||
#ifdef __sun__
|
||||
#include <inttypes.h>
|
|
@ -1,52 +0,0 @@
|
|||
--- a/include/linux/ip.h
|
||||
+++ b/include/linux/ip.h
|
||||
@@ -104,6 +104,16 @@ struct iphdr {
|
||||
/*The options start here. */
|
||||
};
|
||||
|
||||
+#ifdef __KERNEL__
|
||||
+#include <linux/skbuff.h>
|
||||
+
|
||||
+static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
|
||||
+{
|
||||
+ return (struct iphdr *)skb_network_header(skb);
|
||||
+}
|
||||
+
|
||||
+#endif
|
||||
+
|
||||
struct ip_auth_hdr {
|
||||
__u8 nexthdr;
|
||||
__u8 hdrlen; /* This one is measured in 32 bit units! */
|
||||
--- a/include/linux/skbuff.h
|
||||
+++ b/include/linux/skbuff.h
|
||||
@@ -966,6 +966,16 @@ static inline void skb_reserve(struct sk
|
||||
skb->tail += len;
|
||||
}
|
||||
|
||||
+static inline unsigned char *skb_network_header(const struct sk_buff *skb)
|
||||
+{
|
||||
+ return skb->nh.raw;
|
||||
+}
|
||||
+
|
||||
+static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
|
||||
+{
|
||||
+ return skb->tail;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* CPUs often take a performance hit when accessing unaligned memory
|
||||
* locations. The actual performance hit varies, it can be small if the
|
||||
--- a/include/net/ip.h
|
||||
+++ b/include/net/ip.h
|
||||
@@ -43,6 +43,11 @@ struct inet_skb_parm
|
||||
#define IPSKB_REROUTED 16
|
||||
};
|
||||
|
||||
+static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
|
||||
+{
|
||||
+ return ip_hdr(skb)->ihl * 4;
|
||||
+}
|
||||
+
|
||||
struct ipcm_cookie
|
||||
{
|
||||
__be32 addr;
|
|
@ -1,14 +0,0 @@
|
|||
--- a/init/main.c
|
||||
+++ b/init/main.c
|
||||
@@ -764,10 +764,7 @@ static int noinline init_post(void)
|
||||
printk(KERN_WARNING "Failed to execute %s. Attempting "
|
||||
"defaults...\n", execute_command);
|
||||
}
|
||||
- run_init_process("/sbin/init");
|
||||
- run_init_process("/etc/init");
|
||||
- run_init_process("/bin/init");
|
||||
- run_init_process("/bin/sh");
|
||||
+ run_init_process("/etc/preinit");
|
||||
|
||||
panic("No init found. Try passing init= option to kernel.");
|
||||
}
|
Loading…
Reference in New Issue