remove 2.6.23 support, as the last target using that has been nuked, too

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@16275 3c298f89-4303-0410-b956-a3cf2f4a3e73
Imre Kaloz 2009-06-01 18:16:10 +00:00
parent 87a6e2bb18
commit 671a899cc4
59 changed files with 0 additions and 42323 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,788 +0,0 @@
--- /dev/null
+++ b/include/linux/LzmaDecode.h
@@ -0,0 +1,100 @@
+/*
+ LzmaDecode.h
+ LZMA Decoder interface
+
+ LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
+ http://www.7-zip.org/
+
+ LZMA SDK is licensed under two licenses:
+ 1) GNU Lesser General Public License (GNU LGPL)
+ 2) Common Public License (CPL)
+ It means that you can select one of these two licenses and
+ follow rules of that license.
+
+ SPECIAL EXCEPTION:
+ Igor Pavlov, as the author of this code, expressly permits you to
+ statically or dynamically link your code (or bind by name) to the
+ interfaces of this file without subjecting your linked code to the
+ terms of the CPL or GNU LGPL. Any modifications or additions
+ to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#ifndef __LZMADECODE_H
+#define __LZMADECODE_H
+
+/* #define _LZMA_IN_CB */
+/* Use callback for input data */
+
+/* #define _LZMA_OUT_READ */
+/* Use read function for output data */
+
+/* #define _LZMA_PROB32 */
+/* It can increase speed on some 32-bit CPUs,
+ but memory usage will be doubled in that case */
+
+/* #define _LZMA_LOC_OPT */
+/* Enable local speed optimizations inside code */
+
+#ifndef UInt32
+#ifdef _LZMA_UINT32_IS_ULONG
+#define UInt32 unsigned long
+#else
+#define UInt32 unsigned int
+#endif
+#endif
+
+#ifdef _LZMA_PROB32
+#define CProb UInt32
+#else
+#define CProb unsigned short
+#endif
+
+#define LZMA_RESULT_OK 0
+#define LZMA_RESULT_DATA_ERROR 1
+#define LZMA_RESULT_NOT_ENOUGH_MEM 2
+
+#ifdef _LZMA_IN_CB
+typedef struct _ILzmaInCallback
+{
+ int (*Read)(void *object, unsigned char **buffer, UInt32 *bufferSize);
+} ILzmaInCallback;
+#endif
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+/*
+bufferSize = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)))* sizeof(CProb)
+bufferSize += 100 in case of _LZMA_OUT_READ
+by default CProb is unsigned short,
+but if specify _LZMA_PROB_32, CProb will be UInt32(unsigned int)
+*/
+
+#ifdef _LZMA_OUT_READ
+int LzmaDecoderInit(
+ unsigned char *buffer, UInt32 bufferSize,
+ int lc, int lp, int pb,
+ unsigned char *dictionary, UInt32 dictionarySize,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback
+ #else
+ unsigned char *inStream, UInt32 inSize
+ #endif
+);
+#endif
+
+int LzmaDecode(
+ unsigned char *buffer,
+ #ifndef _LZMA_OUT_READ
+ UInt32 bufferSize,
+ int lc, int lp, int pb,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback,
+ #else
+ unsigned char *inStream, UInt32 inSize,
+ #endif
+ #endif
+ unsigned char *outStream, UInt32 outSize,
+ UInt32 *outSizeProcessed);
+
+#endif
--- /dev/null
+++ b/lib/LzmaDecode.c
@@ -0,0 +1,663 @@
+/*
+ LzmaDecode.c
+ LZMA Decoder
+
+ LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
+ http://www.7-zip.org/
+
+ LZMA SDK is licensed under two licenses:
+ 1) GNU Lesser General Public License (GNU LGPL)
+ 2) Common Public License (CPL)
+ It means that you can select one of these two licenses and
+ follow rules of that license.
+
+ SPECIAL EXCEPTION:
+ Igor Pavlov, as the author of this code, expressly permits you to
+ statically or dynamically link your code (or bind by name) to the
+ interfaces of this file without subjecting your linked code to the
+ terms of the CPL or GNU LGPL. Any modifications or additions
+ to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#include <linux/LzmaDecode.h>
+
+#ifndef Byte
+#define Byte unsigned char
+#endif
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+typedef struct _CRangeDecoder
+{
+ Byte *Buffer;
+ Byte *BufferLim;
+ UInt32 Range;
+ UInt32 Code;
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *InCallback;
+ int Result;
+ #endif
+ int ExtraBytes;
+} CRangeDecoder;
+
+Byte RangeDecoderReadByte(CRangeDecoder *rd)
+{
+ if (rd->Buffer == rd->BufferLim)
+ {
+ #ifdef _LZMA_IN_CB
+ UInt32 size;
+ rd->Result = rd->InCallback->Read(rd->InCallback, &rd->Buffer, &size);
+ rd->BufferLim = rd->Buffer + size;
+ if (size == 0)
+ #endif
+ {
+ rd->ExtraBytes = 1;
+ return 0xFF;
+ }
+ }
+ return (*rd->Buffer++);
+}
+
+/* #define ReadByte (*rd->Buffer++) */
+#define ReadByte (RangeDecoderReadByte(rd))
+
+void RangeDecoderInit(CRangeDecoder *rd,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback
+ #else
+ Byte *stream, UInt32 bufferSize
+ #endif
+ )
+{
+ int i;
+ #ifdef _LZMA_IN_CB
+ rd->InCallback = inCallback;
+ rd->Buffer = rd->BufferLim = 0;
+ #else
+ rd->Buffer = stream;
+ rd->BufferLim = stream + bufferSize;
+ #endif
+ rd->ExtraBytes = 0;
+ rd->Code = 0;
+ rd->Range = (0xFFFFFFFF);
+ for(i = 0; i < 5; i++)
+ rd->Code = (rd->Code << 8) | ReadByte;
+}
+
+#define RC_INIT_VAR UInt32 range = rd->Range; UInt32 code = rd->Code;
+#define RC_FLUSH_VAR rd->Range = range; rd->Code = code;
+#define RC_NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | ReadByte; }
+
+UInt32 RangeDecoderDecodeDirectBits(CRangeDecoder *rd, int numTotalBits)
+{
+ RC_INIT_VAR
+ UInt32 result = 0;
+ int i;
+ for (i = numTotalBits; i > 0; i--)
+ {
+ /* UInt32 t; */
+ range >>= 1;
+
+ result <<= 1;
+ if (code >= range)
+ {
+ code -= range;
+ result |= 1;
+ }
+ /*
+ t = (code - range) >> 31;
+ t &= 1;
+ code -= range & (t - 1);
+ result = (result + result) | (1 - t);
+ */
+ RC_NORMALIZE
+ }
+ RC_FLUSH_VAR
+ return result;
+}
+
+int RangeDecoderBitDecode(CProb *prob, CRangeDecoder *rd)
+{
+ UInt32 bound = (rd->Range >> kNumBitModelTotalBits) * *prob;
+ if (rd->Code < bound)
+ {
+ rd->Range = bound;
+ *prob += (kBitModelTotal - *prob) >> kNumMoveBits;
+ if (rd->Range < kTopValue)
+ {
+ rd->Code = (rd->Code << 8) | ReadByte;
+ rd->Range <<= 8;
+ }
+ return 0;
+ }
+ else
+ {
+ rd->Range -= bound;
+ rd->Code -= bound;
+ *prob -= (*prob) >> kNumMoveBits;
+ if (rd->Range < kTopValue)
+ {
+ rd->Code = (rd->Code << 8) | ReadByte;
+ rd->Range <<= 8;
+ }
+ return 1;
+ }
+}
+
+#define RC_GET_BIT2(prob, mi, A0, A1) \
+ UInt32 bound = (range >> kNumBitModelTotalBits) * *prob; \
+ if (code < bound) \
+ { A0; range = bound; *prob += (kBitModelTotal - *prob) >> kNumMoveBits; mi <<= 1; } \
+ else \
+ { A1; range -= bound; code -= bound; *prob -= (*prob) >> kNumMoveBits; mi = (mi + mi) + 1; } \
+ RC_NORMALIZE
+
+#define RC_GET_BIT(prob, mi) RC_GET_BIT2(prob, mi, ; , ;)
+
+int RangeDecoderBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
+{
+ int mi = 1;
+ int i;
+ #ifdef _LZMA_LOC_OPT
+ RC_INIT_VAR
+ #endif
+ for(i = numLevels; i > 0; i--)
+ {
+ #ifdef _LZMA_LOC_OPT
+ CProb *prob = probs + mi;
+ RC_GET_BIT(prob, mi)
+ #else
+ mi = (mi + mi) + RangeDecoderBitDecode(probs + mi, rd);
+ #endif
+ }
+ #ifdef _LZMA_LOC_OPT
+ RC_FLUSH_VAR
+ #endif
+ return mi - (1 << numLevels);
+}
+
+int RangeDecoderReverseBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
+{
+ int mi = 1;
+ int i;
+ int symbol = 0;
+ #ifdef _LZMA_LOC_OPT
+ RC_INIT_VAR
+ #endif
+ for(i = 0; i < numLevels; i++)
+ {
+ #ifdef _LZMA_LOC_OPT
+ CProb *prob = probs + mi;
+ RC_GET_BIT2(prob, mi, ; , symbol |= (1 << i))
+ #else
+ int bit = RangeDecoderBitDecode(probs + mi, rd);
+ mi = mi + mi + bit;
+ symbol |= (bit << i);
+ #endif
+ }
+ #ifdef _LZMA_LOC_OPT
+ RC_FLUSH_VAR
+ #endif
+ return symbol;
+}
+
+Byte LzmaLiteralDecode(CProb *probs, CRangeDecoder *rd)
+{
+ int symbol = 1;
+ #ifdef _LZMA_LOC_OPT
+ RC_INIT_VAR
+ #endif
+ do
+ {
+ #ifdef _LZMA_LOC_OPT
+ CProb *prob = probs + symbol;
+ RC_GET_BIT(prob, symbol)
+ #else
+ symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
+ #endif
+ }
+ while (symbol < 0x100);
+ #ifdef _LZMA_LOC_OPT
+ RC_FLUSH_VAR
+ #endif
+ return symbol;
+}
+
+Byte LzmaLiteralDecodeMatch(CProb *probs, CRangeDecoder *rd, Byte matchByte)
+{
+ int symbol = 1;
+ #ifdef _LZMA_LOC_OPT
+ RC_INIT_VAR
+ #endif
+ do
+ {
+ int bit;
+ int matchBit = (matchByte >> 7) & 1;
+ matchByte <<= 1;
+ #ifdef _LZMA_LOC_OPT
+ {
+ CProb *prob = probs + ((1 + matchBit) << 8) + symbol;
+ RC_GET_BIT2(prob, symbol, bit = 0, bit = 1)
+ }
+ #else
+ bit = RangeDecoderBitDecode(probs + ((1 + matchBit) << 8) + symbol, rd);
+ symbol = (symbol << 1) | bit;
+ #endif
+ if (matchBit != bit)
+ {
+ while (symbol < 0x100)
+ {
+ #ifdef _LZMA_LOC_OPT
+ CProb *prob = probs + symbol;
+ RC_GET_BIT(prob, symbol)
+ #else
+ symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
+ #endif
+ }
+ break;
+ }
+ }
+ while (symbol < 0x100);
+ #ifdef _LZMA_LOC_OPT
+ RC_FLUSH_VAR
+ #endif
+ return symbol;
+}
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
+
+int LzmaLenDecode(CProb *p, CRangeDecoder *rd, int posState)
+{
+ if(RangeDecoderBitDecode(p + LenChoice, rd) == 0)
+ return RangeDecoderBitTreeDecode(p + LenLow +
+ (posState << kLenNumLowBits), kLenNumLowBits, rd);
+ if(RangeDecoderBitDecode(p + LenChoice2, rd) == 0)
+ return kLenNumLowSymbols + RangeDecoderBitTreeDecode(p + LenMid +
+ (posState << kLenNumMidBits), kLenNumMidBits, rd);
+ return kLenNumLowSymbols + kLenNumMidSymbols +
+ RangeDecoderBitTreeDecode(p + LenHigh, kLenNumHighBits, rd);
+}
+
+#define kNumStates 12
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+#ifdef _LZMA_OUT_READ
+
+typedef struct _LzmaVarState
+{
+ CRangeDecoder RangeDecoder;
+ Byte *Dictionary;
+ UInt32 DictionarySize;
+ UInt32 DictionaryPos;
+ UInt32 GlobalPos;
+ UInt32 Reps[4];
+ int lc;
+ int lp;
+ int pb;
+ int State;
+ int PreviousIsMatch;
+ int RemainLen;
+} LzmaVarState;
+
+int LzmaDecoderInit(
+ unsigned char *buffer, UInt32 bufferSize,
+ int lc, int lp, int pb,
+ unsigned char *dictionary, UInt32 dictionarySize,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback
+ #else
+ unsigned char *inStream, UInt32 inSize
+ #endif
+ )
+{
+ LzmaVarState *vs = (LzmaVarState *)buffer;
+ CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
+ UInt32 i;
+ if (bufferSize < numProbs * sizeof(CProb) + sizeof(LzmaVarState))
+ return LZMA_RESULT_NOT_ENOUGH_MEM;
+ vs->Dictionary = dictionary;
+ vs->DictionarySize = dictionarySize;
+ vs->DictionaryPos = 0;
+ vs->GlobalPos = 0;
+ vs->Reps[0] = vs->Reps[1] = vs->Reps[2] = vs->Reps[3] = 1;
+ vs->lc = lc;
+ vs->lp = lp;
+ vs->pb = pb;
+ vs->State = 0;
+ vs->PreviousIsMatch = 0;
+ vs->RemainLen = 0;
+ dictionary[dictionarySize - 1] = 0;
+ for (i = 0; i < numProbs; i++)
+ p[i] = kBitModelTotal >> 1;
+ RangeDecoderInit(&vs->RangeDecoder,
+ #ifdef _LZMA_IN_CB
+ inCallback
+ #else
+ inStream, inSize
+ #endif
+ );
+ return LZMA_RESULT_OK;
+}
+
+int LzmaDecode(unsigned char *buffer,
+ unsigned char *outStream, UInt32 outSize,
+ UInt32 *outSizeProcessed)
+{
+ LzmaVarState *vs = (LzmaVarState *)buffer;
+ CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
+ CRangeDecoder rd = vs->RangeDecoder;
+ int state = vs->State;
+ int previousIsMatch = vs->PreviousIsMatch;
+ Byte previousByte;
+ UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
+ UInt32 nowPos = 0;
+ UInt32 posStateMask = (1 << (vs->pb)) - 1;
+ UInt32 literalPosMask = (1 << (vs->lp)) - 1;
+ int lc = vs->lc;
+ int len = vs->RemainLen;
+ UInt32 globalPos = vs->GlobalPos;
+
+ Byte *dictionary = vs->Dictionary;
+ UInt32 dictionarySize = vs->DictionarySize;
+ UInt32 dictionaryPos = vs->DictionaryPos;
+
+ if (len == -1)
+ {
+ *outSizeProcessed = 0;
+ return LZMA_RESULT_OK;
+ }
+
+ while(len > 0 && nowPos < outSize)
+ {
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ len--;
+ }
+ if (dictionaryPos == 0)
+ previousByte = dictionary[dictionarySize - 1];
+ else
+ previousByte = dictionary[dictionaryPos - 1];
+#else
+
+int LzmaDecode(
+ Byte *buffer, UInt32 bufferSize,
+ int lc, int lp, int pb,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback,
+ #else
+ unsigned char *inStream, UInt32 inSize,
+ #endif
+ unsigned char *outStream, UInt32 outSize,
+ UInt32 *outSizeProcessed)
+{
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
+ CProb *p = (CProb *)buffer;
+ CRangeDecoder rd;
+ UInt32 i;
+ int state = 0;
+ int previousIsMatch = 0;
+ Byte previousByte = 0;
+ UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
+ UInt32 nowPos = 0;
+ UInt32 posStateMask = (1 << pb) - 1;
+ UInt32 literalPosMask = (1 << lp) - 1;
+ int len = 0;
+ if (bufferSize < numProbs * sizeof(CProb))
+ return LZMA_RESULT_NOT_ENOUGH_MEM;
+ for (i = 0; i < numProbs; i++)
+ p[i] = kBitModelTotal >> 1;
+ RangeDecoderInit(&rd,
+ #ifdef _LZMA_IN_CB
+ inCallback
+ #else
+ inStream, inSize
+ #endif
+ );
+#endif
+
+ *outSizeProcessed = 0;
+ while(nowPos < outSize)
+ {
+ int posState = (int)(
+ (nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ & posStateMask);
+ #ifdef _LZMA_IN_CB
+ if (rd.Result != LZMA_RESULT_OK)
+ return rd.Result;
+ #endif
+ if (rd.ExtraBytes != 0)
+ return LZMA_RESULT_DATA_ERROR;
+ if (RangeDecoderBitDecode(p + IsMatch + (state << kNumPosBitsMax) + posState, &rd) == 0)
+ {
+ CProb *probs = p + Literal + (LZMA_LIT_SIZE *
+ (((
+ (nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ & literalPosMask) << lc) + (previousByte >> (8 - lc))));
+
+ if (state < 4) state = 0;
+ else if (state < 10) state -= 3;
+ else state -= 6;
+ if (previousIsMatch)
+ {
+ Byte matchByte;
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ matchByte = dictionary[pos];
+ #else
+ matchByte = outStream[nowPos - rep0];
+ #endif
+ previousByte = LzmaLiteralDecodeMatch(probs, &rd, matchByte);
+ previousIsMatch = 0;
+ }
+ else
+ previousByte = LzmaLiteralDecode(probs, &rd);
+ outStream[nowPos++] = previousByte;
+ #ifdef _LZMA_OUT_READ
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #endif
+ }
+ else
+ {
+ previousIsMatch = 1;
+ if (RangeDecoderBitDecode(p + IsRep + state, &rd) == 1)
+ {
+ if (RangeDecoderBitDecode(p + IsRepG0 + state, &rd) == 0)
+ {
+ if (RangeDecoderBitDecode(p + IsRep0Long + (state << kNumPosBitsMax) + posState, &rd) == 0)
+ {
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos;
+ #endif
+ if (
+ (nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ == 0)
+ return LZMA_RESULT_DATA_ERROR;
+ state = state < 7 ? 9 : 11;
+ #ifdef _LZMA_OUT_READ
+ pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ previousByte = dictionary[pos];
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #else
+ previousByte = outStream[nowPos - rep0];
+ #endif
+ outStream[nowPos++] = previousByte;
+ continue;
+ }
+ }
+ else
+ {
+ UInt32 distance;
+ if(RangeDecoderBitDecode(p + IsRepG1 + state, &rd) == 0)
+ distance = rep1;
+ else
+ {
+ if(RangeDecoderBitDecode(p + IsRepG2 + state, &rd) == 0)
+ distance = rep2;
+ else
+ {
+ distance = rep3;
+ rep3 = rep2;
+ }
+ rep2 = rep1;
+ }
+ rep1 = rep0;
+ rep0 = distance;
+ }
+ len = LzmaLenDecode(p + RepLenCoder, &rd, posState);
+ state = state < 7 ? 8 : 11;
+ }
+ else
+ {
+ int posSlot;
+ rep3 = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+ state = state < 7 ? 7 : 10;
+ len = LzmaLenDecode(p + LenCoder, &rd, posState);
+ posSlot = RangeDecoderBitTreeDecode(p + PosSlot +
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
+ kNumPosSlotBits), kNumPosSlotBits, &rd);
+ if (posSlot >= kStartPosModelIndex)
+ {
+ int numDirectBits = ((posSlot >> 1) - 1);
+ rep0 = ((2 | ((UInt32)posSlot & 1)) << numDirectBits);
+ if (posSlot < kEndPosModelIndex)
+ {
+ rep0 += RangeDecoderReverseBitTreeDecode(
+ p + SpecPos + rep0 - posSlot - 1, numDirectBits, &rd);
+ }
+ else
+ {
+ rep0 += RangeDecoderDecodeDirectBits(&rd,
+ numDirectBits - kNumAlignBits) << kNumAlignBits;
+ rep0 += RangeDecoderReverseBitTreeDecode(p + Align, kNumAlignBits, &rd);
+ }
+ }
+ else
+ rep0 = posSlot;
+ rep0++;
+ }
+ if (rep0 == (UInt32)(0))
+ {
+ /* it's for stream version */
+ len = -1;
+ break;
+ }
+ if (rep0 > nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ {
+ return LZMA_RESULT_DATA_ERROR;
+ }
+ len += kMatchMinLen;
+ do
+ {
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ previousByte = dictionary[pos];
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #else
+ previousByte = outStream[nowPos - rep0];
+ #endif
+ outStream[nowPos++] = previousByte;
+ len--;
+ }
+ while(len > 0 && nowPos < outSize);
+ }
+ }
+
+ #ifdef _LZMA_OUT_READ
+ vs->RangeDecoder = rd;
+ vs->DictionaryPos = dictionaryPos;
+ vs->GlobalPos = globalPos + nowPos;
+ vs->Reps[0] = rep0;
+ vs->Reps[1] = rep1;
+ vs->Reps[2] = rep2;
+ vs->Reps[3] = rep3;
+ vs->State = state;
+ vs->PreviousIsMatch = previousIsMatch;
+ vs->RemainLen = len;
+ #endif
+
+ *outSizeProcessed = nowPos;
+ return LZMA_RESULT_OK;
+}
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o kobject_uevent.o klist.o
obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
- bust_spinlocks.o hexdump.o kasprintf.o
+ bust_spinlocks.o hexdump.o kasprintf.o LzmaDecode.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -62,6 +62,7 @@ obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
+
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o


@@ -1,107 +0,0 @@
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -4,6 +4,9 @@
* Copyright (c) 2002, 2003, 2004, 2005, 2006
* Phillip Lougher <phillip@lougher.org.uk>
*
+ * LZMA decompressor support added by Oleg I. Vdovikin
+ * Copyright (c) 2005 Oleg I.Vdovikin <oleg@cs.msu.su>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
@@ -21,6 +24,7 @@
* inode.c
*/
+#define SQUASHFS_LZMA
#include <linux/types.h>
#include <linux/squashfs_fs.h>
#include <linux/module.h>
@@ -44,6 +48,19 @@
#include "squashfs.h"
+#ifdef SQUASHFS_LZMA
+#include <linux/LzmaDecode.h>
+
+/* default LZMA settings, should be in sync with mksquashfs */
+#define LZMA_LC 3
+#define LZMA_LP 0
+#define LZMA_PB 2
+
+#define LZMA_WORKSPACE_SIZE ((LZMA_BASE_SIZE + \
+ (LZMA_LIT_SIZE << (LZMA_LC + LZMA_LP))) * sizeof(CProb))
+
+#endif
+
static void squashfs_put_super(struct super_block *);
static int squashfs_statfs(struct dentry *, struct kstatfs *);
static int squashfs_symlink_readpage(struct file *file, struct page *page);
@@ -64,7 +81,11 @@ static int squashfs_get_sb(struct file_s
const char *, void *, struct vfsmount *);
+#ifdef SQUASHFS_LZMA
+static unsigned char lzma_workspace[LZMA_WORKSPACE_SIZE];
+#else
static z_stream stream;
+#endif
static struct file_system_type squashfs_fs_type = {
.owner = THIS_MODULE,
@@ -249,6 +270,15 @@ SQSH_EXTERN unsigned int squashfs_read_d
if (compressed) {
int zlib_err;
+#ifdef SQUASHFS_LZMA
+ if ((zlib_err = LzmaDecode(lzma_workspace,
+ LZMA_WORKSPACE_SIZE, LZMA_LC, LZMA_LP, LZMA_PB,
+ c_buffer, c_byte, buffer, msblk->read_size, &bytes)) != LZMA_RESULT_OK)
+ {
+ ERROR("lzma returned unexpected result 0x%x\n", zlib_err);
+ bytes = 0;
+ }
+#else
stream.next_in = c_buffer;
stream.avail_in = c_byte;
stream.next_out = buffer;
@@ -263,7 +293,7 @@ SQSH_EXTERN unsigned int squashfs_read_d
bytes = 0;
} else
bytes = stream.total_out;
-
+#endif
up(&msblk->read_data_mutex);
}
@@ -2045,15 +2075,19 @@ static int __init init_squashfs_fs(void)
printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) "
"Phillip Lougher\n");
+#ifndef SQUASHFS_LZMA
if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
ERROR("Failed to allocate zlib workspace\n");
destroy_inodecache();
err = -ENOMEM;
goto out;
}
+#endif
if ((err = register_filesystem(&squashfs_fs_type))) {
+#ifndef SQUASHFS_LZMA
vfree(stream.workspace);
+#endif
destroy_inodecache();
}
@@ -2064,7 +2098,9 @@ out:
static void __exit exit_squashfs_fs(void)
{
+#ifndef SQUASHFS_LZMA
vfree(stream.workspace);
+#endif
unregister_filesystem(&squashfs_fs_type);
destroy_inodecache();
}


@@ -1,12 +0,0 @@
--- a/Makefile
+++ b/Makefile
@@ -508,6 +508,9 @@ CFLAGS += $(call cc-option, -fn
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
CHECKFLAGS += $(NOSTDINC_FLAGS)
+# improve gcc optimization
+CFLAGS += $(call cc-option,-funit-at-a-time,)
+
# warn about C99 declaration after statement
CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)


@@ -1,11 +0,0 @@
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -181,7 +181,7 @@ extern __u64 __xchg_u64_unsupported_on_3
if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static __always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:


@@ -1,36 +0,0 @@
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -51,6 +51,7 @@
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6
+#define MANUFACTURER_SAMSUNG 0x00ec
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -294,12 +295,19 @@ struct mtd_info *cfi_cmdset_0002(struct
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
- printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
- "version %c.%c.\n", extp->MajorVersion,
- extp->MinorVersion);
- kfree(extp);
- kfree(mtd);
- return NULL;
+ if (cfi->mfr == MANUFACTURER_SAMSUNG &&
+ (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+ printk(KERN_NOTICE " Newer Samsung flash detected, "
+ "should be compatibile with Amd/Fujitsu.\n");
+ }
+ else {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
}
/* Install our own private info structure */


@@ -1,169 +0,0 @@
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -933,7 +933,7 @@ static void __xipram xip_enable(struct m
static int __xipram xip_wait_for_operation(
struct map_info *map, struct flchip *chip,
- unsigned long adr, unsigned int chip_op_time )
+ unsigned long adr, int *chip_op_time )
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
@@ -942,7 +942,7 @@ static int __xipram xip_wait_for_operati
flstate_t oldstate, newstate;
start = xip_currtime();
- usec = chip_op_time * 8;
+ usec = *chip_op_time * 8;
if (usec == 0)
usec = 500000;
done = 0;
@@ -1052,8 +1052,8 @@ static int __xipram xip_wait_for_operati
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
-#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
- xip_wait_for_operation(map, chip, cmd_adr, usec)
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
+ xip_wait_for_operation(map, chip, cmd_adr, p_usec)
#else
@@ -1065,65 +1065,65 @@ static int __xipram xip_wait_for_operati
static int inval_cache_and_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
- unsigned int chip_op_time)
+ int *chip_op_time )
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80);
- int chip_state = chip->state;
- unsigned int timeo, sleep_time;
+ int z, chip_state = chip->state;
+ unsigned long timeo;
spin_unlock(chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+ if (*chip_op_time)
+ cfi_udelay(*chip_op_time);
spin_lock(chip->mutex);
- /* set our timeout to 8 times the expected delay */
- timeo = chip_op_time * 8;
- if (!timeo)
- timeo = 500000;
- sleep_time = chip_op_time / 2;
+ timeo = *chip_op_time * 8 * HZ / 1000000;
+ if (timeo < HZ/2)
+ timeo = HZ/2;
+ timeo += jiffies;
+ z = 0;
for (;;) {
+ if (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ spin_lock(chip->mutex);
+ continue;
+ }
+
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
- if (!timeo) {
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
return -ETIME;
}
- /* OK Still waiting. Drop the lock, wait a while and retry. */
+ /* Latency issues. Drop the lock, wait a while and retry */
+ z++;
spin_unlock(chip->mutex);
- if (sleep_time >= 1000000/HZ) {
- /*
- * Half of the normal delay still remaining
- * can be performed with a sleeping delay instead
- * of busy waiting.
- */
- msleep(sleep_time/1000);
- timeo -= sleep_time;
- sleep_time = 1000000/HZ;
- } else {
- udelay(1);
- cond_resched();
- timeo--;
- }
+ cfi_udelay(1);
spin_lock(chip->mutex);
-
- while (chip->state != chip_state) {
- /* Someone's suspended the operation: sleep */
- DECLARE_WAITQUEUE(wait, current);
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
- }
}
+ if (!z) {
+ if (!--(*chip_op_time))
+ *chip_op_time = 1;
+ } else if (z > 1)
+ ++(*chip_op_time);
+
/* Done and happy. */
chip->state = FL_STATUS;
return 0;
@@ -1132,7 +1132,8 @@ static int inval_cache_and_wait_for_oper
#endif
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
- INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
+ ({ int __udelay = (udelay); \
+ INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
@@ -1356,7 +1357,7 @@ static int __xipram do_write_oneword(str
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, map_bankwidth(map),
- chip->word_write_time);
+ &chip->word_write_time);
if (ret) {
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
@@ -1593,7 +1594,7 @@ static int __xipram do_write_buffer(stru
ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
adr, len,
- chip->buffer_write_time);
+ &chip->buffer_write_time);
if (ret) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
@@ -1728,7 +1729,7 @@ static int __xipram do_erase_oneblock(st
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
- chip->erase_time);
+ &chip->erase_time);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;


@@ -1,19 +0,0 @@
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -4,4 +4,3 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += inode.o
-squashfs-y += squashfs2_0.o
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -24,6 +24,9 @@
#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
#undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
#endif
+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+#undef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+#endif
#ifdef SQUASHFS_TRACE
#define TRACE(s, args...) printk(KERN_NOTICE "SQUASHFS: "s, ## args)


@@ -1,11 +0,0 @@
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -130,6 +130,8 @@
#endif
.endm
+ j kernel_entry
+ nop
#ifndef CONFIG_NO_EXCEPT_FILL
/*
* Reserved space for exception handlers.


@@ -1,18 +0,0 @@
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -902,7 +902,6 @@ static __init void build_tlb_write_entry
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
- case CPU_4KC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
@@ -933,6 +932,7 @@ static __init void build_tlb_write_entry
tlbw(p);
break;
+ case CPU_4KC:
case CPU_4KEC:
case CPU_24K:
case CPU_34K:


@@ -1,18 +0,0 @@
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -656,6 +656,7 @@ void set_async_breakpoint(unsigned long
*epc = (unsigned long)async_breakpoint;
}
+#ifdef CONFIG_SMP
static void kgdb_wait(void *arg)
{
unsigned flags;
@@ -668,6 +669,7 @@ static void kgdb_wait(void *arg)
local_irq_restore(flags);
}
+#endif
/*
* GDB stub needs to call kgdb_wait on all processor with interrupts


@@ -1,112 +0,0 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>
@@ -237,10 +238,11 @@ static void block2mtd_free_device(struct
/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size)
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
{
struct block_device *bdev;
struct block2mtd_dev *dev;
+ struct mtd_partition *part;
if (!devname)
return NULL;
@@ -279,14 +281,18 @@ static struct block2mtd_dev *add_device(
/* Setup the MTD structure */
/* make the name contain the block device in */
- dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
- GFP_KERNEL);
+
+ if (!mtdname)
+ mtdname = devname;
+
+ dev->mtd.name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
+
if (!dev->mtd.name)
goto devinit_err;
+
+ strcpy(dev->mtd.name, mtdname);
- sprintf(dev->mtd.name, "block2mtd: %s", devname);
-
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.type = MTD_RAM;
@@ -298,15 +304,18 @@ static struct block2mtd_dev *add_device(
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
-
- if (add_mtd_device(&dev->mtd)) {
+
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = dev->mtd.name;
+ part->offset = 0;
+ part->size = dev->mtd.size;
+ if (add_mtd_partitions(&dev->mtd, part, 1)) {
/* Device didnt get added, so free the entry */
goto devinit_err;
}
list_add(&dev->list, &blkmtd_device_list);
INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
- dev->mtd.name + strlen("blkmtd: "),
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
devinit_err:
@@ -379,9 +388,9 @@ static char block2mtd_paramline[80 + 12]
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
+ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
char *str = buf;
- char *token[2];
+ char *token[3];
char *name;
size_t erase_size = PAGE_SIZE;
int i, ret;
@@ -392,7 +401,7 @@ static int block2mtd_setup2(const char *
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 3; i++)
token[i] = strsep(&str, ",");
if (str)
@@ -412,8 +421,10 @@ static int block2mtd_setup2(const char *
parse_err("illegal erase size");
}
}
+ if (token[2] && (strlen(token[2]) + 1 > 80))
+ parse_err("mtd device name too long");
- add_device(name, erase_size);
+ add_device(name, erase_size, token[2]);
return 0;
}
@@ -447,7 +458,7 @@ static int block2mtd_setup(const char *v
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
static int __init block2mtd_init(void)
{


@@ -1,944 +0,0 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -47,6 +47,16 @@ config MTD_PARTITIONS
devices. Partitioning on NFTL 'devices' is a different - that's the
'normal' form of partitioning used on a block device.
+config MTD_ROOTFS_ROOT_DEV
+ bool "Automatically set 'rootfs' partition to be root filesystem"
+ depends on MTD_PARTITIONS
+ default y
+
+config MTD_ROOTFS_SPLIT
+ bool "Automatically split 'rootfs' partition for squashfs"
+ depends on MTD_PARTITIONS
+ default y
+
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
depends on MTD_PARTITIONS
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -20,6 +20,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>
+#include <linux/squashfs_fs.h>
+#include <linux/root_dev.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
@@ -39,7 +41,7 @@ struct mtd_part {
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
-
+#define IS_PART(mtd) (mtd->read == part_read)
/*
* MTD methods which simply translate the effective address and pass through
@@ -308,6 +310,312 @@ int del_mtd_partitions(struct mtd_info *
return 0;
}
+static u_int32_t cur_offset = 0;
+static int add_one_partition(struct mtd_info *master, const struct mtd_partition *part,
+ int i, struct mtd_part **slp)
+{
+ struct mtd_part *slave;
+
+ /* allocate the partition structure */
+ slave = kzalloc (sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ printk ("memory allocation error while creating partitions for \"%s\"\n",
+ master->name);
+ del_mtd_partitions(master);
+ return -ENOMEM;
+ }
+ list_add(&slave->list, &mtd_partitions);
+
+ /* set up the MTD object for this partition */
+ slave->mtd.type = master->type;
+ slave->mtd.flags = master->flags & ~part->mask_flags;
+ slave->mtd.size = part->size;
+ slave->mtd.writesize = master->writesize;
+ slave->mtd.oobsize = master->oobsize;
+ slave->mtd.oobavail = master->oobavail;
+ slave->mtd.subpage_sft = master->subpage_sft;
+
+ slave->mtd.name = part->name;
+ slave->mtd.owner = master->owner;
+
+ slave->mtd.read = part_read;
+ slave->mtd.write = part_write;
+ slave->mtd.refresh_device = part->refresh_partition;
+
+ if(master->point && master->unpoint){
+ slave->mtd.point = part_point;
+ slave->mtd.unpoint = part_unpoint;
+ }
+
+ if (master->read_oob)
+ slave->mtd.read_oob = part_read_oob;
+ if (master->write_oob)
+ slave->mtd.write_oob = part_write_oob;
+ if(master->read_user_prot_reg)
+ slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+ if(master->read_fact_prot_reg)
+ slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+ if(master->write_user_prot_reg)
+ slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+ if(master->lock_user_prot_reg)
+ slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+ if(master->get_user_prot_info)
+ slave->mtd.get_user_prot_info = part_get_user_prot_info;
+ if(master->get_fact_prot_info)
+ slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+ if (master->sync)
+ slave->mtd.sync = part_sync;
+ if (!i && master->suspend && master->resume) {
+ slave->mtd.suspend = part_suspend;
+ slave->mtd.resume = part_resume;
+ }
+ if (master->writev)
+ slave->mtd.writev = part_writev;
+ if (master->lock)
+ slave->mtd.lock = part_lock;
+ if (master->unlock)
+ slave->mtd.unlock = part_unlock;
+ if (master->block_isbad)
+ slave->mtd.block_isbad = part_block_isbad;
+ if (master->block_markbad)
+ slave->mtd.block_markbad = part_block_markbad;
+ slave->mtd.erase = part_erase;
+ slave->master = master;
+ slave->offset = part->offset;
+ slave->index = i;
+
+ if (slave->offset == MTDPART_OFS_APPEND)
+ slave->offset = cur_offset;
+ if (slave->offset == MTDPART_OFS_NXTBLK) {
+ slave->offset = cur_offset;
+ if ((cur_offset % master->erasesize) != 0) {
+ /* Round up to next erasesize */
+ slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+ printk(KERN_NOTICE "Moving partition %d: "
+ "0x%08x -> 0x%08x\n", i,
+ cur_offset, slave->offset);
+ }
+ }
+ if (slave->mtd.size == MTDPART_SIZ_FULL)
+ slave->mtd.size = master->size - slave->offset;
+ cur_offset = slave->offset + slave->mtd.size;
+
+ printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+ slave->offset + slave->mtd.size, slave->mtd.name);
+
+ /* let's do some sanity checks */
+ if (slave->offset >= master->size) {
+ /* let's register it anyway to preserve ordering */
+ slave->offset = 0;
+ slave->mtd.size = 0;
+ printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
+ part->name);
+ }
+ if (slave->offset + slave->mtd.size > master->size) {
+ slave->mtd.size = master->size - slave->offset;
+ printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
+ part->name, master->name, slave->mtd.size);
+ }
+ if (master->numeraseregions>1) {
+ /* Deal with variable erase size stuff */
+ int i;
+ struct mtd_erase_region_info *regions = master->eraseregions;
+
+ /* Find the first erase regions which is part of this partition. */
+ for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
+ ;
+
+ for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
+ if (slave->mtd.erasesize < regions[i].erasesize) {
+ slave->mtd.erasesize = regions[i].erasesize;
+ }
+ }
+ } else {
+ /* Single erase size */
+ slave->mtd.erasesize = master->erasesize;
+ }
+
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ (slave->offset % slave->mtd.erasesize)) {
+ /* Doesn't start on a boundary of major erase size */
+ /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+ part->name);
+ }
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ (slave->mtd.size % slave->mtd.erasesize)) {
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+ part->name);
+ }
+
+ slave->mtd.ecclayout = master->ecclayout;
+ if (master->block_isbad) {
+ uint32_t offs = 0;
+
+ while(offs < slave->mtd.size) {
+ if (master->block_isbad(master,
+ offs + slave->offset))
+ slave->mtd.ecc_stats.badblocks++;
+ offs += slave->mtd.erasesize;
+ }
+ }
+
+ if(part->mtdp)
+ { /* store the object pointer (caller may or may not register it */
+ *part->mtdp = &slave->mtd;
+ slave->registered = 0;
+ }
+ else
+ {
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+ slave->registered = 1;
+ }
+
+ if (slp)
+ *slp = slave;
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+#define ROOTFS_SPLIT_NAME "rootfs_data"
+#define ROOTFS_REMOVED_NAME "<removed>"
+static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
+{
+ char buf[512];
+ struct squashfs_super_block *sb = (struct squashfs_super_block *) buf;
+ int len, ret;
+
+ ret = master->read(master, offset, sizeof(*sb), &len, buf);
+ if (ret || (len != sizeof(*sb))) {
+ printk(KERN_ALERT "split_squashfs: error occured while reading "
+ "from \"%s\"\n", master->name);
+ return -EINVAL;
+ }
+
+ if (*((u32 *) buf) != SQUASHFS_MAGIC) {
+ printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
+ master->name);
+ *split_offset = 0;
+ return 0;
+ }
+
+ if (sb->bytes_used <= 0) {
+ printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
+ master->name);
+ *split_offset = 0;
+ return 0;
+ }
+
+ len = (u32) sb->bytes_used;
+ len += (offset & 0x000fffff);
+ len += (master->erasesize - 1);
+ len &= ~(master->erasesize - 1);
+ len -= (offset & 0x000fffff);
+ *split_offset = offset + len;
+
+ return 0;
+}
+
+static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, struct mtd_partition *part,
+ int index)
+{
+ struct mtd_partition *dpart;
+ struct mtd_part *slave = NULL;
+ int split_offset = 0;
+ int ret;
+
+ ret = split_squashfs(master, part->offset, &split_offset);
+ if (ret)
+ return ret;
+
+ if (split_offset <= 0)
+ return 0;
+
+ dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
+ if (dpart == NULL) {
+ printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
+ ROOTFS_SPLIT_NAME);
+ return -ENOMEM;
+ }
+
+ memcpy(dpart, part, sizeof(*part));
+ dpart->name = (unsigned char *)&dpart[1];
+ strcpy(dpart->name, ROOTFS_SPLIT_NAME);
+
+ dpart->size -= split_offset - dpart->offset;
+ dpart->offset = split_offset;
+
+ if (dpart == NULL)
+ return 1;
+
+ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%X, len=%X \n",
+ ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
+
+ ret = add_one_partition(master, dpart, index, &slave);
+ if (ret)
+ kfree(dpart);
+ else if (slave)
+ rpart->split = &slave->mtd;
+
+ return ret;
+}
+
+static int refresh_rootfs_split(struct mtd_info *mtd)
+{
+ struct mtd_partition tpart;
+ struct mtd_part *part;
+ int index = 0;
+ int offset, size;
+ int ret;
+
+ part = PART(mtd);
+
+ /* check for the new squashfs offset first */
+ ret = split_squashfs(part->master, part->offset, &offset);
+ if (ret)
+ return ret;
+
+ if ((offset > 0) && !mtd->split) {
+ printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
+ /* if we don't have a rootfs split partition, create a new one */
+ tpart.name = mtd->name;
+ tpart.size = mtd->size;
+ tpart.offset = part->offset;
+
+ /* find the index of the last partition */
+ if (!list_empty(&mtd_partitions))
+ index = list_first_entry(&mtd_partitions, struct mtd_part, list)->index + 1;
+
+ return split_rootfs_data(part->master, &part->mtd, &tpart, index);
+ } else if ((offset > 0) && mtd->split) {
+ /* update the offsets of the existing partition */
+ size = mtd->size + part->offset - offset;
+
+ part = PART(mtd->split);
+ part->offset = offset;
+ part->mtd.size = size;
+ printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
+ __func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
+ part->offset, part->mtd.size);
+ strcpy(part->mtd.name, ROOTFS_SPLIT_NAME);
+ } else if ((offset <= 0) && mtd->split) {
+ printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
+
+ /* mark existing partition as removed */
+ part = PART(mtd->split);
+ strcpy(part->mtd.name, ROOTFS_REMOVED_NAME);
+ part->offset = 0;
+ part->mtd.size = 0;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MTD_ROOTFS_SPLIT */
+
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
@@ -320,168 +628,31 @@ int add_mtd_partitions(struct mtd_info *
int nbparts)
{
struct mtd_part *slave;
- u_int32_t cur_offset = 0;
- int i;
+ struct mtd_partition *part;
+ int i, j, ret = 0;
printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
- for (i = 0; i < nbparts; i++) {
-
- /* allocate the partition structure */
- slave = kzalloc (sizeof(*slave), GFP_KERNEL);
- if (!slave) {
- printk ("memory allocation error while creating partitions for \"%s\"\n",
- master->name);
- del_mtd_partitions(master);
- return -ENOMEM;
- }
- list_add(&slave->list, &mtd_partitions);
-
- /* set up the MTD object for this partition */
- slave->mtd.type = master->type;
- slave->mtd.flags = master->flags & ~parts[i].mask_flags;
- slave->mtd.size = parts[i].size;
- slave->mtd.writesize = master->writesize;
- slave->mtd.oobsize = master->oobsize;
- slave->mtd.oobavail = master->oobavail;
- slave->mtd.subpage_sft = master->subpage_sft;
-
- slave->mtd.name = parts[i].name;
- slave->mtd.owner = master->owner;
-
- slave->mtd.read = part_read;
- slave->mtd.write = part_write;
-
- if(master->point && master->unpoint){
- slave->mtd.point = part_point;
- slave->mtd.unpoint = part_unpoint;
- }
-
- if (master->read_oob)
- slave->mtd.read_oob = part_read_oob;
- if (master->write_oob)
- slave->mtd.write_oob = part_write_oob;
- if(master->read_user_prot_reg)
- slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
- if(master->read_fact_prot_reg)
- slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
- if(master->write_user_prot_reg)
- slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
- if(master->lock_user_prot_reg)
- slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
- if(master->get_user_prot_info)
- slave->mtd.get_user_prot_info = part_get_user_prot_info;
- if(master->get_fact_prot_info)
- slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
- if (master->sync)
- slave->mtd.sync = part_sync;
- if (!i && master->suspend && master->resume) {
- slave->mtd.suspend = part_suspend;
- slave->mtd.resume = part_resume;
- }
- if (master->writev)
- slave->mtd.writev = part_writev;
- if (master->lock)
- slave->mtd.lock = part_lock;
- if (master->unlock)
- slave->mtd.unlock = part_unlock;
- if (master->block_isbad)
- slave->mtd.block_isbad = part_block_isbad;
- if (master->block_markbad)
- slave->mtd.block_markbad = part_block_markbad;
- slave->mtd.erase = part_erase;
- slave->master = master;
- slave->offset = parts[i].offset;
- slave->index = i;
-
- if (slave->offset == MTDPART_OFS_APPEND)
- slave->offset = cur_offset;
- if (slave->offset == MTDPART_OFS_NXTBLK) {
- slave->offset = cur_offset;
- if ((cur_offset % master->erasesize) != 0) {
- /* Round up to next erasesize */
- slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
- printk(KERN_NOTICE "Moving partition %d: "
- "0x%08x -> 0x%08x\n", i,
- cur_offset, slave->offset);
- }
- }
- if (slave->mtd.size == MTDPART_SIZ_FULL)
- slave->mtd.size = master->size - slave->offset;
- cur_offset = slave->offset + slave->mtd.size;
-
- printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
- slave->offset + slave->mtd.size, slave->mtd.name);
-
- /* let's do some sanity checks */
- if (slave->offset >= master->size) {
- /* let's register it anyway to preserve ordering */
- slave->offset = 0;
- slave->mtd.size = 0;
- printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
- parts[i].name);
- }
- if (slave->offset + slave->mtd.size > master->size) {
- slave->mtd.size = master->size - slave->offset;
- printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
- parts[i].name, master->name, slave->mtd.size);
- }
- if (master->numeraseregions>1) {
- /* Deal with variable erase size stuff */
- int i;
- struct mtd_erase_region_info *regions = master->eraseregions;
-
- /* Find the first erase regions which is part of this partition. */
- for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
- ;
-
- for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
- if (slave->mtd.erasesize < regions[i].erasesize) {
- slave->mtd.erasesize = regions[i].erasesize;
- }
+ for (i = 0, j = 0; i < nbparts; i++) {
+ part = (struct mtd_partition *) &parts[i];
+ ret = add_one_partition(master, part, j, &slave);
+ if (ret)
+ return ret;
+ j++;
+
+ if (strcmp(part->name, "rootfs") == 0 && slave->registered) {
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+ if (ROOT_DEV == 0) {
+ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
+ "set to be root filesystem\n");
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
}
- } else {
- /* Single erase size */
- slave->mtd.erasesize = master->erasesize;
- }
-
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->offset % slave->mtd.erasesize)) {
- /* Doesn't start on a boundary of major erase size */
- /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
- parts[i].name);
- }
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->mtd.size % slave->mtd.erasesize)) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
- parts[i].name);
- }
-
- slave->mtd.ecclayout = master->ecclayout;
- if (master->block_isbad) {
- uint32_t offs = 0;
-
- while(offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
- slave->mtd.ecc_stats.badblocks++;
- offs += slave->mtd.erasesize;
- }
- }
-
- if(parts[i].mtdp)
- { /* store the object pointer (caller may or may not register it */
- *parts[i].mtdp = &slave->mtd;
- slave->registered = 0;
- }
- else
- {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
+#endif
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ ret = split_rootfs_data(master, &slave->mtd, part, j);
+ if (ret == 0)
+ j++;
+#endif
}
}
@@ -557,6 +728,32 @@ int parse_mtd_partitions(struct mtd_info
return ret;
}
+int refresh_mtd_partitions(struct mtd_info *mtd)
+{
+ int ret = 0;
+
+ if (IS_PART(mtd)) {
+ struct mtd_part *part;
+ struct mtd_info *master;
+
+ part = PART(mtd);
+ master = part->master;
+ if (master->refresh_device)
+ ret = master->refresh_device(master);
+ }
+
+ if (!ret && mtd->refresh_device)
+ ret = mtd->refresh_device(mtd);
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
+ refresh_rootfs_split(mtd);
+#endif
+
+ return 0;
+}
+
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
+EXPORT_SYMBOL_GPL(refresh_mtd_partitions);
EXPORT_SYMBOL_GPL(register_mtd_parser);
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -34,6 +34,8 @@ struct block2mtd_dev {
struct block_device *blkdev;
struct mtd_info mtd;
struct mutex write_mutex;
+ rwlock_t bdev_mutex;
+ char devname[0];
};
@@ -86,6 +88,12 @@ static int block2mtd_erase(struct mtd_in
size_t len = instr->len;
int err;
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev) {
+ err = -EINVAL;
+ goto done;
+ }
+
instr->state = MTD_ERASING;
mutex_lock(&dev->write_mutex);
err = _block2mtd_erase(dev, from, len);
@@ -98,6 +106,10 @@ static int block2mtd_erase(struct mtd_in
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
+
+done:
+ read_unlock(&dev->bdev_mutex);
+
return err;
}
@@ -109,10 +121,14 @@ static int block2mtd_read(struct mtd_inf
struct page *page;
int index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
- int cpylen;
+ int cpylen, err = 0;
+
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev || (from > mtd->size)) {
+ err = -EINVAL;
+ goto done;
+ }
- if (from > mtd->size)
- return -EINVAL;
if (from + len > mtd->size)
len = mtd->size - from;
@@ -127,10 +143,14 @@ static int block2mtd_read(struct mtd_inf
len = len - cpylen;
page = page_read(dev->blkdev->bd_inode->i_mapping, index);
- if (!page)
- return -ENOMEM;
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (!page) {
+ err = -ENOMEM;
+ goto done;
+ }
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto done;
+ }
memcpy(buf, page_address(page) + offset, cpylen);
page_cache_release(page);
@@ -141,7 +161,10 @@ static int block2mtd_read(struct mtd_inf
offset = 0;
index++;
}
- return 0;
+
+done:
+ read_unlock(&dev->bdev_mutex);
+ return err;
}
@@ -193,12 +216,22 @@ static int block2mtd_write(struct mtd_in
size_t *retlen, const u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
- int err;
+ int err = 0;
+
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev) {
+ err = -EINVAL;
+ goto done;
+ }
if (!len)
- return 0;
- if (to >= mtd->size)
- return -ENOSPC;
+ goto done;
+
+ if (to >= mtd->size) {
+ err = -ENOSPC;
+ goto done;
+ }
+
if (to + len > mtd->size)
len = mtd->size - to;
@@ -207,6 +240,9 @@ static int block2mtd_write(struct mtd_in
mutex_unlock(&dev->write_mutex);
if (err > 0)
err = 0;
+
+done:
+ read_unlock(&dev->bdev_mutex);
return err;
}
@@ -215,51 +251,29 @@ static int block2mtd_write(struct mtd_in
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->blkdev);
- return;
-}
-
-
-static void block2mtd_free_device(struct block2mtd_dev *dev)
-{
- if (!dev)
- return;
-
- kfree(dev->mtd.name);
- if (dev->blkdev) {
- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
- 0, -1);
- close_bdev_excl(dev->blkdev);
- }
+ read_lock(&dev->bdev_mutex);
+ if (dev->blkdev)
+ sync_blockdev(dev->blkdev);
+ read_unlock(&dev->bdev_mutex);
- kfree(dev);
+ return;
}
-/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+static int _open_bdev(struct block2mtd_dev *dev)
{
struct block_device *bdev;
- struct block2mtd_dev *dev;
- struct mtd_partition *part;
-
- if (!devname)
- return NULL;
-
- dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
- if (!dev)
- return NULL;
/* Get a handle on the device */
- bdev = open_bdev_excl(devname, O_RDWR, NULL);
+ bdev = open_bdev_excl(dev->devname, O_RDWR, NULL);
#ifndef MODULE
if (IS_ERR(bdev)) {
/* We might not have rootfs mounted at this point. Try
to resolve the device name by other means. */
- dev_t devt = name_to_dev_t(devname);
+ dev_t devt = name_to_dev_t(dev->devname);
if (devt) {
bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
}
@@ -267,17 +281,96 @@ static struct block2mtd_dev *add_device(
#endif
if (IS_ERR(bdev)) {
- ERROR("error: cannot open device %s", devname);
- goto devinit_err;
+ ERROR("error: cannot open device %s", dev->devname);
+ return 1;
}
dev->blkdev = bdev;
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
ERROR("attempting to use an MTD device as a block device");
- goto devinit_err;
+ return 1;
}
+ return 0;
+}
+
+static void _close_bdev(struct block2mtd_dev *dev)
+{
+ struct block_device *bdev;
+
+ if (!dev->blkdev)
+ return;
+
+ bdev = dev->blkdev;
+ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1);
+ close_bdev_excl(dev->blkdev);
+ dev->blkdev = NULL;
+}
+
+static void block2mtd_free_device(struct block2mtd_dev *dev)
+{
+ if (!dev)
+ return;
+
+ kfree(dev->mtd.name);
+ _close_bdev(dev);
+ kfree(dev);
+}
+
+
+static int block2mtd_refresh(struct mtd_info *mtd)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ struct block_device *bdev;
+ dev_t devt;
+ int err = 0;
+
+ /* no other mtd function can run at this point */
+ write_lock(&dev->bdev_mutex);
+
+ /* get the device number for the whole disk */
+ devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0);
+
+ /* close the old block device */
+ _close_bdev(dev);
+
+ /* open the whole disk, issue a partition rescan, then */
+ bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
+ if (!bdev || !bdev->bd_disk)
+ err = -EINVAL;
+ else {
+ err = rescan_partitions(bdev->bd_disk, bdev);
+ }
+ if (bdev)
+ close_bdev_excl(bdev);
+
+ /* try to open the partition block device again */
+ _open_bdev(dev);
+ write_unlock(&dev->bdev_mutex);
+
+ return err;
+}
+
+/* FIXME: ensure that mtd->size % erase_size == 0 */
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+{
+ struct block2mtd_dev *dev;
+ struct mtd_partition *part;
+
+ if (!devname)
+ return NULL;
+
+ dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ strcpy(dev->devname, devname);
+
+ if (_open_bdev(dev))
+ goto devinit_err;
+
mutex_init(&dev->write_mutex);
+ rwlock_init(&dev->bdev_mutex);
/* Setup the MTD structure */
/* make the name contain the block device in */
@@ -304,6 +397,7 @@ static struct block2mtd_dev *add_device(
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
+ dev->mtd.refresh_device = block2mtd_refresh;
part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
part->name = dev->mtd.name;
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -17,6 +17,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
+#include <linux/mtd/partitions.h>
#include <asm/uaccess.h>
@@ -753,6 +754,13 @@ static int mtd_ioctl(struct inode *inode
file->f_pos = 0;
break;
}
+#ifdef CONFIG_MTD_PARTITIONS
+ case MTDREFRESH:
+ {
+ ret = refresh_mtd_partitions(mtd);
+ break;
+ }
+#endif
default:
ret = -ENOTTY;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -98,6 +98,7 @@ struct mtd_oob_ops {
uint8_t *oobbuf;
};
+struct mtd_info;
struct mtd_info {
u_char type;
u_int32_t flags;
@@ -195,6 +196,9 @@ struct mtd_info {
struct module *owner;
int usecount;
+ int (*refresh_device)(struct mtd_info *mtd);
+ struct mtd_info *split;
+
/* If the driver is something smart, like UBI, it may need to maintain
* its own reference counting. The below functions are only for driver.
* The driver may register its callbacks. These callbacks are not
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -36,6 +36,7 @@
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/
+struct mtd_partition;
struct mtd_partition {
char *name; /* identifier string */
u_int32_t size; /* partition size */
@@ -43,6 +44,7 @@ struct mtd_partition {
u_int32_t mask_flags; /* master MTD flags to mask out for this partition */
struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/
struct mtd_info **mtdp; /* pointer to store the MTD object */
+ int (*refresh_partition)(struct mtd_info *);
};
#define MTDPART_OFS_NXTBLK (-2)
@@ -52,6 +54,7 @@ struct mtd_partition {
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
+int refresh_mtd_partitions(struct mtd_info *);
/*
* Functions dealing with the various ways of partitioning the space
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -95,6 +95,7 @@ struct otp_info {
#define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout)
#define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
#define MTDFILEMODE _IO('M', 19)
+#define MTDREFRESH _IO('M', 23)
/*
* Obsolete legacy interface. Keep it in order not to break userspace
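
For reference, the MTDREFRESH ioctl added above simply calls refresh_mtd_partitions() on the selected MTD device. A minimal userspace sketch, assuming the patched kernel; the device node /dev/mtd0 and the fallback #define are illustrative, not part of the patch:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>          /* for _IO() */

    #ifndef MTDREFRESH
    #define MTDREFRESH _IO('M', 23)   /* same value as in mtd-abi.h above */
    #endif

    int main(void)
    {
            int fd = open("/dev/mtd0", O_RDWR);   /* example device node */
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, MTDREFRESH) < 0)        /* asks the driver to rescan its partitions */
                    perror("MTDREFRESH");
            close(fd);
            return 0;
    }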

View File

@ -1,30 +0,0 @@
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -236,14 +236,21 @@ static int parse_redboot_partitions(stru
#endif
names += strlen(names)+1;
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
- i++;
- parts[i].offset = parts[i-1].size + parts[i-1].offset;
- parts[i].size = fl->next->img->flash_base - parts[i].offset;
- parts[i].name = nullname;
- }
+ if (!strcmp(parts[i].name, "rootfs")) {
+ parts[i].size = fl->next->img->flash_base;
+ parts[i].size &= ~(master->erasesize - 1);
+ parts[i].size -= parts[i].offset;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ nrparts--;
+ } else {
+ i++;
+ parts[i].offset = parts[i-1].size + parts[i-1].offset;
+ parts[i].size = fl->next->img->flash_base - parts[i].offset;
+ parts[i].name = nullname;
#endif
+ }
+ }
tmp_fl = fl;
fl = fl->next;
kfree(tmp_fl);

View File

@ -1,32 +0,0 @@
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -572,6 +572,7 @@ struct platform_nand_chip {
int chip_delay;
unsigned int options;
const char **part_probe_types;
+ int (*chip_fixup)(struct mtd_info *mtd);
void *priv;
};
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -70,7 +70,18 @@ static int __init plat_nand_probe(struct
platform_set_drvdata(pdev, data);
/* Scan to find existence of the device */
- if (nand_scan(&data->mtd, 1)) {
+ if (nand_scan_ident(&data->mtd, 1)) {
+ res = -ENXIO;
+ goto out;
+ }
+
+ if (pdata->chip.chip_fixup) {
+ res = pdata->chip.chip_fixup(&data->mtd);
+ if (res)
+ goto out;
+ }
+
+ if (nand_scan_tail(&data->mtd)) {
res = -ENXIO;
goto out;
}
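
The chip_fixup() hook added above runs between nand_scan_ident() and nand_scan_tail(), so board code can adjust the detected chip before the bad-block table and ECC setup happen. A hypothetical board-support sketch; the function name and the 32MiB clamp are made up for illustration:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>

    /* hypothetical fixup: clamp the detected chip to the 32MiB the board maps */
    static int board_nand_fixup(struct mtd_info *mtd)
    {
            if (mtd->size > (32 << 20))
                    mtd->size = 32 << 20;
            return 0;
    }

    static const char *board_part_probes[] = { "cmdlinepart", NULL };

    static struct platform_nand_data board_nand_data = {
            .chip = {
                    .nr_chips         = 1,
                    .chip_delay       = 25,
                    .part_probe_types = board_part_probes,
                    .chip_fixup       = board_nand_fixup,  /* hook added by this patch */
            },
            /* .ctrl = { ... } board-specific I/O callbacks omitted */
    };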

View File

@ -1,109 +0,0 @@
--- a/include/linux/netfilter/xt_layer7.h
+++ b/include/linux/netfilter/xt_layer7.h
@@ -8,6 +8,7 @@ struct xt_layer7_info {
char protocol[MAX_PROTOCOL_LEN];
char pattern[MAX_PATTERN_LEN];
u_int8_t invert;
+ u_int8_t pkt;
};
#endif /* _XT_LAYER7_H */
--- a/net/netfilter/xt_layer7.c
+++ b/net/netfilter/xt_layer7.c
@@ -297,34 +297,36 @@ static int match_no_append(struct nf_con
}
/* add the new app data to the conntrack. Return number of bytes added. */
-static int add_data(struct nf_conn * master_conntrack,
- char * app_data, int appdatalen)
+static int add_datastr(char *target, int offset, char *app_data, int len)
{
int length = 0, i;
- int oldlength = master_conntrack->layer7.app_data_len;
-
- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
- clear on whether the race condition exists or whether this really
- fixes it. I might just be being dense... Anyway, if it's not really
- a fix, all it does is waste a very small amount of time. */
- if(!master_conntrack->layer7.app_data) return 0;
+
+ if (!target) return 0;
/* Strip nulls. Make everything lower case (our regex lib doesn't
do case insensitivity). Add it to the end of the current data. */
- for(i = 0; i < maxdatalen-oldlength-1 &&
- i < appdatalen; i++) {
+ for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
if(app_data[i] != '\0') {
/* the kernel version of tolower mungs 'upper ascii' */
- master_conntrack->layer7.app_data[length+oldlength] =
+ target[length+offset] =
isascii(app_data[i])?
tolower(app_data[i]) : app_data[i];
length++;
}
}
+ target[length+offset] = '\0';
+
+ return length;
+}
- master_conntrack->layer7.app_data[length+oldlength] = '\0';
- master_conntrack->layer7.app_data_len = length + oldlength;
+/* add the new app data to the conntrack. Return number of bytes added. */
+static int add_data(struct nf_conn * master_conntrack,
+ char * app_data, int appdatalen)
+{
+ int length;
+ length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
+ master_conntrack->layer7.app_data_len += length;
return length;
}
@@ -411,7 +413,7 @@ match(const struct sk_buff *skbin,
const struct xt_layer7_info * info = matchinfo;
enum ip_conntrack_info master_ctinfo, ctinfo;
struct nf_conn *master_conntrack, *conntrack;
- unsigned char * app_data;
+ unsigned char *app_data, *tmp_data;
unsigned int pattern_result, appdatalen;
regexp * comppattern;
@@ -439,8 +441,8 @@ match(const struct sk_buff *skbin,
master_conntrack = master_ct(master_conntrack);
/* if we've classified it or seen too many packets */
- if(TOTAL_PACKETS > num_packets ||
- master_conntrack->layer7.app_proto) {
+ if(!info->pkt && (TOTAL_PACKETS > num_packets ||
+ master_conntrack->layer7.app_proto)) {
pattern_result = match_no_append(conntrack, master_conntrack,
ctinfo, master_ctinfo, info);
@@ -473,6 +475,25 @@ match(const struct sk_buff *skbin,
/* the return value gets checked later, when we're ready to use it */
comppattern = compile_and_cache(info->pattern, info->protocol);
+ if (info->pkt) {
+ tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
+ if(!tmp_data){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
+ return info->invert;
+ }
+
+ tmp_data[0] = '\0';
+ add_datastr(tmp_data, 0, app_data, appdatalen);
+ pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
+
+ kfree(tmp_data);
+ tmp_data = NULL;
+ spin_unlock_bh(&l7_lock);
+
+ return (pattern_result ^ info->invert);
+ }
+
/* On the first packet of a connection, allocate space for app data */
if(TOTAL_PACKETS == 1 && !skb->cb[0] &&
!master_conntrack->layer7.app_data){
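
The new pkt flag switches the match from the buffered per-connection application data to the payload of the current packet only. A hypothetical rule blob, as a userspace iptables extension might fill it in; the pattern shown is illustrative and not a real layer7 protocol definition:

    #include <linux/netfilter/xt_layer7.h>

    static struct xt_layer7_info example_rule = {
            .protocol = "http",
            .pattern  = "^(get|post) ",   /* illustrative regex only */
            .invert   = 0,
            .pkt      = 1,                /* new field: match this packet only */
    };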

View File

@ -1,91 +0,0 @@
--- /dev/null
+++ b/include/net/xfrmudp.h
@@ -0,0 +1,10 @@
+/*
+ * pointer to function for type that xfrm4_input wants, to permit
+ * decoupling of XFRM from udp.c
+ */
+#define HAVE_XFRM4_UDP_REGISTER
+
+typedef int (*xfrm4_rcv_encap_t)(struct sk_buff *skb, __u16 encap_type);
+extern int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
+ , xfrm4_rcv_encap_t *oldfunc);
+extern int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func);
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -224,6 +224,12 @@ config NET_IPGRE_BROADCAST
Network), but can be distributed all over the Internet. If you want
to do that, say Y here and to "IP multicast routing" below.
+config IPSEC_NAT_TRAVERSAL
+ bool "IPSEC NAT-Traversal (KLIPS compatible)"
+ depends on INET
+ ---help---
+ Includes support for RFC3947/RFC3948 NAT-Traversal of ESP over UDP.
+
config IP_MROUTE
bool "IP: multicast routing"
depends on IP_MULTICAST
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -15,6 +15,7 @@
#include <linux/netfilter_ipv4.h>
#include <net/ip.h>
#include <net/xfrm.h>
+#include <net/xfrmudp.h>
static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
@@ -161,6 +162,29 @@ drop:
return 0;
}
+#ifdef CONFIG_IPSEC_NAT_TRAVERSAL
+static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = NULL;
+
+int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func,
+ xfrm4_rcv_encap_t *oldfunc)
+{
+ if(oldfunc != NULL)
+ *oldfunc = xfrm4_rcv_encap_func;
+
+ xfrm4_rcv_encap_func = func;
+ return 0;
+}
+
+int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func)
+{
+ if(xfrm4_rcv_encap_func != func)
+ return -1;
+
+ xfrm4_rcv_encap_func = NULL;
+ return 0;
+}
+#endif /* CONFIG_IPSEC_NAT_TRAVERSAL */
+
/* If it's a keepalive packet, then just eat it.
* If it's an encapsulated packet, then pass it to the
* IPsec xfrm input.
@@ -251,7 +275,13 @@ int xfrm4_udp_encap_rcv(struct sock *sk,
iph->protocol = IPPROTO_ESP;
/* process ESP */
+#ifdef CONFIG_IPSEC_NAT_TRAVERSAL
+ if(xfrm4_rcv_encap_func == NULL)
+ goto drop;
+ ret = (*xfrm4_rcv_encap_func)(skb, up->encap_type);
+#else
ret = xfrm4_rcv_encap(skb, encap_type);
+#endif
return ret;
drop:
@@ -265,3 +295,8 @@ int xfrm4_rcv(struct sk_buff *skb)
}
EXPORT_SYMBOL(xfrm4_rcv);
+
+#ifdef CONFIG_IPSEC_NAT_TRAVERSAL
+EXPORT_SYMBOL(udp4_register_esp_rcvencap);
+EXPORT_SYMBOL(udp4_unregister_esp_rcvencap);
+#endif
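
The registration functions exported above let an out-of-tree IPsec stack (KLIPS-style) take over ESP-in-UDP decapsulation. A minimal consumer-module sketch, assuming a local handler named my_esp4_rcv_encap() that is not part of the patch:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/skbuff.h>
    #include <net/xfrmudp.h>

    static xfrm4_rcv_encap_t old_handler;

    static int my_esp4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
    {
            /* a real handler would decapsulate and verify ESP here;
             * this stub just drops the packet and reports it as handled */
            kfree_skb(skb);
            return 0;
    }

    static int __init my_init(void)
    {
            /* remember any previously installed handler */
            return udp4_register_esp_rcvencap(my_esp4_rcv_encap, &old_handler);
    }

    static void __exit my_exit(void)
    {
            udp4_unregister_esp_rcvencap(my_esp4_rcv_encap);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");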

View File

@ -1,239 +0,0 @@
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_time.h
@@ -0,0 +1,18 @@
+#ifndef __ipt_time_h_included__
+#define __ipt_time_h_included__
+
+
+struct ipt_time_info {
+ u_int8_t days_match; /* 1 bit per day. -SMTWTFS */
+ u_int16_t time_start; /* 0 < time_start < 23*60+59 = 1439 */
+	u_int16_t time_stop;    /* 0:00 < time_stop < 23:59 */
+
+	/* FIXME: Keep this one for userspace iptables binary compatibility: */
+ u_int8_t kerneltime; /* ignore skb time (and use kerneltime) or not. */
+
+ time_t date_start;
+ time_t date_stop;
+};
+
+
+#endif /* __ipt_time_h_included__ */
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_time.c
@@ -0,0 +1,180 @@
+/*
+ This is a module which is used for time matching
+ It is using some modified code from dietlibc (localtime() function)
+ that you can find at http://www.fefe.de/dietlibc/
+ This file is distributed under the terms of the GNU General Public
+ License (GPL). Copies of the GPL can be obtained from: ftp://prep.ai.mit.edu/pub/gnu/GPL
+ 2001-05-04 Fabrice MARIE <fabrice@netfilter.org> : initial development.
+ 2001-21-05 Fabrice MARIE <fabrice@netfilter.org> : bug fix in the match code,
+ thanks to "Zeng Yu" <zengy@capitel.com.cn> for bug report.
+ 2001-26-09 Fabrice MARIE <fabrice@netfilter.org> : force the match to be in LOCAL_IN or PRE_ROUTING only.
+ 2001-30-11 Fabrice : added the possibility to use the match in FORWARD/OUTPUT with a little hack,
+ added Nguyen Dang Phuoc Dong <dongnd@tlnet.com.vn> patch to support timezones.
+ 2004-05-02 Fabrice : added support for date matching, from an idea of Fabien COELHO.
+*/
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_time.h>
+#include <linux/time.h>
+
+MODULE_AUTHOR("Fabrice MARIE <fabrice@netfilter.org>");
+MODULE_DESCRIPTION("Match arrival timestamp/date");
+MODULE_LICENSE("GPL");
+
+struct tm
+{
+ int tm_sec; /* Seconds. [0-60] (1 leap second) */
+ int tm_min; /* Minutes. [0-59] */
+ int tm_hour; /* Hours. [0-23] */
+ int tm_mday; /* Day. [1-31] */
+ int tm_mon; /* Month. [0-11] */
+ int tm_year; /* Year - 1900. */
+ int tm_wday; /* Day of week. [0-6] */
+ int tm_yday; /* Days in year.[0-365] */
+ int tm_isdst; /* DST. [-1/0/1]*/
+
+ long int tm_gmtoff; /* we don't care, we count from GMT */
+ const char *tm_zone; /* we don't care, we count from GMT */
+};
+
+void
+localtime(const u32 time, struct tm *r);
+
+static bool
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct xt_match *match,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ bool *hotdrop)
+{
+ const struct ipt_time_info *info = matchinfo; /* match info for rule */
+ struct timeval tv;
+ struct tm currenttime; /* time human readable */
+ u_int8_t days_of_week[7] = {64, 32, 16, 8, 4, 2, 1};
+ u_int16_t packet_time;
+
+ /* We might not have a timestamp, get one */
+ if (skb->tstamp.tv64 == 0)
+ __net_timestamp((struct sk_buff *)skb);
+
+ skb_get_timestamp(skb, &tv);
+ /* First we make sure we are in the date start-stop boundaries */
+ if ((tv.tv_sec < info->date_start) || (tv.tv_sec > info->date_stop))
+ return 0; /* We are outside the date boundaries */
+
+ /* Transform the timestamp of the packet, in a human readable form */
+ localtime(tv.tv_sec, &currenttime);
+
+ /* check if we match this timestamp, we start by the days... */
+ if ((days_of_week[currenttime.tm_wday] & info->days_match) != days_of_week[currenttime.tm_wday])
+ return 0; /* the day doesn't match */
+
+ /* ... check the time now */
+ packet_time = (currenttime.tm_hour * 60) + currenttime.tm_min;
+ if ((packet_time < info->time_start) || (packet_time > info->time_stop))
+ return 0;
+
+ /* here we match ! */
+ return 1;
+}
+
+static bool
+checkentry(const char *tablename,
+ const void *ip,
+ const struct xt_match *match,
+ void *matchinfo,
+ unsigned int hook_mask)
+{
+ struct ipt_time_info *info = matchinfo; /* match info for rule */
+
+ /* First, check that we are in the correct hooks */
+ if (hook_mask
+ & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD) | (1 << NF_IP_LOCAL_OUT)))
+ {
+ printk("ipt_time: error, only valid for PRE_ROUTING, LOCAL_IN, FORWARD and OUTPUT)\n");
+ return 0;
+ }
+
+ /* Now check the coherence of the data ... */
+ if ((info->time_start > 1439) || /* 23*60+59 = 1439*/
+ (info->time_stop > 1439))
+ {
+ printk(KERN_WARNING "ipt_time: invalid argument\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct ipt_match time_match = {
+ .name = "time",
+ .match = &match,
+ .matchsize = sizeof(struct ipt_time_info),
+ .checkentry = &checkentry,
+ .me = THIS_MODULE
+};
+
+static int __init init(void)
+{
+ printk("ipt_time loading\n");
+ return xt_register_match(&time_match);
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_match(&time_match);
+ printk("ipt_time unloaded\n");
+}
+
+module_init(init);
+module_exit(fini);
+
+
+/* The part below is borrowed and modified from dietlibc */
+
+/* seconds per day */
+#define SPD 24*60*60
+
+void
+localtime(const u32 time, struct tm *r) {
+ u32 i, timep;
+ extern struct timezone sys_tz;
+ const unsigned int __spm[12] =
+ { 0,
+ (31),
+ (31+28),
+ (31+28+31),
+ (31+28+31+30),
+ (31+28+31+30+31),
+ (31+28+31+30+31+30),
+ (31+28+31+30+31+30+31),
+ (31+28+31+30+31+30+31+31),
+ (31+28+31+30+31+30+31+31+30),
+ (31+28+31+30+31+30+31+31+30+31),
+ (31+28+31+30+31+30+31+31+30+31+30),
+ };
+ register u32 work;
+
+ timep = time - (sys_tz.tz_minuteswest * 60);
+ work=timep%(SPD);
+ r->tm_sec=work%60; work/=60;
+ r->tm_min=work%60; r->tm_hour=work/60;
+ work=timep/(SPD);
+ r->tm_wday=(4+work)%7;
+ for (i=1970; ; ++i) {
+ register time_t k= (!(i%4) && ((i%100) || !(i%400)))?366:365;
+ if (work>k)
+ work-=k;
+ else
+ break;
+ }
+ r->tm_year=i-1900;
+ for (i=11; i && __spm[i]>work; --i) ;
+ r->tm_mon=i;
+ r->tm_mday=work-__spm[i]+1;
+}
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -72,6 +72,22 @@ config IP_NF_MATCH_TOS
To compile it as a module, choose M here. If unsure, say N.
+
+config IP_NF_MATCH_TIME
+ tristate 'TIME match support'
+ depends on IP_NF_IPTABLES
+ help
+ This option adds a `time' match, which allows you
+ to match based on the packet arrival time/date
+ (arrival time/date at the machine which netfilter is running on) or
+ departure time/date (for locally generated packets).
+
+ If you say Y here, try iptables -m time --help for more information.
+	  If you want to compile it as a module, say M here and read
+	  Documentation/modules.txt. If unsure, say `N'.
+
+
config IP_NF_MATCH_RECENT
tristate "recent match support"
depends on IP_NF_IPTABLES
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
+obj-$(CONFIG_IP_NF_MATCH_TIME) += ipt_time.o
obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
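
For reference, the match data above uses one bit per weekday (-SMTWTFS, i.e. Sunday = 0x40 down to Saturday = 0x01, per the days_of_week[] table in ipt_time.c), minutes since midnight for the times and epoch seconds for the dates. A hypothetical rule covering Monday to Friday, 08:00-17:30, with no date restriction:

    #include <linux/netfilter_ipv4/ipt_time.h>

    static struct ipt_time_info working_hours = {
            /* Mon 0x20 | Tue 0x10 | Wed 0x08 | Thu 0x04 | Fri 0x02 */
            .days_match = 0x3e,
            .time_start = 8 * 60,          /* 08:00, minutes since midnight */
            .time_stop  = 17 * 60 + 30,    /* 17:30 */
            .date_start = 0,               /* no lower date bound */
            .date_stop  = 0x7fffffff,      /* effectively no upper bound */
    };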

View File

@ -1,849 +0,0 @@
--- /dev/null
+++ b/drivers/net/imq.c
@@ -0,0 +1,400 @@
+/*
+ * Pseudo-driver for the intermediate queue device.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Patrick McHardy, <kaber@trash.net>
+ *
+ * The first version was written by Martin Devera, <devik@cdi.cz>
+ *
+ * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
+ * - Update patch to 2.4.21
+ * Sebastian Strollo <sstrollo@nortelnetworks.com>
+ * - Fix "Dead-loop on netdevice imq"-issue
+ * Marcel Sebek <sebek64@post.cz>
+ * - Update to 2.6.2-rc1
+ *
+ * After some time of inactivity there is a group taking care
+ * of IMQ again: http://www.linuximq.net
+ *
+ *
+ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7 including
+ * the following changes:
+ *
+ * - Correction of ipv6 support "+"s issue (Hasso Tepper)
+ * - Correction of imq_init_devs() issue that resulted in
+ * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
+ * - Addition of functionality to choose number of IMQ devices
+ * during kernel config (Andre Correa)
+ * - Addition of functionality to choose how IMQ hooks on
+ * PRE and POSTROUTING (after or before NAT) (Andre Correa)
+ * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
+ *
+ *
+ * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
+ * released with almost no problems. 2.6.14-x was released
+ * with some important changes: nfcache was removed; After
+ * some weeks of trouble we figured out that some IMQ fields
+ * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
+ * These functions are correctly patched by this new patch version.
+ *
+ * Thanks for all who helped to figure out all the problems with
+ * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
+ * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
+ * I didn't forget anybody). I apologize again for my lack of time.
+ *
+ * More info at: http://www.linuximq.net/ (Andre Correa)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ #include <linux/netfilter_ipv6.h>
+#endif
+#include <linux/imq.h>
+#include <net/pkt_sched.h>
+
+extern int qdisc_restart1(struct net_device *dev);
+
+static nf_hookfn imq_nf_hook;
+
+static struct nf_hook_ops imq_ingress_ipv4 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ .priority = NF_IP_PRI_MANGLE + 1
+#else
+ .priority = NF_IP_PRI_NAT_DST + 1
+#endif
+};
+
+static struct nf_hook_ops imq_egress_ipv4 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+ .priority = NF_IP_PRI_LAST
+#else
+ .priority = NF_IP_PRI_NAT_SRC - 1
+#endif
+};
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+static struct nf_hook_ops imq_ingress_ipv6 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ .priority = NF_IP6_PRI_MANGLE + 1
+#else
+ .priority = NF_IP6_PRI_NAT_DST + 1
+#endif
+};
+
+static struct nf_hook_ops imq_egress_ipv6 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+ .priority = NF_IP6_PRI_LAST
+#else
+ .priority = NF_IP6_PRI_NAT_SRC - 1
+#endif
+};
+#endif
+
+#if defined(CONFIG_IMQ_NUM_DEVS)
+static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
+#else
+static unsigned int numdevs = 16;
+#endif
+
+static struct net_device *imq_devs;
+
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+{
+ return (struct net_device_stats *)dev->priv;
+}
+
+/* called for packets kfree'd in qdiscs at places other than enqueue */
+static void imq_skb_destructor(struct sk_buff *skb)
+{
+ struct nf_info *info = skb->nf_info;
+
+ if (info) {
+ if (info->indev)
+ dev_put(info->indev);
+ if (info->outdev)
+ dev_put(info->outdev);
+ kfree(info);
+ }
+}
+
+static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *stats = (struct net_device_stats*) dev->priv;
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+
+ skb->imq_flags = 0;
+ skb->destructor = NULL;
+
+ dev->trans_start = jiffies;
+ nf_reinject(skb, skb->nf_info, NF_ACCEPT);
+ return 0;
+}
+
+static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info, unsigned queue_num, void *data)
+{
+ struct net_device *dev;
+ struct net_device_stats *stats;
+ struct sk_buff *skb2 = NULL;
+ struct Qdisc *q;
+ unsigned int index = skb->imq_flags&IMQ_F_IFMASK;
+ int ret = -1;
+
+ if (index > numdevs)
+ return -1;
+
+ dev = imq_devs + index;
+ if (!(dev->flags & IFF_UP)) {
+ skb->imq_flags = 0;
+ nf_reinject(skb, info, NF_ACCEPT);
+ return 0;
+ }
+ dev->last_rx = jiffies;
+
+ if (skb->destructor) {
+ skb2 = skb;
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb)
+ return -1;
+ }
+ skb->nf_info = info;
+
+ stats = (struct net_device_stats *)dev->priv;
+ stats->rx_bytes+= skb->len;
+ stats->rx_packets++;
+
+ spin_lock_bh(&dev->queue_lock);
+ q = dev->qdisc;
+ if (q->enqueue) {
+ q->enqueue(skb_get(skb), q);
+ if (skb_shared(skb)) {
+ skb->destructor = imq_skb_destructor;
+ kfree_skb(skb);
+ ret = 0;
+ }
+ }
+ if (spin_is_locked(&dev->_xmit_lock))
+ netif_schedule(dev);
+ else
+ while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
+ /* NOTHING */;
+
+ spin_unlock_bh(&dev->queue_lock);
+
+ if (skb2)
+ kfree_skb(ret ? skb : skb2);
+
+ return ret;
+}
+
+static struct nf_queue_handler nfqh = {
+ .name = "imq",
+ .outfn = imq_nf_queue,
+};
+
+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *indev,
+ const struct net_device *outdev,
+ int (*okfn)(struct sk_buff *))
+{
+ if ((*pskb)->imq_flags & IMQ_F_ENQUEUE)
+ return NF_QUEUE;
+
+ return NF_ACCEPT;
+}
+
+
+static int __init imq_init_hooks(void)
+{
+ int err;
+
+ err = nf_register_queue_handler(PF_INET, &nfqh);
+ if (err > 0)
+ goto err1;
+ if ((err = nf_register_hook(&imq_ingress_ipv4)))
+ goto err2;
+ if ((err = nf_register_hook(&imq_egress_ipv4)))
+ goto err3;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ if ((err = nf_register_queue_handler(PF_INET6, &nfqh)))
+ goto err4;
+ if ((err = nf_register_hook(&imq_ingress_ipv6)))
+ goto err5;
+ if ((err = nf_register_hook(&imq_egress_ipv6)))
+ goto err6;
+#endif
+
+ return 0;
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+err6:
+ nf_unregister_hook(&imq_ingress_ipv6);
+err5:
+ nf_unregister_queue_handler(PF_INET6, &nfqh);
+err4:
+ nf_unregister_hook(&imq_egress_ipv4);
+#endif
+err3:
+ nf_unregister_hook(&imq_ingress_ipv4);
+err2:
+ nf_unregister_queue_handler(PF_INET, &nfqh);
+err1:
+ return err;
+}
+
+static void __exit imq_unhook(void)
+{
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ nf_unregister_hook(&imq_ingress_ipv6);
+ nf_unregister_hook(&imq_egress_ipv6);
+ nf_unregister_queue_handler(PF_INET6, &nfqh);
+#endif
+ nf_unregister_hook(&imq_ingress_ipv4);
+ nf_unregister_hook(&imq_egress_ipv4);
+ nf_unregister_queue_handler(PF_INET, &nfqh);
+}
+
+static int __init imq_dev_init(struct net_device *dev)
+{
+ dev->hard_start_xmit = imq_dev_xmit;
+ dev->type = ARPHRD_VOID;
+ dev->mtu = 16000;
+ dev->tx_queue_len = 11000;
+ dev->flags = IFF_NOARP;
+ dev->priv = kzalloc(sizeof(struct net_device_stats), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ dev->get_stats = imq_get_stats;
+
+ return 0;
+}
+
+static void imq_dev_uninit(struct net_device *dev)
+{
+ kfree(dev->priv);
+}
+
+static int __init imq_init_devs(void)
+{
+ struct net_device *dev;
+ int i,j;
+ j = numdevs;
+
+ if (!numdevs || numdevs > IMQ_MAX_DEVS) {
+ printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
+ IMQ_MAX_DEVS);
+ return -EINVAL;
+ }
+
+ imq_devs = kzalloc(sizeof(struct net_device) * numdevs, GFP_KERNEL);
+ if (!imq_devs)
+ return -ENOMEM;
+
+ /* we start counting at zero */
+ numdevs--;
+
+ for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
+ SET_MODULE_OWNER(dev);
+ strcpy(dev->name, "imq%d");
+ dev->init = imq_dev_init;
+ dev->uninit = imq_dev_uninit;
+
+ if (register_netdev(dev) < 0)
+ goto err_register;
+ }
+ printk(KERN_INFO "IMQ starting with %u devices...\n", j);
+ return 0;
+
+err_register:
+ for (; i; i--)
+ unregister_netdev(--dev);
+ kfree(imq_devs);
+ return -EIO;
+}
+
+static void imq_cleanup_devs(void)
+{
+ int i;
+ struct net_device *dev = imq_devs;
+
+ for (i = 0; i <= numdevs; i++)
+ unregister_netdev(dev++);
+
+ kfree(imq_devs);
+}
+
+static int __init imq_init_module(void)
+{
+ int err;
+
+ if ((err = imq_init_devs())) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_devs()\n");
+ return err;
+ }
+ if ((err = imq_init_hooks())) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
+ imq_cleanup_devs();
+ return err;
+ }
+
+ printk(KERN_INFO "IMQ driver loaded successfully.\n");
+
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
+#else
+ printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
+#endif
+#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
+#else
+ printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
+#endif
+
+ return 0;
+}
+
+static void __exit imq_cleanup_module(void)
+{
+ imq_unhook();
+ imq_cleanup_devs();
+ printk(KERN_INFO "IMQ driver unloaded successfully.\n");
+}
+
+
+module_init(imq_init_module);
+module_exit(imq_cleanup_module);
+
+module_param(numdevs, int, 16);
+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -112,6 +112,129 @@ config EQUALIZER
To compile this driver as a module, choose M here: the module
will be called eql. If unsure, say N.
+config IMQ
+ tristate "IMQ (intermediate queueing device) support"
+ depends on NETDEVICES && NETFILTER
+ ---help---
+	  The IMQ device(s) is used as a placeholder for QoS queueing
+ disciplines. Every packet entering/leaving the IP stack can be
+ directed through the IMQ device where it's enqueued/dequeued to the
+ attached qdisc. This allows you to treat network devices as classes
+ and distribute bandwidth among them. Iptables is used to specify
+ through which IMQ device, if any, packets travel.
+
+ More information at: http://www.linuximq.net/
+
+ To compile this driver as a module, choose M here: the module
+ will be called imq. If unsure, say N.
+
+choice
+ prompt "IMQ behavior (PRE/POSTROUTING)"
+ depends on IMQ
+ default IMQ_BEHAVIOR_AB
+ help
+
+	  This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ IMQ can work in any of the following ways:
+
+ PREROUTING | POSTROUTING
+ -----------------|-------------------
+ #1 After NAT | After NAT
+ #2 After NAT | Before NAT
+ #3 Before NAT | After NAT
+ #4 Before NAT | Before NAT
+
+ The default behavior is to hook before NAT on PREROUTING
+ and after NAT on POSTROUTING (#3).
+
+	  These settings are especially useful when trying to use IMQ
+ to shape NATed clients.
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_AA
+ bool "IMQ AA"
+ help
+	  This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: After NAT
+ POSTROUTING: After NAT
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_AB
+ bool "IMQ AB"
+ help
+	  This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: After NAT
+ POSTROUTING: Before NAT
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_BA
+ bool "IMQ BA"
+ help
+	  This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: Before NAT
+ POSTROUTING: After NAT
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_BB
+ bool "IMQ BB"
+ help
+	  This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: Before NAT
+ POSTROUTING: Before NAT
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+endchoice
+
+config IMQ_NUM_DEVS
+
+ int "Number of IMQ devices"
+ range 2 16
+ depends on IMQ
+ default "16"
+ help
+
+	  This setting defines how many IMQ devices will be
+ created.
+
+ The default value is 16.
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
config TUN
tristate "Universal TUN/TAP device driver support"
select CRC32
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -131,6 +131,7 @@ obj-$(CONFIG_SLHC) += slhc.o
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_IMQ) += imq.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACVLAN) += macvlan.o
obj-$(CONFIG_DE600) += de600.o
--- /dev/null
+++ b/include/linux/imq.h
@@ -0,0 +1,9 @@
+#ifndef _IMQ_H
+#define _IMQ_H
+
+#define IMQ_MAX_DEVS 16
+
+#define IMQ_F_IFMASK 0x7f
+#define IMQ_F_ENQUEUE 0x80
+
+#endif /* _IMQ_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
@@ -0,0 +1,8 @@
+#ifndef _IPT_IMQ_H
+#define _IPT_IMQ_H
+
+struct ipt_imq_info {
+ unsigned int todev; /* target imq device */
+};
+
+#endif /* _IPT_IMQ_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
@@ -0,0 +1,8 @@
+#ifndef _IP6T_IMQ_H
+#define _IP6T_IMQ_H
+
+struct ip6t_imq_info {
+ unsigned int todev; /* target imq device */
+};
+
+#endif /* _IP6T_IMQ_H */
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -295,6 +295,10 @@ struct sk_buff {
struct nf_conntrack *nfct;
struct sk_buff *nfct_reasm;
#endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ unsigned char imq_flags;
+ struct nf_info *nf_info;
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info *nf_bridge;
#endif
@@ -1725,6 +1729,10 @@ static inline void __nf_copy(struct sk_b
dst->nfct_reasm = src->nfct_reasm;
nf_conntrack_get_reasm(src->nfct_reasm);
#endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ dst->imq_flags = src->imq_flags;
+ dst->nf_info = src->nf_info;
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
dst->nf_bridge = src->nf_bridge;
nf_bridge_get(src->nf_bridge);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -94,6 +94,9 @@
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
@@ -1462,7 +1465,11 @@ static int dev_gso_segment(struct sk_buf
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
if (likely(!skb->next)) {
- if (!list_empty(&ptype_all))
+ if (!list_empty(&ptype_all)
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ && !(skb->imq_flags & IMQ_F_ENQUEUE)
+#endif
+ )
dev_queue_xmit_nit(skb, dev);
if (netif_needs_gso(dev, skb)) {
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_IMQ.c
@@ -0,0 +1,69 @@
+/*
+ * This target marks packets to be enqueued to an imq device
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const struct xt_target *target,
+ const void *targinfo)
+{
+ struct ipt_imq_info *mr = (struct ipt_imq_info*)targinfo;
+
+ (*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
+
+ return XT_CONTINUE;
+}
+
+static bool imq_checkentry(const char *tablename,
+ const void *e,
+ const struct xt_target *target,
+ void *targinfo,
+ unsigned int hook_mask)
+{
+ struct ipt_imq_info *mr;
+
+ mr = (struct ipt_imq_info*)targinfo;
+
+ if (mr->todev > IMQ_MAX_DEVS) {
+ printk(KERN_WARNING
+ "IMQ: invalid device specified, highest is %u\n",
+ IMQ_MAX_DEVS);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct xt_target ipt_imq_reg = {
+ .name = "IMQ",
+ .family = AF_INET,
+ .target = imq_target,
+ .targetsize = sizeof(struct ipt_imq_info),
+ .checkentry = imq_checkentry,
+ .me = THIS_MODULE,
+ .table = "mangle"
+};
+
+static int __init init(void)
+{
+ return xt_register_target(&ipt_imq_reg);
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&ipt_imq_reg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -327,6 +327,17 @@ config IP_NF_MANGLE
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_IMQ
+ tristate "IMQ target support"
+ depends on IP_NF_MANGLE
+ help
+	  This option adds an `IMQ' target which is used to specify if and
+ to which IMQ device packets should get enqueued/dequeued.
+
+ For more information visit: http://www.linuximq.net/
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP_NF_TARGET_TOS
tristate "TOS target support"
depends on IP_NF_MANGLE
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ip
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_IMQ.c
@@ -0,0 +1,69 @@
+/*
+ * This target marks packets to be enqueued to an imq device
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_ipv6/ip6t_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const struct xt_target *target,
+ const void *targinfo)
+{
+ struct ip6t_imq_info *mr = (struct ip6t_imq_info*)targinfo;
+
+ (*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
+
+ return XT_CONTINUE;
+}
+
+static bool imq_checkentry(const char *tablename,
+ const void *entry,
+ const struct xt_target *target,
+ void *targinfo,
+ unsigned int hook_mask)
+{
+ struct ip6t_imq_info *mr;
+
+ mr = (struct ip6t_imq_info*)targinfo;
+
+ if (mr->todev > IMQ_MAX_DEVS) {
+ printk(KERN_WARNING
+ "IMQ: invalid device specified, highest is %u\n",
+ IMQ_MAX_DEVS);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct xt_target ip6t_imq_reg = {
+ .name = "IMQ",
+ .family = AF_INET6,
+ .target = imq_target,
+ .targetsize = sizeof(struct ip6t_imq_info),
+ .table = "mangle",
+ .checkentry = imq_checkentry,
+ .me = THIS_MODULE
+};
+
+static int __init init(void)
+{
+ return xt_register_target(&ip6t_imq_reg);
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&ip6t_imq_reg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -173,6 +173,15 @@ config IP6_NF_MANGLE
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_TARGET_IMQ
+ tristate "IMQ target support"
+ depends on IP6_NF_MANGLE
+ help
+	  This option adds an `IMQ' target which is used to specify if and
+ to which imq device packets should get enqueued/dequeued.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP6_NF_TARGET_HL
tristate 'HL (hoplimit) target support'
depends on IP6_NF_MANGLE
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t
obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o
obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -191,6 +191,11 @@ static inline int qdisc_restart(struct n
return ret;
}
+int qdisc_restart1(struct net_device *dev)
+{
+ return qdisc_restart(dev);
+}
+
void __qdisc_run(struct net_device *dev)
{
do {
@@ -620,3 +625,4 @@ EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);
+EXPORT_SYMBOL(qdisc_restart1);
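
The IMQ target itself only carries the index of the imq device to use; at hook time it sets skb->imq_flags = todev | IMQ_F_ENQUEUE, so imq_nf_hook() returns NF_QUEUE and imq_nf_queue() feeds the packet to that device's qdisc. A hypothetical rule blob steering matched packets through imq2:

    #include <linux/netfilter_ipv4/ipt_IMQ.h>

    static struct ipt_imq_info imq_rule = {
            .todev = 2,   /* enqueue matched packets to the imq2 device */
    };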

View File

@ -1,928 +0,0 @@
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_ROUTE.h
@@ -0,0 +1,23 @@
+/* Header file for iptables ipt_ROUTE target
+ *
+ * (C) 2002 by Cédric de Launois <delaunois@info.ucl.ac.be>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ */
+#ifndef _IPT_ROUTE_H_target
+#define _IPT_ROUTE_H_target
+
+#define IPT_ROUTE_IFNAMSIZ 16
+
+struct ipt_route_target_info {
+ char oif[IPT_ROUTE_IFNAMSIZ]; /* Output Interface Name */
+ char iif[IPT_ROUTE_IFNAMSIZ]; /* Input Interface Name */
+ u_int32_t gw; /* IP address of gateway */
+ u_int8_t flags;
+};
+
+/* Values for "flags" field */
+#define IPT_ROUTE_CONTINUE 0x01
+#define IPT_ROUTE_TEE 0x02
+
+#endif /*_IPT_ROUTE_H_target*/
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_ROUTE.h
@@ -0,0 +1,23 @@
+/* Header file for iptables ip6t_ROUTE target
+ *
+ * (C) 2003 by Cédric de Launois <delaunois@info.ucl.ac.be>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ */
+#ifndef _IPT_ROUTE_H_target
+#define _IPT_ROUTE_H_target
+
+#define IP6T_ROUTE_IFNAMSIZ 16
+
+struct ip6t_route_target_info {
+ char oif[IP6T_ROUTE_IFNAMSIZ]; /* Output Interface Name */
+ char iif[IP6T_ROUTE_IFNAMSIZ]; /* Input Interface Name */
+ u_int32_t gw[4]; /* IPv6 address of gateway */
+ u_int8_t flags;
+};
+
+/* Values for "flags" field */
+#define IP6T_ROUTE_CONTINUE 0x01
+#define IP6T_ROUTE_TEE 0x02
+
+#endif /*_IP6T_ROUTE_H_target*/
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_ROUTE.c
@@ -0,0 +1,463 @@
+/*
+ * This implements the ROUTE target, which enables you to setup unusual
+ * routes not supported by the standard kernel routing table.
+ *
+ * Copyright (C) 2002 Cedric de Launois <delaunois@info.ucl.ac.be>
+ *
+ * v 1.11 2004/11/23
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter_ipv4/ipt_ROUTE.h>
+#include <linux/netdevice.h>
+#include <linux/route.h>
+#include <linux/version.h>
+#include <linux/if_arp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/icmp.h>
+#include <net/checksum.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cedric de Launois <delaunois@info.ucl.ac.be>");
+MODULE_DESCRIPTION("iptables ROUTE target module");
+
+/* Try to route the packet according to the routing keys specified in
+ * route_info. Keys are :
+ * - ifindex :
+ * 0 if no oif preferred,
+ * otherwise set to the index of the desired oif
+ * - route_info->gw :
+ * 0 if no gateway specified,
+ * otherwise set to the next host to which the pkt must be routed
+ * If success, skb->dev is the output device to which the packet must
+ * be sent and skb->dst is not NULL
+ *
+ * RETURN: -1 if an error occurred
+ * 1 if the packet was successfully routed to the
+ * destination desired
+ * 0 if the kernel routing table could not route the packet
+ * according to the keys specified
+ */
+static int route(struct sk_buff *skb,
+ unsigned int ifindex,
+ const struct ipt_route_target_info *route_info)
+{
+ int err;
+ struct rtable *rt;
+ struct iphdr *iph = ip_hdr(skb);
+ struct flowi fl = {
+ .oif = ifindex,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .saddr = 0,
+ .tos = RT_TOS(iph->tos),
+ .scope = RT_SCOPE_UNIVERSE,
+ }
+ }
+ };
+
+ /* The destination address may be overloaded by the target */
+ if (route_info->gw)
+ fl.fl4_dst = route_info->gw;
+
+ /* Trying to route the packet using the standard routing table. */
+ if ((err = ip_route_output_key(&rt, &fl))) {
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: couldn't route pkt (err: %i)",err);
+ return -1;
+ }
+
+ /* Drop old route. */
+ dst_release(skb->dst);
+ skb->dst = NULL;
+
+ /* Success if no oif specified or if the oif correspond to the
+ * one desired */
+ if (!ifindex || rt->u.dst.dev->ifindex == ifindex) {
+ skb->dst = &rt->u.dst;
+ skb->dev = skb->dst->dev;
+ skb->protocol = htons(ETH_P_IP);
+ return 1;
+ }
+
+ /* The interface selected by the routing table is not the one
+ * specified by the user. This may happen because the dst address
+ * is one of our own addresses.
+ */
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: failed to route as desired gw=%u.%u.%u.%u oif=%i (got oif=%i)\n",
+ NIPQUAD(route_info->gw), ifindex, rt->u.dst.dev->ifindex);
+
+ return 0;
+}
+
+
+/* Stolen from ip_finish_output2
+ * PRE : skb->dev is set to the device we are leaving by
+ * skb->dst is not NULL
+ * POST: the packet is sent with the link layer header pushed
+ * the packet is destroyed
+ */
+static void ip_direct_send(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb->dst;
+ struct hh_cache *hh = dst->hh;
+ struct net_device *dev = dst->dev;
+ int hh_len = LL_RESERVED_SPACE(dev);
+
+ /* Be paranoid, rather than too clever. */
+ if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
+ if (skb2 == NULL) {
+ kfree_skb(skb);
+ return;
+ }
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
+ kfree_skb(skb);
+ skb = skb2;
+ }
+
+ if (hh) {
+ int hh_alen;
+
+ read_lock_bh(&hh->hh_lock);
+ hh_alen = HH_DATA_ALIGN(hh->hh_len);
+ memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ read_unlock_bh(&hh->hh_lock);
+ skb_push(skb, hh->hh_len);
+ hh->hh_output(skb);
+ } else if (dst->neighbour)
+ dst->neighbour->output(skb);
+ else {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
+ kfree_skb(skb);
+ }
+}
+
+
+/* PRE : skb->dev is set to the device we are leaving by
+ * POST: - the packet is directly sent to the skb->dev device, without
+ * pushing the link layer header.
+ * - the packet is destroyed
+ */
+static inline int dev_direct_send(struct sk_buff *skb)
+{
+ return dev_queue_xmit(skb);
+}
+
+
+static unsigned int route_oif(const struct ipt_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ unsigned int ifindex = 0;
+ struct net_device *dev_out = NULL;
+
+ /* The user set the interface name to use.
+ * Getting the current interface index.
+ */
+ if ((dev_out = dev_get_by_name(route_info->oif))) {
+ ifindex = dev_out->ifindex;
+ } else {
+ /* Unknown interface name : packet dropped */
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: oif interface %s not found\n", route_info->oif);
+ return NF_DROP;
+ }
+
+ /* Trying the standard way of routing packets */
+ switch (route(skb, ifindex, route_info)) {
+ case 1:
+ dev_put(dev_out);
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
+ return IPT_CONTINUE;
+
+ ip_direct_send(skb);
+ return NF_STOLEN;
+
+ case 0:
+ /* Failed to send to oif. Trying the hard way */
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
+ return NF_DROP;
+
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: forcing the use of %i\n",
+ ifindex);
+
+ /* We have to force the use of an interface.
+ * This interface must be a tunnel interface since
+ * otherwise we can't guess the hw address for
+ * the packet. For a tunnel interface, no hw address
+ * is needed.
+ */
+ if ((dev_out->type != ARPHRD_TUNNEL)
+ && (dev_out->type != ARPHRD_IPGRE)) {
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: can't guess the hw addr !\n");
+ dev_put(dev_out);
+ return NF_DROP;
+ }
+
+ /* Send the packet. This will also free skb
+ * Do not go through the POST_ROUTING hook because
+ * skb->dst is not set and because it will probably
+ * get confused by the destination IP address.
+ */
+ skb->dev = dev_out;
+ dev_direct_send(skb);
+ dev_put(dev_out);
+ return NF_STOLEN;
+
+ default:
+ /* Unexpected error */
+ dev_put(dev_out);
+ return NF_DROP;
+ }
+}
+
+
+static unsigned int route_iif(const struct ipt_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ struct net_device *dev_in = NULL;
+
+ /* Getting the current interface index. */
+ if (!(dev_in = dev_get_by_name(route_info->iif))) {
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: iif interface %s not found\n", route_info->iif);
+ return NF_DROP;
+ }
+
+ skb->dev = dev_in;
+ dst_release(skb->dst);
+ skb->dst = NULL;
+
+ netif_rx(skb);
+ dev_put(dev_in);
+ return NF_STOLEN;
+}
+
+
+static unsigned int route_gw(const struct ipt_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ if (route(skb, 0, route_info)!=1)
+ return NF_DROP;
+
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
+ return IPT_CONTINUE;
+
+ ip_direct_send(skb);
+ return NF_STOLEN;
+}
+
+
+/* To detect and deter routed packet loopback when using the --tee option,
+ * we take a page out of the raw.patch book: on the copied skb, we set up
+ * a fake ->nfct entry, pointing to the local &route_tee_track. We skip
+ * routing packets when we see they already have that ->nfct.
+ */
+
+static struct nf_conn route_tee_track;
+
+static unsigned int ipt_route_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
+#else
+ const void *targinfo)
+#endif
+{
+ const struct ipt_route_target_info *route_info = targinfo;
+ struct sk_buff *skb = *pskb;
+ unsigned int res;
+
+ if (skb->nfct == &route_tee_track.ct_general) {
+ /* Loopback - a packet we already routed, is to be
+ * routed another time. Avoid that, now.
+ */
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: loopback - DROP!\n");
+ return NF_DROP;
+ }
+
+ /* If we are at PREROUTING or INPUT hook
+ * the TTL isn't decreased by the IP stack
+ */
+ if (hooknum == NF_IP_PRE_ROUTING ||
+ hooknum == NF_IP_LOCAL_IN) {
+
+ struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->ttl <= 1) {
+ struct rtable *rt;
+ struct flowi fl = {
+ .oif = 0,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos),
+ .scope = ((iph->tos & RTO_ONLINK) ?
+ RT_SCOPE_LINK :
+ RT_SCOPE_UNIVERSE)
+ }
+ }
+ };
+
+ if (ip_route_output_key(&rt, &fl)) {
+ return NF_DROP;
+ }
+
+ if (skb->dev == rt->u.dst.dev) {
+ /* Drop old route. */
+ dst_release(skb->dst);
+ skb->dst = &rt->u.dst;
+
+ /* this will traverse normal stack, and
+ * thus call conntrack on the icmp packet */
+ icmp_send(skb, ICMP_TIME_EXCEEDED,
+ ICMP_EXC_TTL, 0);
+ }
+
+ return NF_DROP;
+ }
+
+ /*
+ * If we are at INPUT the checksum must be recalculated since
+ * the length could change as the result of a defragmentation.
+ */
+ if(hooknum == NF_IP_LOCAL_IN) {
+ iph->ttl = iph->ttl - 1;
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ } else {
+ ip_decrease_ttl(iph);
+ }
+ }
+
+ if ((route_info->flags & IPT_ROUTE_TEE)) {
+ /*
+ * Copy the *pskb, and route the copy. Will later return
+ * IPT_CONTINUE for the original skb, which should continue
+ * on its way as if nothing happened. The copy should be
+	 * independently delivered to the ROUTE --gw.
+ */
+ skb = skb_copy(*pskb, GFP_ATOMIC);
+ if (!skb) {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: copy failed!\n");
+ return IPT_CONTINUE;
+ }
+ }
+
+ /* Tell conntrack to forget this packet since it may get confused
+ * when a packet is leaving with dst address == our address.
+ * Good idea ? Dunno. Need advice.
+ *
+ * NEW: mark the skb with our &route_tee_track, so we avoid looping
+ * on any already routed packet.
+ */
+ if (!(route_info->flags & IPT_ROUTE_CONTINUE)) {
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &route_tee_track.ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+ }
+
+ if (route_info->oif[0] != '\0') {
+ res = route_oif(route_info, skb);
+ } else if (route_info->iif[0] != '\0') {
+ res = route_iif(route_info, skb);
+ } else if (route_info->gw) {
+ res = route_gw(route_info, skb);
+ } else {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: no parameter !\n");
+ res = IPT_CONTINUE;
+ }
+
+ if ((route_info->flags & IPT_ROUTE_TEE))
+ res = IPT_CONTINUE;
+
+ return res;
+}
+
+
+static bool ipt_route_checkentry(const char *tablename,
+ const void *e,
+ const struct xt_target *target,
+ void *targinfo,
+ unsigned int hook_mask)
+{
+ if (strcmp(tablename, "mangle") != 0) {
+ printk("ipt_ROUTE: bad table `%s', use the `mangle' table.\n",
+ tablename);
+ return 0;
+ }
+
+ if (hook_mask & ~( (1 << NF_IP_PRE_ROUTING)
+ | (1 << NF_IP_LOCAL_IN)
+ | (1 << NF_IP_FORWARD)
+ | (1 << NF_IP_LOCAL_OUT)
+ | (1 << NF_IP_POST_ROUTING))) {
+ printk("ipt_ROUTE: bad hook\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+
+static struct ipt_target ipt_route_reg = {
+ .name = "ROUTE",
+ .target = ipt_route_target,
+ .targetsize = sizeof(struct ipt_route_target_info),
+ .checkentry = ipt_route_checkentry,
+ .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+ /* Set up fake conntrack (stolen from raw.patch):
+ - to never be deleted, not in any hashes */
+ atomic_set(&route_tee_track.ct_general.use, 1);
+ /* - and look it like as a confirmed connection */
+ set_bit(IPS_CONFIRMED_BIT, &route_tee_track.status);
+ /* Initialize fake conntrack so that NAT will skip it */
+ route_tee_track.status |= IPS_NAT_DONE_MASK;
+
+ return xt_register_target(&ipt_route_reg);
+}
+
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&ipt_route_reg);
+}
+
+module_init(init);
+module_exit(fini);
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -546,5 +546,22 @@ config IP_NF_TARGET_SET
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_ROUTE
+ tristate 'ROUTE target support'
+ depends on IP_NF_MANGLE
+ help
+ This option adds a `ROUTE' target, which enables you to setup unusual
+ routes. For example, the ROUTE lets you route a received packet through
+ an interface or towards a host, even if the regular destination of the
+ packet is the router itself. The ROUTE target is also able to change the
+ incoming interface of a packet.
+
+ The target can be or not a final target. It has to be used inside the
+ mangle table.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. The module will be called ipt_ROUTE.o.
+ If unsure, say `N'.
+
endmenu
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_EC
obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
+obj-$(CONFIG_IP_NF_TARGET_ROUTE) += ipt_ROUTE.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -154,6 +154,8 @@ struct neigh_table nd_tbl = {
.gc_thresh3 = 1024,
};
+EXPORT_SYMBOL(nd_tbl);
+
/* ND options */
struct ndisc_options {
struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_ROUTE.c
@@ -0,0 +1,330 @@
+/*
+ * This implements the ROUTE v6 target, which enables you to setup unusual
+ * routes not supported by the standard kernel routing table.
+ *
+ * Copyright (C) 2003 Cedric de Launois <delaunois@info.ucl.ac.be>
+ *
+ * v 1.1 2004/11/23
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_ipv6/ip6t_ROUTE.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+#include <net/ip6_route.h>
+#include <linux/icmpv6.h>
+
+#if 1
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define NIP6(addr) \
+ ntohs((addr).s6_addr16[0]), \
+ ntohs((addr).s6_addr16[1]), \
+ ntohs((addr).s6_addr16[2]), \
+ ntohs((addr).s6_addr16[3]), \
+ ntohs((addr).s6_addr16[4]), \
+ ntohs((addr).s6_addr16[5]), \
+ ntohs((addr).s6_addr16[6]), \
+ ntohs((addr).s6_addr16[7])
+
+/* Route the packet according to the routing keys specified in
+ * route_info. Keys are :
+ * - ifindex :
+ * 0 if no oif preferred,
+ * otherwise set to the index of the desired oif
+ * - route_info->gw :
+ * 0 if no gateway specified,
+ * otherwise set to the next host to which the pkt must be routed
+ * If success, skb->dev is the output device to which the packet must
+ * be sent and skb->dst is not NULL
+ *
+ * RETURN: 1 if the packet was successfully routed to the
+ * destination desired
+ * 0 if the kernel routing table could not route the packet
+ * according to the keys specified
+ */
+static int
+route6(struct sk_buff *skb,
+ unsigned int ifindex,
+ const struct ip6t_route_target_info *route_info)
+{
+ struct rt6_info *rt = NULL;
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
+
+ DEBUGP("ip6t_ROUTE: called with: ");
+ DEBUGP("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ipv6h->daddr));
+ DEBUGP("GATEWAY=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(*gw));
+ DEBUGP("OUT=%s\n", route_info->oif);
+
+ if (ipv6_addr_any(gw))
+ rt = rt6_lookup(&ipv6h->daddr, &ipv6h->saddr, ifindex, 1);
+ else
+ rt = rt6_lookup(gw, &ipv6h->saddr, ifindex, 1);
+
+ if (!rt)
+ goto no_route;
+
+ DEBUGP("ip6t_ROUTE: routing gives: ");
+ DEBUGP("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(rt->rt6i_dst.addr));
+ DEBUGP("GATEWAY=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(rt->rt6i_gateway));
+ DEBUGP("OUT=%s\n", rt->rt6i_dev->name);
+
+ if (ifindex && rt->rt6i_dev->ifindex!=ifindex)
+ goto wrong_route;
+
+ if (!rt->rt6i_nexthop) {
+ DEBUGP("ip6t_ROUTE: discovering neighbour\n");
+ rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_dst.addr);
+ }
+
+ /* Drop old route. */
+ dst_release(skb->dst);
+ skb->dst = &rt->u.dst;
+ skb->dev = rt->rt6i_dev;
+ return 1;
+
+ wrong_route:
+ dst_release(&rt->u.dst);
+ no_route:
+ if (!net_ratelimit())
+ return 0;
+
+ printk("ip6t_ROUTE: no explicit route found ");
+ if (ifindex)
+ printk("via interface %s ", route_info->oif);
+ if (!ipv6_addr_any(gw))
+ printk("via gateway %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", NIP6(*gw));
+ printk("\n");
+ return 0;
+}
+
+
+/* Stolen from ip6_output_finish
+ * PRE : skb->dev is set to the device we are leaving by
+ * skb->dst is not NULL
+ * POST: the packet is sent with the link layer header pushed
+ * the packet is destroyed
+ */
+static void ip_direct_send(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb->dst;
+ struct hh_cache *hh = dst->hh;
+
+ if (hh) {
+ read_lock_bh(&hh->hh_lock);
+ memcpy(skb->data - 16, hh->hh_data, 16);
+ read_unlock_bh(&hh->hh_lock);
+ skb_push(skb, hh->hh_len);
+ hh->hh_output(skb);
+ } else if (dst->neighbour)
+ dst->neighbour->output(skb);
+ else {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: no hdr & no neighbour cache!\n");
+ kfree_skb(skb);
+ }
+}
+
+
+static unsigned int
+route6_oif(const struct ip6t_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ unsigned int ifindex = 0;
+ struct net_device *dev_out = NULL;
+
+ /* The user set the interface name to use.
+ * Getting the current interface index.
+ */
+ if ((dev_out = dev_get_by_name(route_info->oif))) {
+ ifindex = dev_out->ifindex;
+ } else {
+ /* Unknown interface name : packet dropped */
+ if (net_ratelimit())
+ DEBUGP("ip6t_ROUTE: oif interface %s not found\n", route_info->oif);
+
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
+ return IP6T_CONTINUE;
+ else
+ return NF_DROP;
+ }
+
+ /* Trying the standard way of routing packets */
+ if (route6(skb, ifindex, route_info)) {
+ dev_put(dev_out);
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
+ return IP6T_CONTINUE;
+
+ ip_direct_send(skb);
+ return NF_STOLEN;
+ } else
+ return NF_DROP;
+}
+
+
+static unsigned int
+route6_gw(const struct ip6t_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ if (route6(skb, 0, route_info)) {
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
+ return IP6T_CONTINUE;
+
+ ip_direct_send(skb);
+ return NF_STOLEN;
+ } else
+ return NF_DROP;
+}
+
+
+static unsigned int
+ip6t_route_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
+#else
+ const void *targinfo)
+#endif
+{
+ const struct ip6t_route_target_info *route_info = targinfo;
+ struct sk_buff *skb = *pskb;
+ struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
+ unsigned int res;
+
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
+ goto do_it;
+
+ /* If we are at PREROUTING or INPUT hook
+ * the TTL isn't decreased by the IP stack
+ */
+ if (hooknum == NF_IP6_PRE_ROUTING ||
+ hooknum == NF_IP6_LOCAL_IN) {
+
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ if (ipv6h->hop_limit <= 1) {
+ /* Force OUTPUT device used as source address */
+ skb->dev = skb->dst->dev;
+
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED,
+ ICMPV6_EXC_HOPLIMIT, 0, skb->dev);
+
+ return NF_DROP;
+ }
+
+ ipv6h->hop_limit--;
+ }
+
+ if ((route_info->flags & IP6T_ROUTE_TEE)) {
+ /*
+ * Copy the *pskb, and route the copy. Will later return
+ * IP6T_CONTINUE for the original skb, which should continue
+ * on its way as if nothing happened. The copy should be
+		 * independently delivered to the ROUTE --gw.
+ */
+ skb = skb_copy(*pskb, GFP_ATOMIC);
+ if (!skb) {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: copy failed!\n");
+ return IP6T_CONTINUE;
+ }
+ }
+
+do_it:
+ if (route_info->oif[0]) {
+ res = route6_oif(route_info, skb);
+ } else if (!ipv6_addr_any(gw)) {
+ res = route6_gw(route_info, skb);
+ } else {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: no parameter !\n");
+ res = IP6T_CONTINUE;
+ }
+
+ if ((route_info->flags & IP6T_ROUTE_TEE))
+ res = IP6T_CONTINUE;
+
+ return res;
+}
+
+
+static int
+ip6t_route_checkentry(const char *tablename,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ const void *entry,
+#else
+ const struct ip6t_entry *entry
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
+#endif
+ void *targinfo,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int targinfosize,
+#endif
+ unsigned int hook_mask)
+{
+ if (strcmp(tablename, "mangle") != 0) {
+ printk("ip6t_ROUTE: can only be called from \"mangle\" table.\n");
+ return 0;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_route_target_info))) {
+ printk(KERN_WARNING "ip6t_ROUTE: targinfosize %u != %Zu\n",
+ targinfosize,
+ IP6T_ALIGN(sizeof(struct ip6t_route_target_info)));
+ return 0;
+ }
+#endif
+
+ return 1;
+}
+
+
+static struct ip6t_target ip6t_route_reg = {
+ .name = "ROUTE",
+ .target = ip6t_route_target,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ip6t_route_target_info),
+#endif
+ .checkentry = ip6t_route_checkentry,
+ .me = THIS_MODULE
+};
+
+
+static int __init init(void)
+{
+ printk(KERN_DEBUG "registering ipv6 ROUTE target\n");
+ if (xt_register_target(&ip6t_route_reg))
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&ip6t_route_reg);
+}
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
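
The DEBUGP lines in route6() above print IPv6 addresses as eight 16-bit groups through the NIP6() macro. The following is a minimal userspace sketch, not part of the removed patch, that reproduces the same formatting with plain POSIX headers; the address 2001:db8::1 is an arbitrary example.

/* Userspace illustration of the NIP6()-style "%04x:..." output above. */
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
	struct in6_addr a;
	const unsigned char *p;
	int i;

	if (inet_pton(AF_INET6, "2001:db8::1", &a) != 1)
		return 1;

	/* each group is two bytes in network order, printed as %04x */
	p = a.s6_addr;
	for (i = 0; i < 8; i++)
		printf("%04x%s", (p[2 * i] << 8) | p[2 * i + 1],
		       i == 7 ? "\n" : ":");
	return 0;
}
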
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -209,5 +209,18 @@ config IP6_NF_RAW
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+config IP6_NF_TARGET_ROUTE
+ tristate 'ROUTE target support'
+ depends on IP6_NF_MANGLE
+ help
+	  This option adds a `ROUTE' target, which enables you to set up unusual
+	  routes. The ROUTE target is also able to change the incoming interface
+	  of a packet.
+
+	  The target may or may not be a final target. It has to be used inside
+	  the mangle table.
+
+	  This target does not work as a module.
+
endmenu
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+obj-$(CONFIG_IP6_NF_TARGET_ROUTE) += ip6t_ROUTE.o
obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
# objects for l3 independent conntrack

View File

@@ -1,20 +0,0 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -143,7 +143,7 @@ config NF_CONNTRACK_FTP
config NF_CONNTRACK_H323
tristate "H.323 protocol support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && NF_CONNTRACK && (IPV6 || IPV6=n)
+ depends on EXPERIMENTAL && NF_CONNTRACK
help
H.323 is a VoIP signalling protocol from ITU-T. As one of the most
important VoIP protocols, it is widely used by voice hardware and
@@ -387,7 +387,7 @@ config NETFILTER_XT_TARGET_CONNSECMARK
config NETFILTER_XT_TARGET_TCPMSS
tristate '"TCPMSS" target support'
- depends on NETFILTER_XTABLES && (IPV6 || IPV6=n)
+ depends on NETFILTER_XTABLES
---help---
This option adds a `TCPMSS' target, which allows you to alter the
MSS value of TCP SYN packets, to control the maximum size for that

View File

@@ -1,795 +0,0 @@
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -155,8 +155,37 @@ struct tc_sfq_qopt
*
* The only reason for this is efficiency, it is possible
* to change these parameters in compile time.
+ *
+ * If you need to play with these values, use esfq instead.
*/
+/* ESFQ section */
+
+enum
+{
+ /* traditional */
+ TCA_SFQ_HASH_CLASSIC,
+ TCA_SFQ_HASH_DST,
+ TCA_SFQ_HASH_SRC,
+ TCA_SFQ_HASH_FWMARK,
+ /* conntrack */
+ TCA_SFQ_HASH_CTORIGDST,
+ TCA_SFQ_HASH_CTORIGSRC,
+ TCA_SFQ_HASH_CTREPLDST,
+ TCA_SFQ_HASH_CTREPLSRC,
+ TCA_SFQ_HASH_CTNATCHG,
+};
+
+struct tc_esfq_qopt
+{
+ unsigned quantum; /* Bytes per round allocated to flow */
+ int perturb_period; /* Period of hash perturbation */
+ __u32 limit; /* Maximal packets in queue */
+ unsigned divisor; /* Hash divisor */
+ unsigned flows; /* Maximal number of flows */
+ unsigned hash_kind; /* Hash function to use for flow identification */
+};
+
/* RED section */
enum
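
The struct tc_esfq_qopt added above is the option payload a tc-style userspace tool would hand to the kernel (esfq_q_init() later in this patch reads it from the qdisc's TCA_OPTIONS attribute, and esfq_dump() writes it back). A minimal sketch of filling that structure is shown below; the struct and hash enum are redeclared locally so the snippet builds without the patched kernel headers, and all values are arbitrary examples.

#include <stdio.h>

struct tc_esfq_qopt {
	unsigned quantum;	/* bytes per round allocated to a flow */
	int perturb_period;	/* hash perturbation period, seconds */
	unsigned int limit;	/* maximal packets in queue */
	unsigned divisor;	/* hash table size, power of two */
	unsigned flows;		/* maximal number of flows (depth) */
	unsigned hash_kind;	/* TCA_SFQ_HASH_* value */
};

enum { TCA_SFQ_HASH_CLASSIC, TCA_SFQ_HASH_DST, TCA_SFQ_HASH_SRC };

int main(void)
{
	struct tc_esfq_qopt opt = {
		.quantum        = 1514,             /* roughly one Ethernet frame */
		.perturb_period = 10,               /* reseed the hash every 10 s */
		.limit          = 128,              /* queue at most 128 packets */
		.divisor        = 1024,             /* 1024 hash buckets */
		.flows          = 128,              /* 128 flow slots */
		.hash_kind      = TCA_SFQ_HASH_DST, /* hash on destination address */
	};

	printf("esfq options: limit %u divisor %u flows %u hash %u\n",
	       opt.limit, opt.divisor, opt.flows, opt.hash_kind);
	return 0;
}
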
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -144,6 +144,37 @@ config NET_SCH_SFQ
To compile this code as a module, choose M here: the
module will be called sch_sfq.
+config NET_SCH_ESFQ
+ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
+ ---help---
+ Say Y here if you want to use the Enhanced Stochastic Fairness
+ Queueing (ESFQ) packet scheduling algorithm for some of your network
+ devices or as a leaf discipline for a classful qdisc such as HTB or
+ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
+ references to the SFQ algorithm).
+
+	  This is an enhanced SFQ version which allows you to control some
+ hardcoded values in the SFQ scheduler.
+
+ ESFQ also adds control of the hash function used to identify packet
+	  flows. The original SFQ discipline hashes by connection; ESFQ adds
+ several other hashing methods, such as by src IP or by dst IP, which
+ can be more fair to users in some networking situations.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_esfq.
+
+config NET_SCH_ESFQ_NFCT
+ bool "Connection Tracking Hash Types"
+ depends on NET_SCH_ESFQ && NF_CONNTRACK
+ ---help---
+ Say Y here to enable support for hashing based on netfilter connection
+ tracking information. This is useful for a router that is also using
+ NAT to connect privately-addressed hosts to the Internet. If you want
+ to provide fair distribution of upstream bandwidth, ESFQ must use
+ connection tracking information, since all outgoing packets will share
+ the same source address.
+
config NET_SCH_TEQL
tristate "True Link Equalizer (TEQL)"
---help---
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o
obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
--- /dev/null
+++ b/net/sched/sch_esfq.c
@@ -0,0 +1,702 @@
+/*
+ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes: Alexander Atanasov, <alex@ssi.bg>
+ * Added dynamic depth,limit,divisor,hash_kind options.
+ * Added dst and src hashes.
+ *
+ * Alexander Clouter, <alex@digriz.org.uk>
+ * Ported ESFQ to Linux 2.6.
+ *
+ * Corey Hickey, <bugfood-c@fatooh.org>
+ * Maintenance of the Linux 2.6 port.
+ * Added fwmark hash (thanks to Robert Kurjata).
+ * Added usage of jhash.
+ * Added conntrack support.
+ * Added ctnatchg hash (thanks to Ben Pfountz).
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <linux/jhash.h>
+#include <net/netfilter/nf_conntrack.h>
+
+/* Stochastic Fairness Queuing algorithm.
+ For more comments look at sch_sfq.c.
+ The difference is that you can change limit, depth,
+ hash table size and choose alternate hash types.
+
+ classic: same as in sch_sfq.c
+ dst: destination IP address
+ src: source IP address
+ fwmark: netfilter mark value
+ ctorigdst: original destination IP address
+ ctorigsrc: original source IP address
+ ctrepldst: reply destination IP address
+ ctreplsrc: reply source IP
+
+*/
+
+#define ESFQ_HEAD 0
+#define ESFQ_TAIL 1
+
+/* This type should contain at least SFQ_DEPTH*2 values */
+typedef unsigned int esfq_index;
+
+struct esfq_head
+{
+ esfq_index next;
+ esfq_index prev;
+};
+
+struct esfq_sched_data
+{
+/* Parameters */
+ int perturb_period;
+ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
+ int limit;
+ unsigned depth;
+ unsigned hash_divisor;
+ unsigned hash_kind;
+/* Variables */
+ struct timer_list perturb_timer;
+ int perturbation;
+ esfq_index tail; /* Index of current slot in round */
+ esfq_index max_depth; /* Maximal depth */
+
+ esfq_index *ht; /* Hash table */
+ esfq_index *next; /* Active slots link */
+ short *allot; /* Current allotment per slot */
+ unsigned short *hash; /* Hash value indexed by slots */
+ struct sk_buff_head *qs; /* Slot queue */
+ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
+};
+
+/* This contains the info we will hash. */
+struct esfq_packet_info
+{
+ u32 proto; /* protocol or port */
+ u32 src; /* source from packet header */
+ u32 dst; /* destination from packet header */
+ u32 ctorigsrc; /* original source from conntrack */
+ u32 ctorigdst; /* original destination from conntrack */
+ u32 ctreplsrc; /* reply source from conntrack */
+ u32 ctrepldst; /* reply destination from conntrack */
+ u32 mark; /* netfilter mark (fwmark) */
+};
+
+static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
+{
+ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
+{
+ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
+{
+ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
+}
+
+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
+{
+ struct esfq_packet_info info;
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+#endif
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ {
+ struct iphdr *iph = ip_hdr(skb);
+ info.dst = iph->daddr;
+ info.src = iph->saddr;
+ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
+ (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP ||
+ iph->protocol == IPPROTO_SCTP ||
+ iph->protocol == IPPROTO_DCCP ||
+ iph->protocol == IPPROTO_ESP))
+ info.proto = *(((u32*)iph) + iph->ihl);
+ else
+ info.proto = iph->protocol;
+ break;
+ }
+ case __constant_htons(ETH_P_IPV6):
+ {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ /* Hash ipv6 addresses into a u32. This isn't ideal,
+ * but the code is simple. */
+ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
+ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
+ if (iph->nexthdr == IPPROTO_TCP ||
+ iph->nexthdr == IPPROTO_UDP ||
+ iph->nexthdr == IPPROTO_SCTP ||
+ iph->nexthdr == IPPROTO_DCCP ||
+ iph->nexthdr == IPPROTO_ESP)
+ info.proto = *(u32*)&iph[1];
+ else
+ info.proto = iph->nexthdr;
+ break;
+ }
+ default:
+ info.dst = (u32)(unsigned long)skb->dst;
+ info.src = (u32)(unsigned long)skb->sk;
+ info.proto = skb->protocol;
+ }
+
+ info.mark = skb->mark;
+
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ /* defaults if there is no conntrack info */
+ info.ctorigsrc = info.src;
+ info.ctorigdst = info.dst;
+ info.ctreplsrc = info.dst;
+ info.ctrepldst = info.src;
+ /* collect conntrack info */
+ if (ct && ct != &nf_conntrack_untracked) {
+ if (skb->protocol == __constant_htons(ETH_P_IP)) {
+ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
+ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
+ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
+ }
+ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
+ /* Again, hash ipv6 addresses into a single u32. */
+ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
+ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
+ }
+
+ }
+#endif
+
+ switch(q->hash_kind) {
+ case TCA_SFQ_HASH_CLASSIC:
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+ case TCA_SFQ_HASH_DST:
+ return esfq_jhash_1word(q, info.dst);
+ case TCA_SFQ_HASH_SRC:
+ return esfq_jhash_1word(q, info.src);
+ case TCA_SFQ_HASH_FWMARK:
+ return esfq_jhash_1word(q, info.mark);
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ case TCA_SFQ_HASH_CTORIGDST:
+ return esfq_jhash_1word(q, info.ctorigdst);
+ case TCA_SFQ_HASH_CTORIGSRC:
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ case TCA_SFQ_HASH_CTREPLDST:
+ return esfq_jhash_1word(q, info.ctrepldst);
+ case TCA_SFQ_HASH_CTREPLSRC:
+ return esfq_jhash_1word(q, info.ctreplsrc);
+ case TCA_SFQ_HASH_CTNATCHG:
+ {
+ if (info.ctorigdst == info.ctreplsrc)
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ return esfq_jhash_1word(q, info.ctreplsrc);
+ }
+#endif
+ default:
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
+ }
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+}
+
+static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d = q->qs[x].qlen + q->depth;
+
+ p = d;
+ n = q->dep[d].next;
+ q->dep[x].next = n;
+ q->dep[x].prev = p;
+ q->dep[p].next = q->dep[n].prev = x;
+}
+
+static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+
+ if (n == p && q->max_depth == q->qs[x].qlen + 1)
+ q->max_depth--;
+
+ esfq_link(q, x);
+}
+
+static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+ d = q->qs[x].qlen;
+ if (q->max_depth < d)
+ q->max_depth = d;
+
+ esfq_link(q, x);
+}
+
+static unsigned int esfq_drop(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_index d = q->max_depth;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ /* Queue is full! Find the longest slot and
+ drop a packet from it */
+
+ if (d > 1) {
+ esfq_index x = q->dep[d+q->depth].next;
+ skb = q->qs[x].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[x]);
+ kfree_skb(skb);
+ esfq_dec(q, x);
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ if (d == 1) {
+ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
+ d = q->next[q->tail];
+ q->next[q->tail] = q->next[d];
+ q->allot[q->next[d]] += q->quantum;
+ skb = q->qs[d].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[d]);
+ kfree_skb(skb);
+ esfq_dec(q, d);
+ sch->q.qlen--;
+ q->ht[q->hash[d]] = q->depth;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ return 0;
+}
+
+static void esfq_q_enqueue(struct sk_buff *skb, struct esfq_sched_data *q, unsigned int end)
+{
+ unsigned hash = esfq_hash(q, skb);
+ unsigned depth = q->depth;
+ esfq_index x;
+
+ x = q->ht[hash];
+ if (x == depth) {
+ q->ht[hash] = x = q->dep[depth].next;
+ q->hash[x] = hash;
+ }
+
+ if (end == ESFQ_TAIL)
+ __skb_queue_tail(&q->qs[x], skb);
+ else
+ __skb_queue_head(&q->qs[x], skb);
+
+ esfq_inc(q, x);
+ if (q->qs[x].qlen == 1) { /* The flow is new */
+ if (q->tail == depth) { /* It is the first flow */
+ q->tail = x;
+ q->next[x] = x;
+ q->allot[x] = q->quantum;
+ } else {
+ q->next[x] = q->next[q->tail];
+ q->next[q->tail] = x;
+ q->tail = x;
+ }
+ }
+}
+
+static int esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_enqueue(skb, q, ESFQ_TAIL);
+ sch->qstats.backlog += skb->len;
+ if (++sch->q.qlen < q->limit-1) {
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
+ return 0;
+ }
+
+ sch->qstats.drops++;
+ esfq_drop(sch);
+ return NET_XMIT_CN;
+}
+
+
+static int esfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_enqueue(skb, q, ESFQ_HEAD);
+ sch->qstats.backlog += skb->len;
+ if (++sch->q.qlen < q->limit - 1) {
+ sch->qstats.requeues++;
+ return 0;
+ }
+
+ sch->qstats.drops++;
+ esfq_drop(sch);
+ return NET_XMIT_CN;
+}
+
+static struct sk_buff *esfq_q_dequeue(struct esfq_sched_data *q)
+{
+ struct sk_buff *skb;
+ unsigned depth = q->depth;
+ esfq_index a, old_a;
+
+ /* No active slots */
+ if (q->tail == depth)
+ return NULL;
+
+ a = old_a = q->next[q->tail];
+
+ /* Grab packet */
+ skb = __skb_dequeue(&q->qs[a]);
+ esfq_dec(q, a);
+
+ /* Is the slot empty? */
+ if (q->qs[a].qlen == 0) {
+ q->ht[q->hash[a]] = depth;
+ a = q->next[a];
+ if (a == old_a) {
+ q->tail = depth;
+ return skb;
+ }
+ q->next[q->tail] = a;
+ q->allot[a] += q->quantum;
+ } else if ((q->allot[a] -= skb->len) <= 0) {
+ q->tail = a;
+ a = q->next[a];
+ q->allot[a] += q->quantum;
+ }
+
+ return skb;
+}
+
+static struct sk_buff *esfq_dequeue(struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = esfq_q_dequeue(q);
+ if (skb == NULL)
+ return NULL;
+ sch->q.qlen--;
+ sch->qstats.backlog -= skb->len;
+ return skb;
+}
+
+static void esfq_q_destroy(struct esfq_sched_data *q)
+{
+ del_timer(&q->perturb_timer);
+ if(q->ht)
+ kfree(q->ht);
+ if(q->dep)
+ kfree(q->dep);
+ if(q->next)
+ kfree(q->next);
+ if(q->allot)
+ kfree(q->allot);
+ if(q->hash)
+ kfree(q->hash);
+ if(q->qs)
+ kfree(q->qs);
+}
+
+static void esfq_destroy(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_destroy(q);
+}
+
+
+static void esfq_reset(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = esfq_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static void esfq_perturbation(unsigned long arg)
+{
+ struct Qdisc *sch = (struct Qdisc*)arg;
+ struct esfq_sched_data *q = qdisc_priv(sch);
+
+ q->perturbation = net_random()&0x1F;
+
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+}
+
+static unsigned int esfq_check_hash(unsigned int kind)
+{
+ switch (kind) {
+ case TCA_SFQ_HASH_CTORIGDST:
+ case TCA_SFQ_HASH_CTORIGSRC:
+ case TCA_SFQ_HASH_CTREPLDST:
+ case TCA_SFQ_HASH_CTREPLSRC:
+ case TCA_SFQ_HASH_CTNATCHG:
+#ifndef CONFIG_NET_SCH_ESFQ_NFCT
+ {
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Conntrack hash types disabled in kernel config. Falling back to classic.\n");
+ return TCA_SFQ_HASH_CLASSIC;
+ }
+#endif
+ case TCA_SFQ_HASH_CLASSIC:
+ case TCA_SFQ_HASH_DST:
+ case TCA_SFQ_HASH_SRC:
+ case TCA_SFQ_HASH_FWMARK:
+ return kind;
+ default:
+ {
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash type. Falling back to classic.\n");
+ return TCA_SFQ_HASH_CLASSIC;
+ }
+ }
+}
+
+static int esfq_q_init(struct esfq_sched_data *q, struct rtattr *opt)
+{
+ struct tc_esfq_qopt *ctl = RTA_DATA(opt);
+ esfq_index p = ~0U/2;
+ int i;
+
+ if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
+ return -EINVAL;
+
+ q->perturbation = 0;
+ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
+ q->max_depth = 0;
+ if (opt == NULL) {
+ q->perturb_period = 0;
+ q->hash_divisor = 1024;
+ q->tail = q->limit = q->depth = 128;
+
+ } else {
+ struct tc_esfq_qopt *ctl = RTA_DATA(opt);
+ if (ctl->quantum)
+ q->quantum = ctl->quantum;
+ q->perturb_period = ctl->perturb_period*HZ;
+ q->hash_divisor = ctl->divisor ? : 1024;
+ q->tail = q->limit = q->depth = ctl->flows ? : 128;
+
+ if ( q->depth > p - 1 )
+ return -EINVAL;
+
+ if (ctl->limit)
+ q->limit = min_t(u32, ctl->limit, q->depth);
+
+ if (ctl->hash_kind) {
+ q->hash_kind = esfq_check_hash(ctl->hash_kind);
+ }
+ }
+
+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->ht)
+ goto err_case;
+ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
+ if (!q->dep)
+ goto err_case;
+ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->next)
+ goto err_case;
+ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
+ if (!q->allot)
+ goto err_case;
+ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
+ if (!q->hash)
+ goto err_case;
+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!q->qs)
+ goto err_case;
+
+ for (i=0; i< q->hash_divisor; i++)
+ q->ht[i] = q->depth;
+ for (i=0; i<q->depth; i++) {
+ skb_queue_head_init(&q->qs[i]);
+ q->dep[i+q->depth].next = i+q->depth;
+ q->dep[i+q->depth].prev = i+q->depth;
+ }
+
+ for (i=0; i<q->depth; i++)
+ esfq_link(q, i);
+ return 0;
+err_case:
+ esfq_q_destroy(q);
+ return -ENOBUFS;
+}
+
+static int esfq_init(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ q->quantum = psched_mtu(sch->dev); /* default */
+ if ((err = esfq_q_init(q, opt)))
+ return err;
+
+ init_timer(&q->perturb_timer);
+ q->perturb_timer.data = (unsigned long)sch;
+ q->perturb_timer.function = esfq_perturbation;
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+
+ return 0;
+}
+
+static int esfq_change(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct esfq_sched_data new;
+ struct sk_buff *skb;
+ int err;
+
+ /* set up new queue */
+ memset(&new, 0, sizeof(struct esfq_sched_data));
+ new.quantum = psched_mtu(sch->dev); /* default */
+ if ((err = esfq_q_init(&new, opt)))
+ return err;
+
+ /* copy all packets from the old queue to the new queue */
+ sch_tree_lock(sch);
+ while ((skb = esfq_q_dequeue(q)) != NULL)
+ esfq_q_enqueue(skb, &new, ESFQ_TAIL);
+
+ /* clean up the old queue */
+ esfq_q_destroy(q);
+
+ /* copy elements of the new queue into the old queue */
+ q->perturb_period = new.perturb_period;
+ q->quantum = new.quantum;
+ q->limit = new.limit;
+ q->depth = new.depth;
+ q->hash_divisor = new.hash_divisor;
+ q->hash_kind = new.hash_kind;
+ q->tail = new.tail;
+ q->max_depth = new.max_depth;
+ q->ht = new.ht;
+ q->dep = new.dep;
+ q->next = new.next;
+ q->allot = new.allot;
+ q->hash = new.hash;
+ q->qs = new.qs;
+
+ /* finish up */
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ } else {
+ q->perturbation = 0;
+ }
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ unsigned char *b = skb->tail;
+ struct tc_esfq_qopt opt;
+
+ opt.quantum = q->quantum;
+ opt.perturb_period = q->perturb_period/HZ;
+
+ opt.limit = q->limit;
+ opt.divisor = q->hash_divisor;
+ opt.flows = q->depth;
+ opt.hash_kind = q->hash_kind;
+
+ RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
+}
+
+static struct Qdisc_ops esfq_qdisc_ops =
+{
+ .next = NULL,
+ .cl_ops = NULL,
+ .id = "esfq",
+ .priv_size = sizeof(struct esfq_sched_data),
+ .enqueue = esfq_enqueue,
+ .dequeue = esfq_dequeue,
+ .requeue = esfq_requeue,
+ .drop = esfq_drop,
+ .init = esfq_init,
+ .reset = esfq_reset,
+ .destroy = esfq_destroy,
+ .change = esfq_change,
+ .dump = esfq_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init esfq_module_init(void)
+{
+ return register_qdisc(&esfq_qdisc_ops);
+}
+static void __exit esfq_module_exit(void)
+{
+ unregister_qdisc(&esfq_qdisc_ops);
+}
+module_init(esfq_module_init)
+module_exit(esfq_module_exit)
+MODULE_LICENSE("GPL");
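
All of the esfq_jhash_*() helpers above pick a bucket by masking a 32-bit hash with (hash_divisor - 1), which only spreads flows evenly when the divisor is a power of two (1024 by default). A minimal userspace sketch of that bucket selection follows; the mix() function is a stand-in for the kernel's jhash_2words(), not the real thing, and the addresses and seed are arbitrary examples.

#include <stdio.h>
#include <stdint.h>

/* stand-in for jhash_2words(): any decent 32-bit mixing function will do */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h ^ (h >> 16)) * 0x45d9f3bu;
	h ^= b;
	h = (h ^ (h >> 16)) * 0x45d9f3bu;
	return h ^ (h >> 16);
}

int main(void)
{
	uint32_t divisor = 1024;	/* hash table size, must be a power of two */
	uint32_t seed = 0x1234;		/* plays the role of q->perturbation */
	uint32_t saddr = 0xc0a80001;	/* 192.168.0.1 */
	uint32_t daddr = 0x08080808;	/* 8.8.8.8 */
	uint32_t bucket;

	/* same shape as esfq_jhash_2words(): hash, then mask with divisor-1 */
	bucket = mix(saddr, daddr, seed) & (divisor - 1);
	printf("flow hashed to bucket %u of %u\n", bucket, divisor);
	return 0;
}
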

View File

@@ -1,299 +0,0 @@
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -293,6 +293,8 @@ struct rtnexthop
#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
+#define RTNH_F_SUSPECT 8 /* We don't know the real state */
+#define RTNH_F_BADSTATE (RTNH_F_DEAD | RTNH_F_SUSPECT)
/* Macros to handle hexthops */
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -19,6 +19,8 @@ struct flowi {
struct {
__be32 daddr;
__be32 saddr;
+ __u32 lsrc;
+ __u32 gw;
__u8 tos;
__u8 scope;
} ip4_u;
@@ -43,6 +45,8 @@ struct flowi {
#define fl6_flowlabel nl_u.ip6_u.flowlabel
#define fl4_dst nl_u.ip4_u.daddr
#define fl4_src nl_u.ip4_u.saddr
+#define fl4_lsrc nl_u.ip4_u.lsrc
+#define fl4_gw nl_u.ip4_u.gw
#define fl4_tos nl_u.ip4_u.tos
#define fl4_scope nl_u.ip4_u.scope
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1116,6 +1116,7 @@ void ip_rt_redirect(__be32 old_gw, __be3
/* Gateway is different ... */
rt->rt_gateway = new_gw;
+ if (rt->fl.fl4_gw) rt->fl.fl4_gw = new_gw;
/* Redirect received -> path was valid */
dst_confirm(&rth->u.dst);
@@ -1551,6 +1552,7 @@ static int ip_route_input_mc(struct sk_b
rth->fl.fl4_tos = tos;
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
+ rth->fl.fl4_lsrc = 0;
rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
rth->u.dst.tclassid = itag;
@@ -1561,6 +1563,7 @@ static int ip_route_input_mc(struct sk_b
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
rth->fl.oif = 0;
+ rth->fl.fl4_gw = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
rth->rt_type = RTN_MULTICAST;
@@ -1624,7 +1627,7 @@ static void ip_handle_martian_source(str
static inline int __mkroute_input(struct sk_buff *skb,
struct fib_result* res,
struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos,
+ __be32 daddr, __be32 saddr, u32 tos, u32 lsrc,
struct rtable **result)
{
@@ -1659,6 +1662,7 @@ static inline int __mkroute_input(struct
flags |= RTCF_DIRECTSRC;
if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
+ !lsrc &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
flags |= RTCF_DOREDIRECT;
@@ -1692,6 +1696,7 @@ static inline int __mkroute_input(struct
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
+ rth->fl.fl4_lsrc = lsrc;
rth->rt_gateway = daddr;
rth->rt_iif =
rth->fl.iif = in_dev->dev->ifindex;
@@ -1699,6 +1704,7 @@ static inline int __mkroute_input(struct
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
rth->fl.oif = 0;
+ rth->fl.fl4_gw = 0;
rth->rt_spec_dst= spec_dst;
rth->u.dst.input = ip_forward;
@@ -1720,19 +1726,21 @@ static inline int ip_mkroute_input(struc
struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos)
+ __be32 daddr, __be32 saddr, u32 tos,
+ u32 lsrc)
{
struct rtable* rth = NULL;
int err;
unsigned hash;
+ fib_select_default(fl, res);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
+ if (res->fi && res->fi->fib_nhs > 1)
fib_select_multipath(fl, res);
#endif
/* create a routing cache entry */
- err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
+ err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, lsrc, &rth);
if (err)
return err;
@@ -1752,18 +1760,18 @@ static inline int ip_mkroute_input(struc
*/
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+ u8 tos, struct net_device *dev, u32 lsrc)
{
struct fib_result res;
struct in_device *in_dev = in_dev_get(dev);
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = daddr,
- .saddr = saddr,
+ .saddr = lsrc ? : saddr,
.tos = tos,
.scope = RT_SCOPE_UNIVERSE,
} },
.mark = skb->mark,
- .iif = dev->ifindex };
+ .iif = lsrc? loopback_dev.ifindex : dev->ifindex };
unsigned flags = 0;
u32 itag = 0;
struct rtable * rth;
@@ -1796,6 +1804,12 @@ static int ip_route_input_slow(struct sk
if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
goto martian_destination;
+ if (lsrc) {
+ if (MULTICAST(lsrc) || BADCLASS(lsrc) ||
+ ZERONET(lsrc) || LOOPBACK(lsrc))
+ goto e_inval;
+ }
+
/*
* Now we are ready to route packet.
*/
@@ -1805,6 +1819,10 @@ static int ip_route_input_slow(struct sk
goto no_route;
}
free_res = 1;
+ if (lsrc && res.type != RTN_UNICAST && res.type != RTN_NAT)
+ goto e_inval;
+ fl.iif = dev->ifindex;
+ fl.fl4_src = saddr;
RT_CACHE_STAT_INC(in_slow_tot);
@@ -1829,7 +1847,7 @@ static int ip_route_input_slow(struct sk
if (res.type != RTN_UNICAST)
goto martian_destination;
- err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
+ err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos, lsrc);
if (err == -ENOBUFS)
goto e_nobufs;
if (err == -EINVAL)
@@ -1844,6 +1862,8 @@ out: return err;
brd_input:
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
+ if (lsrc)
+ goto e_inval;
if (ZERONET(saddr))
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
@@ -1884,6 +1904,7 @@ local_input:
rth->u.dst.dev = &loopback_dev;
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
+ rth->fl.fl4_gw = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
rth->u.dst.input= ip_local_deliver;
@@ -1933,8 +1954,9 @@ martian_source:
goto e_inval;
}
-int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+static inline int
+ip_route_input_cached(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev, u32 lsrc)
{
struct rtable * rth;
unsigned hash;
@@ -1949,6 +1971,7 @@ int ip_route_input(struct sk_buff *skb,
if (rth->fl.fl4_dst == daddr &&
rth->fl.fl4_src == saddr &&
rth->fl.iif == iif &&
+ rth->fl.fl4_lsrc == lsrc &&
rth->fl.oif == 0 &&
rth->fl.mark == skb->mark &&
rth->fl.fl4_tos == tos) {
@@ -1995,7 +2018,19 @@ int ip_route_input(struct sk_buff *skb,
rcu_read_unlock();
return -EINVAL;
}
- return ip_route_input_slow(skb, daddr, saddr, tos, dev);
+ return ip_route_input_slow(skb, daddr, saddr, tos, dev, lsrc);
+}
+
+int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
+ u8 tos, struct net_device *dev)
+{
+ return ip_route_input_cached(skb, daddr, saddr, tos, dev, 0);
+}
+
+int ip_route_input_lookup(struct sk_buff *skb, u32 daddr, u32 saddr,
+ u8 tos, struct net_device *dev, u32 lsrc)
+{
+ return ip_route_input_cached(skb, daddr, saddr, tos, dev, lsrc);
}
static inline int __mkroute_output(struct rtable **result,
@@ -2067,6 +2102,7 @@ static inline int __mkroute_output(struc
rth->fl.fl4_tos = tos;
rth->fl.fl4_src = oldflp->fl4_src;
rth->fl.oif = oldflp->oif;
+ rth->fl.fl4_gw = oldflp->fl4_gw;
rth->fl.mark = oldflp->mark;
rth->rt_dst = fl->fl4_dst;
rth->rt_src = fl->fl4_src;
@@ -2145,6 +2181,7 @@ static int ip_route_output_slow(struct r
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = oldflp->fl4_dst,
.saddr = oldflp->fl4_src,
+ .gw = oldflp->fl4_gw,
.tos = tos & IPTOS_RT_MASK,
.scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK :
@@ -2248,6 +2285,7 @@ static int ip_route_output_slow(struct r
dev_out = &loopback_dev;
dev_hold(dev_out);
fl.oif = loopback_dev.ifindex;
+ fl.fl4_gw = 0;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
@@ -2255,7 +2293,7 @@ static int ip_route_output_slow(struct r
if (fib_lookup(&fl, &res)) {
res.fi = NULL;
- if (oldflp->oif) {
+ if (oldflp->oif && dev_out->flags & IFF_UP) {
/* Apparently, routing tables are wrong. Assume,
that the destination is on link.
@@ -2295,6 +2333,7 @@ static int ip_route_output_slow(struct r
dev_out = &loopback_dev;
dev_hold(dev_out);
fl.oif = dev_out->ifindex;
+ fl.fl4_gw = 0;
if (res.fi)
fib_info_put(res.fi);
res.fi = NULL;
@@ -2302,13 +2341,12 @@ static int ip_route_output_slow(struct r
goto make_route;
}
+ if (res.type == RTN_UNICAST)
+ fib_select_default(&fl, &res);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ if (res.fi->fib_nhs > 1)
fib_select_multipath(&fl, &res);
- else
#endif
- if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
- fib_select_default(&fl, &res);
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
@@ -2345,6 +2383,7 @@ int __ip_route_output_key(struct rtable
rth->fl.fl4_src == flp->fl4_src &&
rth->fl.iif == 0 &&
rth->fl.oif == flp->oif &&
+ rth->fl.fl4_gw == flp->fl4_gw &&
rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
(IPTOS_RT_MASK | RTO_ONLINK))) {
@@ -3030,3 +3069,4 @@ int __init ip_rt_init(void)
EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);
+EXPORT_SYMBOL(ip_route_input_lookup);
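
The route.c changes above add fl4_lsrc and fl4_gw to the flow key and extend the routing-cache comparisons so a cached entry is only reused when those fields match as well. A minimal userspace sketch of that all-fields-must-match test is shown below; the struct and values are illustrative only and do not reproduce the kernel's flowi layout.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct flow_key {
	uint32_t daddr, saddr, lsrc, gw;
	int iif, oif;
	uint32_t mark;
	uint8_t tos;
};

/* a cached route may only be reused when every key field matches */
static bool cache_match(const struct flow_key *c, const struct flow_key *k)
{
	return c->daddr == k->daddr && c->saddr == k->saddr &&
	       c->iif == k->iif && c->oif == k->oif &&
	       c->lsrc == k->lsrc && c->gw == k->gw &&
	       c->mark == k->mark && c->tos == k->tos;
}

int main(void)
{
	struct flow_key cached = { .daddr = 0x0a000001, .iif = 2 };
	struct flow_key lookup = { .daddr = 0x0a000001, .iif = 2,
				   .lsrc = 0x0a000002 };

	/* same destination and interface, but a different lsrc: no reuse */
	printf("cache %s\n", cache_match(&cached, &lookup) ? "hit" : "miss");
	return 0;
}
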

View File

@@ -1,12 +0,0 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -565,6 +565,9 @@ core-$(CONFIG_TOSHIBA_RBTX4938) += arch/
cflags-$(CONFIG_TOSHIBA_RBTX4938) += -Iinclude/asm-mips/mach-tx49xx
load-$(CONFIG_TOSHIBA_RBTX4938) += 0xffffffff80100000
+# temporary until string.h is fixed
+cflags-y += -ffreestanding
+
cflags-y += -Iinclude/asm-mips/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/

View File

@@ -1,56 +0,0 @@
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -105,6 +105,17 @@ static int jffs2_build_filesystem(struct
dbg_fsbuild("scanned flash completely\n");
jffs2_dbg_dump_block_lists_nolock(c);
+ if (c->flags & (1 << 7)) {
+ printk("%s(): unlocking the mtd device... ", __func__);
+ if (c->mtd->unlock)
+ c->mtd->unlock(c->mtd, 0, c->mtd->size);
+ printk("done.\n");
+
+ printk("%s(): erasing all blocks after the end marker... ", __func__);
+ jffs2_erase_pending_blocks(c, -1);
+ printk("done.\n");
+ }
+
dbg_fsbuild("pass 1 starting\n");
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -142,9 +142,12 @@ int jffs2_scan_medium(struct jffs2_sb_in
/* reset summary info for next eraseblock scan */
jffs2_sum_reset_collected(s);
-
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
- buf_size, s);
+
+ if (c->flags & (1 << 7))
+ ret = BLK_STATE_ALLFF;
+ else
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+ buf_size, s);
if (ret < 0)
goto out;
@@ -545,6 +548,17 @@ static int jffs2_scan_eraseblock (struct
return err;
}
+ if ((buf[0] == 0xde) &&
+ (buf[1] == 0xad) &&
+ (buf[2] == 0xc0) &&
+ (buf[3] == 0xde)) {
+ /* end of filesystem. erase everything after this point */
+ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
+ c->flags |= (1 << 7);
+
+ return BLK_STATE_ALLFF;
+ }
+
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
ofs = 0;
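
The scan hunk above treats the byte sequence 0xde 0xad 0xc0 0xde at the start of an eraseblock as an end-of-filesystem marker and erases everything after that point. A minimal userspace sketch of the same marker test follows; the buffer contents are an arbitrary example.

#include <stdio.h>
#include <string.h>

static int is_eof_marker(const unsigned char *buf)
{
	static const unsigned char marker[4] = { 0xde, 0xad, 0xc0, 0xde };

	return memcmp(buf, marker, sizeof(marker)) == 0;
}

int main(void)
{
	/* example first bytes of an eraseblock */
	unsigned char block[4] = { 0xde, 0xad, 0xc0, 0xde };

	printf("end-of-filesystem marker %s\n",
	       is_eof_marker(block) ? "found" : "not found");
	return 0;
}
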

View File

@@ -1,9 +0,0 @@
--- /dev/null
+++ b/include/asm-powerpc/segment.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+/* Only here because we have some old header files that expect it.. */
+
+#endif /* _ASM_SEGMENT_H */

View File

@@ -1,42 +0,0 @@
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1459,7 +1459,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8169,
.region = 1,
.align = 0,
- .intr_event = SYSErr | LinkChg | RxOverflow |
+ .intr_event = LinkChg | RxOverflow |
RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
.napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow
},
@@ -1467,7 +1467,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow |
+ .intr_event = LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow
},
@@ -1475,7 +1475,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8101,
.region = 2,
.align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
+ .intr_event = LinkChg | RxOverflow | PCSTimeout |
RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
.napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow
}
@@ -2779,10 +2779,12 @@ static irqreturn_t rtl8169_interrupt(int
break;
}
+#if 0
if (unlikely(status & SYSErr)) {
rtl8169_pcierr_interrupt(dev);
break;
}
+#endif
if (status & LinkChg)
rtl8169_check_link_status(dev, tp, ioaddr);

File diff suppressed because it is too large Load Diff

View File

@@ -1,806 +0,0 @@
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -106,6 +106,75 @@ struct ieee80211_hdr {
} __attribute__ ((packed));
+struct ieee80211_ht_capability {
+ __le16 capabilities_info;
+ u8 mac_ht_params_info;
+ u8 supported_mcs_set[16];
+ __le16 extended_ht_capability_info;
+ __le32 tx_BF_capability_info;
+ u8 antenna_selection_info;
+}__attribute__ ((packed));
+
+struct ieee80211_ht_additional_info {
+ u8 control_chan;
+ u8 ht_param;
+ __le16 operation_mode;
+ __le16 stbc_param;
+ u8 basic_set[16];
+}__attribute__ ((packed));
+
+
+#define IEEE80211_TSINFO_TYPE(a) ((a.byte1 & 0x01) >> 0)
+#define IEEE80211_TSINFO_TSID(a) ((a.byte1 & 0x1E) >> 1)
+#define IEEE80211_TSINFO_DIR(a) ((a.byte1 & 0x60) >> 5)
+#define IEEE80211_TSINFO_POLICY(a)	(((a.byte1 & 0x80) >> 7) + \
+					 ((a.byte2 & 0x01) << 1))
+#define IEEE80211_TSINFO_AGG(a) ((a.byte2 & 0x02) >> 1)
+#define IEEE80211_TSINFO_APSD(a) ((a.byte2 & 0x04) >> 2)
+#define IEEE80211_TSINFO_UP(a) ((a.byte2 & 0x38) >> 3)
+#define IEEE80211_TSINFO_ACK(a) ((a.byte2 & 0xC0) >> 6)
+#define IEEE80211_TSINFO_SCHEDULE(a) ((a.byte3 & 0x01) >> 0)
+
+#define IEEE80211_SET_TSINFO_TYPE(i, d) (i.byte1 |= (d << 0) & 0x01)
+#define IEEE80211_SET_TSINFO_TSID(i, d) (i.byte1 |= (d << 1) & 0x1E)
+#define IEEE80211_SET_TSINFO_DIR(i, d) (i.byte1 |= (d << 5) & 0x60)
+#define IEEE80211_SET_TSINFO_POLICY(i, d) \
+do { \
+ i.byte1 |= (d & 0x01) << 7; \
+ i.byte2 |= (d & 0x02) >> 1; \
+} while(0)
+#define IEEE80211_SET_TSINFO_AGG(i, d) (i.byte2 |= (d << 1) & 0x02)
+#define IEEE80211_SET_TSINFO_APSD(i, d) (i.byte2 |= (d << 2) & 0x04)
+#define IEEE80211_SET_TSINFO_UP(i, d) (i.byte2 |= (d << 3) & 0x38)
+#define IEEE80211_SET_TSINFO_ACK(i, d) (i.byte2 |= (d << 6) & 0xC0)
+#define IEEE80211_SET_TSINFO_SCHEDULE(i, d) (i.byte3 |= (d << 0) & 0x01)
+
+struct ieee80211_ts_info {
+ u8 byte1;
+ u8 byte2;
+ u8 byte3;
+} __attribute__ ((packed));
+
+struct ieee80211_elem_tspec {
+ struct ieee80211_ts_info ts_info;
+ __le16 nominal_msdu_size;
+ __le16 max_msdu_size;
+ __le32 min_service_interval;
+ __le32 max_service_interval;
+ __le32 inactivity_interval;
+ __le32 suspension_interval;
+ __le32 service_start_time;
+ __le32 min_data_rate;
+ __le32 mean_data_rate;
+ __le32 peak_data_rate;
+ __le32 burst_size;
+ __le32 delay_bound;
+ __le32 min_phy_rate;
+ __le16 surplus_band_allow;
+ __le16 medium_time;
+} __attribute__ ((packed));
+
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -173,9 +242,51 @@ struct ieee80211_mgmt {
struct {
u8 action_code;
u8 dialog_token;
+ u8 variable[0];
+ } __attribute__ ((packed)) addts_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status_code;
+ u8 variable[0];
+ } __attribute__ ((packed)) addts_resp;
+ struct {
+ u8 action_code;
+ struct ieee80211_ts_info ts_info;
+ __le16 reason_code;
+ } __attribute__ ((packed)) delts;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
u8 status_code;
u8 variable[0];
} __attribute__ ((packed)) wme_action;
+ struct {
+ u8 action_code;
+ u8 dest[6];
+ u8 src[6];
+ __le16 capab_info;
+ __le16 timeout;
+ /* Followed by Supported Rates and
+ * Extended Supported Rates */
+ u8 variable[0];
+ } __attribute__ ((packed)) dls_req;
+ struct {
+ u8 action_code;
+ __le16 status_code;
+ u8 dest[6];
+ u8 src[6];
+ /* Followed by Capability Information,
+ * Supported Rates and Extended
+ * Supported Rates */
+ u8 variable[0];
+ } __attribute__ ((packed)) dls_resp;
+ struct {
+ u8 action_code;
+ u8 dest[6];
+ u8 src[6];
+ __le16 reason_code;
+ } __attribute__ ((packed)) dls_teardown;
struct{
u8 action_code;
u8 element_id;
@@ -184,6 +295,25 @@ struct ieee80211_mgmt {
u8 new_chan;
u8 switch_count;
} __attribute__((packed)) chan_switch;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ __le16 capab;
+ __le16 timeout;
+ __le16 start_seq_num;
+ } __attribute__((packed)) addba_req;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status;
+ __le16 capab;
+ __le16 timeout;
+ } __attribute__((packed)) addba_resp;
+ struct{
+ u8 action_code;
+ __le16 params;
+ __le16 reason_code;
+ }__attribute__((packed)) delba;
} u;
} __attribute__ ((packed)) action;
} u;
@@ -270,6 +400,18 @@ enum ieee80211_statuscode {
WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
+ /* 802.11e */
+ WLAN_STATUS_UNSPECIFIED_QOS = 32,
+ WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33,
+ WLAN_STATUS_ASSOC_DENIED_LOWACK = 34,
+ WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35,
+ WLAN_STATUS_REQUEST_DECLINED = 37,
+ WLAN_STATUS_INVALID_QOS_PARAM = 38,
+ WLAN_STATUS_CHANGE_TSPEC = 39,
+ WLAN_STATUS_WAIT_TS_DELAY = 47,
+ WLAN_STATUS_NO_DIRECT_LINK = 48,
+ WLAN_STATUS_STA_NOT_PRESENT = 49,
+ WLAN_STATUS_STA_NOT_QSTA = 50,
};
@@ -300,9 +442,50 @@ enum ieee80211_reasoncode {
WLAN_REASON_INVALID_RSN_IE_CAP = 22,
WLAN_REASON_IEEE8021X_FAILED = 23,
WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
+ /* 802.11e */
+ WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32,
+ WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33,
+ WLAN_REASON_DISASSOC_LOW_ACK = 34,
+ WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35,
+ WLAN_REASON_QSTA_LEAVE_QBSS = 36,
+ WLAN_REASON_QSTA_NOT_USE = 37,
+ WLAN_REASON_QSTA_REQUIRE_SETUP = 38,
+ WLAN_REASON_QSTA_TIMEOUT = 39,
+ WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45,
};
+/* Category Code */
+enum ieee80211_category {
+ WLAN_CATEGORY_SPECTRUM_MGMT = 0,
+ WLAN_CATEGORY_QOS = 1,
+ WLAN_CATEGORY_DLS = 2,
+ WLAN_CATEGORY_BACK = 3,
+ WLAN_CATEGORY_WMM = 17,
+};
+
+/* QoS Action Code */
+enum ieee80211_qos_actioncode {
+ WLAN_ACTION_QOS_ADDTS_REQ = 0,
+ WLAN_ACTION_QOS_ADDTS_RESP = 1,
+ WLAN_ACTION_QOS_DELTS = 2,
+ WLAN_ACTION_QOS_SCHEDULE = 3,
+};
+
+/* DLS Action Code */
+enum ieee80211_dls_actioncode {
+ WLAN_ACTION_DLS_REQ = 0,
+ WLAN_ACTION_DLS_RESP = 1,
+ WLAN_ACTION_DLS_TEARDOWN = 2,
+};
+
+/* BACK Action Code */
+enum ieee80211_back_actioncode {
+ WLAN_ACTION_ADDBA_REQ = 0,
+ WLAN_ACTION_ADDBA_RESP = 1,
+ WLAN_ACTION_DELBA = 2,
+};
+
/* Information Element IDs */
enum ieee80211_eid {
WLAN_EID_SSID = 0,
@@ -318,6 +501,15 @@ enum ieee80211_eid {
WLAN_EID_HP_PARAMS = 8,
WLAN_EID_HP_TABLE = 9,
WLAN_EID_REQUEST = 10,
+ /* 802.11e */
+ WLAN_EID_QBSS_LOAD = 11,
+ WLAN_EID_EDCA_PARAM_SET = 12,
+ WLAN_EID_TSPEC = 13,
+ WLAN_EID_TCLAS = 14,
+ WLAN_EID_SCHEDULE = 15,
+ WLAN_EID_TS_DELAY = 43,
+ WLAN_EID_TCLAS_PROCESSING = 44,
+ WLAN_EID_QOS_CAPA = 46,
/* 802.11h */
WLAN_EID_PWR_CONSTRAINT = 32,
WLAN_EID_PWR_CAPABILITY = 33,
@@ -332,6 +524,9 @@ enum ieee80211_eid {
/* 802.11g */
WLAN_EID_ERP_INFO = 42,
WLAN_EID_EXT_SUPP_RATES = 50,
+ /* 802.11n */
+ WLAN_EID_HT_CAPABILITY = 45,
+ WLAN_EID_HT_EXTRA_INFO = 61,
/* 802.11i */
WLAN_EID_RSN = 48,
WLAN_EID_WPA = 221,
@@ -340,6 +535,9 @@ enum ieee80211_eid {
WLAN_EID_QOS_PARAMETER = 222
};
+/* 80211n */
+#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080
+
/* cipher suite selectors */
#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00
#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01
@@ -350,4 +548,37 @@ enum ieee80211_eid {
#define WLAN_MAX_KEY_LEN 32
+enum ieee80211_tsinfo_direction {
+ WLAN_TSINFO_UPLINK = 0,
+ WLAN_TSINFO_DOWNLINK = 1,
+ WLAN_TSINFO_DIRECTLINK = 2,
+ WLAN_TSINFO_BIDIRECTIONAL = 3,
+};
+
+enum ieee80211_tsinfo_access {
+ WLAN_TSINFO_EDCA = 1,
+ WLAN_TSINFO_HCCA = 2,
+ WLAN_TSINFO_HEMM = 3,
+};
+
+enum ieee80211_tsinfo_psb {
+ WLAN_TSINFO_PSB_LEGACY = 0,
+ WLAN_TSINFO_PSB_APSD = 1,
+};
+
+
+/* WI-FI Alliance OUI Type and Subtype */
+enum wifi_oui_type {
+ WIFI_OUI_TYPE_WPA = 1,
+ WIFI_OUI_TYPE_WMM = 2,
+ WIFI_OUI_TYPE_WSC = 4,
+ WIFI_OUI_TYPE_PSD = 6,
+};
+
+enum wifi_oui_stype_wmm {
+ WIFI_OUI_STYPE_WMM_INFO = 0,
+ WIFI_OUI_STYPE_WMM_PARAM = 1,
+ WIFI_OUI_STYPE_WMM_TSPEC = 2,
+};
+
#endif /* IEEE80211_H */
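
The IEEE80211_TSINFO_* accessors above pack and unpack QoS TS Info fields spread across three bytes. The sketch below mirrors that bit layout in userspace for two of the fields; the struct is redeclared locally, and the TSID and user-priority values are arbitrary examples.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct ts_info { uint8_t byte1, byte2, byte3; };

int main(void)
{
	struct ts_info i;

	memset(&i, 0, sizeof(i));

	/* TSID lives in bits 1..4 of byte1, UP in bits 3..5 of byte2 */
	i.byte1 |= (6 << 1) & 0x1E;	/* TSID = 6 */
	i.byte2 |= (5 << 3) & 0x38;	/* UP (user priority) = 5 */

	printf("TSID=%u UP=%u\n",
	       (i.byte1 & 0x1E) >> 1,
	       (i.byte2 & 0x38) >> 3);
	return 0;
}
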
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -7,6 +7,217 @@
*/
/**
+ * enum nl80211_commands - supported nl80211 commands
+ * @NL80211_CMD_UNSPEC: unspecified command to catch errors
+ * @NL80211_CMD_RENAME_WIPHY: rename a wiphy, needs
+ * %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME
+ * @NL80211_CMD_WIPHY_NEWNAME: rename notification
+ * @NL80211_CMD_GET_CMDLIST: TO BE DEFINED PROPERLY. currently the code makes
+ * it depend on the wiphy only but it really should depend on the
+ * interface type too....
+ * @NL80211_CMD_NEW_CMDLIST: command list result
+ * @NL80211_CMD_ADD_VIRTUAL_INTERFACE: create a virtual interface for the
+ * wiphy identified by an %NL80211_ATTR_WIPHY attribute with the given
+ * %NL80211_ATTR_IFTYPE and %NL80211_ATTR_IFNAME.
+ * @NL80211_CMD_DEL_VIRTUAL_INTERFACE: destroy a virtual interface identified
+ * by %NL80211_ATTR_IFINDEX.
+ * @NL80211_CMD_CHANGE_VIRTUAL_INTERFACE: change type of virtual interface to
+ * the type given by %NL80211_ATTR_IFTYPE, the interface is identified by
+ * %NL80211_ATTR_IFINDEX.
+ * @NL80211_CMD_GET_WIPHYS: request a list of all wiphys present in the system
+ * @NL80211_CMD_NEW_WIPHYS: returned list of all wiphys
+ * @NL80211_CMD_GET_INTERFACES: request a list of all interfaces belonging to
+ * the wiphy identified by %NL80211_ATTR_WIPHY
+ * @NL80211_CMD_NEW_INTERFACES: result for %NL80211_CMD_GET_INTERFACES
+ * @NL80211_CMD_INITIATE_SCAN: initiate a scan with the passed parameters. The
+ * parameters may contain %NL80211_ATTR_FLAG_SCAN_ACTIVE,
+ * %NL80211_ATTR_PHYMODE and a list of channels in an
+ * %NL80211_ATTR_CHANNEL_LIST attribute (an array of nested attributes)
+ * containing %NL80211_ATTR_CHANNEL, %NL80211_ATTR_PHYMODE, and possibly
+ * %NL80211_ATTR_FLAG_SCAN_ACTIVE. The outer %NL80211_ATTR_FLAG_SCAN_ACTIVE
+ * is ignored when a channel list is present.
+ * @NL80211_CMD_SCAN_RESULT: scan result, contains an array in
+ * %NL80211_ATTR_BSS_LIST.
+ * @NL80211_CMD_ASSOCIATE: associate with the given parameters
+ * (%NL80211_ATTR_SSID is mandatory, %NL80211_ATTR_TIMEOUT_TU,
+ * %NL80211_ATTR_BSSID, %NL80211_ATTR_CHANNEL, %NL80211_ATTR_PHYMODE,
+ * and %NL80211_ATTR_IE may be given)
+ * @NL80211_CMD_ADD_KEY: add a key with given %NL80211_ATTR_KEY_DATA,
+ * %NL80211_ATTR_KEY_ID, %NL80211_ATTR_KEY_TYPE, %NL80211_ATTR_MAC and
+ * %NL80211_ATTR_KEY_CIPHER attributes.
+ * @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_ID,
+ * %NL80211_ATTR_KEY_TYPE and %NL80211_ATTR_MAC or all keys.
+ * @__NL80211_CMD_AFTER_LAST: internal use
+ */
+enum nl80211_commands {
+/* don't change the order or add anything inbetween, this is ABI! */
+ NL80211_CMD_UNSPEC,
+ /* %input: wiphy, wiphy_name */
+ NL80211_CMD_RENAME_WIPHY,
+ NL80211_CMD_WIPHY_NEWNAME,
+ /* %input: wiphy|ifindex */
+ NL80211_CMD_GET_CMDLIST,
+ NL80211_CMD_NEW_CMDLIST,
+ /* %input: wiphy, ifname, {iftype} */
+ NL80211_CMD_ADD_VIRTUAL_INTERFACE,
+ /* %input: wiphy, ifindex */
+ NL80211_CMD_DEL_VIRTUAL_INTERFACE,
+ /* %input: ifindex, iftype */
+ NL80211_CMD_CHANGE_VIRTUAL_INTERFACE,
+ /* %input: */
+ NL80211_CMD_GET_WIPHYS,
+ NL80211_CMD_NEW_WIPHYS,
+ /* %input: wiphy */
+ NL80211_CMD_GET_INTERFACES,
+ NL80211_CMD_NEW_INTERFACES,
+ NL80211_CMD_INITIATE_SCAN,
+ NL80211_CMD_SCAN_RESULT,
+ NL80211_CMD_GET_ASSOCIATION,
+ NL80211_CMD_ASSOCIATION_CHANGED,
+ NL80211_CMD_ASSOCIATE,
+ NL80211_CMD_DISASSOCIATE,
+ NL80211_CMD_DEAUTH,
+ NL80211_CMD_GET_AUTH_LIST,
+ NL80211_CMD_NEW_AUTH_LIST,
+ NL80211_CMD_AUTHENTICATION_CHANGED,
+ NL80211_CMD_AP_SET_BEACON,
+ NL80211_CMD_AP_ADD_STA,
+ NL80211_CMD_AP_UPDATE_STA,
+ NL80211_CMD_AP_GET_STA_INFO,
+ NL80211_CMD_AP_SET_RATESETS,
+ NL80211_CMD_ADD_KEY,
+ NL80211_CMD_DEL_KEY,
+
+ /* add commands here */
+
+ /* used to define NL80211_CMD_MAX below */
+ __NL80211_CMD_AFTER_LAST
+};
+#define NL80211_CMD_MAX (__NL80211_CMD_AFTER_LAST - 1)
+
+
+/**
+ * enum nl80211_attrs - nl80211 netlink attributes
+ * @NL80211_ATTR_UNSPEC: unspecified attribute to catch errors
+ * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on
+ * @NL80211_ATTR_IFNAME: network interface name
+ * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf.
+ * /sys/class/ieee80211/<phyname>/index
+ * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming)
+ * @NL80211_ATTR_CMDS: list of u8's identifying commands a device supports
+ * @NL80211_ATTR_IFTYPE: type of virtual interface, see &enum nl80211_iftype
+ * @NL80211_ATTR_INTERFACE_LIST: interface array, nested netlink attribute
+ * @NL80211_ATTR_WIPHY_LIST: wiphy array, nested netlink attribute
+ * @NL80211_ATTR_BSSID: BSSID (must be 6 bytes)
+ * @NL80211_ATTR_SSID: SSID (1-32 bytes)
+ * @NL80211_ATTR_CHANNEL: channel number
+ * @NL80211_ATTR_PHYMODE: PHY mode, see &enum nl80211_phymode
+ * @NL80211_ATTR_CHANNEL_LIST: netlink nested attribute array containing scan
+ * parameters for channels
+ * @NL80211_ATTR_BSS_LIST: nested attribute containing an array
+ * @NL80211_ATTR_BSSTYPE: BSS type, see &enum nl80211_bsstype
+ * @NL80211_ATTR_BEACON_PERIOD: beacon period
+ * @NL80211_ATTR_DTIM_PERIOD: DTIM period
+ * @NL80211_ATTR_TIMESTAMP: 64-bit timestamp of received beacon/probe response
+ * @NL80211_ATTR_IE: information element(s), maximum length %NL80211_MAX_IE_LEN
+ * @NL80211_ATTR_AUTH_ALGORITHM: authentication algorithm
+ * @NL80211_ATTR_TIMEOUT_TU: timeout in TU (TO BE USED)
+ * @NL80211_ATTR_REASON_CODE: 802.11 reason code
+ * @NL80211_ATTR_ASSOCIATION_ID: association ID (u16, 1-2007)
+ * @NL80211_ATTR_DEAUTHENTICATED: TO BE USED
+ * @NL80211_ATTR_RX_SENSITIVITY: receiver sensitivity in dBm
+ * @NL80211_ATTR_TRANSMIT_POWER: transmit power in mW
+ * @NL80211_ATTR_FRAG_THRESHOLD: fragmentation threshold (bytes)
+ * @NL80211_ATTR_FLAG_SCAN_ACTIVE: netlink flag indicating active scan
+ * @NL80211_ATTR_KEY_DATA: temporal key data
+ * @NL80211_ATTR_KEY_ID: key ID (u8, 0-3)
+ * @NL80211_ATTR_KEY_TYPE: key type (see &enum nl80211_keytype)
+ * @NL80211_ATTR_MAC: MAC address
+ * @NL80211_ATTR_KEY_CIPHER: key cipher suite (u32)
+ * @__NL80211_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_attrs {
+/* don't change the order or add anything inbetween, this is ABI! */
+ NL80211_ATTR_UNSPEC,
+ /* %type: u32 */
+ NL80211_ATTR_IFINDEX,
+ /* %type: nulstring */
+ NL80211_ATTR_IFNAME,
+ /* %type: u32 */
+ NL80211_ATTR_WIPHY,
+ /* %type: nulstring */
+ NL80211_ATTR_WIPHY_NAME,
+ NL80211_ATTR_CMDS,
+ /* %type: u32 */
+ NL80211_ATTR_IFTYPE,
+ NL80211_ATTR_INTERFACE_LIST,
+ NL80211_ATTR_WIPHY_LIST,
+ NL80211_ATTR_BSSID,
+ NL80211_ATTR_SSID,
+ NL80211_ATTR_CHANNEL,
+ NL80211_ATTR_PHYMODE,
+ NL80211_ATTR_CHANNEL_LIST,
+ NL80211_ATTR_BSS_LIST,
+ NL80211_ATTR_BSSTYPE,
+ NL80211_ATTR_BEACON_PERIOD,
+ NL80211_ATTR_DTIM_PERIOD,
+ NL80211_ATTR_TIMESTAMP,
+ NL80211_ATTR_IE,
+ NL80211_ATTR_AUTH_ALGORITHM,
+ NL80211_ATTR_TIMEOUT_TU,
+ NL80211_ATTR_REASON_CODE,
+ NL80211_ATTR_ASSOCIATION_ID,
+ NL80211_ATTR_DEAUTHENTICATED,
+ NL80211_ATTR_RX_SENSITIVITY,
+ NL80211_ATTR_TRANSMIT_POWER,
+ NL80211_ATTR_FRAG_THRESHOLD,
+ NL80211_ATTR_FLAG_SCAN_ACTIVE,
+
+ NL80211_ATTR_KEY_DATA,
+ NL80211_ATTR_KEY_ID,
+ NL80211_ATTR_KEY_TYPE,
+ NL80211_ATTR_MAC,
+ NL80211_ATTR_KEY_CIPHER,
+
+ NL80211_ATTR_BEACON_HEAD,
+ NL80211_ATTR_BEACON_TAIL,
+
+ /* add attributes here, update the policy in nl80211.c */
+
+ /* used to define NL80211_ATTR_MAX below */
+ __NL80211_ATTR_AFTER_LAST,
+};
+#define NL80211_ATTR_MAX (__NL80211_ATTR_AFTER_LAST - 1)
+
+/**
+ * enum nl80211_multicast_groups - multicast groups for nl80211
+ * @NL80211_GROUP_CONFIG: members of this group are notified of
+ * configuration changes
+ */
+enum nl80211_multicast_groups {
+ /* be notified of configuration changes like wiphy renames */
+ NL80211_GROUP_CONFIG,
+
+ /* add groups here */
+
+ /* keep last */
+ __NL80211_GROUP_AFTER_LAST
+};
+#define NL80211_GROUP_MAX (__NL80211_GROUP_AFTER_LAST - 1)
+
+/*
+ * maximum length of IE(s) passed in an NL80211_ATTR_IE.
+ * this is an arbitrary limit, 774 means three full-length
+ * IEs would fit... increase if necessary */
+#define NL80211_MAX_IE_LEN 774
+
+/*
+ * maximum number of items in an ATTR_CHANNEL_LIST,
+ * just to avoid too large allocations
+ */
+#define NL80211_MAX_CHANNEL_LIST_ITEM 200
+
+/**
* enum nl80211_iftype - (virtual) interface types
* @NL80211_IFTYPE_UNSPECIFIED: unspecified type, driver decides
* @NL80211_IFTYPE_ADHOC: independent BSS member
@@ -35,4 +246,56 @@ enum nl80211_iftype {
};
#define NL80211_IFTYPE_MAX (__NL80211_IFTYPE_AFTER_LAST - 1)
+/**
+ * enum nl80211_phymode - PHY modes
+ * @NL80211_PHYMODE_A: 5 GHz PHY
+ * @NL80211_PHYMODE_B: 2.4 GHz PHY (B mode)
+ * @NL80211_PHYMODE_G: 2.4 GHz PHY (G, compatible with B)
+ * @__NL80211_PHYMODE_AFTER_LAST: internal use
+ *
+ * These values are used for %NL80211_ATTR_PHYMODE.
+ */
+enum nl80211_phymode {
+ NL80211_PHYMODE_A,
+ NL80211_PHYMODE_B,
+ NL80211_PHYMODE_G,
+
+ /* keep last */
+ __NL80211_PHYMODE_AFTER_LAST
+};
+#define NL80211_PHYMODE_MAX (__NL80211_PHYMODE_AFTER_LAST - 1)
+
+/**
+ * enum nl80211_bsstype - BSS types
+ * @NL80211_BSSTYPE_INFRASTRUCTURE: infrastructure BSS
+ * @NL80211_BSSTYPE_INDEPENDENT: independent BSS (ad-hoc network)
+ * @__NL80211_BSSTYPE_AFTER_LAST: internal use
+ *
+ * These values are used for %NL80211_ATTR_BSSTYPE.
+ */
+enum nl80211_bsstype {
+ NL80211_BSSTYPE_INFRASTRUCTURE,
+ NL80211_BSSTYPE_INDEPENDENT,
+
+ /* keep last */
+ __NL80211_BSSTYPE_AFTER_LAST
+};
+#define NL80211_BSSTYPE_MAX (__NL80211_BSSTYPE_AFTER_LAST - 1)
+
+/**
+ * enum nl80211_keytype - key types
+ * @NL80211_KEYTYPE_GROUP: group key
+ * @NL80211_KEYTYPE_PAIRWISE: pairwise key
+ * @NL80211_KEYTYPE_PEER: peer key
+ */
+enum nl80211_keytype {
+ NL80211_KEYTYPE_GROUP,
+ NL80211_KEYTYPE_PAIRWISE,
+ NL80211_KEYTYPE_PEER,
+
+ /* keep last */
+ __NL80211_KEYTYPE_AFTER_LAST
+};
+#define NL80211_KEYTYPE_MAX (__NL80211_KEYTYPE_AFTER_LAST - 1)
+
#endif /* __LINUX_NL80211_H */
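The attribute IDs above are only half of the story: as the "update the policy in nl80211.c" comment notes, each attribute also needs an entry in the generic-netlink validation policy. A minimal sketch of what such entries could look like (the type and length choices below are illustrative, not taken from this commit):

static struct nla_policy nl80211_policy[NL80211_ATTR_MAX + 1] = {
	[NL80211_ATTR_IFINDEX]	= { .type = NLA_U32 },
	[NL80211_ATTR_IFNAME]	= { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[NL80211_ATTR_WIPHY]	= { .type = NLA_U32 },
	[NL80211_ATTR_SSID]	= { .type = NLA_BINARY, .len = 32 },
	[NL80211_ATTR_IE]	= { .type = NLA_BINARY, .len = NL80211_MAX_IE_LEN },
	[NL80211_ATTR_KEY_ID]	= { .type = NLA_U8 },
	[NL80211_ATTR_MAC]	= { .type = NLA_BINARY, .len = ETH_ALEN },
};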
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3,6 +3,7 @@
#include <linux/netlink.h>
#include <linux/skbuff.h>
+#include <linux/nl80211.h>
#include <net/genetlink.h>
/*
@@ -49,6 +50,69 @@ extern int ieee80211_radiotap_iterator_n
struct ieee80211_radiotap_iterator *iterator);
+/**
+ * struct scan_channel - describes a single channel to scan
+ * @phymode: PHY mode for this channel
+ * @channel: channel number (1-14, ...)
+ * @active: scan actively or passively on this channel
+ */
+struct scan_channel {
+ enum nl80211_phymode phymode;
+ u32 channel;
+ int active;
+};
+
+/**
+ * struct scan_params - describes scan parameters
+ * @n_channels: number of items in @channels array or -1 to indicate all
+ * channels should be scanned (in that case @channels will be %NULL)
+ * @active: when n_channels is -1 this determines active/passive scanning.
+ * @phymode: when n_channels is -1 this determines PHY mode to scan. It is
+ * not possible to scan different PHY modes in one request w/o giving
+ * a channel list.
+ * @channels: array containing @n_channels &struct scan_channel items
+ */
+struct scan_params {
+ int n_channels;
+ int active;
+ enum nl80211_phymode phymode;
+ struct scan_channel *channels;
+};
+
+/**
+ * struct association_params - describes association parameters
+ * @valid: this member contains flags which items are valid
+ * @bssid: the BSSID of the BSS to associate [%ASSOC_PARAMS_BSSID]
+ * @timeout: timeout (in TU) [%ASSOC_PARAMS_TIMEOUT]
+ * @ie: information element(s) to include in the association frames [%ASSOC_PARAMS_IE]
+ * @ie_len: length of the information element(s)
+ * @ssid: the SSID, always valid.
+ * @ssid_len: length of the SSID
+ */
+struct association_params {
+ u8 *bssid;
+ u32 timeout;
+ u8 *ie;
+ int ie_len;
+ u8 *ssid;
+ int ssid_len;
+
+ unsigned int valid;
+};
+#define ASSOC_PARAMS_TIMEOUT (1<<0)
+
+/**
+ * struct key_params - key information
+ */
+struct key_params {
+ u8 *key;
+ int key_len;
+ int key_id;
+ u32 key_type;
+ u8 *macaddress;
+ u32 cipher;
+};
+
/* from net/wireless.h */
struct wiphy;
@@ -68,11 +132,62 @@ struct wiphy;
* @add_virtual_intf: create a new virtual interface with the given name
*
* @del_virtual_intf: remove the virtual interface determined by ifindex.
+ *
+ * @change_virtual_intf: change type of virtual interface
+ *
+ * @associate: associate with given parameters
+ *
+ * @disassociate: disassociate from current AP
+ *
+ * @deauth: deauth from current AP
+ *
+ * @initiate_scan: scan with the given information (see &struct scan_params above)
+ *
+ * @get_association: get BSSID of the BSS that the device is currently
+ * associated to and return 1, or return 0 if not
+ * associated (or a negative error code)
+ * @get_auth_list: get list of BSSIDs of all BSSs the device has
+ * authenticated with, must call next_bssid for each,
+ * next_bssid returns non-zero on error, the given data
+ * is to be passed to that callback
+ * @add_key: add a key using &struct key_params
+ * @del_key: delete a key using info from &struct key_params
*/
struct cfg80211_ops {
int (*add_virtual_intf)(struct wiphy *wiphy, char *name,
- unsigned int type);
+ enum nl80211_iftype type);
int (*del_virtual_intf)(struct wiphy *wiphy, int ifindex);
+ int (*change_virtual_intf)(struct wiphy *wiphy, int ifindex,
+ enum nl80211_iftype type);
+
+ int (*associate)(struct wiphy *wiphy, struct net_device *dev,
+ struct association_params *params);
+ int (*disassociate)(struct wiphy *wiphy, struct net_device *dev);
+ int (*deauth)(struct wiphy *wiphy, struct net_device *dev);
+
+
+ int (*initiate_scan)(struct wiphy *wiphy, struct net_device *dev,
+ struct scan_params *params);
+
+
+ int (*get_association)(struct wiphy *wiphy, struct net_device *dev,
+ u8 *bssid);
+
+ int (*get_auth_list)(struct wiphy *wiphy, struct net_device *dev,
+ void *data,
+ int (*next_bssid)(void *data, u8 *bssid));
+
+ int (*add_key)(struct wiphy *wiphy, struct net_device *dev,
+ struct key_params *params);
+ int (*del_key)(struct wiphy *wiphy, struct net_device *dev,
+ struct key_params *params);
};
+
+/* helper functions specific to nl80211 */
+extern void *nl80211hdr_put(struct sk_buff *skb, u32 pid,
+ u32 seq, int flags, u8 cmd);
+extern void *nl80211msg_new(struct sk_buff **skb, u32 pid,
+ u32 seq, int flags, u8 cmd);
+
#endif /* __NET_CFG80211_H */
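On the driver side the new callbacks are used by filling a struct cfg80211_ops and handing it to the wireless core when the wiphy is created. A minimal sketch for a hypothetical driver; the mydrv_* names, the managed/ad-hoc restriction and the mydrv_scan_all()/mydrv_scan_list() helpers are all made up for illustration:

static int mydrv_change_iface(struct wiphy *wiphy, int ifindex,
			      enum nl80211_iftype type)
{
	/* pretend this device only supports managed and ad-hoc operation */
	if (type != NL80211_IFTYPE_STATION && type != NL80211_IFTYPE_ADHOC)
		return -EOPNOTSUPP;
	return 0;
}

static int mydrv_scan(struct wiphy *wiphy, struct net_device *dev,
		      struct scan_params *params)
{
	/* n_channels == -1 means "scan everything", per the struct documentation */
	if (params->n_channels == -1)
		return mydrv_scan_all(dev, params->active, params->phymode);
	return mydrv_scan_list(dev, params->channels, params->n_channels);
}

static struct cfg80211_ops mydrv_cfg_ops = {
	.change_virtual_intf	= mydrv_change_iface,
	.initiate_scan		= mydrv_scan,
};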
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -431,7 +431,13 @@ struct iw_public_data {
* Those may be called only within the kernel.
*/
-/* functions that may be called by driver modules */
+/* First : function strictly used inside the kernel */
+
+/* Handle /proc/net/wireless, called in net/code/dev.c */
+extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
+ int length);
+
+/* Second : functions that may be called by driver modules */
/* Send a single event to user space */
extern void wireless_send_event(struct net_device * dev,
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -300,7 +300,6 @@ struct ieee80211_conf {
/* Following five fields are used for IEEE 802.11H */
unsigned int radar_detect;
unsigned int spect_mgmt;
- /* All following fields are currently unused. */
unsigned int quiet_duration; /* duration of quiet period */
unsigned int quiet_offset; /* how far into the beacon is the quiet
* period */
@@ -521,6 +520,9 @@ struct ieee80211_hw {
* per-packet RC4 key with each TX frame when doing hwcrypto */
#define IEEE80211_HW_TKIP_REQ_PHASE2_KEY (1<<14)
+ /* The device is capable of supporting 11n */
+#define IEEE80211_HW_SUPPORT_HT_MODE (1<<15)
+
u32 flags; /* hardware flags defined above */
/* Set to the size of a needed device specific skb headroom for TX skbs. */
@@ -649,8 +651,7 @@ struct ieee80211_ops {
* used if the wlan hardware or low-level driver implements PAE.
* 80211.o module will anyway filter frames based on authorization
* state, so this function pointer can be NULL if low-level driver does
- * not require event notification about port state changes.
- * Currently unused. */
+ * not require event notification about port state changes. */
int (*set_port_auth)(struct ieee80211_hw *hw, u8 *addr,
int authorized);
@@ -702,8 +703,9 @@ struct ieee80211_ops {
/* Get statistics of the current TX queue status. This is used to get
* number of currently queued packets (queue length), maximum queue
* size (limit), and total number of packets sent using each TX queue
- * (count).
- * Currently unused. */
+ * (count). This information is used for WMM to find out which TX
+ * queues have room for more packets and by hostapd to provide
+ * statistics about the current queueing state to external programs. */
int (*get_tx_stats)(struct ieee80211_hw *hw,
struct ieee80211_tx_queue_stats *stats);
@@ -713,12 +715,25 @@ struct ieee80211_ops {
* Must be atomic. */
u64 (*get_tsf)(struct ieee80211_hw *hw);
+ /* Call low level driver with 11n Block Ack action */
+ int (*handle_ba_action)(struct ieee80211_hw *hw,
+ struct ieee80211_mgmt *mgmt);
+
/* Reset the TSF timer and allow firmware/hardware to synchronize with
* other STAs in the IBSS. This is only used in IBSS mode. This
* function is optional if the firmware/hardware takes full care of
* TSF synchronization. */
void (*reset_tsf)(struct ieee80211_hw *hw);
+ /* Configure ht parameters. */
+ int (*conf_ht)(struct ieee80211_hw *hw,
+ struct ieee80211_ht_capability *ht_cap_param,
+ struct ieee80211_ht_additional_info *ht_extra_param);
+
+ /* Get ht capabilities from the device */
+ void (*get_ht_capab)(struct ieee80211_hw *hw,
+ struct ieee80211_ht_capability *ht_cap_param);
+
/* Setup beacon data for IBSS beacons. Unlike access point (Master),
* IBSS uses a fixed beacon frame which is configured using this
* function. This handler is required only for IBSS mode. */
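The new IEEE80211_HW_SUPPORT_HT_MODE flag is advertised the same way as the existing hardware flags, i.e. set on the ieee80211_hw before registration, after which the stack may exercise the conf_ht()/get_ht_capab() hooks. A minimal sketch from a hypothetical driver probe (struct mydrv_priv and mydrv_ops are placeholders):

	struct ieee80211_hw *hw;
	int err;

	hw = ieee80211_alloc_hw(sizeof(struct mydrv_priv), &mydrv_ops);
	if (!hw)
		return -ENOMEM;

	/* this device can do 11n, so expose the new capability flag */
	hw->flags |= IEEE80211_HW_SUPPORT_HT_MODE;

	err = ieee80211_register_hw(hw);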


@ -1,37 +0,0 @@
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -40,9 +40,22 @@ u64 uevent_seqnum;
char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug";
static DEFINE_SPINLOCK(sequence_lock);
#if defined(CONFIG_NET)
-static struct sock *uevent_sock;
+struct sock *uevent_sock = NULL;
+EXPORT_SYMBOL_GPL(uevent_sock);
#endif
+u64 uevent_next_seqnum(void)
+{
+ u64 seq;
+
+ spin_lock(&sequence_lock);
+ seq = ++uevent_seqnum;
+ spin_unlock(&sequence_lock);
+
+ return seq;
+}
+EXPORT_SYMBOL_GPL(uevent_next_seqnum);
+
/**
* kobject_uevent_env - send an uevent with environmental data
*
@@ -159,9 +172,7 @@ int kobject_uevent_env(struct kobject *k
}
/* we will send an event, request a new sequence number */
- spin_lock(&sequence_lock);
- seq = ++uevent_seqnum;
- spin_unlock(&sequence_lock);
+ seq = uevent_next_seqnum();
sprintf(seq_buff, "SEQNUM=%llu", (unsigned long long)seq);
#if defined(CONFIG_NET)
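Exporting uevent_sock and splitting out uevent_next_seqnum() is presumably meant for out-of-tree modules (OpenWrt's hotplug helpers, for example) that want to emit their own events alongside the regular uevents. A rough sketch of the intended use; the skb handling is omitted and the payload format is assumed to mirror kobject_uevent_env():

	char buf[64];
	u64 seq;

	/* take the next global sequence number, exactly as kobject_uevent_env() does */
	seq = uevent_next_seqnum();
	snprintf(buf, sizeof(buf), "SEQNUM=%llu", (unsigned long long)seq);

	/* the module would append this (plus ACTION=, DEVPATH=, ...) to an skb and
	 * broadcast it with netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL) */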


@ -1,11 +0,0 @@
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -9,7 +9,7 @@ config SND_PCM
depends on SND
config SND_HWDEP
- tristate
+ tristate "Sound hardware support"
depends on SND
config SND_RAWMIDI

File diff suppressed because it is too large


@ -1,152 +0,0 @@
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -481,16 +481,16 @@ static void sdhci_finish_data(struct sdh
* Controller doesn't count down when in single block mode.
*/
if (data->blocks == 1)
- blocks = (data->error == MMC_ERR_NONE) ? 0 : 1;
+ blocks = (data->error == 0) ? 0 : 1;
else
blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
data->bytes_xfered = data->blksz * (data->blocks - blocks);
- if ((data->error == MMC_ERR_NONE) && blocks) {
+ if (!data->error && blocks) {
printk(KERN_ERR "%s: Controller signalled completion even "
"though there were blocks left.\n",
mmc_hostname(host->mmc));
- data->error = MMC_ERR_FAILED;
+ data->error = -EIO;
}
if (data->stop) {
@@ -498,7 +498,7 @@ static void sdhci_finish_data(struct sdh
* The controller needs a reset of internal state machines
* upon error conditions.
*/
- if (data->error != MMC_ERR_NONE) {
+ if (data->error) {
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
}
@@ -533,7 +533,7 @@ static void sdhci_send_command(struct sd
printk(KERN_ERR "%s: Controller never released "
"inhibit bit(s).\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
- cmd->error = MMC_ERR_FAILED;
+ cmd->error = -EIO;
tasklet_schedule(&host->finish_tasklet);
return;
}
@@ -554,7 +554,7 @@ static void sdhci_send_command(struct sd
if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
printk(KERN_ERR "%s: Unsupported response type!\n",
mmc_hostname(host->mmc));
- cmd->error = MMC_ERR_INVALID;
+ cmd->error = -EINVAL;
tasklet_schedule(&host->finish_tasklet);
return;
}
@@ -601,7 +601,7 @@ static void sdhci_finish_command(struct
}
}
- host->cmd->error = MMC_ERR_NONE;
+ host->cmd->error = 0;
if (host->data && host->data_early)
sdhci_finish_data(host);
@@ -722,7 +722,7 @@ static void sdhci_request(struct mmc_hos
host->mrq = mrq;
if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
- host->mrq->cmd->error = MMC_ERR_TIMEOUT;
+ host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
} else
sdhci_send_command(host, mrq->cmd);
@@ -831,7 +831,7 @@ static void sdhci_tasklet_card(unsigned
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
- host->mrq->cmd->error = MMC_ERR_FAILED;
+ host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
}
}
@@ -859,9 +859,9 @@ static void sdhci_tasklet_finish(unsigne
* The controller needs a reset of internal state machines
* upon error conditions.
*/
- if ((mrq->cmd->error != MMC_ERR_NONE) ||
- (mrq->data && ((mrq->data->error != MMC_ERR_NONE) ||
- (mrq->data->stop && (mrq->data->stop->error != MMC_ERR_NONE))))) {
+ if (mrq->cmd->error ||
+ (mrq->data && (mrq->data->error ||
+ (mrq->data->stop && mrq->data->stop->error)))) {
/* Some controllers need this kick or reset won't work here */
if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
@@ -906,13 +906,13 @@ static void sdhci_timeout_timer(unsigned
sdhci_dumpregs(host);
if (host->data) {
- host->data->error = MMC_ERR_TIMEOUT;
+ host->data->error = -ETIMEDOUT;
sdhci_finish_data(host);
} else {
if (host->cmd)
- host->cmd->error = MMC_ERR_TIMEOUT;
+ host->cmd->error = -ETIMEDOUT;
else
- host->mrq->cmd->error = MMC_ERR_TIMEOUT;
+ host->mrq->cmd->error = -ETIMEDOUT;
tasklet_schedule(&host->finish_tasklet);
}
@@ -941,13 +941,12 @@ static void sdhci_cmd_irq(struct sdhci_h
}
if (intmask & SDHCI_INT_TIMEOUT)
- host->cmd->error = MMC_ERR_TIMEOUT;
- else if (intmask & SDHCI_INT_CRC)
- host->cmd->error = MMC_ERR_BADCRC;
- else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX))
- host->cmd->error = MMC_ERR_FAILED;
+ host->cmd->error = -ETIMEDOUT;
+ else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+ SDHCI_INT_INDEX))
+ host->cmd->error = -EILSEQ;
- if (host->cmd->error != MMC_ERR_NONE)
+ if (host->cmd->error)
tasklet_schedule(&host->finish_tasklet);
else if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
@@ -974,13 +973,11 @@ static void sdhci_data_irq(struct sdhci_
}
if (intmask & SDHCI_INT_DATA_TIMEOUT)
- host->data->error = MMC_ERR_TIMEOUT;
- else if (intmask & SDHCI_INT_DATA_CRC)
- host->data->error = MMC_ERR_BADCRC;
- else if (intmask & SDHCI_INT_DATA_END_BIT)
- host->data->error = MMC_ERR_FAILED;
+ host->data->error = -ETIMEDOUT;
+ else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+ host->data->error = -EILSEQ;
- if (host->data->error != MMC_ERR_NONE)
+ if (host->data->error)
sdhci_finish_data(host);
else {
if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
@@ -1312,7 +1309,7 @@ static int __devinit sdhci_probe_slot(st
mmc->ops = &sdhci_ops;
mmc->f_min = host->max_clk / 256;
mmc->f_max = host->max_clk;
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE;
if (caps & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;


@ -1,18 +0,0 @@
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -133,4 +133,8 @@ config LEDS_TRIGGER_HEARTBEAT
load average.
If unsure, say Y.
+config LEDS_TRIGGER_MORSE
+ tristate "LED Morse Trigger"
+ depends on LEDS_TRIGGERS
+
endif # NEW_LEDS
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
+obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o


@ -1,25 +0,0 @@
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -81,6 +81,12 @@ config LEDS_WRAP
help
This option enables support for the PCEngines WRAP programmable LEDs.
+config LEDS_ALIX
+ tristate "LED Support for the ALIX 2/3 boards"
+ depends on LEDS_CLASS
+ help
+ This option enables support for the three LEDs on the PCEngines ALIX 2/3 boards.
+
config LEDS_H1940
tristate "LED Support for iPAQ H1940 device"
depends LEDS_CLASS && ARCH_H1940
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c2
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
+obj-$(CONFIG_LEDS_ALIX) += leds-alix.o
obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
obj-$(CONFIG_LEDS_COBALT) += leds-cobalt.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o


@ -1,21 +0,0 @@
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -143,4 +143,11 @@ config LEDS_TRIGGER_MORSE
tristate "LED Morse Trigger"
depends on LEDS_TRIGGERS
+config LEDS_TRIGGER_DEFAULT_ON
+ tristate "LED Default ON Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be initialised in the ON state.
+ If unsure, say Y.
+
endif # NEW_LEDS
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledt
obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o
+obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o


@ -1,21 +0,0 @@
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -150,4 +150,11 @@ config LEDS_TRIGGER_DEFAULT_ON
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
+config LEDS_TRIGGER_NETDEV
+ tristate "LED Netdev Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by network device activity.
+ If unsure, say Y.
+
endif # NEW_LEDS
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += l
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o


@ -1,30 +0,0 @@
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -183,4 +183,20 @@ config HP_SDC_RTC
Say Y here if you want to support the built-in real time clock
of the HP SDC controller.
+config INPUT_GPIO_BUTTONS
+ tristate "Polled GPIO buttons interface"
+ depends on GENERIC_GPIO
+ select INPUT_POLLDEV
+ help
+ This driver implements support for buttons connected
+ to GPIO pins of various CPUs (and some other chips).
+
+ Say Y here if your device has buttons connected
+ directly to such GPIO pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which GPIOs are used.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gpio-buttons.
+
endif
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_INPUT_POWERMATE) += powerm
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_UINPUT) += uinput.o
+obj-$(CONFIG_INPUT_GPIO_BUTTONS) += gpio_buttons.o
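As the help text says, the driver only binds if board-specific code registers a matching platform device describing the GPIO lines. A minimal sketch of such board glue; the platform-data layout comes from the out-of-tree gpio-buttons driver, so struct myboard_button_data and its contents are placeholders, and the "gpio-buttons" device name is assumed to match the driver:

static struct myboard_button_data myboard_buttons_pdata = {
	/* which GPIOs carry buttons, their key codes, polling interval, ... */
};

static struct platform_device myboard_buttons_device = {
	.name	= "gpio-buttons",
	.id	= -1,
	.dev	= {
		.platform_data = &myboard_buttons_pdata,
	},
};

static int __init myboard_buttons_init(void)
{
	return platform_device_register(&myboard_buttons_device);
}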


@ -1,26 +0,0 @@
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -970,6 +970,13 @@ config CS5535_GPIO
If compiled as a module, it will be called cs5535_gpio.
+config GPIO_DEVICE
+ tristate "GPIO device support"
+ depends on GENERIC_GPIO
+ help
+ Say Y to enable Linux GPIO device support. This allows control of
+ GPIO pins using a character device
+
config GPIO_VR41XX
tristate "NEC VR4100 series General-purpose I/O Unit support"
depends on CPU_VR41XX
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio
obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
+obj-$(CONFIG_GPIO_DEVICE) += gpio_dev.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o


@ -1,17 +0,0 @@
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -419,6 +419,7 @@ config FS_POSIX_ACL
source "fs/xfs/Kconfig"
source "fs/gfs2/Kconfig"
+source "fs/yaffs2/Kconfig"
config OCFS2_FS
tristate "OCFS2 file system support"
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -120,3 +120,4 @@ obj-$(CONFIG_HPPFS) += hppfs/
obj-$(CONFIG_DEBUG_FS) += debugfs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_GFS2_FS) += gfs2/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/


@ -1,63 +0,0 @@
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -344,6 +344,50 @@ int phy_ethtool_gset(struct phy_device *
}
EXPORT_SYMBOL(phy_ethtool_gset);
+int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr)
+{
+ u32 cmd;
+ int tmp;
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+ struct ethtool_value edata = { ETHTOOL_GLINK };
+
+ if (get_user(cmd, (u32 *) useraddr))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ETHTOOL_GSET:
+ phy_ethtool_gset(phydev, &ecmd);
+ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ return 0;
+
+ case ETHTOOL_SSET:
+ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+ return -EFAULT;
+ return phy_ethtool_sset(phydev, &ecmd);
+
+ case ETHTOOL_NWAY_RST:
+ /* if autoneg is off, it's an error */
+ tmp = phy_read(phydev, MII_BMCR);
+ if (tmp & BMCR_ANENABLE) {
+ tmp |= (BMCR_ANRESTART);
+ phy_write(phydev, MII_BMCR, tmp);
+ return 0;
+ }
+ return -EINVAL;
+
+ case ETHTOOL_GLINK:
+ edata.data = (phy_read(phydev,
+ MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(phy_ethtool_ioctl);
+
/**
* phy_mii_ioctl - generic PHY MII ioctl interface
* @phydev: the phy_device struct
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -393,6 +393,7 @@ void phy_start_machine(struct phy_device
void phy_stop_machine(struct phy_device *phydev);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr);
int phy_mii_ioctl(struct phy_device *phydev,
struct mii_ioctl_data *mii_data, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
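The new phy_ethtool_ioctl() helper lets a MAC driver hand the whole SIOCETHTOOL path over to phylib instead of open-coding the GSET/SSET/NWAY_RST/GLINK handling. A minimal sketch of a hypothetical ethernet driver's ioctl handler built on it (the mymac_* names are made up):

static int mymac_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mymac_priv *priv = netdev_priv(dev);

	if (!netif_running(dev) || !priv->phydev)
		return -EINVAL;

	if (cmd == SIOCETHTOOL)
		return phy_ethtool_ioctl(priv->phydev, (void *) rq->ifr_data);

	/* plain MII register access etc. still goes through phylib as before */
	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}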


@ -1,11 +0,0 @@
--- a/init/main.c
+++ b/init/main.c
@@ -780,7 +780,7 @@ static int noinline init_post(void)
numa_default_policy();
if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
- printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+ printk(KERN_WARNING "Please be patient, while OpenWrt loads ...\n");
(void) sys_dup(0);
(void) sys_dup(0);


@ -1,25 +0,0 @@
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -58,6 +58,7 @@ static struct usb_driver usb_serial_driv
drivers depend on it.
*/
+static ushort maxSize = 0;
static int debug;
static struct usb_serial *serial_table[SERIAL_TTY_MINORS]; /* initially all NULL */
static DEFINE_MUTEX(table_lock);
@@ -866,7 +867,7 @@ int usb_serial_probe(struct usb_interfac
dev_err(&interface->dev, "No free urbs available\n");
goto probe_error;
}
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = (endpoint->wMaxPacketSize > maxSize) ? endpoint->wMaxPacketSize : maxSize;
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
port->bulk_in_buffer = kmalloc (buffer_size, GFP_KERNEL);
@@ -1276,3 +1277,5 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
+module_param(maxSize, ushort,0);
+MODULE_PARM_DESC(maxSize,"User specified USB endpoint size");


@ -1,46 +0,0 @@
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -1,6 +1,10 @@
#ifndef _LINUX_TIME_H
#define _LINUX_TIME_H
+#ifndef __KERNEL__
+#include <time.h>
+#else
+
#include <linux/types.h>
#ifdef __KERNEL__
@@ -227,4 +231,6 @@ struct itimerval {
*/
#define TIMER_ABSTIME 0x01
+#endif /* __KERNEL__ DEBIAN */
+
#endif
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -1,6 +1,14 @@
#ifndef _LINUX_TYPES_H
#define _LINUX_TYPES_H
+/* Debian: Use userland types instead. */
+#ifndef __KERNEL__
+# include <sys/types.h>
+/* For other kernel headers. */
+# include <linux/posix_types.h>
+# include <asm/types.h>
+#else
+
#ifdef __KERNEL__
#define BITS_TO_LONGS(bits) \
@@ -162,6 +170,8 @@ typedef unsigned long blkcnt_t;
#endif /* __KERNEL_STRICT_NAMES */
+#endif /* __KERNEL__ DEBIAN */
+
/*
* Below are truly Linux-specific types that should never collide with
* any application/library that wants linux/types.h.


@ -1,102 +0,0 @@
--- a/scripts/genksyms/parse.c_shipped
+++ b/scripts/genksyms/parse.c_shipped
@@ -144,7 +144,9 @@
#include <assert.h>
+#ifndef __APPLE__
#include <malloc.h>
+#endif
#include "genksyms.h"
static int is_typedef;
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -24,7 +24,9 @@
%{
#include <assert.h>
+#ifndef __APPLE__
#include <malloc.h>
+#endif
#include "genksyms.h"
static int is_typedef;
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -28,6 +28,35 @@
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
+#ifdef __APPLE__
+/* Darwin has no memmem implementation, this one is ripped from the uClibc-0.9.28 source */
+void *memmem (const void *haystack, size_t haystack_len,
+ const void *needle, size_t needle_len)
+{
+ const char *begin;
+ const char *const last_possible
+ = (const char *) haystack + haystack_len - needle_len;
+
+ if (needle_len == 0)
+ /* The first occurrence of the empty string is deemed to occur at
+ the beginning of the string. */
+ return (void *) haystack;
+
+ /* Sanity check, otherwise the loop might search through the whole
+ memory. */
+ if (__builtin_expect (haystack_len < needle_len, 0))
+ return NULL;
+
+ for (begin = (const char *) haystack; begin <= last_possible; ++begin)
+ if (begin[0] == ((const char *) needle)[0] &&
+ !memcmp ((const void *) &begin[1],
+ (const void *) ((const char *) needle + 1),
+ needle_len - 1))
+ return (void *) begin;
+
+ return NULL;
+}
+#endif
#define KSYM_NAME_LEN 128
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -88,6 +88,9 @@ check-lxdialog := $(srctree)/$(src)/lxd
# we really need to do so. (Do not call gcc as part of make mrproper)
HOST_EXTRACFLAGS = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags)
HOST_LOADLIBES = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ldflags $(HOSTCC))
+ifeq ($(shell uname -s),Darwin)
+HOST_LOADLIBES += -lncurses
+endif
HOST_EXTRACFLAGS += -DLOCALE
--- a/scripts/mod/mk_elfconfig.c
+++ b/scripts/mod/mk_elfconfig.c
@@ -1,7 +1,11 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#ifndef __APPLE__
#include <elf.h>
+#else
+#include "../../../../../tools/sstrip/include/elf.h"
+#endif
int
main(int argc, char **argv)
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -7,7 +7,11 @@
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
+#ifndef __APPLE__
#include <elf.h>
+#else
+#include "../../../../../tools/sstrip/include/elf.h"
+#endif
#include "elfconfig.h"


@ -1,17 +0,0 @@
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -16,6 +16,7 @@ enum {
false = 0,
true = 1
};
+#endif /* __KERNEL__ */
#undef offsetof
#ifdef __compiler_offsetof
@@ -23,6 +24,5 @@ enum {
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
-#endif /* __KERNEL__ */
#endif


@ -1,20 +0,0 @@
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -125,7 +125,7 @@ parse() {
str="${ftype} ${name} ${location} ${str}"
;;
"nod")
- local dev=`LC_ALL=C ls -l "${location}"`
+ local dev=`LC_ALL=C ls -l --time-style=locale "${location}"`
local maj=`field 5 ${dev}`
local min=`field 6 ${dev}`
maj=${maj%,}
@@ -135,7 +135,7 @@ parse() {
str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
;;
"slink")
- local target=`field 11 $(LC_ALL=C ls -l "${location}")`
+ local target=`field 11 $(LC_ALL=C ls -l --time-style=locale "${location}")`
str="${ftype} ${name} ${target} ${str}"
;;
*)


@ -1,10 +0,0 @@
--- a/arch/i386/boot/tools/build.c
+++ b/arch/i386/boot/tools/build.c
@@ -29,7 +29,6 @@
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
-#include <sys/sysmacros.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>


@ -1,35 +0,0 @@
From: Lennert Buytenhek <buytenh@wantstofly.org>
Date: Fri, 12 Oct 2007 13:38:54 +0000 (+0100)
Subject: [ARM] 4600/1: fix kernel build failure with build-id-supporting binutils
X-Git-Tag: v2.6.24-rc1~1273^2~3
X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=1e621a8e3752367d4aae78a8ab00a18fb2793f34;hp=033b8ffe3f1ea8174d51d125838ac6deea60f63f
[ARM] 4600/1: fix kernel build failure with build-id-supporting binutils
Newer versions of binutils support --build-id, which adds an ELF
note section called ".note.gnu.build-id" to the output. On the ARM
kernel build, because there is no explicit mention of this section
in the shipped ld script, this section is placed at vaddr 0x00000000
(whereas the normal kernel text/data typically starts at vaddr
0xc0008000), causing the output of objcopy (Image) to produce a 3G+
file.
This patch makes objcopy strip the .note.gnu.build-id section from
the Image file along with all other note sections, which fixes the
build.
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -12,7 +12,7 @@
LDFLAGS_vmlinux :=-p --no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
-OBJCOPYFLAGS :=-O binary -R .note -R .comment -S
+OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
#CFLAGS +=-pipe
# Explicitly specify 32-bit ARM ISA since toolchain default can be -mthumb:


@ -1,78 +0,0 @@
From a.othieno@bluewin.ch Tue Oct 11 07:50:21 2005
From: Arthur Othieno <a.othieno@bluewin.ch>
Subject: Big-endian I/O memory accessors.
Date: Tue, 11 Oct 2005 07:50:21 +1000
X-Patchwork-ID: 2759
From: Arthur Othieno <a.othieno@bluewin.ch>
I/O memory accessors. Big endian version. For those busses/devices
that do export big-endian I/O memory.
Of notable relevance/reference:
http://lwn.net/Articles/132804/
http://ozlabs.org/pipermail/linuxppc-embedded/2005-August/019798.html
http://ozlabs.org/pipermail/linuxppc-embedded/2005-August/019752.html
Signed-off-by: Arthur Othieno <a.othieno@bluewin.ch>
---
Paulus,
A similar patch for ppc64 made it upstream with your big ppc64 merge.
This one is still sitting in http://patchwork.ozlabs.org/linuxppc/
and didn't make it with the ppc32 equivalent. Thanks.
include/asm-ppc/io.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
---
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -453,11 +453,21 @@ static inline unsigned int ioread16(void
return readw(addr);
}
+static inline unsigned int ioread16be(void __iomem *addr)
+{
+ return in_be16(addr);
+}
+
static inline unsigned int ioread32(void __iomem *addr)
{
return readl(addr);
}
+static inline unsigned int ioread32be(void __iomem *addr)
+{
+ return in_be32(addr);
+}
+
static inline void iowrite8(u8 val, void __iomem *addr)
{
writeb(val, addr);
@@ -468,11 +478,21 @@ static inline void iowrite16(u16 val, vo
writew(val, addr);
}
+static inline void iowrite16be(u16 val, void __iomem *addr)
+{
+ out_be16(addr, val);
+}
+
static inline void iowrite32(u32 val, void __iomem *addr)
{
writel(val, addr);
}
+static inline void iowrite32be(u32 val, void __iomem *addr)
+{
+ out_be32(addr, val);
+}
+
static inline void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
_insb(addr, dst, count);
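With the *be accessors available, a driver for a peripheral whose registers are documented as big-endian can use them directly instead of wrapping ioread32()/iowrite32() in byte swaps. A small sketch; the MYDEV_* register names and the priv->regs mapping are made up:

	u32 id;

	/* read a device ID register that the hardware exposes big-endian */
	id = ioread32be(priv->regs + MYDEV_REG_ID);

	/* trigger a reset through a big-endian control register */
	iowrite32be(MYDEV_CTRL_RESET, priv->regs + MYDEV_REG_CTRL);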


@ -1,24 +0,0 @@
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -100,6 +100,11 @@ config SPI_BUTTERFLY
inexpensive battery powered microcontroller evaluation board.
This same cable can be used to flash new firmware.
+config SPI_GPIO
+ tristate "GPIO API based bitbanging SPI controller"
+ depends on SPI_MASTER && GENERIC_GPIO && EXPERIMENTAL
+ select SPI_BITBANG
+
config SPI_IMX
tristate "Freescale iMX SPI controller"
depends on SPI_MASTER && ARCH_IMX && EXPERIMENTAL
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
+obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o


@ -1,178 +0,0 @@
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,5 +42,15 @@ config W1_MASTER_DS1WM
in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
hx4700.
+config W1_MASTER_GPIO
+ tristate "GPIO 1-wire busmaster"
+ depends on GENERIC_GPIO
+ help
+ Say Y here if you want to communicate with your 1-wire devices using
+ GPIO pins. This driver uses the GPIO API to control the wire.
+
+ This support is also available as a module. If so, the module
+ will be called w1-gpio.ko.
+
endmenu
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_W1_MASTER_MATROX) += matro
obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o
obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
+obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
--- /dev/null
+++ b/drivers/w1/masters/w1-gpio.c
@@ -0,0 +1,124 @@
+/*
+ * w1-gpio - GPIO w1 bus master driver
+ *
+ * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/w1-gpio.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+
+#include <asm/gpio.h>
+
+static void w1_gpio_write_bit_dir(void *data, u8 bit)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ if (bit)
+ gpio_direction_input(pdata->pin);
+ else
+ gpio_direction_output(pdata->pin, 0);
+}
+
+static void w1_gpio_write_bit_val(void *data, u8 bit)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ gpio_set_value(pdata->pin, bit);
+}
+
+static u8 w1_gpio_read_bit(void *data)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ return gpio_get_value(pdata->pin);
+}
+
+static int __init w1_gpio_probe(struct platform_device *pdev)
+{
+ struct w1_bus_master *master;
+ struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+
+ if (!pdata)
+ return -ENXIO;
+
+ master = kzalloc(sizeof(struct w1_bus_master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ err = gpio_request(pdata->pin, "w1");
+ if (err)
+ goto free_master;
+
+ master->data = pdata;
+ master->read_bit = w1_gpio_read_bit;
+
+ if (pdata->is_open_drain) {
+ gpio_direction_output(pdata->pin, 1);
+ master->write_bit = w1_gpio_write_bit_val;
+ } else {
+ gpio_direction_input(pdata->pin);
+ master->write_bit = w1_gpio_write_bit_dir;
+ }
+
+ err = w1_add_master_device(master);
+ if (err)
+ goto free_gpio;
+
+ platform_set_drvdata(pdev, master);
+
+ return 0;
+
+ free_gpio:
+ gpio_free(pdata->pin);
+ free_master:
+ kfree(master);
+
+ return err;
+}
+
+static int __exit w1_gpio_remove(struct platform_device *pdev)
+{
+ struct w1_bus_master *master = platform_get_drvdata(pdev);
+ struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+
+ w1_remove_master_device(master);
+ gpio_free(pdata->pin);
+ kfree(master);
+
+ return 0;
+}
+
+static struct platform_driver w1_gpio_driver = {
+ .driver = {
+ .name = "w1-gpio",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(w1_gpio_remove),
+};
+
+static int __init w1_gpio_init(void)
+{
+ return platform_driver_probe(&w1_gpio_driver, w1_gpio_probe);
+}
+
+static void __exit w1_gpio_exit(void)
+{
+ platform_driver_unregister(&w1_gpio_driver);
+}
+
+module_init(w1_gpio_init);
+module_exit(w1_gpio_exit);
+
+MODULE_DESCRIPTION("GPIO w1 bus master driver");
+MODULE_AUTHOR("Ville Syrjala <syrjala@sci.fi>");
+MODULE_LICENSE("GPL");
--- /dev/null
+++ b/include/linux/w1-gpio.h
@@ -0,0 +1,23 @@
+/*
+ * w1-gpio interface to platform code
+ *
+ * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_W1_GPIO_H
+#define _LINUX_W1_GPIO_H
+
+/**
+ * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
+ * @pin: GPIO pin to use
+ * @is_open_drain: GPIO pin is configured as open drain
+ */
+struct w1_gpio_platform_data {
+ unsigned int pin;
+ unsigned int is_open_drain:1;
+};
+
+#endif /* _LINUX_W1_GPIO_H */
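The bus master above only attaches if the board registers a "w1-gpio" platform device carrying a struct w1_gpio_platform_data. A minimal sketch of that board-side glue (the GPIO number is made up):

static struct w1_gpio_platform_data myboard_w1_pdata = {
	.pin		= 14,	/* whichever GPIO the 1-wire line is routed to */
	.is_open_drain	= 0,	/* let the driver toggle pin direction instead */
};

static struct platform_device myboard_w1_device = {
	.name	= "w1-gpio",
	.id	= -1,
	.dev	= {
		.platform_data = &myboard_w1_pdata,
	},
};

static int __init myboard_w1_init(void)
{
	return platform_device_register(&myboard_w1_device);
}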


@ -1,85 +0,0 @@
This patch ports ssb to the 2.6.23 kernel.
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -223,3 +223,5 @@ void ssb_mipscore_init(struct ssb_mipsco
ssb_mips_serial_init(mcore);
ssb_mips_flash_detect(mcore);
}
+
+EXPORT_SYMBOL(ssb_mips_irq);
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -352,4 +352,19 @@ struct parisc_device_id {
#define PA_HVERSION_ANY_ID 0xffff
#define PA_SVERSION_ANY_ID 0xffffffff
+/* SSB core, see drivers/ssb/ */
+struct ssb_device_id {
+ __u16 vendor;
+ __u16 coreid;
+ __u8 revision;
+};
+#define SSB_DEVICE(_vendor, _coreid, _revision) \
+ { .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
+#define SSB_DEVTABLE_END \
+ { 0, },
+
+#define SSB_ANY_VENDOR 0xFFFF
+#define SSB_ANY_ID 0xFFFF
+#define SSB_ANY_REV 0xFF
+
#endif /* LINUX_MOD_DEVICETABLE_H */
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -321,14 +321,16 @@ static int ssb_bus_match(struct device *
return 0;
}
-static int ssb_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ssb_device_uevent(struct device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size)
{
struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
+ int i = 0, len = 0;
if (!dev)
return -ENODEV;
- return add_uevent_var(env,
+ return add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
"MODALIAS=ssb:v%04Xid%04Xrev%02X",
ssb_dev->id.vendor, ssb_dev->id.coreid,
ssb_dev->id.revision);
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -391,12 +391,14 @@ int ssb_bus_scan(struct ssb_bus *bus,
if (bus->bustype == SSB_BUSTYPE_PCI) {
/* Ignore PCI cores on PCI-E cards.
* Ignore PCI-E cores on PCI cards. */
+ //FIXME kernel 2.6.23 doesn't have is_pcie. Add this check back when updating to 2.6.24 or later.
if (dev->id.coreid == SSB_DEV_PCI) {
- if (bus->host_pci->is_pcie)
- continue;
+//FIXME if (bus->host_pci->is_pcie)
+//FIXME continue;
} else {
- if (!bus->host_pci->is_pcie)
- continue;
+ continue; //FIXME
+//FIXME if (!bus->host_pci->is_pcie)
+//FIXME continue;
}
}
if (bus->pcicore.dev) {
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -362,7 +362,7 @@ static int pcicore_is_in_hostmode(struct
chipid_top != 0x5300)
return 0;
- if (bus->sprom.r1.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
+ if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
return 0;
/* The 200-pin BCM4712 package does not bond out PCI. Even when


@ -1,14 +0,0 @@
--- a/init/main.c
+++ b/init/main.c
@@ -802,10 +802,7 @@ static int noinline init_post(void)
printk(KERN_WARNING "Failed to execute %s. Attempting "
"defaults...\n", execute_command);
}
- run_init_process("/sbin/init");
- run_init_process("/etc/init");
- run_init_process("/bin/init");
- run_init_process("/bin/sh");
+ run_init_process("/etc/preinit");
panic("No init found. Try passing init= option to kernel.");
}