preliminary 3.12 support

Signed-off-by: Imre Kaloz <kaloz@openwrt.org>

SVN-Revision: 38528
lede-17.01
Imre Kaloz 2013-10-24 13:50:11 +00:00
parent 4341ea4037
commit e72c7e17a4
175 changed files with 33916 additions and 0 deletions

File diff suppressed because it is too large


@@ -0,0 +1,37 @@
From 7d75ef5f56dbc2bedb9a893eb9ecfd03c456338d Mon Sep 17 00:00:00 2001
From: Flavio Silveira <fggs@terra.com.br>
Date: Thu, 29 Aug 2013 08:51:48 -0300
Subject: [PATCH v2] mtd: m25p80: Add support for ESMT F25L32PA
This flash chip is used in the D-Link DIR-610 A1 router board
and maybe several others, yet it is not supported upstream.
So add support for it according to the datasheet [0], making it easier
to support other boards using this flash chip in the future.
Changelog v2:
- Better description
- Datasheet link at the bottom, similar to other patches.
[0] http://www.esmt.com.tw/DB/manager/upload/F25L32PA.pdf
Signed-off-by: Flavio Silveira <fggs@terra.com.br>
---
drivers/mtd/devices/m25p80.c | 3 +++
1 file changed, 3 insertions(+)
http://patchwork.ozlabs.org/patch/272438/
http://lists.infradead.org/pipermail/linux-mtd/2013-September/048511.html
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -756,6 +756,9 @@ static const struct spi_device_id m25p_i
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
+
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },


@@ -0,0 +1,36 @@
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -468,8 +468,10 @@ static const struct usb_device_id hso_id
{USB_DEVICE(0x0af0, 0x8400)},
{USB_DEVICE(0x0af0, 0x8600)},
{USB_DEVICE(0x0af0, 0x8800)},
- {USB_DEVICE(0x0af0, 0x8900)},
- {USB_DEVICE(0x0af0, 0x9000)},
+ {USB_DEVICE(0x0af0, 0x8900)}, /* GTM 67xx */
+ {USB_DEVICE(0x0af0, 0x9000)}, /* GTM 66xx */
+ {USB_DEVICE(0x0af0, 0x9200)}, /* GTM 67xxWFS */
+ {USB_DEVICE(0x0af0, 0x9300)}, /* GTM 66xxWFS */
{USB_DEVICE(0x0af0, 0xd035)},
{USB_DEVICE(0x0af0, 0xd055)},
{USB_DEVICE(0x0af0, 0xd155)},
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1259,6 +1259,18 @@ UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
0 ),
+UNUSUAL_DEV( 0x0af0, 0x9200, 0x0000, 0x0000,
+ "Option",
+ "Globetrotter 67xxWFS SD-Card",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x9300, 0x0000, 0x0000,
+ "Option",
+ "Globetrotter 66xxWFS SD-Card",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 0 ),
+
UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
"Option",
"GI 070x SD-Card",


@@ -0,0 +1,10 @@
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -48,6 +48,7 @@ extern char * strstr(const char * s1, co
#ifdef CONFIG_KERNEL_XZ
#define memmove memmove
#define memcpy memcpy
+extern char * strstr(const char *, const char *);
#include "../../../../lib/decompress_unxz.c"
#endif


@@ -0,0 +1,66 @@
From 0db3db45f5bd6df4bdc03bbd5dec672e16164c4e Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Mon, 12 Nov 2012 12:31:55 +0100
Subject: [PATCH] MIPS: decompressor: fix build failure on memcpy() in
decompress.c
The decompress.c file includes linux/kernel.h which causes the following
inclusion chain to be pulled:
linux/kernel.h ->
linux/dynamic_debug.h ->
linux/string.h ->
asm/string.h
We end up having the GCC builtin + architecture-specific memcpy() expanding
into this:
void *({ size_t __len = (size_t n); void *__ret; if
(__builtin_constant_p(size_t n) && __len >= 64) __ret = memcpy((void *dest),
(const void *src), __len); else __ret = __builtin_memcpy((void *dest), (const
void *src), __len); __ret; })
{
[memcpy implementation in decompress.c starts here]
int i;
const char *s = src;
char *d = dest;
for (i = 0; i < n; i++)
d[i] = s[i];
return dest;
}
raising the following compilation error:
arch/mips/boot/compressed/decompress.c:46:8: error: expected identifier or '('
before '{' token
There are at least three possibilities to fix this issue:
1) define _LINUX_STRING_H_ at the beginning of decompress.c to prevent
further linux/string.h definitions and declarations from being used, and add
an explicit strstr() declaration for linux/dynamic_debug.h
2) remove the inclusion of linux/kernel.h because we actually use no definition
or declaration from this header file
3) undefine memcpy or re-define memcpy to memcpy thus resulting in picking up
the local memcpy() implementation to this compilation unit
This patch uses the second option, which is the least intrusive one.
Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
arch/mips/boot/compressed/decompress.c | 2 --
1 file changed, 2 deletions(-)
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -10,9 +10,7 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
-
#include <linux/types.h>
-#include <linux/kernel.h>
#include <asm/addrspace.h>
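
As an aside (not kernel code), a minimal standalone sketch of the failure mode
described above, assuming a header has already defined a function-like memcpy
macro the way the MIPS <asm/string.h> does:

#include <stdio.h>

/* stand-in for the macro that decompress.c ends up pulling in via
 * linux/kernel.h -> linux/dynamic_debug.h -> linux/string.h -> asm/string.h */
#define memcpy(dest, src, n) __builtin_memcpy((dest), (src), (n))

/*
 * decompress.c carries its own definition:
 *
 *     void *memcpy(void *dest, const void *src, size_t n) { ... }
 *
 * With the function-like macro above in scope, that definition is expanded
 * into roughly
 *
 *     void *__builtin_memcpy((dest), (src), (n)) { ... }
 *
 * which is the "expected identifier or '(' before '{' token" error quoted
 * above.  Not including linux/kernel.h (option 2, what the patch does) keeps
 * the macro out of the compilation unit, so the local definition parses again.
 */

int main(void)
{
	char out[6];

	memcpy(out, "hello", sizeof(out));	/* expands to __builtin_memcpy() */
	printf("%s\n", out);
	return 0;
}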

File diff suppressed because it is too large


@@ -0,0 +1,81 @@
From 1e311820ec3055e3f08e687de6564692a7cec675 Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Mon, 28 Jan 2013 20:06:29 +0100
Subject: [PATCH 11/12] USB: EHCI: add ignore_oc flag to disable overcurrent
checking
This patch adds an ignore_oc flag which can be set by EHCI controller
drivers that do not support, or want to disable, overcurrent checking.
The EHCI platform data in include/linux/usb/ehci_pdriver.h is also
augmented to take advantage of this new flag.
Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
drivers/usb/host/ehci-hcd.c | 2 +-
drivers/usb/host/ehci-hub.c | 4 ++--
drivers/usb/host/ehci-platform.c | 1 +
drivers/usb/host/ehci.h | 1 +
include/linux/usb/ehci_pdriver.h | 1 +
5 files changed, 6 insertions(+), 3 deletions(-)
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -630,7 +630,7 @@ static int ehci_run (struct usb_hcd *hcd
"USB %x.%x started, EHCI %x.%02x%s\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
temp >> 8, temp & 0xff,
- ignore_oc ? ", overcurrent ignored" : "");
+ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
ehci_writel(ehci, INTR_MASK,
&ehci->regs->intr_enable); /* Turn On Interrupts */
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -623,7 +623,7 @@ ehci_hub_status_data (struct usb_hcd *hc
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
- if (!ignore_oc)
+ if (!ignore_oc && !ehci->ignore_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
@@ -983,7 +983,7 @@ static int ehci_hub_control (
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
- if ((temp & PORT_OCC) && !ignore_oc){
+ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -47,6 +47,7 @@ static int ehci_platform_reset(struct us
ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
ehci->big_endian_desc = pdata->big_endian_desc;
ehci->big_endian_mmio = pdata->big_endian_mmio;
+ ehci->ignore_oc = pdata->ignore_oc;
if (pdata->pre_setup) {
retval = pdata->pre_setup(hcd);
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -203,6 +203,7 @@ struct ehci_hcd { /* one per controlle
unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
unsigned need_oc_pp_cycle:1; /* MPC834X port power */
+ unsigned ignore_oc:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -45,6 +45,7 @@ struct usb_ehci_pdata {
unsigned big_endian_desc:1;
unsigned big_endian_mmio:1;
unsigned no_io_watchdog:1;
+ unsigned ignore_oc:1;
/* Turn on all power and clocks */
int (*power_on)(struct platform_device *pdev);
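
As an aside (not part of the patch set), a minimal sketch of how a board file
could use the new flag through the generic ehci-platform driver; the board_*
names are hypothetical and the MMIO/IRQ resources are omitted:

#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>

static struct usb_ehci_pdata board_ehci_pdata = {
	.ignore_oc	= 1,	/* the new flag: controller has no usable overcurrent signal */
};

static struct platform_device board_ehci_device = {
	.name	= "ehci-platform",
	.id	= -1,
	.dev	= {
		.platform_data	= &board_ehci_pdata,
	},
};

ehci_platform_reset() then copies pdata->ignore_oc into ehci->ignore_oc, as shown
in the ehci-platform.c hunk above.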


@@ -0,0 +1,54 @@
From: Stephen Hemminger <stephen@networkplumber.org>
Subject: bridge: allow reception on disabled port
When an ethernet device is enslaved to a bridge, and the bridge STP
detects loss of carrier (or operational state down), then normally
packet reception is blocked.
This breaks control applications like WPA which may be expecting to
receive packets to negotiate to bring the link up. The bridge needs to
block forwarding packets from these disabled ports, but there is no
hard requirement to not allow local packet delivery.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -144,11 +144,13 @@ drop:
static int br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
- u16 vid = 0;
+ if (p->state != BR_STATE_DISABLED) {
+ u16 vid = 0;
- br_vlan_get_tag(skb, &vid);
- if (p->flags & BR_LEARNING)
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+ br_vlan_get_tag(skb, &vid);
+ if (p->flags & BR_LEARNING)
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+ }
return 0; /* process further */
}
@@ -218,6 +220,18 @@ rx_handler_result_t br_handle_frame(stru
forward:
switch (p->state) {
+ case BR_STATE_DISABLED:
+ if (ether_addr_equal(p->br->dev->dev_addr, dest))
+ skb->pkt_type = PACKET_HOST;
+
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_handle_local_finish))
+ break;
+
+ BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
+ br_pass_frame_up(skb);
+ break;
+
case BR_STATE_FORWARDING:
rhook = rcu_dereference(br_should_route_hook);
if (rhook) {


@@ -0,0 +1,185 @@
From 151c4e4a06b0b8d16c2fd392bb0e33868b12357f Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@openwrt.org>
Date: Mon, 12 Aug 2013 12:45:52 +0200
Subject: [PATCH] MIPS: remove unnecessary platform dma helper functions
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h | 12 ------------
arch/mips/include/asm/mach-generic/dma-coherence.h | 10 ----------
arch/mips/include/asm/mach-ip27/dma-coherence.h | 10 ----------
arch/mips/include/asm/mach-ip32/dma-coherence.h | 11 -----------
arch/mips/include/asm/mach-jazz/dma-coherence.h | 10 ----------
arch/mips/include/asm/mach-loongson/dma-coherence.h | 10 ----------
arch/mips/include/asm/mach-powertv/dma-coherence.h | 10 ----------
arch/mips/mm/dma-default.c | 4 +---
8 files changed, 1 insertion(+), 76 deletions(-)
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -52,23 +52,11 @@ static inline int plat_dma_supported(str
return 0;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
- BUG();
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 1;
}
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- BUG();
- return 0;
-}
-
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -47,16 +47,6 @@ static inline int plat_dma_supported(str
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
#ifdef CONFIG_DMA_COHERENT
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -58,16 +58,6 @@ static inline int plat_dma_supported(str
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 1; /* IP27 non-cohernet mode is unsupported */
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -80,17 +80,6 @@ static inline int plat_dma_supported(str
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
- return;
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 0; /* IP32 is non-cohernet */
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -48,16 +48,6 @@ static inline int plat_dma_supported(str
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -53,16 +53,6 @@ static inline int plat_dma_supported(str
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -99,16 +99,6 @@ static inline int plat_dma_supported(str
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -297,7 +297,6 @@ static void mips_dma_sync_single_for_cpu
static void mips_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
__dma_sync(dma_addr_to_page(dev, dma_handle),
dma_handle & ~PAGE_MASK, size, direction);
@@ -327,7 +326,7 @@ static void mips_dma_sync_sg_for_device(
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return plat_dma_mapping_error(dev, dma_addr);
+ return 0;
}
int mips_dma_supported(struct device *dev, u64 mask)
@@ -340,7 +339,6 @@ void dma_cache_sync(struct device *dev,
{
BUG_ON(direction == DMA_NONE);
- plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
__dma_sync_virtual(vaddr, size, direction);
}


@@ -0,0 +1,91 @@
From d593f8fc627f8cdaee9c14e4d22b0770a09baaf1 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@openwrt.org>
Date: Thu, 15 Aug 2013 10:47:47 +0200
Subject: [PATCH] MIPS: improve checks for noncoherent DMA
Only one MIPS development board actually supports enabling/disabling DMA
coherency at runtime, so it's not a good idea to push the overhead of
checking that configuration setting onto every other supported target as
well.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
arch/mips/Kconfig | 6 +++++-
arch/mips/include/asm/dma-coherence.h | 7 +++++++
arch/mips/include/asm/mach-generic/dma-coherence.h | 4 ----
arch/mips/mm/dma-default.c | 2 ++
4 files changed, 14 insertions(+), 5 deletions(-)
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -299,7 +299,7 @@ config MIPS_MALTA
select CEVT_R4K
select CSRC_R4K
select CSRC_GIC
- select DMA_NONCOHERENT
+ select DMA_MAYBE_COHERENT
select GENERIC_ISA_DMA
select HAVE_PCSPKR_PLATFORM
select IRQ_CPU
@@ -912,6 +912,10 @@ config FW_CFE
config ARCH_DMA_ADDR_T_64BIT
def_bool (HIGHMEM && 64BIT_PHYS_ADDR) || 64BIT
+config DMA_MAYBE_COHERENT
+ select DMA_NONCOHERENT
+ bool
+
config DMA_COHERENT
bool
--- a/arch/mips/include/asm/dma-coherence.h
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -9,7 +9,16 @@
#ifndef __ASM_DMA_COHERENCE_H
#define __ASM_DMA_COHERENCE_H
+#ifdef CONFIG_DMA_MAYBE_COHERENT
extern int coherentio;
extern int hw_coherentio;
+#else
+#ifdef CONFIG_DMA_COHERENT
+#define coherentio 1
+#else
+#define coherentio 0
+#endif
+#define hw_coherentio 0
+#endif /* CONFIG_DMA_MAYBE_COHERENT */
#endif
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -49,11 +49,7 @@ static inline int plat_dma_supported(str
static inline int plat_device_is_coherent(struct device *dev)
{
-#ifdef CONFIG_DMA_COHERENT
- return 1;
-#else
return coherentio;
-#endif
}
#ifdef CONFIG_SWIOTLB
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -23,6 +23,7 @@
#include <dma-coherence.h>
+#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0; /* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
@@ -42,6 +43,7 @@ static int __init setnocoherentio(char *
return 0;
}
early_param("nocoherentio", setnocoherentio);
+#endif
static inline struct page *dma_addr_to_page(struct device *dev,
dma_addr_t dma_addr)
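
As an aside (an illustration of the intent, not part of the patch), a standalone
sketch of what the new constant buys: when coherentio is a #define rather than a
variable, the per-operation coherency check can be folded away at compile time:

#include <stdio.h>

/* mirrors the CONFIG_DMA_COHERENT leg of the new asm/dma-coherence.h */
#define coherentio 1

static int plat_device_is_coherent(void)
{
	return coherentio;	/* compile-time constant, not a variable load */
}

int main(void)
{
	/* folds to if (0): the sync path disappears from the object code */
	if (!plat_device_is_coherent())
		printf("would call __dma_sync() here\n");

	printf("device treated as DMA-coherent\n");
	return 0;
}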


@@ -0,0 +1,668 @@
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1394,6 +1394,7 @@ config CPU_CAVIUM_OCTEON
select LIBFDT
select USE_OF
select USB_EHCI_BIG_ENDIAN_MMIO
+ select SYS_HAS_DMA_OPS
help
The Cavium Octeon processor is a highly integrated chip containing
many ethernet hardware widgets for networking tasks. The processor
@@ -1614,6 +1615,9 @@ config SYS_HAS_CPU_XLR
config SYS_HAS_CPU_XLP
bool
+config SYS_HAS_DMA_OPS
+ bool
+
#
# CPU may reorder R->R, R->W, W->R, W->W
# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -1,9 +1,16 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
#include <asm/scatterlist.h>
#include <asm/dma-coherence.h>
#include <asm/cache.h>
+#include <asm/cpu-type.h>
#include <asm-generic/dma-coherent.h>
#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
@@ -12,12 +19,48 @@
extern struct dma_map_ops *mips_dma_map_ops;
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs);
+
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
+#ifdef CONFIG_SYS_HAS_DMA_OPS
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
else
return mips_dma_map_ops;
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ */
+
+static inline int cpu_needs_post_dma_flush(struct device *dev)
+{
+#ifndef CONFIG_SYS_HAS_CPU_R10000
+ return 0;
+#endif
+ return !plat_device_is_coherent(dev) &&
+ (boot_cpu_type() == CPU_R10000 ||
+ boot_cpu_type() == CPU_R12000 ||
+ boot_cpu_type() == CPU_BMIPS5000);
+}
+
+static inline struct page *dma_addr_to_page(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return pfn_to_page(
+ plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -30,12 +73,312 @@ static inline bool dma_capable(struct de
static inline void dma_mark_clean(void *addr, size_t size) {}
-#include <asm-generic/dma-mapping-common.h>
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+ struct page *page = virt_to_page(ptr);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ } else {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, offset, size, dir);
+
+ addr = plat_map_dma_mem_page(dev, page) + offset;
+ }
+ debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->unmap_page(dev, addr, size, dir, attrs);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+
+ plat_unmap_dma_mem(dev, addr, size, dir);
+ }
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ } else {
+ for_each_sg(sg, s, nents, i) {
+ struct page *page = sg_page(s);
+
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, s->offset, s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ s->dma_address =
+ plat_map_dma_mem_page(dev, page) + s->offset;
+ }
+ ents = nents;
+ }
+ debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+ return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+ if (ops) {
+ ops->unmap_sg(dev, sg, nents, dir, attrs);
+ return;
+ }
+
+ for_each_sg(sg, s, nents, i) {
+ if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
+ }
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ } else {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, offset, size, dir);
+
+ addr = plat_map_dma_mem_page(dev, page) + offset;
+ }
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->unmap_page(dev, addr, size, dir, NULL);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+
+ plat_unmap_dma_mem(dev, addr, size, dir);
+ }
+ debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_cpu(dev, addr, size, dir);
+ else if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_device(dev, addr, size, dir);
+ else if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+ else if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr + offset),
+ (addr + offset) & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_device(dev, addr + offset, size, dir);
+ else if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, addr + offset),
+ (addr + offset) & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ else if (cpu_needs_post_dma_flush(dev)) {
+ for_each_sg(sg, s, nelems, i)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ }
+ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_sg_for_device(dev, sg, nelems, dir);
+ else if (!plat_device_is_coherent(dev)) {
+ for_each_sg(sg, s, nelems, i)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ }
+ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops && ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops && ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
static inline int dma_supported(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
- return ops->dma_supported(dev, mask);
+ if (ops)
+ return ops->dma_supported(dev, mask);
+ return plat_dma_supported(dev, mask);
}
static inline int dma_mapping_error(struct device *dev, u64 mask)
@@ -43,7 +386,9 @@ static inline int dma_mapping_error(stru
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_mapping_error(dev, mask);
- return ops->mapping_error(dev, mask);
+ if (ops)
+ return ops->mapping_error(dev, mask);
+ return 0;
}
static inline int
@@ -69,7 +414,11 @@ static inline void *dma_alloc_attrs(stru
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
- ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+ if (ops)
+ ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+ else
+ ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
+ attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
@@ -84,7 +433,10 @@ static inline void dma_free_attrs(struct
{
struct dma_map_ops *ops = get_dma_ops(dev);
- ops->free(dev, size, vaddr, dma_handle, attrs);
+ if (ops)
+ ops->free(dev, size, vaddr, dma_handle, attrs);
+ else
+ mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -25,7 +25,7 @@
#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0; /* User defined DMA coherency from command line. */
-EXPORT_SYMBOL_GPL(coherentio);
+EXPORT_SYMBOL(coherentio);
int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
static int __init setcoherentio(char *str)
@@ -45,30 +45,6 @@ static int __init setnocoherentio(char *
early_param("nocoherentio", setnocoherentio);
#endif
-static inline struct page *dma_addr_to_page(struct device *dev,
- dma_addr_t dma_addr)
-{
- return pfn_to_page(
- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
-{
- return !plat_device_is_coherent(dev) &&
- (boot_cpu_type() == CPU_R10000 ||
- boot_cpu_type() == CPU_R12000 ||
- boot_cpu_type() == CPU_BMIPS5000);
-}
-
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
gfp_t dma_flag;
@@ -124,8 +100,9 @@ void *dma_alloc_noncoherent(struct devic
}
EXPORT_SYMBOL(dma_alloc_noncoherent);
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret;
@@ -149,6 +126,7 @@ static void *mips_dma_alloc_coherent(str
return ret;
}
+EXPORT_SYMBOL(mips_dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
@@ -159,8 +137,8 @@ void dma_free_noncoherent(struct device
}
EXPORT_SYMBOL(dma_free_noncoherent);
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr;
int order = get_order(size);
@@ -175,6 +153,7 @@ static void mips_dma_free_coherent(struc
free_pages(addr, get_order(size));
}
+EXPORT_SYMBOL(mips_dma_free_coherent);
static inline void __dma_sync_virtual(void *addr, size_t size,
enum dma_data_direction direction)
@@ -203,8 +182,8 @@ static inline void __dma_sync_virtual(vo
* If highmem is not configured then the bulk of this loop gets
* optimized out.
*/
-static inline void __dma_sync(struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction)
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
size_t left = size;
@@ -233,108 +212,7 @@ static inline void __dma_sync(struct pag
left -= len;
} while (left);
}
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_addr),
- dma_addr & ~PAGE_MASK, size, direction);
-
- plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- int i;
-
- for (i = 0; i < nents; i++, sg++) {
- if (!plat_device_is_coherent(dev))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
- sg->offset;
- }
-
- return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(page, offset, size, direction);
-
- return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- int i;
-
- for (i = 0; i < nhwentries; i++, sg++) {
- if (!plat_device_is_coherent(dev) &&
- direction != DMA_TO_DEVICE)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
- }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- int i;
-
- if (cpu_needs_post_dma_flush(dev))
- for (i = 0; i < nelems; i++, sg++)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- int i;
-
- if (!plat_device_is_coherent(dev))
- for (i = 0; i < nelems; i++, sg++)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
- return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
@@ -347,23 +225,10 @@ void dma_cache_sync(struct device *dev,
EXPORT_SYMBOL(dma_cache_sync);
-static struct dma_map_ops mips_default_dma_map_ops = {
- .alloc = mips_dma_alloc_coherent,
- .free = mips_dma_free_coherent,
- .map_page = mips_dma_map_page,
- .unmap_page = mips_dma_unmap_page,
- .map_sg = mips_dma_map_sg,
- .unmap_sg = mips_dma_unmap_sg,
- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
- .sync_single_for_device = mips_dma_sync_single_for_device,
- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
- .sync_sg_for_device = mips_dma_sync_sg_for_device,
- .mapping_error = mips_dma_mapping_error,
- .dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
EXPORT_SYMBOL(mips_dma_map_ops);
+#endif
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)


@@ -0,0 +1,11 @@
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -164,7 +164,7 @@ else
# annotated or signed tagged state (as git describe only
# looks at signed or annotated tags - git tag -a/-s) and
# LOCALVERSION= is not specified
- if test "${LOCALVERSION+set}" != "set"; then
+ if test "${CONFIG_LOCALVERSION+set}" != "set"; then
scm=$(scm_version --short)
res="$res${scm:++}"
fi


@@ -0,0 +1,14 @@
--- a/Makefile
+++ b/Makefile
@@ -571,9 +571,9 @@ endif # $(dot-config)
all: vmlinux
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os $(EXTRA_OPTIMIZATION) $(call cc-disable-warning,maybe-uninitialized,)
else
-KBUILD_CFLAGS += -O2
+KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION)
endif
include $(srctree)/arch/$(SRCARCH)/Makefile


@@ -0,0 +1,11 @@
--- a/Makefile
+++ b/Makefile
@@ -379,7 +379,7 @@ KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
-KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)


@@ -0,0 +1,108 @@
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -54,6 +54,7 @@ static struct text_range text_ranges[] =
static struct sym_entry *table;
static unsigned int table_size, table_cnt;
static int all_symbols = 0;
+static int uncompressed = 0;
static char symbol_prefix_char = '\0';
int token_profit[0x10000];
@@ -360,6 +361,9 @@ static void write_src(void)
free(markers);
+ if (uncompressed)
+ return;
+
output_label("kallsyms_token_table");
off = 0;
for (i = 0; i < 256; i++) {
@@ -418,6 +422,9 @@ static void *find_token(unsigned char *s
{
int i;
+ if (uncompressed)
+ return NULL;
+
for (i = 0; i < len - 1; i++) {
if (str[i] == token[0] && str[i+1] == token[1])
return &str[i];
@@ -490,6 +497,9 @@ static void optimize_result(void)
{
int i, best;
+ if (uncompressed)
+ return;
+
/* using the '\0' symbol last allows compress_symbols to use standard
* fast string functions */
for (i = 255; i >= 0; i--) {
@@ -646,7 +656,9 @@ int main(int argc, char **argv)
if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
p++;
symbol_prefix_char = *p;
- } else
+ } else if (strcmp(argv[i], "--uncompressed") == 0)
+ uncompressed = 1;
+ else
usage();
}
} else if (argc != 1)
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1289,6 +1289,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
the unaligned access emulation.
see arch/parisc/kernel/unaligned.c for reference
+config KALLSYMS_UNCOMPRESSED
+ bool "Keep kallsyms uncompressed"
+ depends on KALLSYMS
+ help
+ Normally kallsyms contains compressed symbols (using a token table),
+ reducing the uncompressed kernel image size. Keeping the symbol table
+ uncompressed significantly improves the size of this part in compressed
+ kernel images.
+
+ Say N unless you need compressed kernel images to be small.
+
config HAVE_PCSPKR_PLATFORM
bool
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,6 +82,10 @@ kallsyms()
kallsymopt="${kallsymopt} --all-symbols"
fi
+ if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then
+ kallsymopt="${kallsymopt} --uncompressed"
+ fi
+
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -108,6 +108,11 @@ static unsigned int kallsyms_expand_symb
* For every byte on the compressed symbol data, copy the table
* entry for that byte.
*/
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
+ memcpy(result, data + 1, len - 1);
+ result += len - 1;
+ len = 0;
+#endif
while (len) {
tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
data++;
@@ -140,6 +145,9 @@ tail:
*/
static char kallsyms_get_symbol_type(unsigned int off)
{
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
+ return kallsyms_names[off + 1];
+#endif
/*
* Get just the first code, look it up in the token table,
* and return the first char from this token.


@@ -0,0 +1,200 @@
From: Felix Fietkau <nbd@openwrt.org>
Subject: [PATCH] build: add a hack for removing non-essential module info
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -83,7 +83,7 @@ void sort_extable(struct exception_table
void sort_main_extable(void);
void trim_init_extable(struct module *m);
-#ifdef MODULE
+#if defined(MODULE) && !defined(CONFIG_MODULE_STRIPPED)
#define MODULE_GENERIC_TABLE(gtype,name) \
extern const struct gtype##_id __mod_##gtype##_table \
__attribute__ ((unused, alias(__stringify(name))))
@@ -94,9 +94,10 @@ extern const struct gtype##_id __mod_##g
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+#define MODULE_INFO_STRIP(tag, info) __MODULE_INFO_STRIP(tag, tag, info)
/* For userspace: you can also call me... */
-#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
+#define MODULE_ALIAS(_alias) MODULE_INFO_STRIP(alias, _alias)
/* Soft module dependencies. See man modprobe.d for details.
* Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz")
@@ -137,10 +138,10 @@ extern const struct gtype##_id __mod_##g
* Author(s), use "Name <email>" or just "Name", for multiple
* authors use multiple MODULE_AUTHOR() statements/lines.
*/
-#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author)
+#define MODULE_AUTHOR(_author) MODULE_INFO_STRIP(author, _author)
/* What your module does. */
-#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
+#define MODULE_DESCRIPTION(_description) MODULE_INFO_STRIP(description, _description)
#define MODULE_DEVICE_TABLE(type,name) \
MODULE_GENERIC_TABLE(type##_device,name)
@@ -161,7 +162,9 @@ extern const struct gtype##_id __mod_##g
*/
#if defined(MODULE) || !defined(CONFIG_SYSFS)
-#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#define MODULE_VERSION(_version) MODULE_INFO_STRIP(version, _version)
+#elif defined(CONFIG_MODULE_STRIPPED)
+#define MODULE_VERSION(_version) __MODULE_INFO_DISABLED(version)
#else
#define MODULE_VERSION(_version) \
static struct module_version_attribute ___modver_attr = { \
@@ -183,7 +186,7 @@ extern const struct gtype##_id __mod_##g
/* Optional firmware file (or files) needed by the module
* format is simply firmware file name. Multiple firmware
* files require multiple MODULE_FIRMWARE() specifiers */
-#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
+#define MODULE_FIRMWARE(_firmware) MODULE_INFO_STRIP(firmware, _firmware)
/* Given an address, look for it in the exception tables */
const struct exception_table_entry *search_exception_tables(unsigned long add);
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,6 +16,16 @@
/* Chosen so that structs with an unsigned long line up. */
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO_DISABLED(name) \
+ struct __UNIQUE_ID(name) {}
+
+#ifdef CONFIG_MODULE_STRIPPED
+#define __MODULE_INFO_STRIP(tag, name, info) __MODULE_INFO_DISABLED(name)
+#else
+#define __MODULE_INFO_STRIP(tag, name, info) __MODULE_INFO(tag, name, info)
+#endif
+
#ifdef MODULE
#define __MODULE_INFO(tag, name, info) \
static const char __UNIQUE_ID(name)[] \
@@ -23,8 +33,7 @@ static const char __UNIQUE_ID(name)[]
= __stringify(tag) "=" info
#else /* !MODULE */
/* This struct is here for syntactic coherency, it is not used */
-#define __MODULE_INFO(tag, name, info) \
- struct __UNIQUE_ID(name) {}
+#define __MODULE_INFO(tag, name, info) __MODULE_INFO_DISABLED(name)
#endif
#define __MODULE_PARM_TYPE(name, _type) \
__MODULE_INFO(parmtype, name##type, #name ":" _type)
@@ -32,7 +41,7 @@ static const char __UNIQUE_ID(name)[]
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
#define MODULE_PARM_DESC(_parm, desc) \
- __MODULE_INFO(parm, _parm, #_parm ":" desc)
+ __MODULE_INFO_STRIP(parm, _parm, #_parm ":" desc)
struct kernel_param;
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1829,6 +1829,13 @@ config MODULE_SIG_HASH
default "sha384" if MODULE_SIG_SHA384
default "sha512" if MODULE_SIG_SHA512
+config MODULE_STRIPPED
+ bool "Reduce module size"
+ depends on MODULES
+ help
+ Remove module parameter descriptions, author info, version, aliases,
+ device tables, etc.
+
endif # MODULES
config INIT_ALL_POSSIBLE
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2705,6 +2705,7 @@ static struct module *setup_load_info(st
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
+#ifndef CONFIG_MODULE_STRIPPED
const char *modmagic = get_modinfo(info, "vermagic");
int err;
@@ -2731,6 +2732,7 @@ static int check_modinfo(struct module *
" the quality is unknown, you have been warned.\n",
mod->name);
}
+#endif
/* Set up license info based on the info section */
set_license(mod, get_modinfo(info, "license"));
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1707,7 +1707,9 @@ static void read_symbols(char *modname)
symname = info.strtab + sym->st_name;
handle_modversions(mod, &info, sym, symname);
+#ifndef CONFIG_MODULE_STRIPPED
handle_moddevtable(mod, &info, sym, symname);
+#endif
}
if (!is_vmlinux(modname) ||
(is_vmlinux(modname) && vmlinux_section_warnings))
@@ -1851,7 +1853,9 @@ static void add_header(struct buffer *b,
buf_printf(b, "#include <linux/vermagic.h>\n");
buf_printf(b, "#include <linux/compiler.h>\n");
buf_printf(b, "\n");
+#ifndef CONFIG_MODULE_STRIPPED
buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
+#endif
buf_printf(b, "\n");
buf_printf(b, "struct module __this_module\n");
buf_printf(b, "__attribute__((section(\".gnu.linkonce.this_module\"))) = {\n");
@@ -1868,16 +1872,20 @@ static void add_header(struct buffer *b,
static void add_intree_flag(struct buffer *b, int is_intree)
{
+#ifndef CONFIG_MODULE_STRIPPED
if (is_intree)
buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
+#endif
}
static void add_staging_flag(struct buffer *b, const char *name)
{
+#ifndef CONFIG_MODULE_STRIPPED
static const char *staging_dir = "drivers/staging";
if (strncmp(staging_dir, name, strlen(staging_dir)) == 0)
buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n");
+#endif
}
/**
@@ -1970,11 +1978,13 @@ static void add_depends(struct buffer *b
static void add_srcversion(struct buffer *b, struct module *mod)
{
+#ifndef CONFIG_MODULE_STRIPPED
if (mod->srcversion[0]) {
buf_printf(b, "\n");
buf_printf(b, "MODULE_INFO(srcversion, \"%s\");\n",
mod->srcversion);
}
+#endif
}
static void write_if_changed(struct buffer *b, const char *fname)
@@ -2200,7 +2210,9 @@ int main(int argc, char **argv)
add_staging_flag(&buf, mod->name);
err |= add_versions(&buf, mod);
add_depends(&buf, mod, modules);
+#ifndef CONFIG_MODULE_STRIPPED
add_moddevtable(&buf, mod);
+#endif
add_srcversion(&buf, mod);
sprintf(fname, "%s.mod.c", mod->name);
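
As an aside (a hypothetical module, not part of the patch), roughly what gets
stripped once CONFIG_MODULE_STRIPPED=y: the tags below expand to empty
placeholder structs instead of .modinfo strings, while MODULE_LICENSE() still
goes through MODULE_INFO() and is kept — the loader still reads it via
set_license(), as visible in the check_modinfo() hunk above:

#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_AUTHOR("Jane Doe <jane@example.invalid>");	/* stripped */
MODULE_DESCRIPTION("Demo module");			/* stripped */
MODULE_VERSION("1.0");					/* stripped */
MODULE_ALIAS("platform:demo");				/* stripped */
MODULE_LICENSE("GPL");					/* kept */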

File diff suppressed because it is too large


@@ -0,0 +1,51 @@
--- a/tools/include/tools/be_byteshift.h
+++ b/tools/include/tools/be_byteshift.h
@@ -1,6 +1,10 @@
#ifndef _TOOLS_BE_BYTESHIFT_H
#define _TOOLS_BE_BYTESHIFT_H
+#ifndef __linux__
+#include "linux_types.h"
+#endif
+
#include <stdint.h>
static inline uint16_t __get_unaligned_be16(const uint8_t *p)
--- a/tools/include/tools/le_byteshift.h
+++ b/tools/include/tools/le_byteshift.h
@@ -1,6 +1,10 @@
#ifndef _TOOLS_LE_BYTESHIFT_H
#define _TOOLS_LE_BYTESHIFT_H
+#ifndef __linux__
+#include "linux_types.h"
+#endif
+
#include <stdint.h>
static inline uint16_t __get_unaligned_le16(const uint8_t *p)
--- /dev/null
+++ b/tools/include/tools/linux_types.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_TYPES_H
+#define __LINUX_TYPES_H
+
+#include <stdint.h>
+
+typedef uint8_t __u8;
+typedef uint8_t __be8;
+typedef uint8_t __le8;
+
+typedef uint16_t __u16;
+typedef uint16_t __be16;
+typedef uint16_t __le16;
+
+typedef uint32_t __u32;
+typedef uint32_t __be32;
+typedef uint32_t __le32;
+
+typedef uint64_t __u64;
+typedef uint64_t __be64;
+typedef uint64_t __le64;
+
+#endif


@@ -0,0 +1,538 @@
From: Felix Fietkau <nbd@openwrt.org>
use -ffunction-sections, -fdata-sections and --gc-sections
In combination with kernel symbol export stripping this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
#
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
cflags-y += -msoft-float
-LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
cflags-y += -ffreestanding
#
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -67,7 +67,7 @@ SECTIONS
/* Exception table for data bus errors */
__dbe_table : {
__start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
__stop___dbe_table = .;
}
@@ -112,7 +112,7 @@ SECTIONS
. = ALIGN(4);
.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
__mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
__mips_machines_end = .;
}
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -87,7 +87,7 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
+ KEEP(*(__mcount_loc)) \
VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
@@ -95,7 +95,7 @@
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
+ KEEP(*(_ftrace_annotated_branch)) \
VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
@@ -103,7 +103,7 @@
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
+ KEEP(*(_ftrace_branch)) \
VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
@@ -112,7 +112,7 @@
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
@@ -120,7 +120,7 @@
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
*(__tracepoint_str) /* Trace_printk fmt' pointer */ \
@@ -133,7 +133,7 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
@@ -142,8 +142,8 @@
#ifdef CONFIG_CLKSRC_OF
#define CLKSRC_OF_TABLES() . = ALIGN(8); \
VMLINUX_SYMBOL(__clksrc_of_table) = .; \
- *(__clksrc_of_table) \
- *(__clksrc_of_table_end)
+ KEEP(*(__clksrc_of_table)) \
+ KEEP(*(__clksrc_of_table_end))
#else
#define CLKSRC_OF_TABLES()
#endif
@@ -152,8 +152,8 @@
#define IRQCHIP_OF_MATCH_TABLE() \
. = ALIGN(8); \
VMLINUX_SYMBOL(__irqchip_begin) = .; \
- *(__irqchip_of_table) \
- *(__irqchip_of_end)
+ KEEP(*(__irqchip_of_table)) \
+ KEEP(*(__irqchip_of_end))
#else
#define IRQCHIP_OF_MATCH_TABLE()
#endif
@@ -161,8 +161,8 @@
#ifdef CONFIG_COMMON_CLK
#define CLK_OF_TABLES() . = ALIGN(8); \
VMLINUX_SYMBOL(__clk_of_table) = .; \
- *(__clk_of_table) \
- *(__clk_of_table_end)
+ KEEP(*(__clk_of_table)) \
+ KEEP(*(__clk_of_table_end))
#else
#define CLK_OF_TABLES()
#endif
@@ -170,7 +170,7 @@
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;
/* .data section */
@@ -186,16 +186,17 @@
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ *(.data.[a-zA-Z_]*)
/*
* Data section helpers
@@ -249,32 +250,32 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
@@ -283,49 +284,49 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
@@ -339,14 +340,14 @@
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
@@ -365,14 +366,14 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -428,7 +429,7 @@
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__entry_text_start) = .; \
- *(.entry.text) \
+ KEEP(*(.entry.text)) \
VMLINUX_SYMBOL(__entry_text_end) = .;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -456,7 +457,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}
@@ -472,7 +473,7 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
+ KEEP(*(.ctors)) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -516,7 +517,7 @@
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
- *(.sbss) \
+ *(.sbss .sbss.*) \
*(.scommon) \
}
@@ -534,7 +535,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss) \
+ *(.bss .bss.*) \
*(COMMON) \
}
@@ -583,7 +584,7 @@
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___bug_table) = .; \
- *(__bug_table) \
+ KEEP(*(__bug_table)) \
VMLINUX_SYMBOL(__stop___bug_table) = .; \
}
#else
@@ -595,7 +596,7 @@
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
}
#else
@@ -612,17 +613,17 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
@@ -636,21 +637,21 @@
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
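
Background note on the KEEP() conversions above: these tables are never referenced from C code; the kernel only finds them through the linker-generated start/stop symbols. Once the Makefile hunks below add -ffunction-sections/-fdata-sections and --gc-sections, the linker considers such unreferenced input sections dead and would silently drop initcalls, parameters and symbol tables, hence KEEP(). A minimal userspace sketch of the same pattern (it relies on GNU ld's __start_/__stop_ convention for orphan sections; all names are illustrative):

#include <stdio.h>

struct initcall {
        const char *name;
        int (*fn)(void);
};

static int hello_init(void)
{
        return puts("hello_init");
}

/* "used" stops the compiler from eliding it; the linker is another story */
static const struct initcall my_entry
        __attribute__((used, section("my_initcalls"))) = {
        .name = "hello",
        .fn   = hello_init,
};

/* GNU ld provides these for any section whose name is a C identifier */
extern const struct initcall __start_my_initcalls[];
extern const struct initcall __stop_my_initcalls[];

int main(void)
{
        const struct initcall *p;

        for (p = __start_my_initcalls; p < __stop_my_initcalls; p++)
                p->fn();
        return 0;
}
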
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -17,11 +17,16 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
+LDFLAGS_vmlinux += --gc-sections
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,13 +12,13 @@
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
+ KEEP(*(.idmap.text)) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(32); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
@@ -93,7 +93,7 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
- *(.exception.text)
+ KEEP(*(.exception.text))
__exception_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
@@ -118,7 +118,7 @@ SECTIONS
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
#ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
#endif
__stop___ex_table = .;
}
@@ -130,12 +130,12 @@ SECTIONS
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
- *(.ARM.exidx*)
+ KEEP(*(.ARM.exidx*))
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
- *(.ARM.extab*)
+ KEEP(*(.ARM.extab*))
__stop_unwind_tab = .;
}
#endif
@@ -154,14 +154,14 @@ SECTIONS
*/
__vectors_start = .;
.vectors 0 : AT(__vectors_start) {
- *(.vectors)
+ KEEP(*(.vectors))
}
. = __vectors_start + SIZEOF(.vectors);
__vectors_end = .;
__stubs_start = .;
.stubs 0x1000 : AT(__stubs_start) {
- *(.stubs)
+ KEEP(*(.stubs))
}
. = __stubs_start + SIZEOF(.stubs);
__stubs_end = .;
@@ -175,24 +175,24 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
- *(.taglist.init)
+ KEEP(*(.taglist.init))
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
- *(.alt.smp.init)
+ KEEP(*(.alt.smp.init))
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
.init.data : {
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -124,6 +124,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -DZIMAGE


@ -0,0 +1,88 @@
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -52,6 +52,16 @@
#define LOAD_OFFSET 0
#endif
+#ifndef SYMTAB_KEEP
+#define SYMTAB_KEEP KEEP(*(SORT(___ksymtab+*)))
+#define SYMTAB_KEEP_GPL KEEP(*(SORT(___ksymtab_gpl+*)))
+#endif
+
+#ifndef SYMTAB_DISCARD
+#define SYMTAB_DISCARD
+#define SYMTAB_DISCARD_GPL
+#endif
+
#include <linux/export.h>
/* Align . to a 8 byte boundary equals to maximum function alignment. */
@@ -284,14 +294,14 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- KEEP(*(SORT(___ksymtab+*))) \
+ SYMTAB_KEEP \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- KEEP(*(SORT(___ksymtab_gpl+*))) \
+ SYMTAB_KEEP_GPL \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
@@ -353,7 +363,7 @@
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- *(__ksymtab_strings) \
+ *(__ksymtab_strings+*) \
} \
\
/* __*init sections */ \
@@ -670,6 +680,8 @@
EXIT_TEXT \
EXIT_DATA \
EXIT_CALL \
+ SYMTAB_DISCARD \
+ SYMTAB_DISCARD_GPL \
*(.discard) \
*(.discard.*) \
}
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -349,7 +349,7 @@ targets += $(extra-y) $(MAKECMDGOALS) $(
# Linker scripts preprocessor (.lds.S -> .lds)
# ---------------------------------------------------------------------------
quiet_cmd_cpp_lds_S = LDS $@
- cmd_cpp_lds_S = $(CPP) $(cpp_flags) -P -C -U$(ARCH) \
+ cmd_cpp_lds_S = $(CPP) $(EXTRA_LDSFLAGS) $(cpp_flags) -P -C -U$(ARCH) \
-D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $<
$(obj)/%.lds: $(src)/%.lds.S FORCE
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -52,12 +52,19 @@ extern struct module __this_module;
#define __CRC_SYMBOL(sym, sec)
#endif
+#ifdef MODULE
+#define __EXPORT_SUFFIX(sym)
+#else
+#define __EXPORT_SUFFIX(sym) "+" #sym
+#endif
+
/* For every exported symbol, place a struct in the __ksymtab section */
#define __EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ __attribute__((section("__ksymtab_strings" \
+ __EXPORT_SUFFIX(sym)), aligned(1))) \
= VMLINUX_SYMBOL_STR(sym); \
static const struct kernel_symbol __ksymtab_##sym \
__used \
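
What the __EXPORT_SUFFIX() change amounts to, hand-expanded for one symbol (a rough compile-only sketch, not literal preprocessor output): every EXPORT_SYMBOL(foo) in a built-in object now lands its string and its kernel_symbol entry in per-symbol input sections, "__ksymtab_strings+foo" and "___ksymtab+foo", which is what lets the SYMTAB_KEEP/SYMTAB_DISCARD overrides above keep or drop exports individually.

struct kernel_symbol {
        unsigned long value;
        const char *name;
};

int foo(void)                           /* the symbol being exported */
{
        return 0;
}

static const char __kstrtab_foo[]
        __attribute__((used, section("__ksymtab_strings" "+" "foo"), aligned(1)))
        = "foo";

static const struct kernel_symbol __ksymtab_foo
        __attribute__((used, section("___ksymtab" "+" "foo")))
        = { (unsigned long)&foo, __kstrtab_foo };
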


@ -0,0 +1,54 @@
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -303,7 +303,7 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^)
quiet_cmd_lzma = LZMA $@
cmd_lzma = (cat $(filter-out FORCE,$^) | \
- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+ lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
(rm -f $@ ; false)
quiet_cmd_lzo = LZO $@
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -226,7 +226,7 @@ cpio_list=
output="/dev/stdout"
output_file=""
is_cpio_compressed=
-compr="gzip -n -9 -f"
+compr="gzip -n -9 -f -"
arg="$1"
case "$arg" in
@@ -240,9 +240,9 @@ case "$arg" in
output_file="$1"
cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
output=${cpio_list}
- echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f"
- echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
- echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
+ echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f -"
+ echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f -"
+ echo "$output_file" | grep -q "\.lzma$" && compr="lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so"
echo "$output_file" | grep -q "\.xz$" && \
compr="xz --check=crc32 --lzma2=dict=1MiB"
echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
@@ -303,7 +303,7 @@ if [ ! -z ${output_file} ]; then
if [ "${is_cpio_compressed}" = "compressed" ]; then
cat ${cpio_tfile} > ${output_file}
else
- (cat ${cpio_tfile} | ${compr} - > ${output_file}) \
+ (cat ${cpio_tfile} | ${compr} > ${output_file}) \
|| (rm -f ${output_file} ; false)
fi
[ -z ${cpio_file} ] && rm ${cpio_tfile}
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -47,6 +47,7 @@ static const struct compress_format comp
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
+ { {0x6d, 0x00}, "lzma-openwrt", unlzma },
{ {0xfd, 0x37}, "xz", unxz },
{ {0x89, 0x4c}, "lzo", unlzo },
{ {0x02, 0x21}, "lz4", unlz4 },
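
The extra "lzma-openwrt" entry pairs with the lzma invocations above: an LZMA-alone stream starts with a properties byte equal to (pb * 5 + lp) * 9 + lc, which is 0x5d for the stock lc=3/lp=0/pb=2 settings but 0x6d for the lc=1/lp=2/pb=2 settings passed to "lzma e" here, so the in-kernel decompressor needs a second magic to recognise such images. A userspace sketch of the same first-two-bytes dispatch (table abridged to the entries visible in the hunk):

#include <stdio.h>

struct magic {
        unsigned char b0, b1;
        const char *name;
};

static const struct magic table[] = {
        { 037,  0236, "gzip"         },
        { 0x42, 0x5a, "bzip2"        },
        { 0x5d, 0x00, "lzma"         },  /* lc=3 lp=0 pb=2: (2*5+0)*9+3 = 0x5d */
        { 0x6d, 0x00, "lzma-openwrt" },  /* lc=1 lp=2 pb=2: (2*5+2)*9+1 = 0x6d */
        { 0xfd, 0x37, "xz"           },
        { 0x89, 0x4c, "lzo"          },
};

static const char *identify(const unsigned char *buf)
{
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (buf[0] == table[i].b0 && buf[1] == table[i].b1)
                        return table[i].name;
        return "unknown";
}

int main(void)
{
        const unsigned char img[] = { 0x6d, 0x00, 0x00, 0x10 };

        printf("%s\n", identify(img));  /* prints "lzma-openwrt" */
        return 0;
}
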


@ -0,0 +1,18 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -195,7 +195,6 @@ config NF_CONNTRACK_FTP
config NF_CONNTRACK_H323
tristate "H.323 protocol support"
- depends on (IPV6 || IPV6=n)
depends on NETFILTER_ADVANCED
help
H.323 is a VoIP signalling protocol from ITU-T. As one of the most
@@ -750,7 +749,6 @@ config NETFILTER_XT_TARGET_SECMARK
config NETFILTER_XT_TARGET_TCPMSS
tristate '"TCPMSS" target support'
- depends on (IPV6 || IPV6=n)
default m if NETFILTER_ADVANCED=n
---help---
This option adds a `TCPMSS' target, which allows you to alter the


@ -0,0 +1,18 @@
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -10,13 +10,13 @@ config SND_DMAENGINE_PCM
tristate
config SND_HWDEP
- tristate
+ tristate "Sound hardware support"
config SND_RAWMIDI
tristate
config SND_COMPRESS_OFFLOAD
- tristate
+ tristate "Compression offloading support"
# To be effective this also requires INPUT - users should say:
# select SND_JACK if INPUT=y || INPUT=SND


@ -0,0 +1,10 @@
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -164,6 +164,7 @@ config CRYPTO_DEV_MV_CESA
depends on PLAT_ORION
select CRYPTO_ALGAPI
select CRYPTO_AES
+ select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
select CRYPTO_HASH
help


@ -0,0 +1,29 @@
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -29,6 +29,7 @@ config SSB_SPROM
config SSB_BLOCKIO
bool
depends on SSB
+ default y
config SSB_PCIHOST_POSSIBLE
bool
@@ -49,7 +50,7 @@ config SSB_PCIHOST
config SSB_B43_PCI_BRIDGE
bool
depends on SSB_PCIHOST
- default n
+ default y
config SSB_PCMCIAHOST_POSSIBLE
bool
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -17,6 +17,7 @@ config BCMA
config BCMA_BLOCKIO
bool
depends on BCMA
+ default y
config BCMA_HOST_PCI_POSSIBLE
bool


@ -0,0 +1,23 @@
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -308,16 +308,16 @@ config BCH_CONST_T
# Textsearch support is select'ed if needed
#
config TEXTSEARCH
- boolean
+ boolean "Textsearch support"
config TEXTSEARCH_KMP
- tristate
+ tristate "Textsearch KMP"
config TEXTSEARCH_BM
- tristate
+ tristate "Textsearch BM"
config TEXTSEARCH_FSM
- tristate
+ tristate "Textsearch FSM"
config BTREE
boolean


@ -0,0 +1,19 @@
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -149,13 +149,13 @@ config LIB80211
Drivers should select this themselves if needed.
config LIB80211_CRYPT_WEP
- tristate
+ tristate "LIB80211_CRYPT_WEP"
config LIB80211_CRYPT_CCMP
- tristate
+ tristate "LIB80211_CRYPT_CCMP"
config LIB80211_CRYPT_TKIP
- tristate
+ tristate "LIB80211_CRYPT_TKIP"
config LIB80211_DEBUG
bool "lib80211 debugging messages"


@ -0,0 +1,47 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -31,7 +31,7 @@ config CRYPTO_FIPS
this is.
config CRYPTO_ALGAPI
- tristate
+ tristate "ALGAPI"
select CRYPTO_ALGAPI2
help
This option provides the API for cryptographic algorithms.
@@ -40,7 +40,7 @@ config CRYPTO_ALGAPI2
tristate
config CRYPTO_AEAD
- tristate
+ tristate "AEAD"
select CRYPTO_AEAD2
select CRYPTO_ALGAPI
@@ -49,7 +49,7 @@ config CRYPTO_AEAD2
select CRYPTO_ALGAPI2
config CRYPTO_BLKCIPHER
- tristate
+ tristate "BLKCIPHER"
select CRYPTO_BLKCIPHER2
select CRYPTO_ALGAPI
@@ -60,7 +60,7 @@ config CRYPTO_BLKCIPHER2
select CRYPTO_WORKQUEUE
config CRYPTO_HASH
- tristate
+ tristate "HASH"
select CRYPTO_HASH2
select CRYPTO_ALGAPI
@@ -69,7 +69,7 @@ config CRYPTO_HASH2
select CRYPTO_ALGAPI2
config CRYPTO_RNG
- tristate
+ tristate "RNG"
select CRYPTO_RNG2
select CRYPTO_ALGAPI


@ -0,0 +1,22 @@
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,5 +1,5 @@
config WIRELESS_EXT
- bool
+ bool "Wireless extensions"
config WEXT_CORE
def_bool y
@@ -11,10 +11,10 @@ config WEXT_PROC
depends on WEXT_CORE
config WEXT_SPY
- bool
+ bool "WEXT_SPY"
config WEXT_PRIV
- bool
+ bool "WEXT_PRIV"
config CFG80211
tristate "cfg80211 - wireless configuration API"


@ -0,0 +1,11 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -2,7 +2,7 @@ menu "Core Netfilter Configuration"
depends on NET && INET && NETFILTER
config NETFILTER_NETLINK
- tristate
+ tristate "Netfilter NFNETLINK interface"
config NETFILTER_NETLINK_ACCT
tristate "Netfilter NFACCT over NFNETLINK interface"


@ -0,0 +1,72 @@
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,20 +3,24 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
- bool
+ tristate "Regmap"
config REGMAP_I2C
- tristate
+ select REGMAP
+ tristate "Regmap I2C"
config REGMAP_SPI
- tristate
+ select REGMAP
+ depends on SPI_MASTER
+ tristate "Regmap SPI"
config REGMAP_MMIO
+ select REGMAP
tristate
config REGMAP_IRQ
+ select REGMAP
bool
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -48,7 +48,7 @@ struct reg_default {
unsigned int def;
};
-#ifdef CONFIG_REGMAP
+#if IS_ENABLED(CONFIG_REGMAP)
enum regmap_endian {
/* Unspecified -> 0 -> Backwards compatible default */
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,6 +1,8 @@
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
-obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
+regmap-core-objs = regmap.o regcache.o regcache-rbtree.o regcache-lzo.o regcache-flat.o
+ifdef CONFIG_DEBUG_FS
+regmap-core-objs += regmap-debugfs.o
+endif
+obj-$(CONFIG_REGMAP) += regmap-core.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>
@@ -1959,3 +1960,5 @@ static int __init regmap_initcall(void)
return 0;
}
postcore_initcall(regmap_initcall);
+
+MODULE_LICENSE("GPL");
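
The regmap.h hunk switches from #ifdef CONFIG_REGMAP to IS_ENABLED(CONFIG_REGMAP) because REGMAP is now tristate: a modular build defines CONFIG_REGMAP_MODULE rather than CONFIG_REGMAP, and a plain #ifdef would compile the API away for its users. A self-contained re-creation of the kconfig.h trick (simplified; the kernel's internal macro names vary between versions):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)                 ___is_defined(x)
#define ___is_defined(val)              ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)    __take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option)      __is_defined(option)
#define IS_MODULE(option)       __is_defined(option##_MODULE)
#define IS_ENABLED(option)      (IS_BUILTIN(option) || IS_MODULE(option))

#define CONFIG_REGMAP_MODULE 1  /* pretend regmap is built as a module */

int main(void)
{
        printf("builtin=%d module=%d enabled=%d\n",
               IS_BUILTIN(CONFIG_REGMAP),
               IS_MODULE(CONFIG_REGMAP),
               IS_ENABLED(CONFIG_REGMAP));      /* 0 1 1 */
        return 0;
}
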


@ -0,0 +1,37 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -95,10 +95,10 @@ config CRYPTO_MANAGER
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
- select CRYPTO_AEAD2
- select CRYPTO_HASH2
- select CRYPTO_BLKCIPHER2
- select CRYPTO_PCOMP2
+ select CRYPTO_AEAD2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_HASH2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_PCOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -248,6 +248,9 @@ static int cryptomgr_schedule_test(struc
type = alg->cra_flags;
/* This piece of crap needs to disappear into per-type test hooks. */
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ type |= CRYPTO_ALG_TESTED;
+#else
if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
@@ -256,6 +259,7 @@ static int cryptomgr_schedule_test(struc
(!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
type |= CRYPTO_ALG_TESTED;
+#endif
param->type = type;


@ -0,0 +1,219 @@
--- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
+++ /dev/null
@@ -1,96 +0,0 @@
-#ifndef __BCM963XX_TAG_H
-#define __BCM963XX_TAG_H
-
-#define TAGVER_LEN 4 /* Length of Tag Version */
-#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
-#define SIG1_LEN 20 /* Company Signature 1 Length */
-#define SIG2_LEN 14 /* Company Signature 2 Length */
-#define BOARDID_LEN 16 /* Length of BoardId */
-#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
-#define CHIPID_LEN 6 /* Chip Id Length */
-#define IMAGE_LEN 10 /* Length of Length Field */
-#define ADDRESS_LEN 12 /* Length of Address field */
-#define DUALFLAG_LEN 2 /* Dual Image flag Length */
-#define INACTIVEFLAG_LEN 2 /* Inactie Flag Length */
-#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
-#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
-#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
-#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
-#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
-
-#define NUM_PIRELLI 2
-#define IMAGETAG_CRC_START 0xFFFFFFFF
-
-#define PIRELLI_BOARDS { \
- "AGPF-S0", \
- "DWV-S0", \
-}
-
-/*
- * The broadcom firmware assumes the rootfs starts the image,
- * therefore uses the rootfs start (flash_image_address)
- * to determine where to flash the image. Since we have the kernel first
- * we have to give it the kernel address, but the crc uses the length
- * associated with this address (root_length), which is added to the kernel
- * length (kernel_length) to determine the length of image to flash and thus
- * needs to be rootfs + deadcode (jffs2 EOF marker)
-*/
-
-struct bcm_tag {
- /* 0-3: Version of the image tag */
- char tag_version[TAGVER_LEN];
- /* 4-23: Company Line 1 */
- char sig_1[SIG1_LEN];
- /* 24-37: Company Line 2 */
- char sig_2[SIG2_LEN];
- /* 38-43: Chip this image is for */
- char chip_id[CHIPID_LEN];
- /* 44-59: Board name */
- char board_id[BOARDID_LEN];
- /* 60-61: Map endianness -- 1 BE 0 LE */
- char big_endian[ENDIANFLAG_LEN];
- /* 62-71: Total length of image */
- char total_length[IMAGE_LEN];
- /* 72-83: Address in memory of CFE */
- char cfe__address[ADDRESS_LEN];
- /* 84-93: Size of CFE */
- char cfe_length[IMAGE_LEN];
- /* 94-105: Address in memory of image start
- * (kernel for OpenWRT, rootfs for stock firmware)
- */
- char flash_image_start[ADDRESS_LEN];
- /* 106-115: Size of rootfs */
- char root_length[IMAGE_LEN];
- /* 116-127: Address in memory of kernel */
- char kernel_address[ADDRESS_LEN];
- /* 128-137: Size of kernel */
- char kernel_length[IMAGE_LEN];
- /* 138-139: Unused at the moment */
- char dual_image[DUALFLAG_LEN];
- /* 140-141: Unused at the moment */
- char inactive_flag[INACTIVEFLAG_LEN];
- /* 142-161: RSA Signature (not used; some vendors may use this) */
- char rsa_signature[RSASIG_LEN];
- /* 162-191: Compilation and related information (not used in OpenWrt) */
- char information1[TAGINFO1_LEN];
- /* 192-195: Version flash layout */
- char flash_layout_ver[FLASHLAYOUTVER_LEN];
- /* 196-199: kernel+rootfs CRC32 */
- __u32 fskernel_crc;
- /* 200-215: Unused except on Alice Gate where is is information */
- char information2[TAGINFO2_LEN];
- /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
- __u32 image_crc;
- /* 220-223: CRC32 of rootfs partition */
- __u32 rootfs_crc;
- /* 224-227: CRC32 of kernel partition */
- __u32 kernel_crc;
- /* 228-235: Unused at present */
- char reserved1[8];
- /* 236-239: CRC32 of header excluding last 20 bytes */
- __u32 header_crc;
- /* 240-255: Unused at present */
- char reserved2[16];
-};
-
-#endif /* __BCM63XX_TAG_H */
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -34,7 +34,7 @@
#include <linux/mtd/partitions.h>
#include <asm/mach-bcm63xx/bcm63xx_nvram.h>
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <linux/bcm963xx_tag.h>
#include <asm/mach-bcm63xx/board_bcm963xx.h>
#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -68,6 +68,7 @@ header-y += binfmts.h
header-y += blkpg.h
header-y += blktrace_api.h
header-y += bpqether.h
+header-y += bcm963xx_tag.h
header-y += bsg.h
header-y += btrfs.h
header-y += can.h
--- /dev/null
+++ b/include/uapi/linux/bcm963xx_tag.h
@@ -0,0 +1,96 @@
+#ifndef __BCM963XX_TAG_H
+#define __BCM963XX_TAG_H
+
+#define TAGVER_LEN 4 /* Length of Tag Version */
+#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
+#define SIG1_LEN 20 /* Company Signature 1 Length */
+#define SIG2_LEN 14 /* Company Signature 2 Length */
+#define BOARDID_LEN 16 /* Length of BoardId */
+#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
+#define CHIPID_LEN 6 /* Chip Id Length */
+#define IMAGE_LEN 10 /* Length of Length Field */
+#define ADDRESS_LEN 12 /* Length of Address field */
+#define DUALFLAG_LEN 2 /* Dual Image flag Length */
+#define INACTIVEFLAG_LEN 2 /* Inactie Flag Length */
+#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
+#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
+#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
+#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
+#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
+
+#define NUM_PIRELLI 2
+#define IMAGETAG_CRC_START 0xFFFFFFFF
+
+#define PIRELLI_BOARDS { \
+ "AGPF-S0", \
+ "DWV-S0", \
+}
+
+/*
+ * The broadcom firmware assumes the rootfs starts the image,
+ * therefore uses the rootfs start (flash_image_address)
+ * to determine where to flash the image. Since we have the kernel first
+ * we have to give it the kernel address, but the crc uses the length
+ * associated with this address (root_length), which is added to the kernel
+ * length (kernel_length) to determine the length of image to flash and thus
+ * needs to be rootfs + deadcode (jffs2 EOF marker)
+*/
+
+struct bcm_tag {
+ /* 0-3: Version of the image tag */
+ char tag_version[TAGVER_LEN];
+ /* 4-23: Company Line 1 */
+ char sig_1[SIG1_LEN];
+ /* 24-37: Company Line 2 */
+ char sig_2[SIG2_LEN];
+ /* 38-43: Chip this image is for */
+ char chip_id[CHIPID_LEN];
+ /* 44-59: Board name */
+ char board_id[BOARDID_LEN];
+ /* 60-61: Map endianness -- 1 BE 0 LE */
+ char big_endian[ENDIANFLAG_LEN];
+ /* 62-71: Total length of image */
+ char total_length[IMAGE_LEN];
+ /* 72-83: Address in memory of CFE */
+ char cfe__address[ADDRESS_LEN];
+ /* 84-93: Size of CFE */
+ char cfe_length[IMAGE_LEN];
+ /* 94-105: Address in memory of image start
+ * (kernel for OpenWRT, rootfs for stock firmware)
+ */
+ char flash_image_start[ADDRESS_LEN];
+ /* 106-115: Size of rootfs */
+ char root_length[IMAGE_LEN];
+ /* 116-127: Address in memory of kernel */
+ char kernel_address[ADDRESS_LEN];
+ /* 128-137: Size of kernel */
+ char kernel_length[IMAGE_LEN];
+ /* 138-139: Unused at the moment */
+ char dual_image[DUALFLAG_LEN];
+ /* 140-141: Unused at the moment */
+ char inactive_flag[INACTIVEFLAG_LEN];
+ /* 142-161: RSA Signature (not used; some vendors may use this) */
+ char rsa_signature[RSASIG_LEN];
+ /* 162-191: Compilation and related information (not used in OpenWrt) */
+ char information1[TAGINFO1_LEN];
+ /* 192-195: Version flash layout */
+ char flash_layout_ver[FLASHLAYOUTVER_LEN];
+ /* 196-199: kernel+rootfs CRC32 */
+ __u32 fskernel_crc;
+ /* 200-215: Unused except on Alice Gate where is is information */
+ char information2[TAGINFO2_LEN];
+ /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
+ __u32 image_crc;
+ /* 220-223: CRC32 of rootfs partition */
+ __u32 rootfs_crc;
+ /* 224-227: CRC32 of kernel partition */
+ __u32 kernel_crc;
+ /* 228-235: Unused at present */
+ char reserved1[8];
+ /* 236-239: CRC32 of header excluding last 20 bytes */
+ __u32 header_crc;
+ /* 240-255: Unused at present */
+ char reserved2[16];
+};
+
+#endif /* __BCM63XX_TAG_H */
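
For reference, the tag described here is a fixed 256-byte block at the start of a BCM63xx firmware image: ASCII length/address fields plus a handful of raw CRC32 words. A userspace sketch that mirrors the layout and dumps a few fields (uint32_t stands in for __u32; the file name and the assumption that the tag sits at offset 0 of the image are illustrative):

#include <stdio.h>
#include <stdint.h>

struct bcm_tag_sketch {
        char     tag_version[4];
        char     sig_1[20];
        char     sig_2[14];
        char     chip_id[6];
        char     board_id[16];
        char     big_endian[2];
        char     total_length[10];
        char     cfe__address[12];
        char     cfe_length[10];
        char     flash_image_start[12];
        char     root_length[10];
        char     kernel_address[12];
        char     kernel_length[10];
        char     dual_image[2];
        char     inactive_flag[2];
        char     rsa_signature[20];
        char     information1[30];
        char     flash_layout_ver[4];
        uint32_t fskernel_crc;
        char     information2[16];
        uint32_t image_crc;
        uint32_t rootfs_crc;
        uint32_t kernel_crc;
        char     reserved1[8];
        uint32_t header_crc;
        char     reserved2[16];
};

/* all u32 fields are naturally aligned, so no packing is needed */
_Static_assert(sizeof(struct bcm_tag_sketch) == 256, "tag must be 256 bytes");

int main(int argc, char **argv)
{
        struct bcm_tag_sketch tag;
        FILE *f;

        f = fopen(argc > 1 ? argv[1] : "firmware.bin", "rb");
        if (!f || fread(&tag, sizeof(tag), 1, f) != 1) {
                perror("read tag");
                return 1;
        }
        printf("tag v%.4s, board %.16s, chip %.6s, kernel length %.10s\n",
               tag.tag_version, tag.board_id, tag.chip_id, tag.kernel_length);
        fclose(f);
        return 0;
}
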


@ -0,0 +1,23 @@
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -193,16 +193,16 @@ config AUDIT_GENERIC
# compression support is select'ed if needed
#
config ZLIB_INFLATE
- tristate
+ tristate "ZLIB inflate support"
config ZLIB_DEFLATE
- tristate
+ tristate "ZLIB deflate support"
config LZO_COMPRESS
- tristate
+ tristate "LZO compress support"
config LZO_DECOMPRESS
- tristate
+ tristate "LZO decompress support"
config LZ4_COMPRESS
tristate


@ -0,0 +1,39 @@
From: Mark Miller <mark@mirell.org>
This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
certain Broadcom chipsets running CFE in order to load the kernel.
Signed-off-by: Mark Miller <mark@mirell.org>
Acked-by: Rob Landley <rob@landley.net>
---
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -860,9 +860,6 @@ config FW_ARC
config ARCH_MAY_HAVE_PC_FDC
bool
-config BOOT_RAW
- bool
-
config CEVT_BCM1480
bool
@@ -2364,6 +2361,18 @@ config USE_OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
+config BOOT_RAW
+ bool "Enable the kernel to be executed from the load address"
+ default n
+ help
+ Allow the kernel to be executed from the load address for
+ bootloaders which cannot read the ELF format. This places
+ a jump to start_kernel at the load address.
+
+ If unsure, say N.
+
+
+
endmenu
config LOCKDEP_SUPPORT


@ -0,0 +1,28 @@
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -957,6 +957,10 @@ config SYNC_R4K
config MIPS_MACHINE
def_bool n
+config IMAGE_CMDLINE_HACK
+ bool "OpenWrt specific image command line hack"
+ default n
+
config NO_IOPORT
def_bool n
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -101,6 +101,12 @@ FEXPORT(__kernel_entry)
j kernel_entry
#endif
+#ifdef CONFIG_IMAGE_CMDLINE_HACK
+ .ascii "CMDLINE:"
+EXPORT(__image_cmdline)
+ .fill 0x400
+#endif /* CONFIG_IMAGE_CMDLINE_HACK */
+
__REF
NESTED(kernel_entry, 16, sp) # kernel entry point
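
The .fill 0x400 block above is only half of the hack: after the build an image tool locates the "CMDLINE:" marker and writes the boot arguments into the reserved area, and the target's prom/setup code then appends __image_cmdline to the kernel command line. A simplified stand-in for such a patch tool (OpenWrt carries a comparable helper; error handling is trimmed and the usage is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MARKER   "CMDLINE:"
#define CMD_SIZE 0x400

int main(int argc, char **argv)
{
        FILE *f;
        char *buf;
        long len, i, off = -1;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <image> <cmdline>\n", argv[0]);
                return 1;
        }

        f = fopen(argv[1], "r+b");
        if (!f) {
                perror("open");
                return 1;
        }

        fseek(f, 0, SEEK_END);
        len = ftell(f);
        buf = malloc(len);
        fseek(f, 0, SEEK_SET);
        if (!buf || fread(buf, 1, len, f) != (size_t)len) {
                perror("read");
                return 1;
        }

        /* naive scan for the marker emitted by head.S */
        for (i = 0; i + (long)sizeof(MARKER) - 1 <= len; i++)
                if (!memcmp(buf + i, MARKER, sizeof(MARKER) - 1)) {
                        off = i + sizeof(MARKER) - 1;
                        break;
                }

        if (off < 0 || strlen(argv[2]) + 1 > CMD_SIZE) {
                fprintf(stderr, "marker not found or cmdline too long\n");
                return 1;
        }

        /* overwrite the reserved .fill area with the new command line */
        fseek(f, off, SEEK_SET);
        fwrite(argv[2], 1, strlen(argv[2]) + 1, f);
        fclose(f);
        free(buf);
        return 0;
}
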


@ -0,0 +1,11 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -87,7 +87,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
# machines may also. Since BFD is incredibly buggy with respect to
# crossformat linking we rely on the elf2ecoff tool for format conversion.
#
-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
+cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
cflags-y += -msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
KBUILD_AFLAGS_MODULE += -mlong-calls

View File

@ -0,0 +1,160 @@
MIPS: allow disabling the kernel FPU emulator
This patch allows turning off the in-kernel Algorithmics
FPU emulator support, which allows one to save a couple of
precious blocks on an embedded system.
Signed-off-by: Florian Fainelli <florian@openwrt.org>
--
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -945,6 +945,17 @@ config I8259
config MIPS_BONITO64
bool
+config MIPS_FPU_EMU
+ bool "Enable FPU emulation"
+ default y
+ help
+ This option allows building a kernel with or without the Algorithmics
+ FPU emulator enabled. Turning off this option results in a kernel which
+	  does not catch floating point operation exceptions. Make sure that your toolchain
+ is configured to enable software floating point emulation in that case.
+
+ If unsure say Y here.
+
config MIPS_MSC
bool
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -2,10 +2,12 @@
# Makefile for the Linux/MIPS kernel FPU emulation.
#
-obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
+obj-y := kernel_linkage.o
+
+obj-$(CONFIG_MIPS_FPU_EMU) += ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \
dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \
dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \
sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
- dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
+ dp_sqrt.o sp_sqrt.o dsemul.o cp1emu.o
--- a/arch/mips/math-emu/kernel_linkage.c
+++ b/arch/mips/math-emu/kernel_linkage.c
@@ -29,6 +29,7 @@
#define SIGNALLING_NAN 0x7ff800007ff80000LL
+#ifdef CONFIG_MIPS_FPU_EMU
void fpu_emulator_init_fpu(void)
{
static int first = 1;
@@ -113,3 +114,36 @@ int fpu_emulator_restore_context32(struc
return err;
}
#endif
+
+#else
+
+void fpu_emulator_init_fpu(void)
+{
+	printk(KERN_INFO "FPU emulator disabled, make sure your toolchain "
+		"was compiled with software floating point support (soft-float)\n");
+ return;
+}
+
+int fpu_emulator_save_context(struct sigcontext __user *sc)
+{
+ return 0;
+}
+
+int fpu_emulator_restore_context(struct sigcontext __user *sc)
+{
+ return 0;
+}
+
+#ifdef CONFIG_64BIT
+int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
+{
+ return 0;
+}
+
+int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
+{
+ return 0;
+}
+#endif /* CONFIG_64BIT */
+
+#endif /* CONFIG_MIPS_FPU_EMU */
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -27,6 +27,8 @@
#include <asm/inst.h>
#include <asm/local.h>
+#ifdef CONFIG_MIPS_FPU_EMU
+
#ifdef CONFIG_DEBUG_FS
struct mips_fpu_emulator_stats {
@@ -60,6 +62,38 @@ extern int fpu_emulator_cop1Handler(stru
int process_fpemu_return(int sig, void __user *fault_addr);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
+#else
+static inline int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long cpc)
+{
+ return 0;
+}
+
+static inline int do_dsemulret(struct pt_regs *xcp)
+{
+ return 0;
+}
+
+static inline int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+ struct mips_fpu_struct *ctx,
+ int has_fpu,
+ void *__user *fault_addr)
+{
+ return 0;
+}
+
+static inline int process_fpemu_return(int sig, void __user *fault_addr)
+{
+ return -EINVAL;
+}
+
+static inline int mm_isBranchInstr(struct pt_regs *regs,
+ struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+{
+ return 0;
+}
+#endif /* CONFIG_MIPS_FPU_EMU */
/*
* Instruction inserted following the badinst to further tag the sequence
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -694,6 +694,7 @@ asmlinkage void do_ov(struct pt_regs *re
exception_exit(prev_state);
}
+#ifdef CONFIG_MIPS_FPU_EMU
int process_fpemu_return(int sig, void __user *fault_addr)
{
if (sig == SIGSEGV || sig == SIGBUS) {
@@ -717,6 +718,7 @@ int process_fpemu_return(int sig, void _
return 0;
}
}
+#endif /* CONFIG_MIPS_FPU_EMU */
/*
* XXX Delayed fp exceptions when doing a lazy ctx switch XXX


@ -0,0 +1,352 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -90,8 +90,13 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
cflags-y += -msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
+ifdef CONFIG_64BIT
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+else
+KBUILD_AFLAGS_MODULE += -mno-long-calls
+KBUILD_CFLAGS_MODULE += -mno-long-calls
+endif
ifndef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -11,6 +11,11 @@ struct mod_arch_specific {
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
struct mips_hi16 *r_mips_hi16_list;
+
+ void *phys_plt_tbl;
+ void *virt_plt_tbl;
+ unsigned int phys_plt_offset;
+ unsigned int virt_plt_offset;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -42,14 +42,222 @@ struct mips_hi16 {
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
-#ifdef MODULE_START
+/*
+ * Get the potential max trampolines size required of the init and
+ * non-init sections. Only used if we cannot find enough contiguous
+ * physically mapped memory to put the module into.
+ */
+static unsigned int
+get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ const char *secstrings, unsigned int symindex, bool is_init)
+{
+ unsigned long ret = 0;
+ unsigned int i, j;
+ Elf_Sym *syms;
+
+ /* Everything marked ALLOC (this includes the exported symbols) */
+ for (i = 1; i < hdr->e_shnum; ++i) {
+ unsigned int info = sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_REL
+ && sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+ /* If it's called *.init*, and we're not init, we're
+ not interested */
+ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
+ != is_init)
+ continue;
+
+ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
+ if (sechdrs[i].sh_type == SHT_REL) {
+ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
+
+ for (j = 0; j < size; ++j) {
+ Elf_Sym *sym;
+
+ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
+ continue;
+
+ sym = syms + ELF_MIPS_R_SYM(rel[j]);
+ if (!is_init && sym->st_shndx != SHN_UNDEF)
+ continue;
+
+ ret += 4 * sizeof(int);
+ }
+ } else {
+ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
+
+ for (j = 0; j < size; ++j) {
+ Elf_Sym *sym;
+
+ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
+ continue;
+
+ sym = syms + ELF_MIPS_R_SYM(rela[j]);
+ if (!is_init && sym->st_shndx != SHN_UNDEF)
+ continue;
+
+ ret += 4 * sizeof(int);
+ }
+ }
+ }
+
+ return ret;
+}
+
+#ifndef MODULE_START
+static void *alloc_phys(unsigned long size)
+{
+ unsigned order;
+ struct page *page;
+ struct page *p;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_THISNODE, order);
+ if (!page)
+ return NULL;
+
+ split_page(page, order);
+
+ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
+ __free_page(p);
+
+ return page_address(page);
+}
+#endif
+
+static void free_phys(void *ptr, unsigned long size)
+{
+ struct page *page;
+ struct page *end;
+
+ page = virt_to_page(ptr);
+ end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
+
+ for (; page < end; ++page)
+ __free_page(page);
+}
+
+
void *module_alloc(unsigned long size)
{
+#ifdef MODULE_START
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, -1,
__builtin_return_address(0));
+#else
+ void *ptr;
+
+ if (size == 0)
+ return NULL;
+
+ ptr = alloc_phys(size);
+
+ /* If we failed to allocate physically contiguous memory,
+ * fall back to regular vmalloc. The module loader code will
+ * create jump tables to handle long jumps */
+ if (!ptr)
+ return vmalloc(size);
+
+ return ptr;
+#endif
}
+
+static inline bool is_phys_addr(void *ptr)
+{
+#ifdef CONFIG_64BIT
+ return (KSEGX((unsigned long)ptr) == CKSEG0);
+#else
+ return (KSEGX(ptr) == KSEG0);
#endif
+}
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ if (is_phys_addr(module_region)) {
+ if (mod->module_init == module_region)
+ free_phys(module_region, mod->init_size);
+ else if (mod->module_core == module_region)
+ free_phys(module_region, mod->core_size);
+ else
+ BUG();
+ } else {
+ vfree(module_region);
+ }
+}
+
+static void *__module_alloc(int size, bool phys)
+{
+ void *ptr;
+
+ if (phys)
+ ptr = kmalloc(size, GFP_KERNEL);
+ else
+ ptr = vmalloc(size);
+ return ptr;
+}
+
+static void __module_free(void *ptr)
+{
+ if (is_phys_addr(ptr))
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned int symindex = 0;
+ unsigned int core_size, init_size;
+ int i;
+
+ mod->arch.phys_plt_offset = 0;
+ mod->arch.virt_plt_offset = 0;
+ mod->arch.phys_plt_tbl = NULL;
+ mod->arch.virt_plt_tbl = NULL;
+
+ if (IS_ENABLED(CONFIG_64BIT))
+ return 0;
+
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (sechdrs[i].sh_type == SHT_SYMTAB)
+ symindex = i;
+
+ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
+ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
+
+ if ((core_size + init_size) == 0)
+ return 0;
+
+ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
+ if (!mod->arch.phys_plt_tbl)
+ return -ENOMEM;
+
+ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
+ if (!mod->arch.virt_plt_tbl) {
+ __module_free(mod->arch.phys_plt_tbl);
+ mod->arch.phys_plt_tbl = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
@@ -63,8 +271,39 @@ static int apply_r_mips_32_rel(struct mo
return 0;
}
+static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
+ void *start, Elf_Addr v)
+{
+ unsigned *tramp = start + *plt_offset;
+ *plt_offset += 4 * sizeof(int);
+
+ /* adjust carry for addiu */
+ if (v & 0x00008000)
+ v += 0x10000;
+
+ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
+ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
+ tramp[2] = 0x03200008; /* jr t9 */
+ tramp[3] = 0x00000000; /* nop */
+
+ return (Elf_Addr) tramp;
+}
+
+static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
+{
+ if (is_phys_addr(location))
+ return add_plt_entry_to(&me->arch.phys_plt_offset,
+ me->arch.phys_plt_tbl, v);
+ else
+ return add_plt_entry_to(&me->arch.virt_plt_offset,
+ me->arch.virt_plt_tbl, v);
+
+}
+
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
{
+ u32 ofs = *location & 0x03ffffff;
+
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
me->name);
@@ -72,14 +311,17 @@ static int apply_r_mips_26_rel(struct mo
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
- me->name);
- return -ENOEXEC;
+ v = add_plt_entry(me, location, v + (ofs << 2));
+ if (!v) {
+ printk(KERN_ERR
+ "module %s: relocation overflow\n", me->name);
+ return -ENOEXEC;
+ }
+ ofs = 0;
}
*location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
+ ((ofs + (v >> 2)) & 0x03ffffff);
return 0;
}
@@ -286,11 +528,32 @@ int module_finalize(const Elf_Ehdr *hdr,
list_add(&me->arch.dbe_list, &dbe_list);
spin_unlock_irq(&dbe_lock);
}
+
+ /* Get rid of the fixup trampoline if we're running the module
+ * from physically mapped address space */
+ if (me->arch.phys_plt_offset == 0) {
+ __module_free(me->arch.phys_plt_tbl);
+ me->arch.phys_plt_tbl = NULL;
+ }
+ if (me->arch.virt_plt_offset == 0) {
+ __module_free(me->arch.virt_plt_tbl);
+ me->arch.virt_plt_tbl = NULL;
+ }
+
return 0;
}
void module_arch_cleanup(struct module *mod)
{
+ if (mod->arch.phys_plt_tbl) {
+ __module_free(mod->arch.phys_plt_tbl);
+ mod->arch.phys_plt_tbl = NULL;
+ }
+ if (mod->arch.virt_plt_tbl) {
+ __module_free(mod->arch.virt_plt_tbl);
+ mod->arch.virt_plt_tbl = NULL;
+ }
+
spin_lock_irq(&dbe_lock);
list_del(&mod->arch.dbe_list);
spin_unlock_irq(&dbe_lock);
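
A short illustration of the arithmetic behind these trampolines: a MIPS j/jal instruction keeps only a 26-bit word index, so it can reach targets within its own 256 MB segment only, and vmalloc'd module text is normally not in the same segment as the kernel. When the segment check fails, the loader now redirects the call through the four-word lui/addiu/jr/nop stub built by add_plt_entry_to(). Userspace sketch with made-up addresses:

#include <stdio.h>
#include <stdint.h>

static int reachable_by_j(uint32_t location, uint32_t target)
{
        /* same test as apply_r_mips_26_rel(): top four address bits must match */
        return (target & 0xf0000000u) == ((location + 4) & 0xf0000000u);
}

static void emit_trampoline(uint32_t v, uint32_t tramp[4])
{
        if (v & 0x00008000u)            /* addiu sign-extends, pre-adjust carry */
                v += 0x10000u;

        tramp[0] = 0x3c190000u | (v >> 16);     /* lui   t9, hi16      */
        tramp[1] = 0x27390000u | (v & 0xffffu); /* addiu t9, t9, lo16  */
        tramp[2] = 0x03200008u;                 /* jr    t9            */
        tramp[3] = 0x00000000u;                 /* nop (delay slot)    */
}

int main(void)
{
        uint32_t caller = 0xc0010000u;  /* hypothetical vmalloc'd module text    */
        uint32_t target = 0x80123456u;  /* hypothetical kernel function in KSEG0 */
        uint32_t t[4];

        printf("direct j/jal possible: %d\n", reachable_by_j(caller, target));
        emit_trampoline(target, t);
        printf("trampoline: %08x %08x %08x %08x\n", t[0], t[1], t[2], t[3]);
        return 0;
}
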


@ -0,0 +1,83 @@
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -133,11 +133,44 @@ strncmp(__const__ char *__cs, __const__
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+#define memset(__s, __c, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memset((__s), (__c), __len); \
+ else \
+ __ret = __builtin_memset((__s), (__c), __len); \
+ __ret; \
+})
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+#define memcpy(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memcpy((dst), (src), __len); \
+ else \
+ __ret = __builtin_memcpy((dst), (src), __len); \
+ __ret; \
+})
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+#define memmove(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memmove((dst), (src), __len); \
+ else \
+ __ret = __builtin_memmove((dst), (src), __len); \
+ __ret; \
+})
+
+#define __HAVE_ARCH_MEMCMP
+#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
#endif /* _ASM_STRING_H */
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -4,7 +4,7 @@
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strlen_user.o strncpy_user.o \
- strnlen_user.o uncached.o
+ strnlen_user.o uncached.o memcmp.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
--- /dev/null
+++ b/arch/mips/lib/memcmp.c
@@ -0,0 +1,22 @@
+/*
+ * copied from linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#undef memcmp
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+ if ((res = *su1 - *su2) != 0)
+ break;
+ return res;
+}
+EXPORT_SYMBOL(memcmp);
+
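
The asm/string.h macros above send small or variable-sized copies to the compiler builtins, which GCC expands inline for small constant lengths, and only fall back to the out-of-line assembler routines for large constant lengths. A userspace re-creation that makes the dispatch observable (fast_memcpy and my_memcpy are invented stand-ins for the kernel macro and the arch memcpy):

#include <stdio.h>
#include <string.h>

static unsigned long outline_calls;

static void *my_memcpy(void *dst, const void *src, size_t n)
{
        outline_calls++;                /* stands in for the assembler memcpy */
        return memcpy(dst, src, n);
}

#define fast_memcpy(dst, src, len)                              \
({                                                              \
        size_t __len = (len);                                   \
        void *__ret;                                            \
        if (__builtin_constant_p(len) && __len >= 64)           \
                __ret = my_memcpy((dst), (src), __len);         \
        else                                                    \
                __ret = __builtin_memcpy((dst), (src), __len);  \
        __ret;                                                  \
})

int main(void)
{
        char a[128] = "hello", b[128];

        fast_memcpy(b, a, 8);           /* constant and small: stays inline     */
        fast_memcpy(b, a, sizeof(a));   /* constant and >= 64: goes out of line */

        printf("out-of-line calls: %lu\n", outline_calls);     /* prints 1 */
        return 0;
}
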


@ -0,0 +1,32 @@
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -39,6 +39,7 @@ void (*__flush_kernel_vmap_range)(unsign
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
+EXPORT_SYMBOL(__flush_cache_all);
/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -20,6 +20,9 @@
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/aio.h>
+#ifdef CONFIG_MIPS
+#include <asm/cacheflush.h>
+#endif
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -759,6 +762,9 @@ static int fuse_copy_fill(struct fuse_co
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
unsigned ncpy = min(*size, cs->len);
+#ifdef CONFIG_MIPS
+ __flush_cache_all();
+#endif
if (val) {
if (cs->write)
memcpy(cs->buf, *val, ncpy);


@ -0,0 +1,13 @@
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -81,6 +81,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
return -ENOEXEC;
}
+ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
+ ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+
loc = dstsec->sh_addr + rel->r_offset;
switch (ELF32_R_TYPE(rel->r_info)) {


@ -0,0 +1,31 @@
Upstream doesn't optimize the kernel and bootwrappers for ppc44x because
they still want to support gcc 3.3 -- well, we don't.
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -154,7 +154,8 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
KBUILD_CFLAGS += -mno-sched-epilog
endif
-cpu-as-$(CONFIG_4xx) += -Wa,-m405
+cpu-as-$(CONFIG_40x) += -Wa,-m405
+cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
cpu-as-$(CONFIG_E200) += -Wa,-me200
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -38,10 +38,10 @@ BOOTCFLAGS += -I$(obj) -I$(srctree)/$(ob
DTC_FLAGS ?= -p 1024
$(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
+$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
+$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
+$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405


@ -0,0 +1,10 @@
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -121,7 +121,6 @@ CPP = $(CC) -E $(KBUILD_CFLAGS)
CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
# No AltiVec or VSX instructions when building kernel
KBUILD_CFLAGS += $(call cc-option,-mno-altivec)


@ -0,0 +1,10 @@
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -19,6 +19,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>


@ -0,0 +1,293 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -12,6 +12,32 @@ menuconfig MTD
if MTD
+menu "OpenWrt specific MTD options"
+
+config MTD_ROOTFS_ROOT_DEV
+ bool "Automatically set 'rootfs' partition to be root filesystem"
+ default y
+
+config MTD_ROOTFS_SPLIT
+ bool "Automatically split 'rootfs' partition for squashfs"
+ default y
+
+config MTD_SPLIT_FIRMWARE
+ bool "Automatically split firmware partition for kernel+rootfs"
+ default y
+
+config MTD_SPLIT_FIRMWARE_NAME
+ string "Firmware partition name"
+ depends on MTD_SPLIT_FIRMWARE
+ default "firmware"
+
+config MTD_UIMAGE_SPLIT
+ bool "Enable split support for firmware partitions containing a uImage"
+ depends on MTD_SPLIT_FIRMWARE
+ default y
+
+endmenu
+
config MTD_TESTS
tristate "MTD tests support (DANGEROUS)"
depends on m
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -29,6 +29,8 @@
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/root_dev.h>
+#include <linux/magic.h>
#include <linux/err.h>
#include "mtdcore.h"
@@ -45,13 +47,14 @@ struct mtd_part {
struct list_head list;
};
+static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part);
+
/*
* Given a pointer to the MTD object in the mtd_part structure, we can retrieve
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
-
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
@@ -534,8 +537,10 @@ out_register:
return slave;
}
-int mtd_add_partition(struct mtd_info *master, char *name,
- long long offset, long long length)
+
+static int
+__mtd_add_partition(struct mtd_info *master, char *name,
+ long long offset, long long length, bool dup_check)
{
struct mtd_partition part;
struct mtd_part *p, *new;
@@ -567,21 +572,24 @@ int mtd_add_partition(struct mtd_info *m
end = offset + length;
mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry(p, &mtd_partitions, list)
- if (p->master == master) {
- if ((start >= p->offset) &&
- (start < (p->offset + p->mtd.size)))
- goto err_inv;
-
- if ((end >= p->offset) &&
- (end < (p->offset + p->mtd.size)))
- goto err_inv;
- }
+ if (dup_check) {
+ list_for_each_entry(p, &mtd_partitions, list)
+ if (p->master == master) {
+ if ((start >= p->offset) &&
+ (start < (p->offset + p->mtd.size)))
+ goto err_inv;
+
+ if ((end >= p->offset) &&
+ (end < (p->offset + p->mtd.size)))
+ goto err_inv;
+ }
+ }
list_add(&new->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&new->mtd);
+ mtd_partition_split(master, new);
return ret;
err_inv:
@@ -591,6 +599,12 @@ err_inv:
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
+int mtd_add_partition(struct mtd_info *master, char *name,
+ long long offset, long long length)
+{
+ return __mtd_add_partition(master, name, offset, length, true);
+}
+
int mtd_del_partition(struct mtd_info *master, int partno)
{
struct mtd_part *slave, *next;
@@ -614,6 +628,151 @@ int mtd_del_partition(struct mtd_info *m
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
+static inline unsigned long
+mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len)
+{
+ unsigned long mask = mtd->erasesize - 1;
+
+ len += offset & mask;
+ len = (len + mask) & ~mask;
+ len -= offset & mask;
+ return len;
+}
+
+#define ROOTFS_SPLIT_NAME "rootfs_data"
+
+struct squashfs_super_block {
+ __le32 s_magic;
+ __le32 pad0[9];
+ __le64 bytes_used;
+};
+
+
+static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
+{
+ struct squashfs_super_block sb;
+ int len, ret;
+
+ ret = mtd_read(master, offset, sizeof(sb), &len, (void *) &sb);
+ if (ret || (len != sizeof(sb))) {
+		printk(KERN_ALERT "split_squashfs: error occurred while reading "
+ "from \"%s\"\n", master->name);
+ return -EINVAL;
+ }
+
+ if (SQUASHFS_MAGIC != le32_to_cpu(sb.s_magic) ) {
+ printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
+ master->name);
+ *split_offset = 0;
+ return 0;
+ }
+
+ if (le64_to_cpu((sb.bytes_used)) <= 0) {
+ printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
+ master->name);
+ *split_offset = 0;
+ return 0;
+ }
+
+ len = (u32) le64_to_cpu(sb.bytes_used);
+ len = mtd_pad_erasesize(master, offset, len);
+ *split_offset = offset + len;
+
+ return 0;
+}
+
+static void split_rootfs_data(struct mtd_info *master, struct mtd_part *part)
+{
+ unsigned int split_offset = 0;
+ unsigned int split_size;
+ int ret;
+
+ ret = split_squashfs(master, part->offset, &split_offset);
+ if (ret)
+ return;
+
+ if (split_offset <= 0)
+ return;
+
+ split_size = part->mtd.size - (split_offset - part->offset);
+ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=0x%x, len=0x%x\n",
+ ROOTFS_SPLIT_NAME, split_offset, split_size);
+
+ __mtd_add_partition(master, ROOTFS_SPLIT_NAME, split_offset,
+ split_size, false);
+}
+
+#define UBOOT_MAGIC 0x27051956
+
+static void split_uimage(struct mtd_info *master, struct mtd_part *part)
+{
+ struct {
+ __be32 magic;
+ __be32 pad[2];
+ __be32 size;
+ } hdr;
+ size_t len;
+
+ if (mtd_read(master, part->offset, sizeof(hdr), &len, (void *) &hdr))
+ return;
+
+ if (len != sizeof(hdr) || hdr.magic != cpu_to_be32(UBOOT_MAGIC))
+ return;
+
+ len = be32_to_cpu(hdr.size) + 0x40;
+ len = mtd_pad_erasesize(master, part->offset, len);
+ if (len + master->erasesize > part->mtd.size)
+ return;
+
+ __mtd_add_partition(master, "rootfs", part->offset + len,
+ part->mtd.size - len, false);
+}
+
+#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
+#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
+#else
+#define SPLIT_FIRMWARE_NAME "unused"
+#endif
+
+static void split_firmware(struct mtd_info *master, struct mtd_part *part)
+{
+ if (config_enabled(CONFIG_MTD_UIMAGE_SPLIT))
+ split_uimage(master, part);
+}
+
+void __weak arch_split_mtd_part(struct mtd_info *master, const char *name,
+ int offset, int size)
+{
+}
+
+static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part)
+{
+ static int rootfs_found = 0;
+
+ if (rootfs_found)
+ return;
+
+ if (!strcmp(part->mtd.name, "rootfs")) {
+ rootfs_found = 1;
+
+ if (config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+ ROOT_DEV == 0) {
+ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
+ "set to be root filesystem\n");
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, part->mtd.index);
+ }
+
+ if (config_enabled(CONFIG_MTD_ROOTFS_SPLIT))
+ split_rootfs_data(master, part);
+ }
+
+ if (!strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
+ config_enabled(CONFIG_MTD_SPLIT_FIRMWARE))
+ split_firmware(master, part);
+
+ arch_split_mtd_part(master, part->mtd.name, part->offset,
+ part->mtd.size);
+}
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
@@ -643,6 +802,7 @@ int add_mtd_partitions(struct mtd_info *
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&slave->mtd);
+ mtd_partition_split(master, slave);
cur_offset = slave->offset + slave->mtd.size;
}
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -84,5 +84,7 @@ int mtd_add_partition(struct mtd_info *m
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+extern void __weak arch_split_mtd_part(struct mtd_info *master,
+ const char *name, int offset, int size);
#endif
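
The uImage split above needs nothing but the 64-byte legacy header: the magic 0x27051956 sits at offset 0 and the payload size at offset 12, and the rootfs sub-partition starts at header plus payload rounded up so that its flash offset is erase-block aligned, which is what mtd_pad_erasesize() computes. A userspace sketch of the same arithmetic (erase size, offsets and sizes are made-up example values):

#include <stdio.h>
#include <stdint.h>

#define UBOOT_MAGIC 0x27051956u

/* same math as mtd_pad_erasesize(): pad so offset+len is block aligned */
static uint32_t pad_erasesize(uint32_t erasesize, uint32_t offset, uint32_t len)
{
        uint32_t mask = erasesize - 1;

        len += offset & mask;
        len = (len + mask) & ~mask;
        len -= offset & mask;
        return len;
}

int main(void)
{
        uint32_t magic = UBOOT_MAGIC;   /* ih_magic, already byte-swapped     */
        uint32_t image_size = 1234567;  /* ih_size: payload bytes (offset 12) */
        uint32_t part_offset = 0x50000; /* flash offset of "firmware"         */
        uint32_t erasesize = 0x20000;   /* 128 KiB erase blocks               */
        uint32_t len;

        if (magic != UBOOT_MAGIC) {
                fprintf(stderr, "no uImage header\n");
                return 1;
        }

        /* 0x40 is the size of the legacy uImage header */
        len = pad_erasesize(erasesize, part_offset, image_size + 0x40);

        printf("rootfs sub-partition at firmware offset 0x%x\n", len);
        return 0;
}
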


@ -0,0 +1,113 @@
From 02cff0ccaa6d364f5c1eeea83f47ac80ccc967d4 Mon Sep 17 00:00:00 2001
From: Gabor Juhos <juhosg@openwrt.org>
Date: Tue, 3 Sep 2013 18:11:50 +0200
Subject: [PATCH] mtd: add support for different partition parser types
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
drivers/mtd/mtdpart.c | 56 ++++++++++++++++++++++++++++++++++++++++
include/linux/mtd/partitions.h | 11 ++++++++
2 files changed, 67 insertions(+)
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -832,6 +832,30 @@ static struct mtd_part_parser *get_parti
#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+static struct mtd_part_parser *
+get_partition_parser_by_type(enum mtd_parser_type type,
+ struct mtd_part_parser *start)
+{
+ struct mtd_part_parser *p, *ret = NULL;
+
+ spin_lock(&part_parser_lock);
+
+ p = list_prepare_entry(start, &part_parsers, list);
+ if (start)
+ put_partition_parser(start);
+
+ list_for_each_entry_continue(p, &part_parsers, list) {
+ if (p->type == type && try_module_get(p->owner)) {
+ ret = p;
+ break;
+ }
+ }
+
+ spin_unlock(&part_parser_lock);
+
+ return ret;
+}
+
int register_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
@@ -908,6 +932,38 @@ int parse_mtd_partitions(struct mtd_info
return ret;
}
+int parse_mtd_partitions_by_type(struct mtd_info *master,
+ enum mtd_parser_type type,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_part_parser *prev = NULL;
+ int ret = 0;
+
+ while (1) {
+ struct mtd_part_parser *parser;
+
+ parser = get_partition_parser_by_type(type, prev);
+ if (!parser)
+ break;
+
+ ret = (*parser->parse_fn)(master, pparts, data);
+
+ if (ret > 0) {
+ put_partition_parser(parser);
+ printk(KERN_NOTICE
+ "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
+ break;
+ }
+
+ prev = parser;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(parse_mtd_partitions_by_type);
+
int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -68,12 +68,17 @@ struct mtd_part_parser_data {
* Functions dealing with the various ways of partitioning the space
*/
+enum mtd_parser_type {
+ MTD_PARSER_TYPE_DEVICE = 0,
+};
+
struct mtd_part_parser {
struct list_head list;
struct module *owner;
const char *name;
int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
struct mtd_part_parser_data *);
+ enum mtd_parser_type type;
};
extern int register_mtd_parser(struct mtd_part_parser *parser);
@@ -87,4 +92,9 @@ uint64_t mtd_get_device_size(const struc
extern void __weak arch_split_mtd_part(struct mtd_info *master,
const char *name, int offset, int size);
+int parse_mtd_partitions_by_type(struct mtd_info *master,
+ enum mtd_parser_type type,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
+
#endif
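
To illustrate the interface this patch introduces: a parser registers itself with a type, and the core later walks all parsers of that type via parse_mtd_partitions_by_type(). The sketch below is hypothetical (the "example" parser name, the single whole-device partition it reports and the module boilerplate are made up); only register_mtd_parser(), struct mtd_part_parser and MTD_PARSER_TYPE_DEVICE come from the patch above.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical parser: reports one partition covering the whole master. */
static int example_parse(struct mtd_info *master,
                         struct mtd_partition **pparts,
                         struct mtd_part_parser_data *data)
{
        struct mtd_partition *parts;

        parts = kzalloc(sizeof(*parts), GFP_KERNEL);
        if (!parts)
                return -ENOMEM;

        parts[0].name = "whole-flash";
        parts[0].offset = 0;
        parts[0].size = master->size;

        *pparts = parts;
        return 1;               /* number of partitions found */
}

static struct mtd_part_parser example_parser = {
        .owner    = THIS_MODULE,
        .name     = "example",
        .parse_fn = example_parse,
        .type     = MTD_PARSER_TYPE_DEVICE,
};

static int __init example_parser_init(void)
{
        return register_mtd_parser(&example_parser);
}
module_init(example_parser_init);
MODULE_LICENSE("GPL");

A caller would then collect the result with parse_mtd_partitions_by_type(master, MTD_PARSER_TYPE_DEVICE, &parts, NULL), exactly as the later split patches do for the ROOTFS and FIRMWARE types.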

View File

@ -0,0 +1,75 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -628,6 +628,37 @@ int mtd_del_partition(struct mtd_info *m
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
+static int
+run_parsers_by_type(struct mtd_part *slave, enum mtd_parser_type type)
+{
+ struct mtd_partition *parts;
+ int nr_parts;
+ int i;
+
+ nr_parts = parse_mtd_partitions_by_type(&slave->mtd, type, &parts,
+ NULL);
+ if (nr_parts <= 0)
+ return nr_parts;
+
+ if (WARN_ON(!parts))
+ return 0;
+
+ for (i = 0; i < nr_parts; i++) {
+ /* adjust partition offsets */
+ parts[i].offset += slave->offset;
+
+ __mtd_add_partition(slave->master,
+ parts[i].name,
+ parts[i].offset,
+ parts[i].size,
+ false);
+ }
+
+ kfree(parts);
+
+ return nr_parts;
+}
+
static inline unsigned long
mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len)
{
@@ -687,6 +718,10 @@ static void split_rootfs_data(struct mtd
unsigned int split_size;
int ret;
+ ret = run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);
+ if (ret > 0)
+ return;
+
ret = split_squashfs(master, part->offset, &split_offset);
if (ret)
return;
@@ -736,6 +771,12 @@ static void split_uimage(struct mtd_info
static void split_firmware(struct mtd_info *master, struct mtd_part *part)
{
+ int ret;
+
+ ret = run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
+ if (ret > 0)
+ return;
+
if (config_enabled(CONFIG_MTD_UIMAGE_SPLIT))
split_uimage(master, part);
}
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -70,6 +70,8 @@ struct mtd_part_parser_data {
enum mtd_parser_type {
MTD_PARSER_TYPE_DEVICE = 0,
+ MTD_PARSER_TYPE_ROOTFS,
+ MTD_PARSER_TYPE_FIRMWARE,
};
struct mtd_part_parser {

View File

@ -0,0 +1,25 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -36,6 +36,11 @@ config MTD_UIMAGE_SPLIT
depends on MTD_SPLIT_FIRMWARE
default y
+config MTD_SPLIT
+ def_bool n
+ help
+ Generic MTD split support.
+
endmenu
config MTD_TESTS
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
+mtd-$(CONFIG_MTD_SPLIT) += mtdsplit.o
+
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o

View File

@ -0,0 +1,69 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -34,6 +34,7 @@
#include <linux/err.h>
#include "mtdcore.h"
+#include "mtdsplit.h"
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
@@ -670,43 +671,16 @@ mtd_pad_erasesize(struct mtd_info *mtd,
return len;
}
-#define ROOTFS_SPLIT_NAME "rootfs_data"
-
-struct squashfs_super_block {
- __le32 s_magic;
- __le32 pad0[9];
- __le64 bytes_used;
-};
-
-
static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
{
- struct squashfs_super_block sb;
+ size_t squashfs_len;
int len, ret;
- ret = mtd_read(master, offset, sizeof(sb), &len, (void *) &sb);
- if (ret || (len != sizeof(sb))) {
- printk(KERN_ALERT "split_squashfs: error occured while reading "
- "from \"%s\"\n", master->name);
- return -EINVAL;
- }
-
- if (SQUASHFS_MAGIC != le32_to_cpu(sb.s_magic) ) {
- printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
- master->name);
- *split_offset = 0;
- return 0;
- }
-
- if (le64_to_cpu((sb.bytes_used)) <= 0) {
- printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
- master->name);
- *split_offset = 0;
- return 0;
- }
+ ret = mtd_get_squashfs_len(master, offset, &squashfs_len);
+ if (ret)
+ return ret;
- len = (u32) le64_to_cpu(sb.bytes_used);
- len = mtd_pad_erasesize(master, offset, len);
+ len = mtd_pad_erasesize(master, offset, squashfs_len);
*split_offset = offset + len;
return 0;
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -20,6 +20,7 @@ config MTD_ROOTFS_ROOT_DEV
config MTD_ROOTFS_SPLIT
bool "Automatically split 'rootfs' partition for squashfs"
+ select MTD_SPLIT
default y
config MTD_SPLIT_FIRMWARE

View File

@ -0,0 +1,83 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -435,14 +435,12 @@ static struct mtd_part *allocate_partiti
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
if (slave->offset == MTDPART_OFS_NXTBLK) {
- slave->offset = cur_offset;
- if (mtd_mod_by_eb(cur_offset, master) != 0) {
- /* Round up to next erasesize */
- slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+ /* Round up to next erasesize */
+ slave->offset = mtd_roundup_to_eb(cur_offset, master);
+ if (slave->offset != cur_offset)
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
(unsigned long long)cur_offset, (unsigned long long)slave->offset);
- }
}
if (slave->offset == MTDPART_OFS_RETAIN) {
slave->offset = cur_offset;
@@ -996,6 +994,24 @@ int mtd_is_partition(const struct mtd_in
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return (struct mtd_info *)mtd;
+
+ return PART(mtd)->master;
+}
+EXPORT_SYMBOL_GPL(mtdpart_get_master);
+
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return 0;
+
+ return PART(mtd)->offset;
+}
+EXPORT_SYMBOL_GPL(mtdpart_get_offset);
+
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -90,6 +90,8 @@ int mtd_is_partition(const struct mtd_in
int mtd_add_partition(struct mtd_info *master, char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd);
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
extern void __weak arch_split_mtd_part(struct mtd_info *master,
const char *name, int offset, int size);
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -334,6 +334,24 @@ static inline uint32_t mtd_mod_by_eb(uin
return do_div(sz, mtd->erasesize);
}
+static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd_mod_by_eb(sz, mtd) == 0)
+ return sz;
+
+ /* Round up to next erase block */
+ return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize;
+}
+
+static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd_mod_by_eb(sz, mtd) == 0)
+ return sz;
+
+ /* Round down to the start of the current erase block */
+ return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize;
+}
+
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)

View File

@ -0,0 +1,30 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -37,6 +37,17 @@ config MTD_UIMAGE_SPLIT
depends on MTD_SPLIT_FIRMWARE
default y
+comment "Rootfs partition parsers"
+
+config MTD_SPLIT_SQUASHFS_ROOT
+ bool "Squashfs based root partition parser"
+ select MTD_SPLIT
+ default n
+ help
+ This provides a parsing function which allows detecting the
+ offset and size of the unused portion of a rootfs partition
+ containing a squashfs.
+
config MTD_SPLIT
def_bool n
help
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
mtd-$(CONFIG_MTD_SPLIT) += mtdsplit.o
+mtd-$(CONFIG_MTD_SPLIT_SQUASHFS_ROOT) += mtdsplit_squashfs.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o

View File

@ -0,0 +1,25 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -48,6 +48,12 @@ config MTD_SPLIT_SQUASHFS_ROOT
offset and size of the unused portion of a rootfs partition
containing a squashfs.
+comment "Firmware partition parsers"
+
+config MTD_SPLIT_UIMAGE_FW
+ bool "uImage based firmware partition parser"
+ select MTD_SPLIT
+
config MTD_SPLIT
def_bool n
help
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -8,6 +8,7 @@ mtd-y := mtdcore.o mtdsuper.o mtdconc
mtd-$(CONFIG_MTD_SPLIT) += mtdsplit.o
mtd-$(CONFIG_MTD_SPLIT_SQUASHFS_ROOT) += mtdsplit_squashfs.o
+mtd-$(CONFIG_MTD_SPLIT_UIMAGE_FW) += mtdsplit_uimage.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o

View File

@ -0,0 +1,23 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -50,6 +50,10 @@ config MTD_SPLIT_SQUASHFS_ROOT
comment "Firmware partition parsers"
+config MTD_SPLIT_SEAMA_FW
+ bool "Seama firmware parser"
+ select MTD_SPLIT
+
config MTD_SPLIT_UIMAGE_FW
bool "uImage based firmware partition parser"
select MTD_SPLIT
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
mtd-$(CONFIG_MTD_SPLIT) += mtdsplit.o
+mtd-$(CONFIG_MTD_SPLIT_SEAMA_FW) += mtdsplit_seama.o
mtd-$(CONFIG_MTD_SPLIT_SQUASHFS_ROOT) += mtdsplit_squashfs.o
mtd-$(CONFIG_MTD_SPLIT_UIMAGE_FW) += mtdsplit_uimage.o

View File

@ -0,0 +1,18 @@
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -35,6 +35,7 @@
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/
+struct mtd_info;
struct mtd_partition {
char *name; /* identifier string */
@@ -50,7 +51,6 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL (0)
-struct mtd_info;
struct device_node;
/**

View File

@ -0,0 +1,145 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -36,6 +36,8 @@
#include "mtdcore.h"
#include "mtdsplit.h"
+#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
+
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -232,13 +234,60 @@ static int part_erase(struct mtd_info *m
struct mtd_part *part = PART(mtd);
int ret;
+
+ instr->partial_start = false;
+ if (mtd->flags & MTD_ERASE_PARTIAL) {
+ size_t readlen = 0;
+ u64 mtd_ofs;
+
+ instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
+ if (!instr->erase_buf)
+ return -ENOMEM;
+
+ mtd_ofs = part->offset + instr->addr;
+ instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);
+
+ if (instr->erase_buf_ofs > 0) {
+ instr->addr -= instr->erase_buf_ofs;
+ ret = mtd_read(part->master,
+ instr->addr + part->offset,
+ part->master->erasesize,
+ &readlen, instr->erase_buf);
+
+ instr->partial_start = true;
+ } else {
+ mtd_ofs = part->offset + part->mtd.size;
+ instr->erase_buf_ofs = part->master->erasesize -
+ do_div(mtd_ofs, part->master->erasesize);
+
+ if (instr->erase_buf_ofs > 0) {
+ instr->len += instr->erase_buf_ofs;
+ ret = mtd_read(part->master,
+ part->offset + instr->addr +
+ instr->len - part->master->erasesize,
+ part->master->erasesize, &readlen,
+ instr->erase_buf);
+ } else {
+ ret = 0;
+ }
+ }
+ if (ret < 0) {
+ kfree(instr->erase_buf);
+ return ret;
+ }
+
+ }
+
instr->addr += part->offset;
ret = part->master->_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
+ if (mtd->flags & MTD_ERASE_PARTIAL)
+ kfree(instr->erase_buf);
}
+
return ret;
}
@@ -246,7 +295,25 @@ void mtd_erase_callback(struct erase_inf
{
if (instr->mtd->_erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
+ size_t wrlen = 0;
+ if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
+ if (instr->partial_start) {
+ part->master->_write(part->master,
+ instr->addr, instr->erase_buf_ofs,
+ &wrlen, instr->erase_buf);
+ instr->addr += instr->erase_buf_ofs;
+ } else {
+ instr->len -= instr->erase_buf_ofs;
+ part->master->_write(part->master,
+ instr->addr + instr->len,
+ instr->erase_buf_ofs, &wrlen,
+ instr->erase_buf +
+ part->master->erasesize -
+ instr->erase_buf_ofs);
+ }
+ kfree(instr->erase_buf);
+ }
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
@@ -504,18 +571,24 @@ static struct mtd_part *allocate_partiti
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->offset, &slave->mtd)) {
/* Doesn't start on a boundary of major erase size */
- /* FIXME: Let it be writable if it is on a boundary of
- * _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
- part->name);
+ slave->mtd.flags |= MTD_ERASE_PARTIAL;
+ if (((u32) slave->mtd.size) > master->erasesize)
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ else
+ slave->mtd.erasesize = slave->mtd.size;
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
- mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
- part->name);
+ mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) {
+ slave->mtd.flags |= MTD_ERASE_PARTIAL;
+
+ if ((u32) slave->mtd.size > master->erasesize)
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ else
+ slave->mtd.erasesize = slave->mtd.size;
}
+ if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL)
+ printk(KERN_WARNING"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
+ part->name);
slave->mtd.ecclayout = master->ecclayout;
slave->mtd.ecc_step_size = master->ecc_step_size;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -58,6 +58,10 @@ struct erase_info {
u_long priv;
u_char state;
struct erase_info *next;
+
+ u8 *erase_buf;
+ u32 erase_buf_ofs;
+ bool partial_start;
};
struct mtd_erase_region_info {

View File

@ -0,0 +1,18 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -332,7 +332,14 @@ static int part_lock(struct mtd_info *mt
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->_unlock(part->master, ofs + part->offset, len);
+
+ ofs += part->offset;
+ if (mtd->flags & MTD_ERASE_PARTIAL) {
+ /* round up len to next erasesize and round down offset to prev block */
+ len = (mtd_div_by_eb(len, part->master) + 1) * part->master->erasesize;
+ ofs &= ~(part->master->erasesize - 1);
+ }
+ return part->master->_unlock(part->master, ofs, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)

View File

@ -0,0 +1,30 @@
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -265,14 +265,21 @@ static int parse_redboot_partitions(stru
#endif
names += strlen(names)+1;
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
- i++;
- parts[i].offset = parts[i-1].size + parts[i-1].offset;
- parts[i].size = fl->next->img->flash_base - parts[i].offset;
- parts[i].name = nullname;
- }
+ if (!strcmp(parts[i].name, "rootfs")) {
+ parts[i].size = fl->next->img->flash_base;
+ parts[i].size &= ~(master->erasesize - 1);
+ parts[i].size -= parts[i].offset;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ nrparts--;
+ } else {
+ i++;
+ parts[i].offset = parts[i-1].size + parts[i-1].offset;
+ parts[i].size = fl->next->img->flash_base - parts[i].offset;
+ parts[i].name = nullname;
#endif
+ }
+ }
tmp_fl = fl;
fl = fl->next;
kfree(tmp_fl);

View File

@ -0,0 +1,35 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -208,6 +208,22 @@ config MTD_BCM47XX_PARTS
This provides partitions parser for devices based on BCM47xx
boards.
+config MTD_MYLOADER_PARTS
+ tristate "MyLoader partition parsing"
+ depends on ADM5120 || ATHEROS_AR231X || ATHEROS_AR71XX || ATH79
+ ---help---
+ MyLoader is a bootloader which allows the user to define partitions
+ in flash devices by putting a table, similar to a partition table,
+ in the second erase block on the device. This table gives the
+ offsets and lengths of the user-defined partitions.
+
+ If you need code which can detect and parse these tables, and
+ register MTD 'partitions' corresponding to each image detected,
+ enable this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically.
+
comment "User Modules And Translation Layers"
config MTD_BLKDEVS
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
+obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
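
As the help text notes, the parser only runs when a flash driver asks for it. A hedged sketch of that call from a board's flash setup code follows; the probe-type string "MyLoader" and the function and variable names are assumptions, while mtd_device_parse_register() is the stock registration helper.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Probe order: try the MyLoader table first, then the command line. */
static const char * const board_part_probes[] = {
        "MyLoader", "cmdlinepart", NULL
};

static int board_flash_register(struct mtd_info *mtd)
{
        /* No default partitions: whatever the parsers find is used. */
        return mtd_device_parse_register(mtd, board_part_probes, NULL, NULL, 0);
}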

View File

@ -0,0 +1,117 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -17,6 +17,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
@@ -209,11 +210,12 @@ static void block2mtd_free_device(struct
/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size)
+static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
{
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
struct block_device *bdev;
struct block2mtd_dev *dev;
+ struct mtd_partition *part;
char *name;
if (!devname)
@@ -252,13 +254,16 @@ static struct block2mtd_dev *add_device(
/* Setup the MTD structure */
/* make the name contain the block device in */
- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
+ if (!mtdname)
+ mtdname = devname;
+ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
if (!name)
goto devinit_err;
+ strcpy(name, mtdname);
dev->mtd.name = name;
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
@@ -271,15 +276,18 @@ static struct block2mtd_dev *add_device(
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
- if (mtd_device_register(&dev->mtd, NULL, 0)) {
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = name;
+ part->offset = 0;
+ part->size = dev->mtd.size;
+ if (mtd_device_register(&dev->mtd, part, 1)) {
/* Device didn't get added, so free the entry */
goto devinit_err;
}
list_add(&dev->list, &blkmtd_device_list);
pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
dev->mtd.index,
- dev->mtd.name + strlen("block2mtd: "),
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
devinit_err:
@@ -346,9 +354,9 @@ static char block2mtd_paramline[80 + 12]
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
+ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
char *str = buf;
- char *token[2];
+ char *token[3];
char *name;
size_t erase_size = PAGE_SIZE;
int i, ret;
@@ -361,7 +369,7 @@ static int block2mtd_setup2(const char *
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 3; i++)
token[i] = strsep(&str, ",");
if (str) {
@@ -387,8 +395,10 @@ static int block2mtd_setup2(const char *
return 0;
}
}
+ if (token[2] && (strlen(token[2]) + 1 > 80))
+ parse_err("mtd device name too long");
- add_device(name, erase_size);
+ add_device(name, erase_size, token[2]);
return 0;
}
@@ -422,7 +432,7 @@ static int block2mtd_setup(const char *v
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
static int __init block2mtd_init(void)
{
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -548,6 +548,7 @@ int invalidate_partitions(struct gendisk
return 0;
}
+EXPORT_SYMBOL(rescan_partitions);
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
{
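
Usage note (the device path and MTD name below are made up for illustration): with this change the module parameter takes an optional third field naming the resulting MTD, e.g. modprobe block2mtd block2mtd=/dev/mmcblk0p2,65536,rootfs_data, or block2mtd.block2mtd=... on the kernel command line when built in. If the name is omitted, add_device() falls back to the block device name, since mtdname defaults to devname.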

View File

@ -0,0 +1,10 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -233,6 +233,7 @@ static struct block2mtd_dev *add_device(
/* We might not have rootfs mounted at this point. Try
to resolve the device name by other means. */
+ wait_for_device_probe();
dev_t devt = name_to_dev_t(devname);
if (devt)
bdev = blkdev_get_by_dev(devt, mode, dev);

View File

@ -0,0 +1,37 @@
---
drivers/mtd/nand/plat_nand.c | 13 ++++++++++++-
include/linux/mtd/nand.h | 1 +
2 files changed, 13 insertions(+), 1 deletion(-)
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -722,6 +722,7 @@ struct platform_nand_chip {
unsigned int options;
unsigned int bbt_options;
const char **part_probe_types;
+ int (*chip_fixup)(struct mtd_info *mtd);
};
/* Keep gcc happy */
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -103,7 +103,18 @@ static int plat_nand_probe(struct platfo
}
/* Scan to find existence of the device */
- if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
+ if (nand_scan_ident(&data->mtd, pdata->chip.nr_chips, NULL)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (pdata->chip.chip_fixup) {
+ err = pdata->chip.chip_fixup(&data->mtd);
+ if (err)
+ goto out;
+ }
+
+ if (nand_scan_tail(&data->mtd)) {
err = -ENXIO;
goto out;
}

View File

@ -0,0 +1,11 @@
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -507,7 +507,7 @@ int __nand_correct_data(unsigned char *b
return 1; /* error in ECC data; no action needed */
pr_err("%s: uncorrectable ECC error", __func__);
- return -1;
+ return -EBADMSG;
}
EXPORT_SYMBOL(__nand_correct_data);

View File

@ -0,0 +1,11 @@
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -780,7 +780,7 @@ static int get_chip(struct map_info *map
return 0;
case FL_ERASING:
- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
!(mode == FL_READY || mode == FL_POINT ||
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;

View File

@ -0,0 +1,18 @@
From: George Kashperko <george@znau.edu.ua>
Issue a map read after the Write Buffer Load command to ensure the chip
is ready to receive data.
Signed-off-by: George Kashperko <george@znau.edu.ua>
---
drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1498,6 +1498,7 @@ static int __xipram do_write_buffer(stru
/* Write Buffer Load */
map_write(map, CMD(0x25), cmd_adr);
+ (void) map_read(map, cmd_adr);
chip->state = FL_WRITING_TO_BUFFER;

View File

@ -0,0 +1,41 @@
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -109,6 +109,14 @@ config MTD_SPEAR_SMI
help
This enable SNOR support on SPEAR platforms using SMI controller
+config M25PXX_PREFER_SMALL_SECTOR_ERASE
+ bool "Prefer small sector erase"
+ depends on MTD_M25P80
+ default y
+ help
+ This option enables use of the small erase sectors if that is
+ supported by the flash chip.
+
config MTD_SST25L
tristate "Support SST25L (non JEDEC) SPI Flash chips"
depends on SPI_MASTER
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -82,6 +82,12 @@
#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+#ifdef CONFIG_M25PXX_PREFER_SMALL_SECTOR_ERASE
+#define PREFER_SMALL_SECTOR_ERASE 1
+#else
+#define PREFER_SMALL_SECTOR_ERASE 0
+#endif
+
/****************************************************************************/
struct m25p {
@@ -1046,7 +1052,7 @@ static int m25p_probe(struct spi_device
flash->mtd._write = m25p80_write;
/* prefer "small sector" erase if possible */
- if (info->flags & SECT_4K) {
+ if (PREFER_SMALL_SECTOR_ERASE && (info->flags & SECT_4K)) {
flash->erase_opcode = OPCODE_BE_4K;
flash->mtd.erasesize = 4096;
} else if (info->flags & SECT_4K_PMC) {

View File

@ -0,0 +1,18 @@
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -39,6 +39,7 @@ source "fs/gfs2/Kconfig"
source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
+source "fs/yaffs2/Kconfig"
endif # BLOCK
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -127,3 +127,5 @@ obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
+

View File

@ -0,0 +1,31 @@
From 2505e8b0a13d3d5c5bbeaaae4eb889864f44c9df Mon Sep 17 00:00:00 2001
From: Charles Manning <cdhmanning@gmail.com>
Date: Thu, 3 Feb 2011 05:55:30 +1300
Subject: [PATCH] yaffs: Fix directory unlinking in yaffs1 mode
commit 964b3425a71890e6701c830e38b04d8557c04f49 upstream.
Treat both yaffs2 and yaffs1 paths the same.
Signed-off-by: Charles Manning <cdhmanning@gmail.com>
---
yaffs_guts.c | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
--- a/fs/yaffs2/yaffs_guts.c
+++ b/fs/yaffs2/yaffs_guts.c
@@ -1708,13 +1708,7 @@ static int yaffs_change_obj_name(yaffs_o
YBUG();
}
- /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
- if (obj->my_dev->param.is_yaffs2)
- unlinkOp = (new_dir == obj->my_dev->unlinked_dir);
- else
- unlinkOp = (new_dir == obj->my_dev->unlinked_dir
- && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
-
+ unlinkOp = (new_dir == obj->my_dev->unlinked_dir);
deleteOp = (new_dir == obj->my_dev->del_dir);
existingTarget = yaffs_find_by_name(new_dir, new_name);

View File

@ -0,0 +1,138 @@
From c0c289363e84c53b5872f7c0c5069045096dca07 Mon Sep 17 00:00:00 2001
From: Charles Manning <cdhmanning@gmail.com>
Date: Wed, 3 Nov 2010 16:01:12 +1300
Subject: [PATCH] yaffs: Switch from semaphores to mutexes
commit 73c54aa8c1de3f61a4c211cd47431293a6092f18 upstream.
Mutex is faster and init_MUTEX has been deprecated, so we'll just switch
to mutexes.
Signed-off-by: Charles Manning <cdhmanning@gmail.com>
---
yaffs_linux.h | 2 +-
yaffs_vfs.c | 24 ++++++++++++------------
yaffs_vfs_multi.c | 26 +++++++++++++-------------
3 files changed, 26 insertions(+), 26 deletions(-)
--- a/fs/yaffs2/yaffs_linux.h
+++ b/fs/yaffs2/yaffs_linux.h
@@ -25,7 +25,7 @@ struct yaffs_LinuxContext {
struct super_block * superBlock;
struct task_struct *bgThread; /* Background thread for this device */
int bgRunning;
- struct semaphore grossLock; /* Gross locking semaphore */
+ struct mutex grossLock; /* Gross locking mutex*/
__u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer
* at compile time so we have to allocate it.
*/
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -515,14 +515,14 @@ static unsigned yaffs_gc_control_callbac
static void yaffs_gross_lock(yaffs_dev_t *dev)
{
T(YAFFS_TRACE_LOCK, (TSTR("yaffs locking %p\n"), current));
- down(&(yaffs_dev_to_lc(dev)->grossLock));
+ mutex_lock(&(yaffs_dev_to_lc(dev)->grossLock));
T(YAFFS_TRACE_LOCK, (TSTR("yaffs locked %p\n"), current));
}
static void yaffs_gross_unlock(yaffs_dev_t *dev)
{
T(YAFFS_TRACE_LOCK, (TSTR("yaffs unlocking %p\n"), current));
- up(&(yaffs_dev_to_lc(dev)->grossLock));
+ mutex_unlock(&(yaffs_dev_to_lc(dev)->grossLock));
}
#ifdef YAFFS_COMPILE_EXPORTFS
@@ -2542,7 +2542,7 @@ static void yaffs_read_inode(struct inod
#endif
static YLIST_HEAD(yaffs_context_list);
-struct semaphore yaffs_context_lock;
+struct mutex yaffs_context_lock;
static void yaffs_put_super(struct super_block *sb)
{
@@ -2568,9 +2568,9 @@ static void yaffs_put_super(struct super
yaffs_gross_unlock(dev);
- down(&yaffs_context_lock);
+ mutex_lock(&yaffs_context_lock);
ylist_del_init(&(yaffs_dev_to_lc(dev)->contextList));
- up(&yaffs_context_lock);
+ mutex_unlock(&yaffs_context_lock);
if (yaffs_dev_to_lc(dev)->spareBuffer) {
YFREE(yaffs_dev_to_lc(dev)->spareBuffer);
@@ -3016,7 +3016,7 @@ static struct super_block *yaffs_interna
param->skip_checkpt_rd = options.skip_checkpoint_read;
param->skip_checkpt_wr = options.skip_checkpoint_write;
- down(&yaffs_context_lock);
+ mutex_lock(&yaffs_context_lock);
/* Get a mount id */
found = 0;
for(mount_id=0; ! found; mount_id++){
@@ -3030,13 +3030,13 @@ static struct super_block *yaffs_interna
context->mount_id = mount_id;
ylist_add_tail(&(yaffs_dev_to_lc(dev)->contextList), &yaffs_context_list);
- up(&yaffs_context_lock);
+ mutex_unlock(&yaffs_context_lock);
/* Directory search handling...*/
YINIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->searchContexts));
param->remove_obj_fn = yaffs_remove_obj_callback;
- init_MUTEX(&(yaffs_dev_to_lc(dev)->grossLock));
+ mutex_init(&(yaffs_dev_to_lc(dev)->grossLock));
yaffs_gross_lock(dev);
@@ -3268,7 +3268,7 @@ static int yaffs_proc_read(char *page,
else {
step-=2;
- down(&yaffs_context_lock);
+ mutex_lock(&yaffs_context_lock);
/* Locate and print the Nth entry. Order N-squared but N is small. */
ylist_for_each(item, &yaffs_context_list) {
@@ -3287,7 +3287,7 @@ static int yaffs_proc_read(char *page,
break;
}
- up(&yaffs_context_lock);
+ mutex_unlock(&yaffs_context_lock);
}
return buf - page < count ? buf - page : count;
@@ -3301,7 +3301,7 @@ static int yaffs_stats_proc_read(char *p
char *buf = page;
int n = 0;
- down(&yaffs_context_lock);
+ mutex_lock(&yaffs_context_lock);
/* Locate and print the Nth entry. Order N-squared but N is small. */
ylist_for_each(item, &yaffs_context_list) {
@@ -3317,7 +3317,7 @@ static int yaffs_stats_proc_read(char *p
dev->bg_gcs, dev->oldest_dirty_gc_count,
dev->n_obj, dev->n_tnodes);
}
- up(&yaffs_context_lock);
+ mutex_unlock(&yaffs_context_lock);
return buf - page < count ? buf - page : count;
@@ -3494,7 +3494,7 @@ static int __init init_yaffs_fs(void)
- init_MUTEX(&yaffs_context_lock);
+ mutex_init(&yaffs_context_lock);
/* Install the proc_fs entries */
my_proc_entry = create_proc_entry("yaffs",

View File

@ -0,0 +1,72 @@
From cd6657c4bde20886b0805ea9f2cbac7ec25ac2e5 Mon Sep 17 00:00:00 2001
From: Charles Manning <cdhmanning@gmail.com>
Date: Tue, 30 Nov 2010 16:01:28 +1300
Subject: [PATCH 1/2] yaffs: Replace yaffs_dir_llseek with Linux generic
llseek
commit ed8188fb7659cfb65b5adbe154d143190ade0324 upstream.
There was not much point in having the yaffs version as it is
functionally equivalent to the kernel one.
This also gets rid of using BKL in yaffs2.
Signed-off-by: Charles Manning <cdhmanning@gmail.com>
---
yaffs_vfs.c | 30 +-----------------------------
yaffs_vfs_multi.c | 30 +-----------------------------
2 files changed, 2 insertions(+), 58 deletions(-)
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -342,8 +342,6 @@ static int yaffs_follow_link(struct dent
static void yaffs_touch_super(yaffs_dev_t *dev);
-static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin);
-
static int yaffs_vfs_setattr(struct inode *, struct iattr *);
@@ -460,7 +458,7 @@ static const struct file_operations yaff
.read = generic_read_dir,
.readdir = yaffs_readdir,
.fsync = yaffs_sync_object,
- .llseek = yaffs_dir_llseek,
+ .llseek = generic_file_llseek,
};
static const struct super_operations yaffs_super_ops = {
@@ -1534,32 +1532,6 @@ static void yaffs_release_space(struct f
}
-static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin)
-{
- long long retval;
-
- lock_kernel();
-
- switch (origin){
- case 2:
- offset += i_size_read(file->f_path.dentry->d_inode);
- break;
- case 1:
- offset += file->f_pos;
- }
- retval = -EINVAL;
-
- if (offset >= 0){
- if (offset != file->f_pos)
- file->f_pos = offset;
-
- retval = offset;
- }
- unlock_kernel();
- return retval;
-}
-
-
static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
{
yaffs_obj_t *obj;

View File

@ -0,0 +1,110 @@
From e1537a700c2e750c5eacc5ad93f30821f1e94424 Mon Sep 17 00:00:00 2001
From: Charles Manning <cdhmanning@gmail.com>
Date: Mon, 15 Aug 2011 11:40:30 +1200
Subject: [PATCH 2/2] Mods for Linux 3.0 and fix a typo
commit a7b5dcf904ba6f7890e4b77ce1f56388b855d0f6 upstream.
Roll in NCB's patch and some other changes for Linux 3.0.
Also fix a dumb typo: retired_writes->retried_writes
Signed-off-by: Charles Manning <cdhmanning@gmail.com>
---
patch-ker.sh | 2 +-
yaffs_vfs_glue.c | 42 ++++++++++++++++++++++++++++++++++--------
2 files changed, 35 insertions(+), 9 deletions(-)
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -72,7 +72,9 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
#include <linux/smp_lock.h>
+#endif
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/interrupt.h>
@@ -236,7 +238,9 @@ static int yaffs_file_flush(struct file
static int yaffs_file_flush(struct file *file);
#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync);
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
static int yaffs_sync_object(struct file *file, int datasync);
#else
static int yaffs_sync_object(struct file *file, struct dentry *dentry,
@@ -1864,7 +1868,9 @@ static int yaffs_symlink(struct inode *d
return -ENOMEM;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
static int yaffs_sync_object(struct file *file, int datasync)
#else
static int yaffs_sync_object(struct file *file, struct dentry *dentry,
@@ -3067,7 +3073,13 @@ static int yaffs_internal_read_super_mtd
return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
static int yaffs_read_super(struct file_system_type *fs,
int flags, const char *dev_name,
void *data, struct vfsmount *mnt)
@@ -3090,8 +3102,12 @@ static struct super_block *yaffs_read_su
static struct file_system_type yaffs_fs_type = {
.owner = THIS_MODULE,
.name = "yaffs",
- .get_sb = yaffs_read_super,
- .kill_sb = kill_block_super,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ .mount = yaffs_mount,
+#else
+ .get_sb = yaffs_read_super,
+#endif
+ .kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
#else
@@ -3115,7 +3131,13 @@ static int yaffs2_internal_read_super_mt
return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
static int yaffs2_read_super(struct file_system_type *fs,
int flags, const char *dev_name, void *data,
struct vfsmount *mnt)
@@ -3137,8 +3159,12 @@ static struct super_block *yaffs2_read_s
static struct file_system_type yaffs2_fs_type = {
.owner = THIS_MODULE,
.name = "yaffs2",
- .get_sb = yaffs2_read_super,
- .kill_sb = kill_block_super,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ .mount = yaffs2_mount,
+#else
+ .get_sb = yaffs2_read_super,
+#endif
+ .kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
#else

View File

@ -0,0 +1,54 @@
--- a/fs/yaffs2/yaffs_mtdif1.c
+++ b/fs/yaffs2/yaffs_mtdif1.c
@@ -127,7 +127,7 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya
#endif
memset(&ops, 0, sizeof(ops));
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = (data) ? chunkBytes : 0;
ops.ooblen = YTAG1_SIZE;
ops.datbuf = (__u8 *)data;
@@ -179,7 +179,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
int deleted;
memset(&ops, 0, sizeof(ops));
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = (data) ? chunkBytes : 0;
ops.ooblen = YTAG1_SIZE;
ops.datbuf = data;
--- a/fs/yaffs2/yaffs_mtdif2.c
+++ b/fs/yaffs2/yaffs_mtdif2.c
@@ -71,7 +71,7 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
yaffs_PackTags2(&pt, tags, !dev->param.no_tags_ecc);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
ops.len = dev->param.total_bytes_per_chunk;
ops.ooboffs = 0;
@@ -136,7 +136,7 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
&dummy, data);
else if (tags) {
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.ooblen = packed_tags_size;
ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
ops.ooboffs = 0;
--- a/fs/yaffs2/yaffs_mtdif.h
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -24,4 +24,11 @@ extern struct nand_oobinfo yaffs_noeccin
#endif
int nandmtd_EraseBlockInNAND(yaffs_dev_t *dev, int blockNumber);
int nandmtd_InitialiseNAND(yaffs_dev_t *dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+#include <mtd/mtd-abi.h>
+#else
+#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
+#endif
+
#endif

View File

@ -0,0 +1,78 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -220,6 +220,29 @@ static struct inode *yaffs_iget(struct s
#define yaffs_SuperToDevice(sb) ((yaffs_dev_t *)sb->u.generic_sbp)
#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+static inline void yaffs_set_nlink(struct inode *inode, unsigned int nlink)
+{
+ set_nlink(inode, nlink);
+}
+
+static inline void yaffs_dec_link_count(struct inode *inode)
+{
+ inode_dec_link_count(inode);
+}
+#else
+static inline void yaffs_set_nlink(struct inode *inode, unsigned int nlink)
+{
+ inode->i_nlink = nlink;
+}
+
+static inline void yaffs_dec_link_count(struct inode *inode)
+{
+ inode->i_nlink--;
+ mark_inode_dirty(inode);
+}
+#endif
+
#define update_dir_time(dir) do {\
(dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
@@ -1362,7 +1385,7 @@ static void yaffs_fill_inode_from_obj(st
inode->i_size = yaffs_get_obj_length(obj);
inode->i_blocks = (inode->i_size + 511) >> 9;
- inode->i_nlink = yaffs_get_obj_link_count(obj);
+ yaffs_set_nlink(inode, yaffs_get_obj_link_count(obj));
T(YAFFS_TRACE_OS,
(TSTR("yaffs_fill_inode mode %x uid %d gid %d size %d count %d\n"),
@@ -1784,10 +1807,9 @@ static int yaffs_unlink(struct inode *di
retVal = yaffs_unlinker(obj, dentry->d_name.name);
if (retVal == YAFFS_OK) {
- dentry->d_inode->i_nlink--;
+ yaffs_dec_link_count(dentry->d_inode);
dir->i_version++;
yaffs_gross_unlock(dev);
- mark_inode_dirty(dentry->d_inode);
update_dir_time(dir);
return 0;
}
@@ -1818,7 +1840,8 @@ static int yaffs_link(struct dentry *old
obj);
if (link) {
- old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj);
+ yaffs_set_nlink(old_dentry->d_inode,
+ yaffs_get_obj_link_count(obj));
d_instantiate(dentry, old_dentry->d_inode);
atomic_inc(&old_dentry->d_inode->i_count);
T(YAFFS_TRACE_OS,
@@ -1937,11 +1960,9 @@ static int yaffs_rename(struct inode *ol
yaffs_gross_unlock(dev);
if (retVal == YAFFS_OK) {
- if (target) {
- new_dentry->d_inode->i_nlink--;
- mark_inode_dirty(new_dentry->d_inode);
- }
-
+ if (target)
+ yaffs_dec_link_count(new_dentry->d_inode);
+
update_dir_time(old_dir);
if(old_dir != new_dir)
update_dir_time(new_dir);

View File

@ -0,0 +1,71 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -273,8 +273,13 @@ static int yaffs_sync_object(struct file
static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ struct nameidata *n);
+#else
static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
struct nameidata *n);
+#endif
static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *n);
#else
@@ -286,9 +291,17 @@ static int yaffs_link(struct dentry *old
static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+#else
static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t dev);
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
dev_t dev);
#else
@@ -1679,7 +1692,10 @@ out:
#define YCRED(x) (x->cred)
#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t rdev)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
dev_t rdev)
#else
@@ -1769,7 +1785,11 @@ static int yaffs_mknod(struct inode *dir
return error;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+#else
static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+#endif
{
int retVal;
T(YAFFS_TRACE_OS, (TSTR("yaffs_mkdir\n")));
@@ -1777,7 +1797,10 @@ static int yaffs_mkdir(struct inode *dir
return retVal;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ struct nameidata *n)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
struct nameidata *n)
#else

View File

@ -0,0 +1,160 @@
--- a/fs/yaffs2/yaffs_mtdif1.c
+++ b/fs/yaffs2/yaffs_mtdif1.c
@@ -133,7 +133,7 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya
ops.datbuf = (__u8 *)data;
ops.oobbuf = (__u8 *)&pt1;
- retval = mtd->write_oob(mtd, addr, &ops);
+ retval = mtd_write_oob(mtd, addr, &ops);
if (retval) {
T(YAFFS_TRACE_MTD,
(TSTR("write_oob failed, chunk %d, mtd error %d"TENDSTR),
@@ -194,7 +194,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
/* Read page and oob using MTD.
* Check status and determine ECC result.
*/
- retval = mtd->read_oob(mtd, addr, &ops);
+ retval = mtd_read_oob(mtd, addr, &ops);
if (retval) {
T(YAFFS_TRACE_MTD,
(TSTR("read_oob failed, chunk %d, mtd error %d"TENDSTR),
@@ -218,7 +218,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
/* fall into... */
default:
rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
- etags->block_bad = (mtd->block_isbad)(mtd, addr);
+ etags->block_bad = mtd_block_isbad(mtd, addr);
return YAFFS_FAIL;
}
@@ -286,7 +286,7 @@ int nandmtd1_MarkNANDBlockBad(struct yaf
T(YAFFS_TRACE_BAD_BLOCKS,(TSTR("marking block %d bad"TENDSTR), block_no));
- retval = mtd->block_markbad(mtd, (loff_t)blocksize * block_no);
+ retval = mtd_block_markbad(mtd, (loff_t)blocksize * block_no);
return (retval) ? YAFFS_FAIL : YAFFS_OK;
}
@@ -336,7 +336,7 @@ int nandmtd1_QueryNANDBlock(struct yaffs
return YAFFS_FAIL;
retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
- etags.block_bad = (mtd->block_isbad)(mtd, addr);
+ etags.block_bad = mtd_block_isbad(mtd, addr);
if (etags.block_bad) {
T(YAFFS_TRACE_BAD_BLOCKS,
(TSTR("block %d is marked bad"TENDSTR), block_no));
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -2607,8 +2607,8 @@ static void yaffs_MTDPutSuper(struct sup
{
struct mtd_info *mtd = yaffs_dev_to_mtd(yaffs_SuperToDevice(sb));
- if (mtd->sync)
- mtd->sync(mtd);
+ if (mtd)
+ mtd_sync(mtd);
put_mtd_device(mtd);
}
--- a/fs/yaffs2/yaffs_mtdif2.c
+++ b/fs/yaffs2/yaffs_mtdif2.c
@@ -77,7 +77,7 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
ops.ooboffs = 0;
ops.datbuf = (__u8 *)data;
ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr;
- retval = mtd->write_oob(mtd, addr, &ops);
+ retval = mtd_write_oob(mtd, addr, &ops);
#else
if (!dev->param.inband_tags) {
@@ -133,7 +133,7 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
if (dev->param.inband_tags || (data && !tags))
- retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
+ retval = mtd_read(mtd, addr, dev->param.total_bytes_per_chunk,
&dummy, data);
else if (tags) {
ops.mode = MTD_OPS_AUTO_OOB;
@@ -142,7 +142,7 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
ops.ooboffs = 0;
ops.datbuf = data;
ops.oobbuf = yaffs_dev_to_lc(dev)->spareBuffer;
- retval = mtd->read_oob(mtd, addr, &ops);
+ retval = mtd_read_oob(mtd, addr, &ops);
}
#else
if (!dev->param.inband_tags && data && tags) {
@@ -201,7 +201,7 @@ int nandmtd2_MarkNANDBlockBad(struct yaf
(TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), block_no));
retval =
- mtd->block_markbad(mtd,
+ mtd_block_markbad(mtd,
block_no * dev->param.chunks_per_block *
dev->param.total_bytes_per_chunk);
@@ -221,7 +221,7 @@ int nandmtd2_QueryNANDBlock(struct yaffs
T(YAFFS_TRACE_MTD,
(TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), block_no));
retval =
- mtd->block_isbad(mtd,
+ mtd_block_isbad(mtd,
block_no * dev->param.chunks_per_block *
dev->param.total_bytes_per_chunk);
--- a/fs/yaffs2/yaffs_mtdif.h
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -31,4 +31,39 @@ int nandmtd_InitialiseNAND(yaffs_dev_t *
#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
+static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *ei)
+{
+ return mtd->erase(mtd, ei);
+}
+
+static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ return mtd->block_markbad(mtd, ofs);
+}
+
+static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ return mtd->block_isbad(mtd, ofs);
+}
+
+static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ return mtd->read_oob(mtd, from, ops);
+}
+
+static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ return mtd->write_oob(mtd, to, ops);
+}
+
+static inline void mtd_sync(struct mtd_info *mtd)
+{
+ if (mtd->sync)
+ mtd->sync(mtd);
+}
+#endif
+
#endif
--- a/fs/yaffs2/yaffs_mtdif.c
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -41,7 +41,7 @@ int nandmtd_EraseBlockInNAND(yaffs_dev_t
ei.callback = NULL;
ei.priv = (u_long) dev;
- retval = mtd->erase(mtd, &ei);
+ retval = mtd_erase(mtd, &ei);
if (retval == 0)
return YAFFS_OK;

View File

@ -0,0 +1,72 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -2793,6 +2793,15 @@ static struct super_block *yaffs_interna
return NULL;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+ T(YAFFS_TRACE_OS, (TSTR(" erase %p\n"), mtd->_erase));
+ T(YAFFS_TRACE_OS, (TSTR(" read %p\n"), mtd->_read));
+ T(YAFFS_TRACE_OS, (TSTR(" write %p\n"), mtd->_write));
+ T(YAFFS_TRACE_OS, (TSTR(" readoob %p\n"), mtd->_read_oob));
+ T(YAFFS_TRACE_OS, (TSTR(" writeoob %p\n"), mtd->_write_oob));
+ T(YAFFS_TRACE_OS, (TSTR(" block_isbad %p\n"), mtd->_block_isbad));
+ T(YAFFS_TRACE_OS, (TSTR(" block_markbad %p\n"), mtd->_block_markbad));
+#else
T(YAFFS_TRACE_OS, (TSTR(" erase %p\n"), mtd->erase));
T(YAFFS_TRACE_OS, (TSTR(" read %p\n"), mtd->read));
T(YAFFS_TRACE_OS, (TSTR(" write %p\n"), mtd->write));
@@ -2800,6 +2809,7 @@ static struct super_block *yaffs_interna
T(YAFFS_TRACE_OS, (TSTR(" writeoob %p\n"), mtd->write_oob));
T(YAFFS_TRACE_OS, (TSTR(" block_isbad %p\n"), mtd->block_isbad));
T(YAFFS_TRACE_OS, (TSTR(" block_markbad %p\n"), mtd->block_markbad));
+#endif
T(YAFFS_TRACE_OS, (TSTR(" %s %d\n"), WRITE_SIZE_STR, WRITE_SIZE(mtd)));
T(YAFFS_TRACE_OS, (TSTR(" oobsize %d\n"), mtd->oobsize));
T(YAFFS_TRACE_OS, (TSTR(" erasesize %d\n"), mtd->erasesize));
@@ -2828,6 +2838,15 @@ static struct super_block *yaffs_interna
if (yaffs_version == 2) {
/* Check for version 2 style functions */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+ if (!mtd->_erase ||
+ !mtd->_block_isbad ||
+ !mtd->_block_markbad ||
+ !mtd->_read ||
+ !mtd->_write ||
+ !mtd->_read_oob ||
+ !mtd->_write_oob) {
+#else
if (!mtd->erase ||
!mtd->block_isbad ||
!mtd->block_markbad ||
@@ -2839,6 +2858,7 @@ static struct super_block *yaffs_interna
!mtd->write_ecc ||
!mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
#endif
+#endif
T(YAFFS_TRACE_ALWAYS,
(TSTR("yaffs: MTD device does not support required "
"functions\n")));
@@ -2855,6 +2875,13 @@ static struct super_block *yaffs_interna
}
} else {
/* Check for V1 style functions */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+ if (!mtd->_erase ||
+ !mtd->_read ||
+ !mtd->_write ||
+ !mtd->_read_oob ||
+ !mtd->_write_oob) {
+#else
if (!mtd->erase ||
!mtd->read ||
!mtd->write ||
@@ -2864,6 +2891,7 @@ static struct super_block *yaffs_interna
!mtd->write_ecc ||
!mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
#endif
+#endif
T(YAFFS_TRACE_ALWAYS,
(TSTR("yaffs: MTD device does not support required "
"functions\n")));

View File

@ -0,0 +1,14 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -3119,7 +3119,11 @@ static struct super_block *yaffs_interna
T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: got root inode\n")));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+ root = d_make_root(inode);
+#else
root = d_alloc_root(inode);
+#endif
T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: d_alloc_root done\n")));

View File

@ -0,0 +1,15 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -924,7 +924,11 @@ static void yaffs_evict_inode( struct in
if (!inode->i_nlink && !is_bad_inode(inode))
deleteme = 1;
truncate_inode_pages(&inode->i_data,0);
- end_writeback(inode);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+ clear_inode(inode);
+#else
+ end_writeback(inode);
+#endif
if(deleteme && obj){
dev = obj->my_dev;

View File

@ -0,0 +1,570 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -243,11 +243,10 @@ static inline void yaffs_dec_link_count(
}
#endif
-
#define update_dir_time(dir) do {\
(dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
} while(0)
-
+
static void yaffs_put_super(struct super_block *sb);
static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
@@ -397,6 +396,33 @@ static struct address_space_operations y
#endif
};
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#define YCRED_FSUID() from_kuid(&init_user_ns, current_fsuid())
+#define YCRED_FSGID() from_kgid(&init_user_ns, current_fsgid())
+#else
+#define YCRED_FSUID() YCRED(current)->fsuid
+#define YCRED_FSGID() YCRED(current)->fsgid
+
+static inline uid_t i_uid_read(const struct inode *inode)
+{
+ return inode->i_uid;
+}
+
+static inline gid_t i_gid_read(const struct inode *inode)
+{
+ return inode->i_gid;
+}
+
+static inline void i_uid_write(struct inode *inode, uid_t uid)
+{
+ inode->i_uid = uid;
+}
+
+static inline void i_gid_write(struct inode *inode, gid_t gid)
+{
+ inode->i_gid = gid;
+}
+#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
static const struct file_operations yaffs_file_operations = {
@@ -549,7 +575,7 @@ static unsigned yaffs_gc_control_callbac
{
return yaffs_gc_control;
}
-
+
static void yaffs_gross_lock(yaffs_dev_t *dev)
{
T(YAFFS_TRACE_LOCK, (TSTR("yaffs locking %p\n"), current));
@@ -1379,8 +1405,8 @@ static void yaffs_fill_inode_from_obj(st
inode->i_ino = obj->obj_id;
inode->i_mode = obj->yst_mode;
- inode->i_uid = obj->yst_uid;
- inode->i_gid = obj->yst_gid;
+ i_uid_write(inode, obj->yst_uid);
+ i_gid_write(inode, obj->yst_gid);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
inode->i_blksize = inode->i_sb->s_blocksize;
#endif
@@ -1406,7 +1432,7 @@ static void yaffs_fill_inode_from_obj(st
T(YAFFS_TRACE_OS,
(TSTR("yaffs_fill_inode mode %x uid %d gid %d size %d count %d\n"),
- inode->i_mode, inode->i_uid, inode->i_gid,
+ inode->i_mode, i_uid_read(inode), i_gid_read(inode),
(int)inode->i_size, atomic_read(&inode->i_count)));
switch (obj->yst_mode & S_IFMT) {
@@ -1715,8 +1741,8 @@ static int yaffs_mknod(struct inode *dir
yaffs_obj_t *parent = yaffs_InodeToObject(dir);
int error = -ENOSPC;
- uid_t uid = YCRED(current)->fsuid;
- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+ uid_t uid = YCRED_FSUID();
+ gid_t gid = (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
mode |= S_ISGID;
@@ -1892,8 +1918,8 @@ static int yaffs_symlink(struct inode *d
{
yaffs_obj_t *obj;
yaffs_dev_t *dev;
- uid_t uid = YCRED(current)->fsuid;
- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+ uid_t uid = YCRED_FSUID();
+ gid_t gid = (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
T(YAFFS_TRACE_OS, (TSTR("yaffs_symlink\n")));
@@ -2009,7 +2035,7 @@ static int yaffs_setattr(struct dentry *
(TSTR("yaffs_setattr of object %d\n"),
yaffs_InodeToObject(inode)->obj_id));
- /* Fail if a requested resize >= 2GB */
+ /* Fail if a requested resize >= 2GB */
if (attr->ia_valid & ATTR_SIZE &&
(attr->ia_size >> 31))
error = -EINVAL;
@@ -2240,7 +2266,7 @@ static void yaffs_flush_inodes(struct su
{
struct inode *iptr;
yaffs_obj_t *obj;
-
+
list_for_each_entry(iptr,&sb->s_inodes, i_sb_list){
obj = yaffs_InodeToObject(iptr);
if(obj){
@@ -2254,10 +2280,10 @@ static void yaffs_flush_inodes(struct su
static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
{
- yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
+ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
if(!dev)
return;
-
+
yaffs_flush_inodes(sb);
yaffs_update_dirty_dirs(dev);
yaffs_flush_whole_cache(dev);
@@ -2325,7 +2351,7 @@ static int yaffs_do_sync_fs(struct super
* yaffs_bg_start() launches the background thread.
* yaffs_bg_stop() cleans up the background thread.
*
- * NB:
+ * NB:
* The thread should only run after the yaffs is initialised
* The thread should be stopped before yaffs is unmounted.
* The thread should not do any writing while the fs is in read only.
@@ -2924,7 +2950,7 @@ static struct super_block *yaffs_interna
dev = kmalloc(sizeof(yaffs_dev_t), GFP_KERNEL);
context = kmalloc(sizeof(struct yaffs_LinuxContext),GFP_KERNEL);
-
+
if(!dev || !context ){
if(dev)
kfree(dev);
@@ -2957,7 +2983,7 @@ static struct super_block *yaffs_interna
#else
sb->u.generic_sbp = dev;
#endif
-
+
dev->driver_context = mtd;
param->name = mtd->name;
@@ -3057,7 +3083,7 @@ static struct super_block *yaffs_interna
param->gc_control = yaffs_gc_control_callback;
yaffs_dev_to_lc(dev)->superBlock= sb;
-
+
#ifndef CONFIG_YAFFS_DOES_ECC
param->use_nand_ecc = 1;
@@ -3099,10 +3125,10 @@ static struct super_block *yaffs_interna
T(YAFFS_TRACE_OS,
(TSTR("yaffs_read_super: guts initialised %s\n"),
(err == YAFFS_OK) ? "OK" : "FAILED"));
-
+
if(err == YAFFS_OK)
yaffs_bg_start(dev);
-
+
if(!context->bgThread)
param->defered_dir_update = 0;
@@ -3345,7 +3371,7 @@ static int yaffs_proc_read(char *page,
buf += sprintf(buf,"\n");
else {
step-=2;
-
+
mutex_lock(&yaffs_context_lock);
/* Locate and print the Nth entry. Order N-squared but N is small. */
@@ -3362,7 +3388,7 @@ static int yaffs_proc_read(char *page,
buf = yaffs_dump_dev_part0(buf, dev);
} else
buf = yaffs_dump_dev_part1(buf, dev);
-
+
break;
}
mutex_unlock(&yaffs_context_lock);
@@ -3389,7 +3415,7 @@ static int yaffs_stats_proc_read(char *p
int erasedChunks;
erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
-
+
buf += sprintf(buf,"%d, %d, %d, %u, %u, %u, %u\n",
n, dev->n_free_chunks, erasedChunks,
dev->bg_gcs, dev->oldest_dirty_gc_count,
--- a/fs/yaffs2/yaffs_guts.c
+++ b/fs/yaffs2/yaffs_guts.c
@@ -370,7 +370,7 @@ static int yaffs_verify_chunk_written(ya
yaffs_ext_tags tempTags;
__u8 *buffer = yaffs_get_temp_buffer(dev,__LINE__);
int result;
-
+
result = yaffs_rd_chunk_tags_nand(dev,nand_chunk,buffer,&tempTags);
if(memcmp(buffer,data,dev->data_bytes_per_chunk) ||
tempTags.obj_id != tags->obj_id ||
@@ -424,7 +424,7 @@ static int yaffs_write_new_chunk(struct
* lot of checks that are most likely not needed.
*
* Mods to the above
- * If an erase check fails or the write fails we skip the
+ * If an erase check fails or the write fails we skip the
* rest of the block.
*/
@@ -486,7 +486,7 @@ static int yaffs_write_new_chunk(struct
}
-
+
/*
* Block retiring for handling a broken block.
*/
@@ -496,7 +496,7 @@ static void yaffs_retire_block(yaffs_dev
yaffs_block_info_t *bi = yaffs_get_block_info(dev, flash_block);
yaffs2_checkpt_invalidate(dev);
-
+
yaffs2_clear_oldest_dirty_seq(dev,bi);
if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
@@ -899,7 +899,7 @@ static int yaffs_find_chunk_in_group(yaf
for (j = 0; theChunk && j < dev->chunk_grp_size; j++) {
if (yaffs_check_chunk_bit(dev, theChunk / dev->param.chunks_per_block,
theChunk % dev->param.chunks_per_block)) {
-
+
if(dev->chunk_grp_size == 1)
return theChunk;
else {
@@ -1802,7 +1802,7 @@ int yaffs_rename_obj(yaffs_obj_t *old_di
yaffs_update_parent(old_dir);
if(new_dir != old_dir)
yaffs_update_parent(new_dir);
-
+
return result;
}
return YAFFS_FAIL;
@@ -2125,7 +2125,7 @@ static int yaffs_gc_block(yaffs_dev_t *d
if(bi->block_state == YAFFS_BLOCK_STATE_FULL)
bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
-
+
bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
dev->gc_disable = 1;
@@ -2207,7 +2207,7 @@ static int yaffs_gc_block(yaffs_dev_t *d
* No need to copy this, just forget about it and
* fix up the object.
*/
-
+
/* Free chunks already includes softdeleted chunks.
* How ever this chunk is going to soon be really deleted
* which will increment free chunks.
@@ -2752,7 +2752,7 @@ int yaffs_put_chunk_in_file(yaffs_obj_t
NULL);
if (!tn)
return YAFFS_FAIL;
-
+
if(!nand_chunk)
/* Dummy insert, bail now */
return YAFFS_OK;
@@ -2881,7 +2881,7 @@ void yaffs_chunk_del(yaffs_dev_t *dev, i
chunk_id));
bi = yaffs_get_block_info(dev, block);
-
+
yaffs2_update_oldest_dirty_seq(dev, block, bi);
T(YAFFS_TRACE_DELETION,
@@ -2966,8 +2966,8 @@ static int yaffs_wr_data_obj(yaffs_obj_t
(TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), n_bytes));
YBUG();
}
-
-
+
+
newChunkId =
yaffs_write_new_chunk(dev, buffer, &newTags,
useReserve);
@@ -3795,14 +3795,14 @@ int yaffs_resize_file(yaffs_obj_t *in, l
if (new_size == oldFileSize)
return YAFFS_OK;
-
+
if(new_size > oldFileSize){
yaffs2_handle_hole(in,new_size);
in->variant.file_variant.file_size = new_size;
} else {
- /* new_size < oldFileSize */
+ /* new_size < oldFileSize */
yaffs_resize_file_down(in, new_size);
- }
+ }
/* Write a new object header to reflect the resize.
* show we've shrunk the file, if need be
@@ -4231,7 +4231,7 @@ static void yaffs_strip_deleted_objs(yaf
* This fixes the problem where directories might have inadvertently been deleted
* leaving the object "hanging" without being rooted in the directory tree.
*/
-
+
static int yaffs_has_null_parent(yaffs_dev_t *dev, yaffs_obj_t *obj)
{
return (obj == dev->del_dir ||
@@ -4262,7 +4262,7 @@ static void yaffs_fix_hanging_objs(yaffs
if (lh) {
obj = ylist_entry(lh, yaffs_obj_t, hash_link);
parent= obj->parent;
-
+
if(yaffs_has_null_parent(dev,obj)){
/* These directories are not hanging */
hanging = 0;
@@ -4311,7 +4311,7 @@ static void yaffs_del_dir_contents(yaffs
if(dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
YBUG();
-
+
ylist_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
if (lh) {
obj = ylist_entry(lh, yaffs_obj_t, siblings);
@@ -4325,10 +4325,10 @@ static void yaffs_del_dir_contents(yaffs
/* Need to use UnlinkObject since Delete would not handle
* hardlinked objects correctly.
*/
- yaffs_unlink_obj(obj);
+ yaffs_unlink_obj(obj);
}
}
-
+
}
static void yaffs_empty_l_n_f(yaffs_dev_t *dev)
@@ -4410,7 +4410,7 @@ static void yaffs_check_obj_details_load
* If the directory updating is defered then yaffs_update_dirty_dirs must be
* called periodically.
*/
-
+
static void yaffs_update_parent(yaffs_obj_t *obj)
{
yaffs_dev_t *dev;
@@ -4422,8 +4422,8 @@ static void yaffs_update_parent(yaffs_ob
obj->dirty = 1;
obj->yst_mtime = obj->yst_ctime = Y_CURRENT_TIME;
if(dev->param.defered_dir_update){
- struct ylist_head *link = &obj->variant.dir_variant.dirty;
-
+ struct ylist_head *link = &obj->variant.dir_variant.dirty;
+
if(ylist_empty(link)){
ylist_add(link,&dev->dirty_dirs);
T(YAFFS_TRACE_BACKGROUND, (TSTR("Added object %d to dirty directories" TENDSTR),obj->obj_id));
@@ -4446,7 +4446,7 @@ void yaffs_update_dirty_dirs(yaffs_dev_t
while(!ylist_empty(&dev->dirty_dirs)){
link = dev->dirty_dirs.next;
ylist_del_init(link);
-
+
dS=ylist_entry(link,yaffs_dir_s,dirty);
oV = ylist_entry(dS,yaffs_obj_variant,dir_variant);
obj = ylist_entry(oV,yaffs_obj_t,variant);
@@ -4474,7 +4474,7 @@ static void yaffs_remove_obj_from_dir(ya
ylist_del_init(&obj->siblings);
obj->parent = NULL;
-
+
yaffs_verify_dir(parent);
}
@@ -4645,7 +4645,7 @@ yaffs_obj_t *yaffs_get_equivalent_obj(ya
* system to share files.
*
* These automatic unicode are stored slightly differently...
- * - If the name can fit in the ASCII character space then they are saved as
+ * - If the name can fit in the ASCII character space then they are saved as
* ascii names as per above.
* - If the name needs Unicode then the name is saved in Unicode
* starting at oh->name[1].
@@ -4686,7 +4686,7 @@ static void yaffs_load_name_from_oh(yaff
asciiOhName++;
n--;
}
- } else
+ } else
yaffs_strncpy(name,ohName+1, bufferSize -1);
} else
#endif
@@ -4705,7 +4705,7 @@ static void yaffs_load_oh_from_name(yaff
isAscii = 1;
w = name;
-
+
/* Figure out if the name will fit in ascii character set */
while(isAscii && *w){
if((*w) & 0xff00)
@@ -4729,7 +4729,7 @@ static void yaffs_load_oh_from_name(yaff
yaffs_strncpy(ohName+1,name, YAFFS_MAX_NAME_LENGTH -2);
}
}
- else
+ else
#endif
yaffs_strncpy(ohName,name, YAFFS_MAX_NAME_LENGTH - 1);
@@ -4738,12 +4738,12 @@ static void yaffs_load_oh_from_name(yaff
int yaffs_get_obj_name(yaffs_obj_t * obj, YCHAR * name, int buffer_size)
{
memset(name, 0, buffer_size * sizeof(YCHAR));
-
+
yaffs_check_obj_details_loaded(obj);
if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
- }
+ }
#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
else if (obj->short_name[0]) {
yaffs_strcpy(name, obj->short_name);
@@ -4861,9 +4861,9 @@ int yaffs_set_attribs(yaffs_obj_t *obj,
if (valid & ATTR_MODE)
obj->yst_mode = attr->ia_mode;
if (valid & ATTR_UID)
- obj->yst_uid = attr->ia_uid;
+ obj->yst_uid = ia_uid_read(attr);
if (valid & ATTR_GID)
- obj->yst_gid = attr->ia_gid;
+ obj->yst_gid = ia_gid_read(attr);
if (valid & ATTR_ATIME)
obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
@@ -4886,9 +4886,9 @@ int yaffs_get_attribs(yaffs_obj_t *obj,
attr->ia_mode = obj->yst_mode;
valid |= ATTR_MODE;
- attr->ia_uid = obj->yst_uid;
+ ia_uid_write(attr, obj->yst_uid);
valid |= ATTR_UID;
- attr->ia_gid = obj->yst_gid;
+ ia_gid_write(attr, obj->yst_gid);
valid |= ATTR_GID;
Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
--- a/fs/yaffs2/yportenv.h
+++ b/fs/yaffs2/yportenv.h
@@ -170,7 +170,7 @@
#define O_RDWR 02
#endif
-#ifndef O_CREAT
+#ifndef O_CREAT
#define O_CREAT 0100
#endif
@@ -218,7 +218,7 @@
#define EACCES 13
#endif
-#ifndef EXDEV
+#ifndef EXDEV
#define EXDEV 18
#endif
@@ -281,7 +281,7 @@
#define S_IFREG 0100000
#endif
-#ifndef S_IREAD
+#ifndef S_IREAD
#define S_IREAD 0000400
#endif
--- a/fs/yaffs2/devextras.h
+++ b/fs/yaffs2/devextras.h
@@ -87,6 +87,8 @@ struct iattr {
unsigned int ia_attr_flags;
};
+/* TODO: add ia_* functions */
+
#endif
#else
@@ -95,7 +97,48 @@ struct iattr {
#include <linux/fs.h>
#include <linux/stat.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+static inline uid_t ia_uid_read(const struct iattr *iattr)
+{
+ return from_kuid(&init_user_ns, iattr->ia_uid);
+}
+
+static inline gid_t ia_gid_read(const struct iattr *iattr)
+{
+ return from_kgid(&init_user_ns, iattr->ia_gid);
+}
+
+static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
+{
+ iattr->ia_uid = make_kuid(&init_user_ns, uid);
+}
+
+static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
+{
+ iattr->ia_gid = make_kgid(&init_user_ns, gid);
+}
+#else
+static inline uid_t ia_uid_read(const struct iattr *iattr)
+{
+ return iattr->ia_uid;
+}
+
+static inline gid_t ia_gid_read(const struct iattr *iattr)
+{
+ return iattr->ia_gid;
+}
+
+static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
+{
+ iattr->ia_uid = uid;
+}
+
+static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
+{
+ iattr->ia_gid = gid;
+}
#endif
+#endif
#endif
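
The >= 3.5 branch above exists because ia_uid/ia_gid became kuid_t/kgid_t once user namespaces landed, so a raw integer id has to be mapped through a namespace before it can be stored on flash. A minimal sketch of that round trip, assuming the initial namespace and using hypothetical example_* helpers rather than anything from the patch:

#include <linux/fs.h>		/* struct iattr */
#include <linux/uidgid.h>	/* kuid_t, from_kuid(), make_kuid(), init_user_ns */

/* Hypothetical helper: pull the raw uid out of a chown request. */
static inline u32 example_uid_to_media(const struct iattr *attr)
{
	return from_kuid(&init_user_ns, attr->ia_uid);
}

/* Hypothetical helper: rebuild the kuid_t when reading the id back from media. */
static inline void example_uid_from_media(struct iattr *attr, u32 raw_uid)
{
	attr->ia_uid = make_kuid(&init_user_ns, raw_uid);
}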

View File

@@ -0,0 +1,60 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -271,20 +271,29 @@ static int yaffs_sync_object(struct file
static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *n);
-#else
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
struct nameidata *n);
+#else
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags);
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *n);
#else
-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
#endif
+
static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry);
static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
@@ -837,7 +846,10 @@ struct inode *yaffs_get_inode(struct sup
/*
* Lookup is used to find objects in the fs
*/
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *n)
@@ -1827,7 +1839,10 @@ static int yaffs_mkdir(struct inode *dir
return retVal;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *n)
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))

View File

@@ -0,0 +1,180 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -393,6 +393,84 @@ static void yaffs_touch_super(yaffs_dev_
static int yaffs_vfs_setattr(struct inode *, struct iattr *);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev_s *)sb->s_fs_info)
+
+static inline struct yaffs_LinuxContext *
+yaffs_sb_to_ylc(struct super_block *sb)
+{
+ struct yaffs_dev_s *ydev;
+ struct yaffs_LinuxContext *ylc;
+
+ ydev = yaffs_super_to_dev(sb);
+ ylc = yaffs_dev_to_lc(ydev);
+ return ylc;
+}
+
+static inline struct super_block *yaffs_work_to_sb(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct yaffs_LinuxContext *ylc;
+
+ dwork = container_of(work, struct delayed_work, work);
+ ylc = container_of(dwork, struct yaffs_LinuxContext, sb_sync_dwork);
+ return ylc->superBlock;
+}
+
+static void yaffs_sb_sync_dwork_func(struct work_struct *work)
+{
+ struct super_block *sb = yaffs_work_to_sb(work);
+
+ yaffs_write_super(sb);
+}
+
+static void yaffs_init_sb_sync_dwork(struct yaffs_LinuxContext *ylc)
+{
+ INIT_DELAYED_WORK(&ylc->sb_sync_dwork, yaffs_sb_sync_dwork_func);
+}
+
+static void yaffs_cancel_sb_sync_dwork(struct super_block *sb)
+{
+ struct yaffs_LinuxContext *ylc = yaffs_sb_to_ylc(sb);
+
+ cancel_delayed_work_sync(&ylc->sb_sync_dwork);
+}
+
+static inline bool yaffs_sb_is_dirty(struct super_block *sb)
+{
+ struct yaffs_LinuxContext *ylc = yaffs_sb_to_ylc(sb);
+
+ return !!ylc->sb_dirty;
+}
+
+static inline void yaffs_sb_set_dirty(struct super_block *sb, int dirty)
+{
+ struct yaffs_LinuxContext *ylc = yaffs_sb_to_ylc(sb);
+
+ if (ylc->sb_dirty == dirty)
+ return;
+
+ ylc->sb_dirty = dirty;
+ if (dirty)
+ queue_delayed_work(system_long_wq, &ylc->sb_sync_dwork,
+ msecs_to_jiffies(5000));
+}
+#else
+static inline bool yaffs_sb_is_dirty(struct super_block *sb)
+{
+ return !!sb->s_dirt;
+}
+
+static inline void yaffs_sb_set_dirty(struct super_block *sb, int dirty)
+{
+ sb->s_dirt = dirty;
+}
+
+static inline void yaffs_init_sb_sync_dwork(struct yaffs_LinuxContext *ylc) {}
+static inline void yaffs_cancel_sb_sync_dwork(struct super_block *sb) {}
+#endif /* >= 3.6.0 */
+
static struct address_space_operations yaffs_file_address_operations = {
.readpage = yaffs_readpage,
.writepage = yaffs_writepage,
@@ -553,7 +631,9 @@ static const struct super_operations yaf
.clear_inode = yaffs_clear_inode,
#endif
.sync_fs = yaffs_sync_fs,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
.write_super = yaffs_write_super,
+#endif
};
@@ -2340,7 +2420,7 @@ static int yaffs_do_sync_fs(struct super
T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
(TSTR("yaffs_do_sync_fs: gc-urgency %d %s %s%s\n"),
gc_urgent,
- sb->s_dirt ? "dirty" : "clean",
+ yaffs_sb_is_dirty(sb) ? "dirty" : "clean",
request_checkpoint ? "checkpoint requested" : "no checkpoint",
oneshot_checkpoint ? " one-shot" : "" ));
@@ -2349,9 +2429,9 @@ static int yaffs_do_sync_fs(struct super
oneshot_checkpoint) &&
!dev->is_checkpointed;
- if (sb->s_dirt || do_checkpoint) {
+ if (yaffs_sb_is_dirty(sb) || do_checkpoint) {
yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
- sb->s_dirt = 0;
+ yaffs_sb_set_dirty(sb, 0);
if(oneshot_checkpoint)
yaffs_auto_checkpoint &= ~4;
}
@@ -2627,6 +2707,8 @@ static void yaffs_put_super(struct super
yaffs_flush_super(sb,1);
+ yaffs_cancel_sb_sync_dwork(sb);
+
if (yaffs_dev_to_lc(dev)->putSuperFunc)
yaffs_dev_to_lc(dev)->putSuperFunc(sb);
@@ -2665,7 +2747,7 @@ static void yaffs_touch_super(yaffs_dev_
T(YAFFS_TRACE_OS, (TSTR("yaffs_touch_super() sb = %p\n"), sb));
if (sb)
- sb->s_dirt = 1;
+ yaffs_sb_set_dirty(sb, 1);
}
typedef struct {
@@ -2991,6 +3073,8 @@ static struct super_block *yaffs_interna
context->dev = dev;
context->superBlock = sb;
+ yaffs_init_sb_sync_dwork(context);
+
dev->read_only = read_only;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
@@ -3177,7 +3261,7 @@ static struct super_block *yaffs_interna
return NULL;
}
sb->s_root = root;
- sb->s_dirt = !dev->is_checkpointed;
+ yaffs_sb_set_dirty(sb, !dev->is_checkpointed);
T(YAFFS_TRACE_ALWAYS,
(TSTR("yaffs_read_super: is_checkpointed %d\n"),
dev->is_checkpointed));
--- a/fs/yaffs2/yaffs_linux.h
+++ b/fs/yaffs2/yaffs_linux.h
@@ -34,6 +34,11 @@ struct yaffs_LinuxContext {
struct task_struct *readdirProcess;
unsigned mount_id;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ struct delayed_work sb_sync_dwork; /* superblock write-out work */
+ int sb_dirty; /* superblock is dirty */
+#endif
};
#define yaffs_dev_to_lc(dev) ((struct yaffs_LinuxContext *)((dev)->os_context))
--- a/fs/yaffs2/yportenv.h
+++ b/fs/yaffs2/yportenv.h
@@ -49,6 +49,9 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+#include <linux/workqueue.h>
+#endif
#define YCHAR char
#define YUCHAR unsigned char
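
The heart of the >= 3.6 path above is the plain delayed-work pattern that replaced ->write_super and s_dirt. A stripped-down sketch of that pattern, using hypothetical example_* names rather than the yaffs structures, and assuming the same 5 second coalescing window the patch picks:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_sync_ctx {
	struct delayed_work sync_dwork;
	int dirty;
};

static void example_sync_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct example_sync_ctx *ctx =
		container_of(dwork, struct example_sync_ctx, sync_dwork);

	/* Flush whatever is dirty here, then clear the flag. */
	ctx->dirty = 0;
}

static void example_init(struct example_sync_ctx *ctx)
{
	ctx->dirty = 0;
	INIT_DELAYED_WORK(&ctx->sync_dwork, example_sync_fn);
}

static void example_mark_dirty(struct example_sync_ctx *ctx)
{
	if (ctx->dirty)
		return;
	ctx->dirty = 1;
	/* Coalesce write-outs: run once, roughly 5 s after the first dirtying. */
	queue_delayed_work(system_long_wq, &ctx->sync_dwork,
			   msecs_to_jiffies(5000));
}

static void example_teardown(struct example_sync_ctx *ctx)
{
	cancel_delayed_work_sync(&ctx->sync_dwork);
}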

View File

@@ -0,0 +1,45 @@
--- a/fs/yaffs2/yaffs_vfs_glue.c
+++ b/fs/yaffs2/yaffs_vfs_glue.c
@@ -3385,6 +3385,7 @@ static DECLARE_FSTYPE(yaffs2_fs_type, "y
#endif /* CONFIG_YAFFS_YAFFS2 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
static struct proc_dir_entry *my_proc_entry;
static struct proc_dir_entry *debug_proc_entry;
@@ -3668,6 +3669,7 @@ static int yaffs_proc_write(struct file
{
return yaffs_proc_write_trace_options(file, buf, count, data);
}
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
/* Stuff to handle installation of file systems */
struct file_system_to_install {
@@ -3699,6 +3701,7 @@ static int __init init_yaffs_fs(void)
mutex_init(&yaffs_context_lock);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
/* Install the proc_fs entries */
my_proc_entry = create_proc_entry("yaffs",
S_IRUGO | S_IFREG,
@@ -3721,6 +3724,7 @@ static int __init init_yaffs_fs(void)
debug_proc_entry->data = NULL;
} else
return -ENOMEM;
+#endif
/* Now add the file system entries */
@@ -3757,8 +3761,10 @@ static void __exit exit_yaffs_fs(void)
T(YAFFS_TRACE_ALWAYS,
(TSTR("yaffs built " __DATE__ " " __TIME__ " removing. \n")));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
remove_proc_entry("yaffs", YPROC_ROOT);
remove_proc_entry("yaffs_stats", YPROC_ROOT);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
fsinst = fs_to_install;

View File

@@ -0,0 +1,25 @@
From f31b7c0efa255dd17a5f584022a319387f09b0d8 Mon Sep 17 00:00:00 2001
From: Jonas Gorski <jonas.gorski@gmail.com>
Date: Tue, 12 Apr 2011 19:55:41 +0200
Subject: [PATCH] squashfs: update xz compressor options struct.
Update the xz compressor options struct to match the squashfs userspace
one.
---
fs/squashfs/xz_wrapper.c | 4 +++-
1 files changed, 3 insertions(+), 1 deletions(-)
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -39,8 +39,10 @@ struct squashfs_xz {
};
struct comp_opts {
- __le32 dictionary_size;
__le32 flags;
+ __le16 bit_opts;
+ __le16 fb;
+ __le32 dictionary_size;
};
static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,53 @@
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -114,6 +114,16 @@ static int jffs2_build_filesystem(struct
dbg_fsbuild("scanned flash completely\n");
jffs2_dbg_dump_block_lists_nolock(c);
+ if (c->flags & (1 << 7)) {
+ printk("%s(): unlocking the mtd device... ", __func__);
+ mtd_unlock(c->mtd, 0, c->mtd->size);
+ printk("done.\n");
+
+ printk("%s(): erasing all blocks after the end marker... ", __func__);
+ jffs2_erase_pending_blocks(c, -1);
+ printk("done.\n");
+ }
+
dbg_fsbuild("pass 1 starting\n");
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -148,8 +148,11 @@ int jffs2_scan_medium(struct jffs2_sb_in
/* reset summary info for next eraseblock scan */
jffs2_sum_reset_collected(s);
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
- buf_size, s);
+ if (c->flags & (1 << 7))
+ ret = BLK_STATE_ALLFF;
+ else
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+ buf_size, s);
if (ret < 0)
goto out;
@@ -556,6 +559,17 @@ static int jffs2_scan_eraseblock (struct
return err;
}
+ if ((buf[0] == 0xde) &&
+ (buf[1] == 0xad) &&
+ (buf[2] == 0xc0) &&
+ (buf[3] == 0xde)) {
+ /* end of filesystem. erase everything after this point */
+ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
+ c->flags |= (1 << 7);
+
+ return BLK_STATE_ALLFF;
+ }
+
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
ofs = 0;
max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
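
The four bytes checked above are an end-of-filesystem marker that is meant to be appended by the image build after the filesystem data; everything behind it is treated as free space and erased on first boot. A hypothetical userspace sketch that appends the marker to an image file (in practice the marker must land at the start of an erase block, which this sketch does not handle):

#include <stdio.h>

/* The bytes jffs2_scan_eraseblock() looks for: 0xde 0xad 0xc0 0xde. */
static const unsigned char eof_marker[4] = { 0xde, 0xad, 0xc0, 0xde };

int main(int argc, char **argv)
{
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <image>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "ab");	/* append at the end of the image */
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fwrite(eof_marker, 1, sizeof(eof_marker), f) != sizeof(eof_marker)) {
		perror("fwrite");
		fclose(f);
		return 1;
	}
	fclose(f);
	return 0;
}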

View File

@@ -0,0 +1,146 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1352,6 +1352,13 @@ config CRYPTO_LZ4HC
help
This is the LZ4 high compression mode algorithm.
+config CRYPTO_XZ
+ tristate "XZ compression algorithm"
+ select CRYPTO_ALGAPI
+ select XZ_DEC
+ help
+ This is the XZ algorithm. Only decompression is supported for now.
+
comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
+obj-$(CONFIG_CRYPTO_XZ) += xz.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
--- /dev/null
+++ b/crypto/xz.c
@@ -0,0 +1,117 @@
+/*
+ * Cryptographic API.
+ *
+ * XZ decompression support.
+ *
+ * Copyright (c) 2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xz.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+
+struct xz_comp_ctx {
+ struct xz_dec *decomp_state;
+ struct xz_buf decomp_buf;
+};
+
+static int crypto_xz_decomp_init(struct xz_comp_ctx *ctx)
+{
+ ctx->decomp_state = xz_dec_init(XZ_SINGLE, 0);
+ if (!ctx->decomp_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void crypto_xz_decomp_exit(struct xz_comp_ctx *ctx)
+{
+ xz_dec_end(ctx->decomp_state);
+}
+
+static int crypto_xz_init(struct crypto_tfm *tfm)
+{
+ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return crypto_xz_decomp_init(ctx);
+}
+
+static void crypto_xz_exit(struct crypto_tfm *tfm)
+{
+ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_xz_decomp_exit(ctx);
+}
+
+static int crypto_xz_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ return -EOPNOTSUPP;
+}
+
+static int crypto_xz_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct xz_comp_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct xz_buf *xz_buf = &dctx->decomp_buf;
+ int ret;
+
+ memset(xz_buf, '\0', sizeof(struct xz_buf));
+
+ xz_buf->in = (u8 *) src;
+ xz_buf->in_pos = 0;
+ xz_buf->in_size = slen;
+ xz_buf->out = (u8 *) dst;
+ xz_buf->out_pos = 0;
+ xz_buf->out_size = *dlen;
+
+ ret = xz_dec_run(dctx->decomp_state, xz_buf);
+ if (ret != XZ_STREAM_END) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *dlen = xz_buf->out_pos;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static struct crypto_alg crypto_xz_alg = {
+ .cra_name = "xz",
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct xz_comp_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(crypto_xz_alg.cra_list),
+ .cra_init = crypto_xz_init,
+ .cra_exit = crypto_xz_exit,
+ .cra_u = { .compress = {
+ .coa_compress = crypto_xz_compress,
+ .coa_decompress = crypto_xz_decompress } }
+};
+
+static int __init crypto_xz_mod_init(void)
+{
+ return crypto_register_alg(&crypto_xz_alg);
+}
+
+static void __exit crypto_xz_mod_exit(void)
+{
+ crypto_unregister_alg(&crypto_xz_alg);
+}
+
+module_init(crypto_xz_mod_init);
+module_exit(crypto_xz_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto XZ decompression support");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
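
A consumer does not call xz_dec_run() directly; once the "xz" algorithm is registered it is reached through the generic synchronous compression API. A minimal, hypothetical caller (not part of the patch) would look roughly like this:

#include <linux/crypto.h>
#include <linux/err.h>

/* Decompress an XZ buffer via the "xz" crypto algorithm added above.
 * On entry *dlen holds the size of dst; on success it holds the bytes used. */
static int example_xz_decompress(const u8 *src, unsigned int slen,
				 u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("xz", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_comp_decompress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}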

View File

@@ -0,0 +1,92 @@
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -5,8 +5,10 @@ config UBIFS_FS
select CRYPTO if UBIFS_FS_ADVANCED_COMPR
select CRYPTO if UBIFS_FS_LZO
select CRYPTO if UBIFS_FS_ZLIB
+ select CRYPTO if UBIFS_FS_XZ
select CRYPTO_LZO if UBIFS_FS_LZO
select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
+ select CRYPTO_XZ if UBIFS_FS_XZ
depends on MTD_UBI
help
UBIFS is a file system for flash devices which works on top of UBI.
@@ -35,3 +37,12 @@ config UBIFS_FS_ZLIB
default y
help
Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
+
+config UBIFS_FS_XZ
+ bool "XZ decompression support" if UBIFS_FS_ADVANCED_COMPR
+ depends on UBIFS_FS
+ default y
+ help
+	  XZ compresses better than ZLIB but it is slower.
+ Say 'Y' if unsure.
+
--- a/fs/ubifs/compress.c
+++ b/fs/ubifs/compress.c
@@ -71,6 +71,24 @@ static struct ubifs_compressor zlib_comp
};
#endif
+#ifdef CONFIG_UBIFS_FS_XZ
+static DEFINE_MUTEX(xz_enc_mutex);
+static DEFINE_MUTEX(xz_dec_mutex);
+
+static struct ubifs_compressor xz_compr = {
+ .compr_type = UBIFS_COMPR_XZ,
+ .comp_mutex = &xz_enc_mutex,
+ .decomp_mutex = &xz_dec_mutex,
+ .name = "xz",
+ .capi_name = "xz",
+};
+#else
+static struct ubifs_compressor xz_compr = {
+ .compr_type = UBIFS_COMPR_XZ,
+ .name = "xz",
+};
+#endif
+
/* All UBIFS compressors */
struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
@@ -232,9 +250,15 @@ int __init ubifs_compressors_init(void)
if (err)
goto out_lzo;
+ err = compr_init(&xz_compr);
+ if (err)
+ goto out_zlib;
+
ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr;
return 0;
+out_zlib:
+ compr_exit(&zlib_compr);
out_lzo:
compr_exit(&lzo_compr);
return err;
@@ -247,4 +271,5 @@ void ubifs_compressors_exit(void)
{
compr_exit(&lzo_compr);
compr_exit(&zlib_compr);
+ compr_exit(&xz_compr);
}
--- a/fs/ubifs/ubifs-media.h
+++ b/fs/ubifs/ubifs-media.h
@@ -332,12 +332,14 @@ enum {
* UBIFS_COMPR_NONE: no compression
* UBIFS_COMPR_LZO: LZO compression
* UBIFS_COMPR_ZLIB: ZLIB compression
+ * UBIFS_COMPR_XZ: XZ compression
* UBIFS_COMPR_TYPES_CNT: count of supported compression types
*/
enum {
UBIFS_COMPR_NONE,
UBIFS_COMPR_LZO,
UBIFS_COMPR_ZLIB,
+ UBIFS_COMPR_XZ,
UBIFS_COMPR_TYPES_CNT,
};

View File

@@ -0,0 +1,67 @@
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1577,6 +1577,12 @@ const struct inode_operations ubifs_syml
.follow_link = ubifs_follow_link,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
+#ifdef CONFIG_UBIFS_FS_XATTR
+ .setxattr = ubifs_setxattr,
+ .getxattr = ubifs_getxattr,
+ .listxattr = ubifs_listxattr,
+ .removexattr = ubifs_removexattr,
+#endif
};
const struct file_operations ubifs_file_operations = {
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -553,7 +553,8 @@ int ubifs_jnl_update(struct ubifs_info *
dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
- ubifs_assert(dir_ui->data_len == 0);
+ if (!xent)
+ ubifs_assert(dir_ui->data_len == 0);
ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex));
dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
@@ -573,6 +574,13 @@ int ubifs_jnl_update(struct ubifs_info *
aligned_dlen = ALIGN(dlen, 8);
aligned_ilen = ALIGN(ilen, 8);
len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
+ if (xent) {
+ /*
+ * Make sure to account for dir_ui->data_len in
+ * length calculation in case there is extended attribute.
+		 * length calculation in case there is an extended attribute.
+ len += dir_ui->data_len;
+ }
dent = kmalloc(len, GFP_NOFS);
if (!dent)
return -ENOMEM;
@@ -649,7 +657,8 @@ int ubifs_jnl_update(struct ubifs_info *
ino_key_init(c, &ino_key, dir->i_ino);
ino_offs += aligned_ilen;
- err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
+ err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
+ UBIFS_INO_NODE_SZ + dir_ui->data_len);
if (err)
goto out_ro;
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -209,12 +209,12 @@ static int change_xattr(struct ubifs_inf
goto out_free;
}
inode->i_size = ui->ui_size = size;
- ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
host->i_ctime = ubifs_current_time(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
+ ui->data_len = size;
/*
* It is important to write the host inode after the xattr inode

View File

@@ -0,0 +1,29 @@
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -63,6 +63,17 @@
/* Default time granularity in nanoseconds */
#define DEFAULT_TIME_GRAN 1000000000
+static int get_default_compressor(void)
+{
+ if (ubifs_compr_present(UBIFS_COMPR_LZO))
+ return UBIFS_COMPR_LZO;
+
+ if (ubifs_compr_present(UBIFS_COMPR_ZLIB))
+ return UBIFS_COMPR_ZLIB;
+
+ return UBIFS_COMPR_NONE;
+}
+
/**
* create_default_filesystem - format empty UBI volume.
* @c: UBIFS file-system description object
@@ -183,7 +194,7 @@ static int create_default_filesystem(str
if (c->mount_opts.override_compr)
sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
else
- sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
+ sup->default_compr = cpu_to_le16(get_default_compressor());
generate_random_uuid(sup->uuid);

File diff suppressed because it is too large

View File

@@ -0,0 +1,108 @@
--- a/include/linux/netfilter/xt_layer7.h
+++ b/include/linux/netfilter/xt_layer7.h
@@ -8,6 +8,7 @@ struct xt_layer7_info {
char protocol[MAX_PROTOCOL_LEN];
char pattern[MAX_PATTERN_LEN];
u_int8_t invert;
+ u_int8_t pkt;
};
#endif /* _XT_LAYER7_H */
--- a/net/netfilter/xt_layer7.c
+++ b/net/netfilter/xt_layer7.c
@@ -314,33 +314,35 @@ static int match_no_append(struct nf_con
}
/* add the new app data to the conntrack. Return number of bytes added. */
-static int add_data(struct nf_conn * master_conntrack,
- char * app_data, int appdatalen)
+static int add_datastr(char *target, int offset, char *app_data, int len)
{
int length = 0, i;
- int oldlength = master_conntrack->layer7.app_data_len;
-
- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
- clear on whether the race condition exists or whether this really
- fixes it. I might just be being dense... Anyway, if it's not really
- a fix, all it does is waste a very small amount of time. */
- if(!master_conntrack->layer7.app_data) return 0;
+ if (!target) return 0;
/* Strip nulls. Make everything lower case (our regex lib doesn't
do case insensitivity). Add it to the end of the current data. */
- for(i = 0; i < maxdatalen-oldlength-1 &&
- i < appdatalen; i++) {
+ for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
if(app_data[i] != '\0') {
/* the kernel version of tolower mungs 'upper ascii' */
- master_conntrack->layer7.app_data[length+oldlength] =
+ target[length+offset] =
isascii(app_data[i])?
tolower(app_data[i]) : app_data[i];
length++;
}
}
+ target[length+offset] = '\0';
+
+ return length;
+}
+
+/* add the new app data to the conntrack. Return number of bytes added. */
+static int add_data(struct nf_conn * master_conntrack,
+ char * app_data, int appdatalen)
+{
+ int length;
- master_conntrack->layer7.app_data[length+oldlength] = '\0';
- master_conntrack->layer7.app_data_len = length + oldlength;
+ length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
+ master_conntrack->layer7.app_data_len += length;
return length;
}
@@ -438,7 +440,7 @@ match(const struct sk_buff *skbin,
enum ip_conntrack_info master_ctinfo, ctinfo;
struct nf_conn *master_conntrack, *conntrack;
- unsigned char * app_data;
+ unsigned char *app_data, *tmp_data;
unsigned int pattern_result, appdatalen;
regexp * comppattern;
@@ -466,8 +468,8 @@ match(const struct sk_buff *skbin,
master_conntrack = master_ct(master_conntrack);
/* if we've classified it or seen too many packets */
- if(total_acct_packets(master_conntrack) > num_packets ||
- master_conntrack->layer7.app_proto) {
+ if(!info->pkt && (total_acct_packets(master_conntrack) > num_packets ||
+ master_conntrack->layer7.app_proto)) {
pattern_result = match_no_append(conntrack, master_conntrack,
ctinfo, master_ctinfo, info);
@@ -500,6 +502,25 @@ match(const struct sk_buff *skbin,
/* the return value gets checked later, when we're ready to use it */
comppattern = compile_and_cache(info->pattern, info->protocol);
+ if (info->pkt) {
+ tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
+ if(!tmp_data){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
+ return info->invert;
+ }
+
+ tmp_data[0] = '\0';
+ add_datastr(tmp_data, 0, app_data, appdatalen);
+ pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
+
+ kfree(tmp_data);
+ tmp_data = NULL;
+ spin_unlock_bh(&l7_lock);
+
+ return (pattern_result ^ info->invert);
+ }
+
/* On the first packet of a connection, allocate space for app data */
if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] &&
!master_conntrack->layer7.app_data){

View File

@@ -0,0 +1,51 @@
--- a/net/netfilter/xt_layer7.c
+++ b/net/netfilter/xt_layer7.c
@@ -415,7 +415,9 @@ static int layer7_write_proc(struct file
}
static bool
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+match(const struct sk_buff *skbin, struct xt_action_param *par)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
match(const struct sk_buff *skbin, const struct xt_match_param *par)
#else
match(const struct sk_buff *skbin,
@@ -597,14 +599,19 @@ match(const struct sk_buff *skbin,
}
// load nf_conntrack_ipv4
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+static int
+#else
+static bool
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
-static bool check(const struct xt_mtchk_param *par)
+check(const struct xt_mtchk_param *par)
{
if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
printk(KERN_WARNING "can't load conntrack support for "
"proto=%d\n", par->match->family);
#else
-static bool check(const char *tablename, const void *inf,
+check(const char *tablename, const void *inf,
const struct xt_match *match, void *matchinfo,
unsigned int hook_mask)
{
@@ -612,9 +619,15 @@ static bool check(const char *tablename,
printk(KERN_WARNING "can't load conntrack support for "
"proto=%d\n", match->family);
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ return -EINVAL;
+ }
+ return 0;
+#else
return 0;
}
return 1;
+#endif
}

View File

@@ -0,0 +1,61 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1001,6 +1001,27 @@ config NETFILTER_XT_MATCH_IPVS
If unsure, say N.
+config NETFILTER_XT_MATCH_LAYER7
+ tristate '"layer7" match support'
+ depends on EXPERIMENTAL
+ depends on NETFILTER_XTABLES
+ depends on NETFILTER_ADVANCED
+ depends on NF_CONNTRACK
+ help
+ Say Y if you want to be able to classify connections (and their
+ packets) based on regular expression matching of their application
+ layer data. This is one way to classify applications such as
+ peer-to-peer filesharing systems that do not always use the same
+ port.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_LAYER7_DEBUG
+ bool 'Layer 7 debugging output'
+ depends on NETFILTER_XT_MATCH_LAYER7
+ help
+ Say Y to get lots of debugging output.
+
config NETFILTER_XT_MATCH_LENGTH
tristate '"length" match support'
depends on NETFILTER_ADVANCED
@@ -1195,26 +1216,11 @@ config NETFILTER_XT_MATCH_STATE
To compile it as a module, choose M here. If unsure, say N.
-config NETFILTER_XT_MATCH_LAYER7
- tristate '"layer7" match support'
- depends on NETFILTER_XTABLES
- depends on EXPERIMENTAL && (IP_NF_CONNTRACK || NF_CONNTRACK)
- depends on NETFILTER_ADVANCED
- help
- Say Y if you want to be able to classify connections (and their
- packets) based on regular expression matching of their application
- layer data. This is one way to classify applications such as
- peer-to-peer filesharing systems that do not always use the same
- port.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config NETFILTER_XT_MATCH_LAYER7_DEBUG
- bool 'Layer 7 debugging output'
- depends on NETFILTER_XT_MATCH_LAYER7
- help
- Say Y to get lots of debugging output.
-
+ bool 'Layer 7 debugging output'
+ depends on NETFILTER_XT_MATCH_LAYER7
+ help
+ Say Y to get lots of debugging output.
config NETFILTER_XT_MATCH_STATISTIC
tristate '"statistic" match support'

View File

@@ -0,0 +1,46 @@
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -268,10 +268,34 @@ static int ct_open(struct inode *inode,
sizeof(struct ct_iter_state));
}
+static int kill_all(struct nf_conn *i, void *data)
+{
+ return 1;
+}
+
+static ssize_t ct_file_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = file->private_data;
+ struct net *net = seq_file_net(seq);
+
+ if (count) {
+ char c;
+
+ if (get_user(c, buf))
+ return -EFAULT;
+
+ if (c == 'f')
+ nf_ct_iterate_cleanup(net, kill_all, NULL);
+ }
+ return count;
+}
+
static const struct file_operations ct_file_ops = {
.owner = THIS_MODULE,
.open = ct_open,
.read = seq_read,
+ .write = ct_file_write,
.llseek = seq_lseek,
.release = seq_release_net,
};
@@ -373,7 +397,7 @@ static int nf_conntrack_standalone_init_
{
struct proc_dir_entry *pde;
- pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
+ pde = proc_create("nf_conntrack", 0660, net->proc_net, &ct_file_ops);
if (!pde)
goto out_nf_conntrack;
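
With this change the table can be flushed from userspace by writing the single character 'f' to /proc/net/nf_conntrack, which makes ct_file_write() run kill_all() over every entry. A small hypothetical userspace sketch (needs root and a kernel carrying this patch):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/nf_conntrack", "w");

	if (!f) {
		perror("fopen /proc/net/nf_conntrack");
		return 1;
	}
	/* ct_file_write() only acts on 'f'; any other byte is a no-op. */
	fputc('f', f);
	fclose(f);
	return 0;
}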

View File

@@ -0,0 +1,93 @@
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
@@ -87,6 +87,7 @@ struct ipt_ip {
#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
#define IPT_F_GOTO 0x02 /* Set if jump is a goto */
#define IPT_F_MASK 0x03 /* All possible flag bits mask. */
+#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
/* Values for "inv" field in struct ipt_ip. */
#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -82,6 +82,9 @@ ip_packet_match(const struct iphdr *ip,
#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+ return true;
+
if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
IPT_INV_SRCIP) ||
FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
@@ -135,6 +138,29 @@ ip_packet_match(const struct iphdr *ip,
return true;
}
+static void
+ip_checkdefault(struct ipt_ip *ip)
+{
+ static const char iface_mask[IFNAMSIZ] = {};
+
+ if (ip->invflags || ip->flags & IPT_F_FRAG)
+ return;
+
+ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
+ return;
+
+ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
+ return;
+
+ if (ip->smsk.s_addr || ip->dmsk.s_addr)
+ return;
+
+ if (ip->proto)
+ return;
+
+ ip->flags |= IPT_F_NO_DEF_MATCH;
+}
+
static bool
ip_checkentry(const struct ipt_ip *ip)
{
@@ -560,7 +586,7 @@ static void cleanup_match(struct xt_entr
}
static int
-check_entry(const struct ipt_entry *e, const char *name)
+check_entry(struct ipt_entry *e, const char *name)
{
const struct xt_entry_target *t;
@@ -569,6 +595,8 @@ check_entry(const struct ipt_entry *e, c
return -EINVAL;
}
+ ip_checkdefault(&e->ip);
+
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
return -EINVAL;
@@ -930,6 +958,7 @@ copy_entries_to_user(unsigned int total_
const struct xt_table_info *private = table->private;
int ret = 0;
const void *loc_cpu_entry;
+ u8 flags;
counters = alloc_counters(table);
if (IS_ERR(counters))
@@ -960,6 +989,14 @@ copy_entries_to_user(unsigned int total_
ret = -EFAULT;
goto free_counters;
}
+
+ flags = e->ip.flags & IPT_F_MASK;
+ if (copy_to_user(userptr + off
+ + offsetof(struct ipt_entry, ip.flags),
+ &flags, sizeof(flags)) != 0) {
+ ret = -EFAULT;
+ goto free_counters;
+ }
for (i = sizeof(struct ipt_entry);
i < e->target_offset;

View File

@@ -0,0 +1,81 @@
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -310,6 +310,33 @@ struct ipt_entry *ipt_next_entry(const s
return (void *)entry + entry->next_offset;
}
+static bool
+ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
+{
+ struct xt_entry_target *t;
+ struct xt_standard_target *st;
+
+ if (e->target_offset != sizeof(struct ipt_entry))
+ return false;
+
+ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
+ return false;
+
+ t = ipt_get_target(e);
+ if (t->u.kernel.target->target)
+ return false;
+
+ st = (struct xt_standard_target *) t;
+ if (st->verdict == XT_RETURN)
+ return false;
+
+ if (st->verdict >= 0)
+ return false;
+
+ *verdict = (unsigned)(-st->verdict) - 1;
+ return true;
+}
+
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
@@ -334,6 +361,25 @@ ipt_do_table(struct sk_buff *skb,
ip = ip_hdr(skb);
indev = in ? in->name : nulldevname;
outdev = out ? out->name : nulldevname;
+
+ IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+ local_bh_disable();
+ addend = xt_write_recseq_begin();
+ private = table->private;
+ cpu = smp_processor_id();
+ table_base = private->entries[cpu];
+ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+ stackptr = per_cpu_ptr(private->stackptr, cpu);
+ origptr = *stackptr;
+
+ e = get_entry(table_base, private->hook_entry[hook]);
+ if (ipt_handle_default_rule(e, &verdict)) {
+ ADD_COUNTER(e->counters, skb->len, 1);
+ xt_write_recseq_end(addend);
+ local_bh_enable();
+ return verdict;
+ }
+
/* We handle fragments by dealing with the first fragment as
* if it was a normal packet. All other fragments are treated
* normally, except that they will NEVER match rules that ask
@@ -348,18 +394,6 @@ ipt_do_table(struct sk_buff *skb,
acpar.family = NFPROTO_IPV4;
acpar.hooknum = hook;
- IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- local_bh_disable();
- addend = xt_write_recseq_begin();
- private = table->private;
- cpu = smp_processor_id();
- table_base = private->entries[cpu];
- jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
- stackptr = per_cpu_ptr(private->stackptr, cpu);
- origptr = *stackptr;
-
- e = get_entry(table_base, private->hook_entry[hook]);
-
pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
table->name, hook, origptr,
get_entry(table_base, private->underflow[hook]));
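
The (unsigned)(-st->verdict) - 1 step in ipt_handle_default_rule() relies on the xtables convention that a standard target stores a built-in verdict V as -V - 1, so NF_DROP (0) becomes -1 and NF_ACCEPT (1) becomes -2, while jumps to user chains stay non-negative and XT_RETURN is special-cased. A short sketch of the encode/decode pair (example_* names are illustrative only):

#include <linux/netfilter.h>	/* NF_DROP, NF_ACCEPT */

/* How iptables encodes a built-in policy inside a standard target. */
static inline int example_encode_policy(unsigned int nf_verdict)
{
	return -(int)nf_verdict - 1;	/* NF_DROP (0) -> -1, NF_ACCEPT (1) -> -2 */
}

/* The inverse, mirroring the decode done for the chain's default policy. */
static inline unsigned int example_decode_policy(int stored)
{
	return (unsigned int)(-stored) - 1;	/* -1 -> NF_DROP, -2 -> NF_ACCEPT */
}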

View File

@@ -0,0 +1,16 @@
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -85,9 +85,11 @@ ip_packet_match(const struct iphdr *ip,
if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
return true;
- if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
+ if (FWINV(ipinfo->smsk.s_addr &&
+ (ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
IPT_INV_SRCIP) ||
- FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
+ FWINV(ipinfo->dmsk.s_addr &&
+ (ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
IPT_INV_DSTIP)) {
dprintf("Source or dest mismatch.\n");

View File

@@ -0,0 +1,36 @@
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -33,6 +33,9 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+/* Do not check the TCP window for incoming packets */
+static int nf_ct_tcp_no_window_check __read_mostly = 1;
+
/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
@@ -515,6 +518,9 @@ static bool tcp_in_window(const struct n
s32 receiver_offset;
bool res, in_recv_win;
+ if (nf_ct_tcp_no_window_check)
+ return true;
+
/*
* Get the required data from the packet.
*/
@@ -1452,6 +1458,13 @@ static struct ctl_table tcp_sysctl_table
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "nf_conntrack_tcp_no_window_check",
+ .data = &nf_ct_tcp_no_window_check,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{ }
};

View File

@@ -0,0 +1,791 @@
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -222,6 +222,33 @@ struct tc_sfq_xstats {
__s32 allot;
};
+/* ESFQ section */
+
+enum
+{
+ /* traditional */
+ TCA_SFQ_HASH_CLASSIC,
+ TCA_SFQ_HASH_DST,
+ TCA_SFQ_HASH_SRC,
+ TCA_SFQ_HASH_FWMARK,
+ /* conntrack */
+ TCA_SFQ_HASH_CTORIGDST,
+ TCA_SFQ_HASH_CTORIGSRC,
+ TCA_SFQ_HASH_CTREPLDST,
+ TCA_SFQ_HASH_CTREPLSRC,
+ TCA_SFQ_HASH_CTNATCHG,
+};
+
+struct tc_esfq_qopt
+{
+ unsigned quantum; /* Bytes per round allocated to flow */
+ int perturb_period; /* Period of hash perturbation */
+ __u32 limit; /* Maximal packets in queue */
+ unsigned divisor; /* Hash divisor */
+ unsigned flows; /* Maximal number of flows */
+ unsigned hash_kind; /* Hash function to use for flow identification */
+};
+
/* RED section */
enum {
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -148,6 +148,37 @@ config NET_SCH_SFQ
To compile this code as a module, choose M here: the
module will be called sch_sfq.
+config NET_SCH_ESFQ
+ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
+ ---help---
+ Say Y here if you want to use the Enhanced Stochastic Fairness
+ Queueing (ESFQ) packet scheduling algorithm for some of your network
+ devices or as a leaf discipline for a classful qdisc such as HTB or
+ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
+ references to the SFQ algorithm).
+
+	  This is an enhanced SFQ version which allows you to control some
+ hardcoded values in the SFQ scheduler.
+
+ ESFQ also adds control of the hash function used to identify packet
+ flows. The original SFQ discipline hashes by connection; ESFQ add
+	  flows. The original SFQ discipline hashes by connection; ESFQ adds
+ can be more fair to users in some networking situations.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_esfq.
+
+config NET_SCH_ESFQ_NFCT
+ bool "Connection Tracking Hash Types"
+ depends on NET_SCH_ESFQ && NF_CONNTRACK
+ ---help---
+ Say Y here to enable support for hashing based on netfilter connection
+ tracking information. This is useful for a router that is also using
+ NAT to connect privately-addressed hosts to the Internet. If you want
+ to provide fair distribution of upstream bandwidth, ESFQ must use
+ connection tracking information, since all outgoing packets will share
+ the same source address.
+
config NET_SCH_TEQL
tristate "True Link Equalizer (TEQL)"
---help---
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_NET_SCH_INGRESS) += sch_ing
obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
--- /dev/null
+++ b/net/sched/sch_esfq.c
@@ -0,0 +1,702 @@
+/*
+ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes: Alexander Atanasov, <alex@ssi.bg>
+ * Added dynamic depth,limit,divisor,hash_kind options.
+ * Added dst and src hashes.
+ *
+ * Alexander Clouter, <alex@digriz.org.uk>
+ * Ported ESFQ to Linux 2.6.
+ *
+ * Corey Hickey, <bugfood-c@fatooh.org>
+ * Maintenance of the Linux 2.6 port.
+ * Added fwmark hash (thanks to Robert Kurjata).
+ * Added usage of jhash.
+ * Added conntrack support.
+ * Added ctnatchg hash (thanks to Ben Pfountz).
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <net/ip.h>
+#include <net/netlink.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <linux/jhash.h>
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+/* Stochastic Fairness Queuing algorithm.
+ For more comments look at sch_sfq.c.
+ The difference is that you can change limit, depth,
+ hash table size and choose alternate hash types.
+
+ classic: same as in sch_sfq.c
+ dst: destination IP address
+ src: source IP address
+ fwmark: netfilter mark value
+ ctorigdst: original destination IP address
+ ctorigsrc: original source IP address
+ ctrepldst: reply destination IP address
+   ctreplsrc:	reply source IP address
+
+*/
+
+#define ESFQ_HEAD 0
+#define ESFQ_TAIL 1
+
+/* This type should contain at least SFQ_DEPTH*2 values */
+typedef unsigned int esfq_index;
+
+struct esfq_head
+{
+ esfq_index next;
+ esfq_index prev;
+};
+
+struct esfq_sched_data
+{
+/* Parameters */
+ int perturb_period;
+ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
+ int limit;
+ unsigned depth;
+ unsigned hash_divisor;
+ unsigned hash_kind;
+/* Variables */
+ struct timer_list perturb_timer;
+ int perturbation;
+ esfq_index tail; /* Index of current slot in round */
+ esfq_index max_depth; /* Maximal depth */
+
+ esfq_index *ht; /* Hash table */
+ esfq_index *next; /* Active slots link */
+ short *allot; /* Current allotment per slot */
+ unsigned short *hash; /* Hash value indexed by slots */
+ struct sk_buff_head *qs; /* Slot queue */
+ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
+};
+
+/* This contains the info we will hash. */
+struct esfq_packet_info
+{
+ u32 proto; /* protocol or port */
+ u32 src; /* source from packet header */
+ u32 dst; /* destination from packet header */
+ u32 ctorigsrc; /* original source from conntrack */
+ u32 ctorigdst; /* original destination from conntrack */
+ u32 ctreplsrc; /* reply source from conntrack */
+ u32 ctrepldst; /* reply destination from conntrack */
+ u32 mark; /* netfilter mark (fwmark) */
+};
+
+static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
+{
+ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
+{
+ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
+{
+ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
+}
+
+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
+{
+ struct esfq_packet_info info;
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+#endif
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ {
+ struct iphdr *iph = ip_hdr(skb);
+ info.dst = iph->daddr;
+ info.src = iph->saddr;
+ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
+ (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP ||
+ iph->protocol == IPPROTO_SCTP ||
+ iph->protocol == IPPROTO_DCCP ||
+ iph->protocol == IPPROTO_ESP))
+ info.proto = *(((u32*)iph) + iph->ihl);
+ else
+ info.proto = iph->protocol;
+ break;
+ }
+ case __constant_htons(ETH_P_IPV6):
+ {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ /* Hash ipv6 addresses into a u32. This isn't ideal,
+ * but the code is simple. */
+ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
+ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
+ if (iph->nexthdr == IPPROTO_TCP ||
+ iph->nexthdr == IPPROTO_UDP ||
+ iph->nexthdr == IPPROTO_SCTP ||
+ iph->nexthdr == IPPROTO_DCCP ||
+ iph->nexthdr == IPPROTO_ESP)
+ info.proto = *(u32*)&iph[1];
+ else
+ info.proto = iph->nexthdr;
+ break;
+ }
+ default:
+ info.dst = (u32)(unsigned long)skb_dst(skb);
+ info.src = (u32)(unsigned long)skb->sk;
+ info.proto = skb->protocol;
+ }
+
+ info.mark = skb->mark;
+
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ /* defaults if there is no conntrack info */
+ info.ctorigsrc = info.src;
+ info.ctorigdst = info.dst;
+ info.ctreplsrc = info.dst;
+ info.ctrepldst = info.src;
+ /* collect conntrack info */
+ if (ct && ct != &nf_conntrack_untracked) {
+ if (skb->protocol == __constant_htons(ETH_P_IP)) {
+ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
+ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
+ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
+ }
+ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
+ /* Again, hash ipv6 addresses into a single u32. */
+ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
+ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
+ }
+
+ }
+#endif
+
+ switch(q->hash_kind) {
+ case TCA_SFQ_HASH_CLASSIC:
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+ case TCA_SFQ_HASH_DST:
+ return esfq_jhash_1word(q, info.dst);
+ case TCA_SFQ_HASH_SRC:
+ return esfq_jhash_1word(q, info.src);
+ case TCA_SFQ_HASH_FWMARK:
+ return esfq_jhash_1word(q, info.mark);
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ case TCA_SFQ_HASH_CTORIGDST:
+ return esfq_jhash_1word(q, info.ctorigdst);
+ case TCA_SFQ_HASH_CTORIGSRC:
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ case TCA_SFQ_HASH_CTREPLDST:
+ return esfq_jhash_1word(q, info.ctrepldst);
+ case TCA_SFQ_HASH_CTREPLSRC:
+ return esfq_jhash_1word(q, info.ctreplsrc);
+ case TCA_SFQ_HASH_CTNATCHG:
+ {
+ if (info.ctorigdst == info.ctreplsrc)
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ return esfq_jhash_1word(q, info.ctreplsrc);
+ }
+#endif
+ default:
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
+ }
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+}
+
+static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d = q->qs[x].qlen + q->depth;
+
+ p = d;
+ n = q->dep[d].next;
+ q->dep[x].next = n;
+ q->dep[x].prev = p;
+ q->dep[p].next = q->dep[n].prev = x;
+}
+
+static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+
+ if (n == p && q->max_depth == q->qs[x].qlen + 1)
+ q->max_depth--;
+
+ esfq_link(q, x);
+}
+
+static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+ d = q->qs[x].qlen;
+ if (q->max_depth < d)
+ q->max_depth = d;
+
+ esfq_link(q, x);
+}
+
+static unsigned int esfq_drop(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_index d = q->max_depth;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ /* Queue is full! Find the longest slot and
+ drop a packet from it */
+
+ if (d > 1) {
+ esfq_index x = q->dep[d+q->depth].next;
+ skb = q->qs[x].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[x]);
+ kfree_skb(skb);
+ esfq_dec(q, x);
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ if (d == 1) {
+ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
+ d = q->next[q->tail];
+ q->next[q->tail] = q->next[d];
+ q->allot[q->next[d]] += q->quantum;
+ skb = q->qs[d].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[d]);
+ kfree_skb(skb);
+ esfq_dec(q, d);
+ sch->q.qlen--;
+ q->ht[q->hash[d]] = q->depth;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ return 0;
+}
+
+static void esfq_q_enqueue(struct sk_buff *skb, struct esfq_sched_data *q, unsigned int end)
+{
+ unsigned hash = esfq_hash(q, skb);
+ unsigned depth = q->depth;
+ esfq_index x;
+
+ x = q->ht[hash];
+ if (x == depth) {
+ q->ht[hash] = x = q->dep[depth].next;
+ q->hash[x] = hash;
+ }
+
+ if (end == ESFQ_TAIL)
+ __skb_queue_tail(&q->qs[x], skb);
+ else
+ __skb_queue_head(&q->qs[x], skb);
+
+ esfq_inc(q, x);
+ if (q->qs[x].qlen == 1) { /* The flow is new */
+ if (q->tail == depth) { /* It is the first flow */
+ q->tail = x;
+ q->next[x] = x;
+ q->allot[x] = q->quantum;
+ } else {
+ q->next[x] = q->next[q->tail];
+ q->next[q->tail] = x;
+ q->tail = x;
+ }
+ }
+}
+
+static int esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_enqueue(skb, q, ESFQ_TAIL);
+ sch->qstats.backlog += skb->len;
+ if (++sch->q.qlen < q->limit-1) {
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
+ return 0;
+ }
+
+ sch->qstats.drops++;
+ esfq_drop(sch);
+ return NET_XMIT_CN;
+}
+
+static struct sk_buff *esfq_peek(struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_index a;
+
+ /* No active slots */
+ if (q->tail == q->depth)
+ return NULL;
+
+ a = q->next[q->tail];
+ return skb_peek(&q->qs[a]);
+}
+
+static struct sk_buff *esfq_q_dequeue(struct esfq_sched_data *q)
+{
+ struct sk_buff *skb;
+ unsigned depth = q->depth;
+ esfq_index a, old_a;
+
+ /* No active slots */
+ if (q->tail == depth)
+ return NULL;
+
+ a = old_a = q->next[q->tail];
+
+ /* Grab packet */
+ skb = __skb_dequeue(&q->qs[a]);
+ esfq_dec(q, a);
+
+ /* Is the slot empty? */
+ if (q->qs[a].qlen == 0) {
+ q->ht[q->hash[a]] = depth;
+ a = q->next[a];
+ if (a == old_a) {
+ q->tail = depth;
+ return skb;
+ }
+ q->next[q->tail] = a;
+ q->allot[a] += q->quantum;
+ } else if ((q->allot[a] -= skb->len) <= 0) {
+ q->tail = a;
+ a = q->next[a];
+ q->allot[a] += q->quantum;
+ }
+
+ return skb;
+}
+
+static struct sk_buff *esfq_dequeue(struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = esfq_q_dequeue(q);
+ if (skb == NULL)
+ return NULL;
+ sch->q.qlen--;
+ sch->qstats.backlog -= skb->len;
+ return skb;
+}
+
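+/* Stop the perturbation timer and free all per-queue tables. */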
+static void esfq_q_destroy(struct esfq_sched_data *q)
+{
+ del_timer(&q->perturb_timer);
+	/* kfree(NULL) is a no-op, so partially initialized state is fine */
+	kfree(q->ht);
+	kfree(q->dep);
+	kfree(q->next);
+	kfree(q->allot);
+	kfree(q->hash);
+	kfree(q->qs);
+}
+
+static void esfq_destroy(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_destroy(q);
+}
+
+static void esfq_reset(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = esfq_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
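+/* Timer callback: pick a new random hash perturbation and re-arm the
+ * timer if periodic perturbation is configured.
+ */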
+static void esfq_perturbation(unsigned long arg)
+{
+ struct Qdisc *sch = (struct Qdisc*)arg;
+ struct esfq_sched_data *q = qdisc_priv(sch);
+
+ q->perturbation = net_random()&0x1F;
+
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+}
+
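+/* Validate the requested hash type; conntrack-based types fall back to
+ * the classic SFQ hash when ESFQ conntrack support is not compiled in.
+ */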
+static unsigned int esfq_check_hash(unsigned int kind)
+{
+ switch (kind) {
+ case TCA_SFQ_HASH_CTORIGDST:
+ case TCA_SFQ_HASH_CTORIGSRC:
+ case TCA_SFQ_HASH_CTREPLDST:
+ case TCA_SFQ_HASH_CTREPLSRC:
+ case TCA_SFQ_HASH_CTNATCHG:
+#ifndef CONFIG_NET_SCH_ESFQ_NFCT
+ {
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Conntrack hash types disabled in kernel config. Falling back to classic.\n");
+ return TCA_SFQ_HASH_CLASSIC;
+ }
+#endif
+ case TCA_SFQ_HASH_CLASSIC:
+ case TCA_SFQ_HASH_DST:
+ case TCA_SFQ_HASH_SRC:
+ case TCA_SFQ_HASH_FWMARK:
+ return kind;
+ default:
+ {
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash type. Falling back to classic.\n");
+ return TCA_SFQ_HASH_CLASSIC;
+ }
+ }
+}
+
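+/* Parse the netlink options (or apply defaults) and allocate the hash
+ * table, depth lists and per-slot state.
+ */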
+static int esfq_q_init(struct esfq_sched_data *q, struct nlattr *opt)
+{
+ struct tc_esfq_qopt *ctl = nla_data(opt);
+ esfq_index p = ~0U/2;
+ int i;
+
+ if (opt && opt->nla_len < nla_attr_size(sizeof(*ctl)))
+ return -EINVAL;
+
+ q->perturbation = 0;
+ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
+ q->max_depth = 0;
+ if (opt == NULL) {
+ q->perturb_period = 0;
+ q->hash_divisor = 1024;
+ q->tail = q->limit = q->depth = 128;
+
+	} else {
+ if (ctl->quantum)
+ q->quantum = ctl->quantum;
+ q->perturb_period = ctl->perturb_period*HZ;
+ q->hash_divisor = ctl->divisor ? : 1024;
+ q->tail = q->limit = q->depth = ctl->flows ? : 128;
+
+		if (q->depth > p - 1)
+ return -EINVAL;
+
+ if (ctl->limit)
+ q->limit = min_t(u32, ctl->limit, q->depth);
+
+ if (ctl->hash_kind) {
+ q->hash_kind = esfq_check_hash(ctl->hash_kind);
+ }
+ }
+
+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->ht)
+ goto err_case;
+ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
+ if (!q->dep)
+ goto err_case;
+ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->next)
+ goto err_case;
+ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
+ if (!q->allot)
+ goto err_case;
+ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
+ if (!q->hash)
+ goto err_case;
+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!q->qs)
+ goto err_case;
+
+	for (i = 0; i < q->hash_divisor; i++)
+		q->ht[i] = q->depth;
+	for (i = 0; i < q->depth; i++) {
+		skb_queue_head_init(&q->qs[i]);
+		q->dep[i + q->depth].next = i + q->depth;
+		q->dep[i + q->depth].prev = i + q->depth;
+	}
+
+	for (i = 0; i < q->depth; i++)
+		esfq_link(q, i);
+ return 0;
+err_case:
+ esfq_q_destroy(q);
+ return -ENOBUFS;
+}
+
+static int esfq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ q->quantum = psched_mtu(qdisc_dev(sch)); /* default */
+ if ((err = esfq_q_init(q, opt)))
+ return err;
+
+ init_timer(&q->perturb_timer);
+ q->perturb_timer.data = (unsigned long)sch;
+ q->perturb_timer.function = esfq_perturbation;
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+
+ return 0;
+}
+
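+/* Reconfigure a live qdisc: build a new queue with the requested
+ * parameters, migrate all queued packets into it under the tree lock,
+ * then free the old tables and adopt the new state.
+ */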
+static int esfq_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct esfq_sched_data new;
+ struct sk_buff *skb;
+ int err;
+
+ /* set up new queue */
+ memset(&new, 0, sizeof(struct esfq_sched_data));
+ new.quantum = psched_mtu(qdisc_dev(sch)); /* default */
+ if ((err = esfq_q_init(&new, opt)))
+ return err;
+
+ /* copy all packets from the old queue to the new queue */
+ sch_tree_lock(sch);
+ while ((skb = esfq_q_dequeue(q)) != NULL)
+ esfq_q_enqueue(skb, &new, ESFQ_TAIL);
+
+ /* clean up the old queue */
+ esfq_q_destroy(q);
+
+ /* copy elements of the new queue into the old queue */
+ q->perturb_period = new.perturb_period;
+ q->quantum = new.quantum;
+ q->limit = new.limit;
+ q->depth = new.depth;
+ q->hash_divisor = new.hash_divisor;
+ q->hash_kind = new.hash_kind;
+ q->tail = new.tail;
+ q->max_depth = new.max_depth;
+ q->ht = new.ht;
+ q->dep = new.dep;
+ q->next = new.next;
+ q->allot = new.allot;
+ q->hash = new.hash;
+ q->qs = new.qs;
+
+ /* finish up */
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ } else {
+ q->perturbation = 0;
+ }
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_esfq_qopt opt;
+
+ opt.quantum = q->quantum;
+ opt.perturb_period = q->perturb_period/HZ;
+
+ opt.limit = q->limit;
+ opt.divisor = q->hash_divisor;
+ opt.flows = q->depth;
+ opt.hash_kind = q->hash_kind;
+
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct Qdisc_ops esfq_qdisc_ops = {
+ .next = NULL,
+ .cl_ops = NULL,
+ .id = "esfq",
+ .priv_size = sizeof(struct esfq_sched_data),
+ .enqueue = esfq_enqueue,
+ .dequeue = esfq_dequeue,
+ .peek = esfq_peek,
+ .drop = esfq_drop,
+ .init = esfq_init,
+ .reset = esfq_reset,
+ .destroy = esfq_destroy,
+ .change = esfq_change,
+ .dump = esfq_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init esfq_module_init(void)
+{
+ return register_qdisc(&esfq_qdisc_ops);
+}
+static void __exit esfq_module_exit(void)
+{
+ unregister_qdisc(&esfq_qdisc_ops);
+}
+module_init(esfq_module_init)
+module_exit(esfq_module_exit)
+MODULE_LICENSE("GPL");
