kernel: start working on 3.18 support

This commit:
1) Copies 3.14 patches
2) Drops mainlined stuff
3) Modifies some patches to apply

Signed-off-by: Rafał Miłecki <zajec5@gmail.com>

SVN-Revision: 43093
lede-17.01
Rafał Miłecki 2014-10-27 18:14:39 +00:00
parent 38e72c779e
commit 1513b39a8c
169 changed files with 26505 additions and 0 deletions

View File

@@ -0,0 +1,25 @@
Upstream changed the default rootfs to tmpfs when none has been passed
to the kernel - this doesn't fit our purposes, so change it back.
Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -623,6 +623,7 @@ int __init init_rootfs(void)
if (err)
return err;
+#if 0
if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
(!root_fs_names || strstr(root_fs_names, "tmpfs"))) {
err = shmem_init();
@@ -630,6 +631,9 @@ int __init init_rootfs(void)
} else {
err = init_ramfs_fs();
}
+#else
+ err = init_ramfs_fs();
+#endif
if (err)
unregister_filesystem(&rootfs_fs_type);

View File

@@ -0,0 +1,81 @@
From 1e311820ec3055e3f08e687de6564692a7cec675 Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Mon, 28 Jan 2013 20:06:29 +0100
Subject: [PATCH 11/12] USB: EHCI: add ignore_oc flag to disable overcurrent
checking
This patch adds an ignore_oc flag which can be set by EHCI controller
drivers that do not support overcurrent checking or want it disabled. The EHCI
platform data in include/linux/usb/ehci_pdriver.h is also augmented to
take advantage of this new flag.
Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
drivers/usb/host/ehci-hcd.c | 2 +-
drivers/usb/host/ehci-hub.c | 4 ++--
drivers/usb/host/ehci-platform.c | 1 +
drivers/usb/host/ehci.h | 1 +
include/linux/usb/ehci_pdriver.h | 1 +
5 files changed, 6 insertions(+), 3 deletions(-)
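For illustration only (not part of this patch): a minimal sketch of how a board file could request this behaviour through the new platform-data field, assuming the generic ehci-platform driver is bound to the controller. All names below are hypothetical.

/* Hypothetical board support code: ask the generic ehci-platform driver
 * to skip over-current reporting via the pdata flag added by this patch. */
#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>

static struct usb_ehci_pdata board_ehci_pdata = {
        .ignore_oc = 1,         /* field introduced by this patch */
};

static struct platform_device board_ehci_device = {
        .name   = "ehci-platform",
        .id     = -1,
        .dev    = {
                .platform_data = &board_ehci_pdata,
        },
        /* .resource, .num_resources and the DMA mask omitted for brevity */
};

/* registered from board init code with platform_device_register(&board_ehci_device) */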
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -633,7 +633,7 @@ static int ehci_run (struct usb_hcd *hcd
"USB %x.%x started, EHCI %x.%02x%s\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
temp >> 8, temp & 0xff,
- ignore_oc ? ", overcurrent ignored" : "");
+ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
ehci_writel(ehci, INTR_MASK,
&ehci->regs->intr_enable); /* Turn On Interrupts */
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -632,7 +632,7 @@ ehci_hub_status_data (struct usb_hcd *hc
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
- if (!ignore_oc)
+ if (!ignore_oc && !ehci->ignore_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
@@ -992,7 +992,7 @@ int ehci_hub_control(
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
- if ((temp & PORT_OCC) && !ignore_oc){
+ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -57,6 +57,7 @@ static int ehci_platform_reset(struct us
hcd->has_tt = pdata->has_tt;
ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
+ ehci->ignore_oc = pdata->ignore_oc;
if (pdata->pre_setup) {
retval = pdata->pre_setup(hcd);
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -226,6 +226,7 @@ struct ehci_hcd { /* one per controlle
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
unsigned need_oc_pp_cycle:1; /* MPC834X port power */
unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
+ unsigned ignore_oc:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -45,6 +45,7 @@ struct usb_ehci_pdata {
unsigned big_endian_desc:1;
unsigned big_endian_mmio:1;
unsigned no_io_watchdog:1;
+ unsigned ignore_oc:1;
/* Turn on all power and clocks */
int (*power_on)(struct platform_device *pdev);

View File

@@ -0,0 +1,54 @@
From: Stephen Hemminger <stephen@networkplumber.org>
Subject: bridge: allow reception on disabled port
When an ethernet device is enslaved to a bridge, and the bridge STP
detects loss of carrier (or operational state down), then normally
packet reception is blocked.
This breaks control applications like WPA which may be expecting to
receive packets to negotiate bringing the link up. The bridge needs to
block forwarding packets from these disabled ports, but there is no
hard requirement to not allow local packet delivery.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -146,11 +146,13 @@ EXPORT_SYMBOL_GPL(br_handle_frame_finish
static int br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
- u16 vid = 0;
+ if (p->state != BR_STATE_DISABLED) {
+ u16 vid = 0;
- /* check if vlan is allowed, to avoid spoofing */
- if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+ /* check if vlan is allowed, to avoid spoofing */
+ if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+ }
return 0; /* process further */
}
@@ -224,6 +226,18 @@ rx_handler_result_t br_handle_frame(stru
forward:
switch (p->state) {
+ case BR_STATE_DISABLED:
+ if (ether_addr_equal(p->br->dev->dev_addr, dest))
+ skb->pkt_type = PACKET_HOST;
+
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_handle_local_finish))
+ break;
+
+ BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
+ br_pass_frame_up(skb);
+ break;
+
case BR_STATE_FORWARDING:
rhook = rcu_dereference(br_should_route_hook);
if (rhook) {

View File

@@ -0,0 +1,660 @@
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1705,6 +1705,9 @@ config MIPS_MALTA_PM
bool
default y
+config SYS_HAS_DMA_OPS
+ bool
+
#
# CPU may reorder R->R, R->W, W->R, W->W
# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -1,9 +1,16 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
#include <asm/scatterlist.h>
#include <asm/dma-coherence.h>
#include <asm/cache.h>
+#include <asm/cpu-type.h>
#include <asm-generic/dma-coherent.h>
#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
@@ -12,12 +19,48 @@
extern struct dma_map_ops *mips_dma_map_ops;
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs);
+
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
+#ifdef CONFIG_SYS_HAS_DMA_OPS
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
else
return mips_dma_map_ops;
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ */
+
+static inline int cpu_needs_post_dma_flush(struct device *dev)
+{
+#ifndef CONFIG_SYS_HAS_CPU_R10000
+ return 0;
+#endif
+ return !plat_device_is_coherent(dev) &&
+ (boot_cpu_type() == CPU_R10000 ||
+ boot_cpu_type() == CPU_R12000 ||
+ boot_cpu_type() == CPU_BMIPS5000);
+}
+
+static inline struct page *dma_addr_to_page(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return pfn_to_page(
+ plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -30,12 +73,312 @@ static inline bool dma_capable(struct de
static inline void dma_mark_clean(void *addr, size_t size) {}
-#include <asm-generic/dma-mapping-common.h>
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+ struct page *page = virt_to_page(ptr);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ } else {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, offset, size, dir);
+
+ addr = plat_map_dma_mem_page(dev, page) + offset;
+ }
+ debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->unmap_page(dev, addr, size, dir, attrs);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+
+ plat_unmap_dma_mem(dev, addr, size, dir);
+ }
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ } else {
+ for_each_sg(sg, s, nents, i) {
+ struct page *page = sg_page(s);
+
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, s->offset, s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ s->dma_address =
+ plat_map_dma_mem_page(dev, page) + s->offset;
+ }
+ ents = nents;
+ }
+ debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+ return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+ if (ops) {
+ ops->unmap_sg(dev, sg, nents, dir, attrs);
+ return;
+ }
+
+ for_each_sg(sg, s, nents, i) {
+ if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
+ }
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ } else {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, offset, size, dir);
+
+ addr = plat_map_dma_mem_page(dev, page) + offset;
+ }
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->unmap_page(dev, addr, size, dir, NULL);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+
+ plat_unmap_dma_mem(dev, addr, size, dir);
+ }
+ debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_cpu(dev, addr, size, dir);
+ else if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_device(dev, addr, size, dir);
+ else if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+ else if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr + offset),
+ (addr + offset) & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_device(dev, addr + offset, size, dir);
+ else if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, addr + offset),
+ (addr + offset) & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ else if (cpu_needs_post_dma_flush(dev)) {
+ for_each_sg(sg, s, nelems, i)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ }
+ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_sg_for_device(dev, sg, nelems, dir);
+ else if (!plat_device_is_coherent(dev)) {
+ for_each_sg(sg, s, nelems, i)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ }
+ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops && ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops && ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
static inline int dma_supported(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
- return ops->dma_supported(dev, mask);
+ if (ops)
+ return ops->dma_supported(dev, mask);
+ return plat_dma_supported(dev, mask);
}
static inline int dma_mapping_error(struct device *dev, u64 mask)
@@ -43,7 +386,9 @@ static inline int dma_mapping_error(stru
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_mapping_error(dev, mask);
- return ops->mapping_error(dev, mask);
+ if (ops)
+ return ops->mapping_error(dev, mask);
+ return 0;
}
static inline int
@@ -74,7 +419,11 @@ static inline void *dma_alloc_attrs(stru
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
- ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+ if (ops)
+ ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+ else
+ ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
+ attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
@@ -89,7 +438,10 @@ static inline void dma_free_attrs(struct
{
struct dma_map_ops *ops = get_dma_ops(dev);
- ops->free(dev, size, vaddr, dma_handle, attrs);
+ if (ops)
+ ops->free(dev, size, vaddr, dma_handle, attrs);
+ else
+ mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -26,7 +26,7 @@
#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0; /* User defined DMA coherency from command line. */
-EXPORT_SYMBOL_GPL(coherentio);
+EXPORT_SYMBOL(coherentio);
int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
static int __init setcoherentio(char *str)
@@ -46,30 +46,6 @@ static int __init setnocoherentio(char *
early_param("nocoherentio", setnocoherentio);
#endif
-static inline struct page *dma_addr_to_page(struct device *dev,
- dma_addr_t dma_addr)
-{
- return pfn_to_page(
- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
-{
- return !plat_device_is_coherent(dev) &&
- (boot_cpu_type() == CPU_R10000 ||
- boot_cpu_type() == CPU_R12000 ||
- boot_cpu_type() == CPU_BMIPS5000);
-}
-
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
gfp_t dma_flag;
@@ -125,8 +101,9 @@ void *dma_alloc_noncoherent(struct devic
}
EXPORT_SYMBOL(dma_alloc_noncoherent);
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret;
struct page *page = NULL;
@@ -157,6 +134,7 @@ static void *mips_dma_alloc_coherent(str
return ret;
}
+EXPORT_SYMBOL(mips_dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
@@ -167,8 +145,8 @@ void dma_free_noncoherent(struct device
}
EXPORT_SYMBOL(dma_free_noncoherent);
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr;
int order = get_order(size);
@@ -188,6 +166,7 @@ static void mips_dma_free_coherent(struc
if (!dma_release_from_contiguous(dev, page, count))
__free_pages(page, get_order(size));
}
+EXPORT_SYMBOL(mips_dma_free_coherent);
static inline void __dma_sync_virtual(void *addr, size_t size,
enum dma_data_direction direction)
@@ -216,8 +195,8 @@ static inline void __dma_sync_virtual(vo
* If highmem is not configured then the bulk of this loop gets
* optimized out.
*/
-static inline void __dma_sync(struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction)
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
size_t left = size;
@@ -246,108 +225,7 @@ static inline void __dma_sync(struct pag
left -= len;
} while (left);
}
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_addr),
- dma_addr & ~PAGE_MASK, size, direction);
-
- plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- int i;
-
- for (i = 0; i < nents; i++, sg++) {
- if (!plat_device_is_coherent(dev))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
- sg->offset;
- }
-
- return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(page, offset, size, direction);
-
- return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- int i;
-
- for (i = 0; i < nhwentries; i++, sg++) {
- if (!plat_device_is_coherent(dev) &&
- direction != DMA_TO_DEVICE)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
- }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- int i;
-
- if (cpu_needs_post_dma_flush(dev))
- for (i = 0; i < nelems; i++, sg++)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- int i;
-
- if (!plat_device_is_coherent(dev))
- for (i = 0; i < nelems; i++, sg++)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
- return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
@@ -360,23 +238,10 @@ void dma_cache_sync(struct device *dev,
EXPORT_SYMBOL(dma_cache_sync);
-static struct dma_map_ops mips_default_dma_map_ops = {
- .alloc = mips_dma_alloc_coherent,
- .free = mips_dma_free_coherent,
- .map_page = mips_dma_map_page,
- .unmap_page = mips_dma_unmap_page,
- .map_sg = mips_dma_map_sg,
- .unmap_sg = mips_dma_unmap_sg,
- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
- .sync_single_for_device = mips_dma_sync_single_for_device,
- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
- .sync_sg_for_device = mips_dma_sync_sg_for_device,
- .mapping_error = mips_dma_mapping_error,
- .dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
EXPORT_SYMBOL(mips_dma_map_ops);
+#endif
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

View File

@@ -0,0 +1,11 @@
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -165,7 +165,7 @@ else
# annotated or signed tagged state (as git describe only
# looks at signed or annotated tags - git tag -a/-s) and
# LOCALVERSION= is not specified
- if test "${LOCALVERSION+set}" != "set"; then
+ if test "${CONFIG_LOCALVERSION+set}" != "set"; then
scm=$(scm_version --short)
res="$res${scm:++}"
fi

View File

@@ -0,0 +1,14 @@
--- a/Makefile
+++ b/Makefile
@@ -611,9 +611,9 @@ include $(srctree)/arch/$(SRCARCH)/Makef
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os $(EXTRA_OPTIMIZATION) $(call cc-disable-warning,maybe-uninitialized,)
else
-KBUILD_CFLAGS += -O2
+KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION)
endif
# Tell gcc to never replace conditional load with a non-conditional one

View File

@@ -0,0 +1,11 @@
--- a/Makefile
+++ b/Makefile
@@ -408,7 +408,7 @@ KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
-KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)

View File

@@ -0,0 +1,108 @@
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -59,6 +59,7 @@ static struct sym_entry *table;
static unsigned int table_size, table_cnt;
static int all_symbols = 0;
static int absolute_percpu = 0;
+static int uncompressed = 0;
static char symbol_prefix_char = '\0';
static unsigned long long kernel_start_addr = 0;
@@ -392,6 +393,9 @@ static void write_src(void)
free(markers);
+ if (uncompressed)
+ return;
+
output_label("kallsyms_token_table");
off = 0;
for (i = 0; i < 256; i++) {
@@ -450,6 +454,9 @@ static void *find_token(unsigned char *s
{
int i;
+ if (uncompressed)
+ return NULL;
+
for (i = 0; i < len - 1; i++) {
if (str[i] == token[0] && str[i+1] == token[1])
return &str[i];
@@ -522,6 +529,9 @@ static void optimize_result(void)
{
int i, best;
+ if (uncompressed)
+ return;
+
/* using the '\0' symbol last allows compress_symbols to use standard
* fast string functions */
for (i = 255; i >= 0; i--) {
@@ -692,7 +702,9 @@ int main(int argc, char **argv)
} else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
const char *p = &argv[i][14];
kernel_start_addr = strtoull(p, NULL, 16);
- } else
+ } else if (strcmp(argv[i], "--uncompressed") == 0)
+ uncompressed = 1;
+ else
usage();
}
} else if (argc != 1)
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1338,6 +1338,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
the unaligned access emulation.
see arch/parisc/kernel/unaligned.c for reference
+config KALLSYMS_UNCOMPRESSED
+ bool "Keep kallsyms uncompressed"
+ depends on KALLSYMS
+ help
+ Normally kallsyms contains compressed symbols (using a token table),
+ reducing the uncompressed kernel image size. Keeping the symbol table
+ uncompressed significantly reduces the size of this part in compressed
+ kernel images.
+
+ Say N unless you need compressed kernel images to be small.
+
config HAVE_PCSPKR_PLATFORM
bool
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -90,6 +90,10 @@ kallsyms()
kallsymopt="${kallsymopt} --absolute-percpu"
fi
+ if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then
+ kallsymopt="${kallsymopt} --uncompressed"
+ fi
+
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -109,6 +109,11 @@ static unsigned int kallsyms_expand_symb
* For every byte on the compressed symbol data, copy the table
* entry for that byte.
*/
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
+ memcpy(result, data + 1, len - 1);
+ result += len - 1;
+ len = 0;
+#endif
while (len) {
tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
data++;
@@ -141,6 +146,9 @@ tail:
*/
static char kallsyms_get_symbol_type(unsigned int off)
{
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
+ return kallsyms_names[off + 1];
+#endif
/*
* Get just the first code, look it up in the token table,
* and return the first char from this token.

View File

@@ -0,0 +1,194 @@
From: Felix Fietkau <nbd@openwrt.org>
Subject: [PATCH] build: add a hack for removing non-essential module info
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
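For illustration only (not part of this patch): a hypothetical minimal module showing which tags the hack drops once CONFIG_MODULE_STRIPPED is enabled; all names are made up.

/* example.c - with CONFIG_MODULE_STRIPPED=y the tags marked "stripped" expand
 * to empty placeholder structs instead of .modinfo strings; the license tag is
 * untouched because it is still needed for GPL/taint checks at load time. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable debug output");         /* stripped */

static int __init example_init(void)
{
        return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);

MODULE_AUTHOR("Jane Hacker <jane@example.org>");        /* stripped */
MODULE_DESCRIPTION("Example driver");                   /* stripped */
MODULE_VERSION("1.0");                                  /* stripped */
MODULE_LICENSE("GPL");                                  /* kept */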
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -84,9 +84,10 @@ void trim_init_extable(struct module *m)
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+#define MODULE_INFO_STRIP(tag, info) __MODULE_INFO_STRIP(tag, tag, info)
/* For userspace: you can also call me... */
-#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
+#define MODULE_ALIAS(_alias) MODULE_INFO_STRIP(alias, _alias)
/* Soft module dependencies. See man modprobe.d for details.
* Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz")
@@ -127,12 +128,12 @@ void trim_init_extable(struct module *m)
* Author(s), use "Name <email>" or just "Name", for multiple
* authors use multiple MODULE_AUTHOR() statements/lines.
*/
-#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author)
+#define MODULE_AUTHOR(_author) MODULE_INFO_STRIP(author, _author)
/* What your module does. */
-#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
+#define MODULE_DESCRIPTION(_description) MODULE_INFO_STRIP(description, _description)
-#ifdef MODULE
+#if defined(MODULE) && !defined(CONFIG_MODULE_STRIPPED)
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
extern const struct type##_device_id __mod_##type##__##name##_device_table \
@@ -159,7 +160,9 @@ void trim_init_extable(struct module *m)
*/
#if defined(MODULE) || !defined(CONFIG_SYSFS)
-#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#define MODULE_VERSION(_version) MODULE_INFO_STRIP(version, _version)
+#elif defined(CONFIG_MODULE_STRIPPED)
+#define MODULE_VERSION(_version) __MODULE_INFO_DISABLED(version)
#else
#define MODULE_VERSION(_version) \
static struct module_version_attribute ___modver_attr = { \
@@ -181,7 +184,7 @@ void trim_init_extable(struct module *m)
/* Optional firmware file (or files) needed by the module
* format is simply firmware file name. Multiple firmware
* files require multiple MODULE_FIRMWARE() specifiers */
-#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
+#define MODULE_FIRMWARE(_firmware) MODULE_INFO_STRIP(firmware, _firmware)
/* Given an address, look for it in the exception tables */
const struct exception_table_entry *search_exception_tables(unsigned long add);
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,6 +16,16 @@
/* Chosen so that structs with an unsigned long line up. */
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO_DISABLED(name) \
+ struct __UNIQUE_ID(name) {}
+
+#ifdef CONFIG_MODULE_STRIPPED
+#define __MODULE_INFO_STRIP(tag, name, info) __MODULE_INFO_DISABLED(name)
+#else
+#define __MODULE_INFO_STRIP(tag, name, info) __MODULE_INFO(tag, name, info)
+#endif
+
#ifdef MODULE
#define __MODULE_INFO(tag, name, info) \
static const char __UNIQUE_ID(name)[] \
@@ -23,8 +33,7 @@ static const char __UNIQUE_ID(name)[]
= __stringify(tag) "=" info
#else /* !MODULE */
/* This struct is here for syntactic coherency, it is not used */
-#define __MODULE_INFO(tag, name, info) \
- struct __UNIQUE_ID(name) {}
+#define __MODULE_INFO(tag, name, info) __MODULE_INFO_DISABLED(name)
#endif
#define __MODULE_PARM_TYPE(name, _type) \
__MODULE_INFO(parmtype, name##type, #name ":" _type)
@@ -32,7 +41,7 @@ static const char __UNIQUE_ID(name)[]
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
#define MODULE_PARM_DESC(_parm, desc) \
- __MODULE_INFO(parm, _parm, #_parm ":" desc)
+ __MODULE_INFO_STRIP(parm, _parm, #_parm ":" desc)
struct kernel_param;
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1973,6 +1973,13 @@ config MODULE_COMPRESS_XZ
endchoice
+config MODULE_STRIPPED
+ bool "Reduce module size"
+ depends on MODULES
+ help
+ Remove module parameter descriptions, author info, version, aliases,
+ device tables, etc.
+
endif # MODULES
config INIT_ALL_POSSIBLE
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2665,6 +2665,7 @@ static struct module *setup_load_info(st
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
+#ifndef CONFIG_MODULE_STRIPPED
const char *modmagic = get_modinfo(info, "vermagic");
int err;
@@ -2690,6 +2691,7 @@ static int check_modinfo(struct module *
pr_warn("%s: module is from the staging directory, the quality "
"is unknown, you have been warned.\n", mod->name);
}
+#endif
/* Set up license info based on the info section */
set_license(mod, get_modinfo(info, "license"));
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1726,7 +1726,9 @@ static void read_symbols(char *modname)
symname = remove_dot(info.strtab + sym->st_name);
handle_modversions(mod, &info, sym, symname);
+#ifndef CONFIG_MODULE_STRIPPED
handle_moddevtable(mod, &info, sym, symname);
+#endif
}
if (!is_vmlinux(modname) ||
(is_vmlinux(modname) && vmlinux_section_warnings))
@@ -1870,7 +1872,9 @@ static void add_header(struct buffer *b,
buf_printf(b, "#include <linux/vermagic.h>\n");
buf_printf(b, "#include <linux/compiler.h>\n");
buf_printf(b, "\n");
+#ifndef CONFIG_MODULE_STRIPPED
buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
+#endif
buf_printf(b, "\n");
buf_printf(b, "__visible struct module __this_module\n");
buf_printf(b, "__attribute__((section(\".gnu.linkonce.this_module\"))) = {\n");
@@ -1887,16 +1891,20 @@ static void add_header(struct buffer *b,
static void add_intree_flag(struct buffer *b, int is_intree)
{
+#ifndef CONFIG_MODULE_STRIPPED
if (is_intree)
buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
+#endif
}
static void add_staging_flag(struct buffer *b, const char *name)
{
+#ifndef CONFIG_MODULE_STRIPPED
static const char *staging_dir = "drivers/staging";
if (strncmp(staging_dir, name, strlen(staging_dir)) == 0)
buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n");
+#endif
}
/**
@@ -1989,11 +1997,13 @@ static void add_depends(struct buffer *b
static void add_srcversion(struct buffer *b, struct module *mod)
{
+#ifndef CONFIG_MODULE_STRIPPED
if (mod->srcversion[0]) {
buf_printf(b, "\n");
buf_printf(b, "MODULE_INFO(srcversion, \"%s\");\n",
mod->srcversion);
}
+#endif
}
static void write_if_changed(struct buffer *b, const char *fname)
@@ -2224,7 +2234,9 @@ int main(int argc, char **argv)
add_staging_flag(&buf, mod->name);
err |= add_versions(&buf, mod);
add_depends(&buf, mod, modules);
+#ifndef CONFIG_MODULE_STRIPPED
add_moddevtable(&buf, mod);
+#endif
add_srcversion(&buf, mod);
sprintf(fname, "%s.mod.c", mod->name);

File diff suppressed because it is too large

View File

@@ -0,0 +1,51 @@
--- a/tools/include/tools/be_byteshift.h
+++ b/tools/include/tools/be_byteshift.h
@@ -1,6 +1,10 @@
#ifndef _TOOLS_BE_BYTESHIFT_H
#define _TOOLS_BE_BYTESHIFT_H
+#ifndef __linux__
+#include "linux_types.h"
+#endif
+
#include <stdint.h>
static inline uint16_t __get_unaligned_be16(const uint8_t *p)
--- a/tools/include/tools/le_byteshift.h
+++ b/tools/include/tools/le_byteshift.h
@@ -1,6 +1,10 @@
#ifndef _TOOLS_LE_BYTESHIFT_H
#define _TOOLS_LE_BYTESHIFT_H
+#ifndef __linux__
+#include "linux_types.h"
+#endif
+
#include <stdint.h>
static inline uint16_t __get_unaligned_le16(const uint8_t *p)
--- /dev/null
+++ b/tools/include/tools/linux_types.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_TYPES_H
+#define __LINUX_TYPES_H
+
+#include <stdint.h>
+
+typedef uint8_t __u8;
+typedef uint8_t __be8;
+typedef uint8_t __le8;
+
+typedef uint16_t __u16;
+typedef uint16_t __be16;
+typedef uint16_t __le16;
+
+typedef uint32_t __u32;
+typedef uint32_t __be32;
+typedef uint32_t __le32;
+
+typedef uint64_t __u64;
+typedef uint64_t __be64;
+typedef uint64_t __le64;
+
+#endif

View File

@@ -0,0 +1,531 @@
From: Felix Fietkau <nbd@openwrt.org>
Use -ffunction-sections, -fdata-sections and --gc-sections --sort-section=name.
In combination with kernel symbol export stripping this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
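Illustration only (not part of this patch): a toy example of what the new flags do, with made-up function names. Per-function and per-data sections let the linker garbage-collect anything unreferenced, which is why the linker-script changes below wrap tables that are only reached at runtime (initcalls, exception tables, ...) in KEEP().

/* demo.c - built with something like:
 *   gcc -ffunction-sections -fdata-sections -Wl,--gc-sections demo.c -o demo
 * Each function lands in its own section, so the unreferenced one is
 * discarded at link time instead of being carried into the final image. */
int used_fn(void)       /* emitted as .text.used_fn - kept, main() calls it */
{
        return 42;
}

int unused_fn(void)     /* emitted as .text.unused_fn - dropped by --gc-sections */
{
        return 7;
}

int main(void)
{
        return used_fn();
}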
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
#
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
cflags-y += -msoft-float
-LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections --sort-section=name
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
cflags-y += -ffreestanding
#
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -67,7 +67,7 @@ SECTIONS
/* Exception table for data bus errors */
__dbe_table : {
__start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
__stop___dbe_table = .;
}
@@ -112,7 +112,7 @@ SECTIONS
. = ALIGN(4);
.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
__mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
__mips_machines_end = .;
}
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -87,7 +87,7 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
+ KEEP(*(__mcount_loc)) \
VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
@@ -95,7 +95,7 @@
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
+ KEEP(*(_ftrace_annotated_branch)) \
VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
@@ -103,7 +103,7 @@
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
+ KEEP(*(_ftrace_branch)) \
VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
@@ -112,7 +112,7 @@
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
@@ -120,7 +120,7 @@
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
*(__tracepoint_str) /* Trace_printk fmt' pointer */ \
@@ -133,7 +133,7 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
@@ -142,8 +142,8 @@
#ifdef CONFIG_CLKSRC_OF
#define CLKSRC_OF_TABLES() . = ALIGN(8); \
VMLINUX_SYMBOL(__clksrc_of_table) = .; \
- *(__clksrc_of_table) \
- *(__clksrc_of_table_end)
+ KEEP(*(__clksrc_of_table)) \
+ KEEP(*(__clksrc_of_table_end))
#else
#define CLKSRC_OF_TABLES()
#endif
@@ -152,8 +152,8 @@
#define IRQCHIP_OF_MATCH_TABLE() \
. = ALIGN(8); \
VMLINUX_SYMBOL(__irqchip_begin) = .; \
- *(__irqchip_of_table) \
- *(__irqchip_of_end)
+ KEEP(*(__irqchip_of_table)) \
+ KEEP(*(__irqchip_of_end))
#else
#define IRQCHIP_OF_MATCH_TABLE()
#endif
@@ -161,8 +161,8 @@
#ifdef CONFIG_COMMON_CLK
#define CLK_OF_TABLES() . = ALIGN(8); \
VMLINUX_SYMBOL(__clk_of_table) = .; \
- *(__clk_of_table) \
- *(__clk_of_table_end)
+ KEEP(*(__clk_of_table)) \
+ KEEP(*(__clk_of_table_end))
#else
#define CLK_OF_TABLES()
#endif
@@ -170,7 +170,7 @@
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;
/* .data section */
@@ -186,16 +186,17 @@
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ *(.data.[a-zA-Z_]*)
/*
* Data section helpers
@@ -249,32 +250,32 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
@@ -283,49 +284,49 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
@@ -339,14 +340,14 @@
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
@@ -365,14 +366,14 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -428,7 +429,7 @@
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__entry_text_start) = .; \
- *(.entry.text) \
+ KEEP(*(.entry.text)) \
VMLINUX_SYMBOL(__entry_text_end) = .;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -456,7 +457,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}
@@ -472,8 +473,8 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
- *(.init_array) \
+ KEEP(*(.ctors)) \
+ KEEP(*(.init_array)) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -517,7 +518,7 @@
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
- *(.sbss) \
+ *(.sbss .sbss.*) \
*(.scommon) \
}
@@ -535,7 +536,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss) \
+ *(.bss .bss.*) \
*(COMMON) \
}
@@ -596,7 +597,7 @@
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
}
#else
@@ -613,17 +614,17 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
@@ -637,21 +638,21 @@
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -18,11 +18,16 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
LDFLAGS_MODULE += --be8
endif
+LDFLAGS_vmlinux += --gc-sections --sort-section=name
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,13 +12,13 @@
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
+ KEEP(*(.idmap.text)) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(32); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
@@ -93,7 +93,7 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
- *(.exception.text)
+ KEEP(*(.exception.text))
__exception_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
@@ -118,7 +118,7 @@ SECTIONS
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
#ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
#endif
__stop___ex_table = .;
}
@@ -130,12 +130,12 @@ SECTIONS
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
- *(.ARM.exidx*)
+ KEEP(*(.ARM.exidx*))
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
- *(.ARM.extab*)
+ KEEP(*(.ARM.extab*))
__stop_unwind_tab = .;
}
#endif
@@ -154,14 +154,14 @@ SECTIONS
*/
__vectors_start = .;
.vectors 0 : AT(__vectors_start) {
- *(.vectors)
+ KEEP(*(.vectors))
}
. = __vectors_start + SIZEOF(.vectors);
__vectors_end = .;
__stubs_start = .;
.stubs 0x1000 : AT(__stubs_start) {
- *(.stubs)
+ KEEP(*(.stubs))
}
. = __stubs_start + SIZEOF(.stubs);
__stubs_end = .;
@@ -175,24 +175,24 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
- *(.taglist.init)
+ KEEP(*(.taglist.init))
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
- *(.alt.smp.init)
+ KEEP(*(.alt.smp.init))
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
.init.data : {
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -120,6 +120,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -DZIMAGE

View File

@@ -0,0 +1,88 @@
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -52,6 +52,16 @@
#define LOAD_OFFSET 0
#endif
+#ifndef SYMTAB_KEEP
+#define SYMTAB_KEEP KEEP(*(SORT(___ksymtab+*)))
+#define SYMTAB_KEEP_GPL KEEP(*(SORT(___ksymtab_gpl+*)))
+#endif
+
+#ifndef SYMTAB_DISCARD
+#define SYMTAB_DISCARD
+#define SYMTAB_DISCARD_GPL
+#endif
+
#include <linux/export.h>
/* Align . to a 8 byte boundary equals to maximum function alignment. */
@@ -284,14 +294,14 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- KEEP(*(SORT(___ksymtab+*))) \
+ SYMTAB_KEEP \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- KEEP(*(SORT(___ksymtab_gpl+*))) \
+ SYMTAB_KEEP_GPL \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
@@ -353,7 +363,7 @@
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- *(__ksymtab_strings) \
+ *(__ksymtab_strings+*) \
} \
\
/* __*init sections */ \
@@ -671,6 +681,8 @@
EXIT_TEXT \
EXIT_DATA \
EXIT_CALL \
+ SYMTAB_DISCARD \
+ SYMTAB_DISCARD_GPL \
*(.discard) \
*(.discard.*) \
}
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -349,7 +349,7 @@ targets += $(extra-y) $(MAKECMDGOALS) $(
# Linker scripts preprocessor (.lds.S -> .lds)
# ---------------------------------------------------------------------------
quiet_cmd_cpp_lds_S = LDS $@
- cmd_cpp_lds_S = $(CPP) $(cpp_flags) -P -C -U$(ARCH) \
+ cmd_cpp_lds_S = $(CPP) $(EXTRA_LDSFLAGS) $(cpp_flags) -P -C -U$(ARCH) \
-D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $<
$(obj)/%.lds: $(src)/%.lds.S FORCE
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -52,12 +52,19 @@ extern struct module __this_module;
#define __CRC_SYMBOL(sym, sec)
#endif
+#ifdef MODULE
+#define __EXPORT_SUFFIX(sym)
+#else
+#define __EXPORT_SUFFIX(sym) "+" #sym
+#endif
+
/* For every exported symbol, place a struct in the __ksymtab section */
#define __EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ __attribute__((section("__ksymtab_strings" \
+ __EXPORT_SUFFIX(sym)), aligned(1))) \
= VMLINUX_SYMBOL_STR(sym); \
extern const struct kernel_symbol __ksymtab_##sym; \
__visible const struct kernel_symbol __ksymtab_##sym \

View File

@@ -0,0 +1,58 @@
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -325,7 +325,7 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^)
quiet_cmd_lzma = LZMA $@
cmd_lzma = (cat $(filter-out FORCE,$^) | \
- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+ lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
(rm -f $@ ; false)
quiet_cmd_lzo = LZO $@
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -226,7 +226,7 @@ cpio_list=
output="/dev/stdout"
output_file=""
is_cpio_compressed=
-compr="gzip -n -9 -f"
+compr="gzip -n -9 -f -"
arg="$1"
case "$arg" in
@@ -242,13 +242,13 @@ case "$arg" in
output=${cpio_list}
echo "$output_file" | grep -q "\.gz$" \
&& [ -x "`which gzip 2> /dev/null`" ] \
- && compr="gzip -n -9 -f"
+ && compr="gzip -n -9 -f -"
echo "$output_file" | grep -q "\.bz2$" \
&& [ -x "`which bzip2 2> /dev/null`" ] \
- && compr="bzip2 -9 -f"
+ && compr="bzip2 -9 -f -"
echo "$output_file" | grep -q "\.lzma$" \
&& [ -x "`which lzma 2> /dev/null`" ] \
- && compr="lzma -9 -f"
+ && compr="lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so"
echo "$output_file" | grep -q "\.xz$" \
&& [ -x "`which xz 2> /dev/null`" ] \
&& compr="xz --check=crc32 --lzma2=dict=1MiB"
@@ -315,7 +315,7 @@ if [ ! -z ${output_file} ]; then
if [ "${is_cpio_compressed}" = "compressed" ]; then
cat ${cpio_tfile} > ${output_file}
else
- (cat ${cpio_tfile} | ${compr} - > ${output_file}) \
+ (cat ${cpio_tfile} | ${compr} > ${output_file}) \
|| (rm -f ${output_file} ; false)
fi
[ -z ${cpio_file} ] && rm ${cpio_tfile}
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -48,6 +48,7 @@ static const struct compress_format comp
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
+ { {0x6d, 0x00}, "lzma-openwrt", unlzma },
{ {0xfd, 0x37}, "xz", unxz },
{ {0x89, 0x4c}, "lzo", unlzo },
{ {0x02, 0x21}, "lz4", unlz4 },

View File

@@ -0,0 +1,18 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -198,7 +198,6 @@ config NF_CONNTRACK_FTP
config NF_CONNTRACK_H323
tristate "H.323 protocol support"
- depends on (IPV6 || IPV6=n)
depends on NETFILTER_ADVANCED
help
H.323 is a VoIP signalling protocol from ITU-T. As one of the most
@@ -895,7 +894,6 @@ config NETFILTER_XT_TARGET_SECMARK
config NETFILTER_XT_TARGET_TCPMSS
tristate '"TCPMSS" target support'
- depends on (IPV6 || IPV6=n)
default m if NETFILTER_ADVANCED=n
---help---
This option adds a `TCPMSS' target, which allows you to alter the

View File

@ -0,0 +1,18 @@
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -10,13 +10,13 @@ config SND_DMAENGINE_PCM
tristate
config SND_HWDEP
- tristate
+ tristate "Sound hardware support"
config SND_RAWMIDI
tristate
config SND_COMPRESS_OFFLOAD
- tristate
+ tristate "Compression offloading support"
# To be effective this also requires INPUT - users should say:
# select SND_JACK if INPUT=y || INPUT=SND

View File

@ -0,0 +1,10 @@
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -164,6 +164,7 @@ config CRYPTO_DEV_MV_CESA
depends on PLAT_ORION
select CRYPTO_ALGAPI
select CRYPTO_AES
+ select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
select CRYPTO_HASH
help

View File

@ -0,0 +1,29 @@
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -29,6 +29,7 @@ config SSB_SPROM
config SSB_BLOCKIO
bool
depends on SSB
+ default y
config SSB_PCIHOST_POSSIBLE
bool
@@ -49,7 +50,7 @@ config SSB_PCIHOST
config SSB_B43_PCI_BRIDGE
bool
depends on SSB_PCIHOST
- default n
+ default y
config SSB_PCMCIAHOST_POSSIBLE
bool
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -17,6 +17,7 @@ config BCMA
config BCMA_BLOCKIO
bool
depends on BCMA
+ default y
config BCMA_HOST_PCI_POSSIBLE
bool

View File

@ -0,0 +1,23 @@
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -320,16 +320,16 @@ config BCH_CONST_T
# Textsearch support is select'ed if needed
#
config TEXTSEARCH
- boolean
+ boolean "Textsearch support"
config TEXTSEARCH_KMP
- tristate
+ tristate "Textsearch KMP"
config TEXTSEARCH_BM
- tristate
+ tristate "Textsearch BM"
config TEXTSEARCH_FSM
- tristate
+ tristate "Textsearch FSM"
config BTREE
boolean

View File

@ -0,0 +1,19 @@
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -192,13 +192,13 @@ config LIB80211
Drivers should select this themselves if needed.
config LIB80211_CRYPT_WEP
- tristate
+ tristate "LIB80211_CRYPT_WEP"
config LIB80211_CRYPT_CCMP
- tristate
+ tristate "LIB80211_CRYPT_CCMP"
config LIB80211_CRYPT_TKIP
- tristate
+ tristate "LIB80211_CRYPT_TKIP"
config LIB80211_DEBUG
bool "lib80211 debugging messages"

View File

@ -0,0 +1,47 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -32,7 +32,7 @@ config CRYPTO_FIPS
this is.
config CRYPTO_ALGAPI
- tristate
+ tristate "ALGAPI"
select CRYPTO_ALGAPI2
help
This option provides the API for cryptographic algorithms.
@@ -41,7 +41,7 @@ config CRYPTO_ALGAPI2
tristate
config CRYPTO_AEAD
- tristate
+ tristate "AEAD"
select CRYPTO_AEAD2
select CRYPTO_ALGAPI
@@ -50,7 +50,7 @@ config CRYPTO_AEAD2
select CRYPTO_ALGAPI2
config CRYPTO_BLKCIPHER
- tristate
+ tristate "BLKCIPHER"
select CRYPTO_BLKCIPHER2
select CRYPTO_ALGAPI
@@ -61,7 +61,7 @@ config CRYPTO_BLKCIPHER2
select CRYPTO_WORKQUEUE
config CRYPTO_HASH
- tristate
+ tristate "HASH"
select CRYPTO_HASH2
select CRYPTO_ALGAPI
@@ -70,7 +70,7 @@ config CRYPTO_HASH2
select CRYPTO_ALGAPI2
config CRYPTO_RNG
- tristate
+ tristate "RNG"
select CRYPTO_RNG2
select CRYPTO_ALGAPI

View File

@ -0,0 +1,22 @@
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,5 +1,5 @@
config WIRELESS_EXT
- bool
+ bool "Wireless extensions"
config WEXT_CORE
def_bool y
@@ -11,10 +11,10 @@ config WEXT_PROC
depends on WEXT_CORE
config WEXT_SPY
- bool
+ bool "WEXT_SPY"
config WEXT_PRIV
- bool
+ bool "WEXT_PRIV"
config CFG80211
tristate "cfg80211 - wireless configuration API"

View File

@ -0,0 +1,11 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -2,7 +2,7 @@ menu "Core Netfilter Configuration"
depends on NET && INET && NETFILTER
config NETFILTER_NETLINK
- tristate
+ tristate "Netfilter NFNETLINK interface"
config NETFILTER_NETLINK_ACCT
tristate "Netfilter NFACCT over NFNETLINK interface"

View File

@ -0,0 +1,76 @@
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,23 +3,28 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
- bool
+ tristate "Regmap"
config REGMAP_I2C
- tristate
+ select REGMAP
+ tristate "Regmap I2C
config REGMAP_SPI
- tristate
+ select REGMAP
+ depends on SPI_MASTER
+ tristate "Regmap SPI"
config REGMAP_SPMI
+ select REGMAP
tristate
config REGMAP_MMIO
+ select REGMAP
tristate
config REGMAP_IRQ
+ select REGMAP
bool
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -49,7 +49,7 @@ struct reg_default {
unsigned int def;
};
-#ifdef CONFIG_REGMAP
+#if IS_ENABLED(CONFIG_REGMAP)
enum regmap_endian {
/* Unspecified -> 0 -> Backwards compatible default */
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,6 +1,8 @@
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
-obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
+regmap-core-objs = regmap.o regcache.o regcache-rbtree.o regcache-lzo.o regcache-flat.o
+ifdef CONFIG_DEBUG_FS
+regmap-core-objs += regmap-debugfs.o
+endif
+obj-$(CONFIG_REGMAP) += regmap-core.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>
@@ -2247,3 +2248,5 @@ static int __init regmap_initcall(void)
return 0;
}
postcore_initcall(regmap_initcall);
+
+MODULE_LICENSE("GPL");

View File

@ -0,0 +1,37 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -96,10 +96,10 @@ config CRYPTO_MANAGER
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
- select CRYPTO_AEAD2
- select CRYPTO_HASH2
- select CRYPTO_BLKCIPHER2
- select CRYPTO_PCOMP2
+ select CRYPTO_AEAD2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_HASH2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_PCOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -248,6 +248,9 @@ static int cryptomgr_schedule_test(struc
type = alg->cra_flags;
/* This piece of crap needs to disappear into per-type test hooks. */
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ type |= CRYPTO_ALG_TESTED;
+#else
if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
@@ -256,6 +259,7 @@ static int cryptomgr_schedule_test(struc
(!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
type |= CRYPTO_ALG_TESTED;
+#endif
param->type = type;

View File

@ -0,0 +1,219 @@
--- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
+++ /dev/null
@@ -1,96 +0,0 @@
-#ifndef __BCM963XX_TAG_H
-#define __BCM963XX_TAG_H
-
-#define TAGVER_LEN 4 /* Length of Tag Version */
-#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
-#define SIG1_LEN 20 /* Company Signature 1 Length */
-#define SIG2_LEN 14 /* Company Signature 2 Length */
-#define BOARDID_LEN 16 /* Length of BoardId */
-#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
-#define CHIPID_LEN 6 /* Chip Id Length */
-#define IMAGE_LEN 10 /* Length of Length Field */
-#define ADDRESS_LEN 12 /* Length of Address field */
-#define DUALFLAG_LEN 2 /* Dual Image flag Length */
-#define INACTIVEFLAG_LEN 2 /* Inactie Flag Length */
-#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
-#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
-#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
-#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
-#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
-
-#define NUM_PIRELLI 2
-#define IMAGETAG_CRC_START 0xFFFFFFFF
-
-#define PIRELLI_BOARDS { \
- "AGPF-S0", \
- "DWV-S0", \
-}
-
-/*
- * The broadcom firmware assumes the rootfs starts the image,
- * therefore uses the rootfs start (flash_image_address)
- * to determine where to flash the image. Since we have the kernel first
- * we have to give it the kernel address, but the crc uses the length
- * associated with this address (root_length), which is added to the kernel
- * length (kernel_length) to determine the length of image to flash and thus
- * needs to be rootfs + deadcode (jffs2 EOF marker)
-*/
-
-struct bcm_tag {
- /* 0-3: Version of the image tag */
- char tag_version[TAGVER_LEN];
- /* 4-23: Company Line 1 */
- char sig_1[SIG1_LEN];
- /* 24-37: Company Line 2 */
- char sig_2[SIG2_LEN];
- /* 38-43: Chip this image is for */
- char chip_id[CHIPID_LEN];
- /* 44-59: Board name */
- char board_id[BOARDID_LEN];
- /* 60-61: Map endianness -- 1 BE 0 LE */
- char big_endian[ENDIANFLAG_LEN];
- /* 62-71: Total length of image */
- char total_length[IMAGE_LEN];
- /* 72-83: Address in memory of CFE */
- char cfe__address[ADDRESS_LEN];
- /* 84-93: Size of CFE */
- char cfe_length[IMAGE_LEN];
- /* 94-105: Address in memory of image start
- * (kernel for OpenWRT, rootfs for stock firmware)
- */
- char flash_image_start[ADDRESS_LEN];
- /* 106-115: Size of rootfs */
- char root_length[IMAGE_LEN];
- /* 116-127: Address in memory of kernel */
- char kernel_address[ADDRESS_LEN];
- /* 128-137: Size of kernel */
- char kernel_length[IMAGE_LEN];
- /* 138-139: Unused at the moment */
- char dual_image[DUALFLAG_LEN];
- /* 140-141: Unused at the moment */
- char inactive_flag[INACTIVEFLAG_LEN];
- /* 142-161: RSA Signature (not used; some vendors may use this) */
- char rsa_signature[RSASIG_LEN];
- /* 162-191: Compilation and related information (not used in OpenWrt) */
- char information1[TAGINFO1_LEN];
- /* 192-195: Version flash layout */
- char flash_layout_ver[FLASHLAYOUTVER_LEN];
- /* 196-199: kernel+rootfs CRC32 */
- __u32 fskernel_crc;
- /* 200-215: Unused except on Alice Gate where is is information */
- char information2[TAGINFO2_LEN];
- /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
- __u32 image_crc;
- /* 220-223: CRC32 of rootfs partition */
- __u32 rootfs_crc;
- /* 224-227: CRC32 of kernel partition */
- __u32 kernel_crc;
- /* 228-235: Unused at present */
- char reserved1[8];
- /* 236-239: CRC32 of header excluding last 20 bytes */
- __u32 header_crc;
- /* 240-255: Unused at present */
- char reserved2[16];
-};
-
-#endif /* __BCM63XX_TAG_H */
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -34,7 +34,7 @@
#include <linux/mtd/partitions.h>
#include <asm/mach-bcm63xx/bcm63xx_nvram.h>
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <linux/bcm963xx_tag.h>
#include <asm/mach-bcm63xx/board_bcm963xx.h>
#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -70,6 +70,7 @@ header-y += blktrace_api.h
header-y += bpf.h
header-y += bpf_common.h
header-y += bpqether.h
+header-y += bcm963xx_tag.h
header-y += bsg.h
header-y += btrfs.h
header-y += can.h
--- /dev/null
+++ b/include/uapi/linux/bcm963xx_tag.h
@@ -0,0 +1,96 @@
+#ifndef __BCM963XX_TAG_H
+#define __BCM963XX_TAG_H
+
+#define TAGVER_LEN 4 /* Length of Tag Version */
+#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
+#define SIG1_LEN 20 /* Company Signature 1 Length */
+#define SIG2_LEN 14 /* Company Signature 2 Length */
+#define BOARDID_LEN 16 /* Length of BoardId */
+#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
+#define CHIPID_LEN 6 /* Chip Id Length */
+#define IMAGE_LEN 10 /* Length of Length Field */
+#define ADDRESS_LEN 12 /* Length of Address field */
+#define DUALFLAG_LEN 2 /* Dual Image flag Length */
+#define INACTIVEFLAG_LEN 2 /* Inactie Flag Length */
+#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
+#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
+#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
+#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
+#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
+
+#define NUM_PIRELLI 2
+#define IMAGETAG_CRC_START 0xFFFFFFFF
+
+#define PIRELLI_BOARDS { \
+ "AGPF-S0", \
+ "DWV-S0", \
+}
+
+/*
+ * The broadcom firmware assumes the rootfs starts the image,
+ * therefore uses the rootfs start (flash_image_address)
+ * to determine where to flash the image. Since we have the kernel first
+ * we have to give it the kernel address, but the crc uses the length
+ * associated with this address (root_length), which is added to the kernel
+ * length (kernel_length) to determine the length of image to flash and thus
+ * needs to be rootfs + deadcode (jffs2 EOF marker)
+*/
+
+struct bcm_tag {
+ /* 0-3: Version of the image tag */
+ char tag_version[TAGVER_LEN];
+ /* 4-23: Company Line 1 */
+ char sig_1[SIG1_LEN];
+ /* 24-37: Company Line 2 */
+ char sig_2[SIG2_LEN];
+ /* 38-43: Chip this image is for */
+ char chip_id[CHIPID_LEN];
+ /* 44-59: Board name */
+ char board_id[BOARDID_LEN];
+ /* 60-61: Map endianness -- 1 BE 0 LE */
+ char big_endian[ENDIANFLAG_LEN];
+ /* 62-71: Total length of image */
+ char total_length[IMAGE_LEN];
+ /* 72-83: Address in memory of CFE */
+ char cfe__address[ADDRESS_LEN];
+ /* 84-93: Size of CFE */
+ char cfe_length[IMAGE_LEN];
+ /* 94-105: Address in memory of image start
+ * (kernel for OpenWRT, rootfs for stock firmware)
+ */
+ char flash_image_start[ADDRESS_LEN];
+ /* 106-115: Size of rootfs */
+ char root_length[IMAGE_LEN];
+ /* 116-127: Address in memory of kernel */
+ char kernel_address[ADDRESS_LEN];
+ /* 128-137: Size of kernel */
+ char kernel_length[IMAGE_LEN];
+ /* 138-139: Unused at the moment */
+ char dual_image[DUALFLAG_LEN];
+ /* 140-141: Unused at the moment */
+ char inactive_flag[INACTIVEFLAG_LEN];
+ /* 142-161: RSA Signature (not used; some vendors may use this) */
+ char rsa_signature[RSASIG_LEN];
+ /* 162-191: Compilation and related information (not used in OpenWrt) */
+ char information1[TAGINFO1_LEN];
+ /* 192-195: Version flash layout */
+ char flash_layout_ver[FLASHLAYOUTVER_LEN];
+ /* 196-199: kernel+rootfs CRC32 */
+ __u32 fskernel_crc;
+ /* 200-215: Unused except on Alice Gate where is is information */
+ char information2[TAGINFO2_LEN];
+ /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
+ __u32 image_crc;
+ /* 220-223: CRC32 of rootfs partition */
+ __u32 rootfs_crc;
+ /* 224-227: CRC32 of kernel partition */
+ __u32 kernel_crc;
+ /* 228-235: Unused at present */
+ char reserved1[8];
+ /* 236-239: CRC32 of header excluding last 20 bytes */
+ __u32 header_crc;
+ /* 240-255: Unused at present */
+ char reserved2[16];
+};
+
+#endif /* __BCM63XX_TAG_H */

View File

@ -0,0 +1,23 @@
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -205,16 +205,16 @@ config RANDOM32_SELFTEST
# compression support is select'ed if needed
#
config ZLIB_INFLATE
- tristate
+ tristate "ZLIB inflate support"
config ZLIB_DEFLATE
- tristate
+ tristate "ZLIB deflate support"
config LZO_COMPRESS
- tristate
+ tristate "LZO compress support"
config LZO_DECOMPRESS
- tristate
+ tristate "LZO decompress support"
config LZ4_COMPRESS
tristate

View File

@ -0,0 +1,39 @@
From: Mark Miller <mark@mirell.org>
This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
certain Broadcom chipsets running CFE in order to load the kernel.
Signed-off-by: Mark Miller <mark@mirell.org>
Acked-by: Rob Landley <rob@landley.net>
---
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -892,9 +892,6 @@ config FW_ARC
config ARCH_MAY_HAVE_PC_FDC
bool
-config BOOT_RAW
- bool
-
config CEVT_BCM1480
bool
@@ -2492,6 +2489,18 @@ config USE_OF
config BUILTIN_DTB
bool
+config BOOT_RAW
+ bool "Enable the kernel to be executed from the load address"
+ default n
+ help
+ Allow the kernel to be executed from the load address for
+ bootloaders which cannot read the ELF format. This places
+ a jump to start_kernel at the load address.
+
+ If unsure, say N.
+
+
+
endmenu
config LOCKDEP_SUPPORT

View File

@ -0,0 +1,28 @@
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -988,6 +988,10 @@ config SYNC_R4K
config MIPS_MACHINE
def_bool n
+config IMAGE_CMDLINE_HACK
+ bool "OpenWrt specific image command line hack"
+ default n
+
config NO_IOPORT_MAP
def_bool n
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -80,6 +80,12 @@ FEXPORT(__kernel_entry)
j kernel_entry
#endif
+#ifdef CONFIG_IMAGE_CMDLINE_HACK
+ .ascii "CMDLINE:"
+EXPORT(__image_cmdline)
+ .fill 0x400
+#endif /* CONFIG_IMAGE_CMDLINE_HACK */
+
__REF
NESTED(kernel_entry, 16, sp) # kernel entry point

View File

@ -0,0 +1,11 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -87,7 +87,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
# machines may also. Since BFD is incredibly buggy with respect to
# crossformat linking we rely on the elf2ecoff tool for format conversion.
#
-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
+cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
cflags-y += -msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
KBUILD_AFLAGS_MODULE += -mlong-calls

View File

@ -0,0 +1,160 @@
MIPS: allow disabling the kernel FPU emulator
This patch allows turning off the in-kernel Algorithmics
FPU emulator support, which saves a couple of precious
blocks on an embedded system.
Signed-off-by: Florian Fainelli <florian@openwrt.org>
--
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -934,6 +934,17 @@ config I8259
config MIPS_BONITO64
bool
+config MIPS_FPU_EMU
+ bool "Enable FPU emulation"
+ default y
+ help
+ This option allows building a kernel with or without the Algorithmics
+ FPU emulator enabled. Turning off this option results in a kernel which
+ does not catch floating operations exceptions. Make sure that your toolchain
+ is configured to enable software floating point emulation in that case.
+
+ If unsure say Y here.
+
config MIPS_MSC
bool
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -2,10 +2,12 @@
# Makefile for the Linux/MIPS kernel FPU emulation.
#
-obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
+obj-y := kernel_linkage.o
+
+obj-$(CONFIG_MIPS_FPU_EMU) += ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \
dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \
dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \
sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
- dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
+ dp_sqrt.o sp_sqrt.o dsemul.o cp1emu.o
--- a/arch/mips/math-emu/kernel_linkage.c
+++ b/arch/mips/math-emu/kernel_linkage.c
@@ -29,6 +29,7 @@
#define SIGNALLING_NAN 0x7ff800007ff80000LL
+#ifdef CONFIG_MIPS_FPU_EMU
void fpu_emulator_init_fpu(void)
{
static int first = 1;
@@ -115,3 +116,36 @@ int fpu_emulator_restore_context32(struc
return err;
}
#endif
+
+#else
+
+void fpu_emulator_init_fpu(void)
+{
+ printk(KERN_INFO "FPU emulator disabled, make sure your toolchain"
+ "was compiled with software floating point support (soft-float)\n");
+ return;
+}
+
+int fpu_emulator_save_context(struct sigcontext __user *sc)
+{
+ return 0;
+}
+
+int fpu_emulator_restore_context(struct sigcontext __user *sc)
+{
+ return 0;
+}
+
+#ifdef CONFIG_64BIT
+int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
+{
+ return 0;
+}
+
+int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
+{
+ return 0;
+}
+#endif /* CONFIG_64BIT */
+
+#endif /* CONFIG_MIPS_FPU_EMU */
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -27,6 +27,8 @@
#include <asm/inst.h>
#include <asm/local.h>
+#ifdef CONFIG_MIPS_FPU_EMU
+
#ifdef CONFIG_DEBUG_FS
struct mips_fpu_emulator_stats {
@@ -60,6 +62,38 @@ extern int fpu_emulator_cop1Handler(stru
int process_fpemu_return(int sig, void __user *fault_addr);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
+#else
+static inline int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long cpc)
+{
+ return 0;
+}
+
+static inline int do_dsemulret(struct pt_regs *xcp)
+{
+ return 0;
+}
+
+static inline int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+ struct mips_fpu_struct *ctx,
+ int has_fpu,
+ void *__user *fault_addr)
+{
+ return 0;
+}
+
+static inline int process_fpemu_return(int sig, void __user *fault_addr)
+{
+ return -EINVAL;
+}
+
+static inline int mm_isBranchInstr(struct pt_regs *regs,
+ struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+{
+ return 0;
+}
+#endif /* CONFIG_MIPS_FPU_EMU */
/*
* Instruction inserted following the badinst to further tag the sequence
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -701,6 +701,7 @@ asmlinkage void do_ov(struct pt_regs *re
exception_exit(prev_state);
}
+#ifdef CONFIG_MIPS_FPU_EMU
int process_fpemu_return(int sig, void __user *fault_addr)
{
if (sig == SIGSEGV || sig == SIGBUS) {
@@ -724,6 +725,7 @@ int process_fpemu_return(int sig, void _
return 0;
}
}
+#endif /* CONFIG_MIPS_FPU_EMU */
/*
* XXX Delayed fp exceptions when doing a lazy ctx switch XXX

View File

@ -0,0 +1,352 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -90,8 +90,13 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
cflags-y += -msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+ifdef CONFIG_64BIT
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+else
+KBUILD_AFLAGS_MODULE += -mno-long-calls
+KBUILD_CFLAGS_MODULE += -mno-long-calls
+endif
cflags-y += -ffreestanding
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -11,6 +11,11 @@ struct mod_arch_specific {
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
struct mips_hi16 *r_mips_hi16_list;
+
+ void *phys_plt_tbl;
+ void *virt_plt_tbl;
+ unsigned int phys_plt_offset;
+ unsigned int virt_plt_offset;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -43,14 +43,222 @@ struct mips_hi16 {
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
-#ifdef MODULE_START
+/*
+ * Get the potential max trampolines size required of the init and
+ * non-init sections. Only used if we cannot find enough contiguous
+ * physically mapped memory to put the module into.
+ */
+static unsigned int
+get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ const char *secstrings, unsigned int symindex, bool is_init)
+{
+ unsigned long ret = 0;
+ unsigned int i, j;
+ Elf_Sym *syms;
+
+ /* Everything marked ALLOC (this includes the exported symbols) */
+ for (i = 1; i < hdr->e_shnum; ++i) {
+ unsigned int info = sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_REL
+ && sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+ /* If it's called *.init*, and we're not init, we're
+ not interested */
+ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
+ != is_init)
+ continue;
+
+ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
+ if (sechdrs[i].sh_type == SHT_REL) {
+ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
+
+ for (j = 0; j < size; ++j) {
+ Elf_Sym *sym;
+
+ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
+ continue;
+
+ sym = syms + ELF_MIPS_R_SYM(rel[j]);
+ if (!is_init && sym->st_shndx != SHN_UNDEF)
+ continue;
+
+ ret += 4 * sizeof(int);
+ }
+ } else {
+ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
+
+ for (j = 0; j < size; ++j) {
+ Elf_Sym *sym;
+
+ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
+ continue;
+
+ sym = syms + ELF_MIPS_R_SYM(rela[j]);
+ if (!is_init && sym->st_shndx != SHN_UNDEF)
+ continue;
+
+ ret += 4 * sizeof(int);
+ }
+ }
+ }
+
+ return ret;
+}
+
+#ifndef MODULE_START
+static void *alloc_phys(unsigned long size)
+{
+ unsigned order;
+ struct page *page;
+ struct page *p;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_THISNODE, order);
+ if (!page)
+ return NULL;
+
+ split_page(page, order);
+
+ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
+ __free_page(p);
+
+ return page_address(page);
+}
+#endif
+
+static void free_phys(void *ptr, unsigned long size)
+{
+ struct page *page;
+ struct page *end;
+
+ page = virt_to_page(ptr);
+ end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
+
+ for (; page < end; ++page)
+ __free_page(page);
+}
+
+
void *module_alloc(unsigned long size)
{
+#ifdef MODULE_START
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
+#else
+ void *ptr;
+
+ if (size == 0)
+ return NULL;
+
+ ptr = alloc_phys(size);
+
+ /* If we failed to allocate physically contiguous memory,
+ * fall back to regular vmalloc. The module loader code will
+ * create jump tables to handle long jumps */
+ if (!ptr)
+ return vmalloc(size);
+
+ return ptr;
+#endif
}
+
+static inline bool is_phys_addr(void *ptr)
+{
+#ifdef CONFIG_64BIT
+ return (KSEGX((unsigned long)ptr) == CKSEG0);
+#else
+ return (KSEGX(ptr) == KSEG0);
#endif
+}
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ if (is_phys_addr(module_region)) {
+ if (mod->module_init == module_region)
+ free_phys(module_region, mod->init_size);
+ else if (mod->module_core == module_region)
+ free_phys(module_region, mod->core_size);
+ else
+ BUG();
+ } else {
+ vfree(module_region);
+ }
+}
+
+static void *__module_alloc(int size, bool phys)
+{
+ void *ptr;
+
+ if (phys)
+ ptr = kmalloc(size, GFP_KERNEL);
+ else
+ ptr = vmalloc(size);
+ return ptr;
+}
+
+static void __module_free(void *ptr)
+{
+ if (is_phys_addr(ptr))
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned int symindex = 0;
+ unsigned int core_size, init_size;
+ int i;
+
+ mod->arch.phys_plt_offset = 0;
+ mod->arch.virt_plt_offset = 0;
+ mod->arch.phys_plt_tbl = NULL;
+ mod->arch.virt_plt_tbl = NULL;
+
+ if (IS_ENABLED(CONFIG_64BIT))
+ return 0;
+
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (sechdrs[i].sh_type == SHT_SYMTAB)
+ symindex = i;
+
+ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
+ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
+
+ if ((core_size + init_size) == 0)
+ return 0;
+
+ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
+ if (!mod->arch.phys_plt_tbl)
+ return -ENOMEM;
+
+ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
+ if (!mod->arch.virt_plt_tbl) {
+ __module_free(mod->arch.phys_plt_tbl);
+ mod->arch.phys_plt_tbl = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
@@ -64,8 +272,39 @@ static int apply_r_mips_32_rel(struct mo
return 0;
}
+static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
+ void *start, Elf_Addr v)
+{
+ unsigned *tramp = start + *plt_offset;
+ *plt_offset += 4 * sizeof(int);
+
+ /* adjust carry for addiu */
+ if (v & 0x00008000)
+ v += 0x10000;
+
+ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
+ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
+ tramp[2] = 0x03200008; /* jr t9 */
+ tramp[3] = 0x00000000; /* nop */
+
+ return (Elf_Addr) tramp;
+}
+
+static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
+{
+ if (is_phys_addr(location))
+ return add_plt_entry_to(&me->arch.phys_plt_offset,
+ me->arch.phys_plt_tbl, v);
+ else
+ return add_plt_entry_to(&me->arch.virt_plt_offset,
+ me->arch.virt_plt_tbl, v);
+
+}
+
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
{
+ u32 ofs = *location & 0x03ffffff;
+
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
me->name);
@@ -73,14 +312,17 @@ static int apply_r_mips_26_rel(struct mo
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
- me->name);
- return -ENOEXEC;
+ v = add_plt_entry(me, location, v + (ofs << 2));
+ if (!v) {
+ printk(KERN_ERR
+ "module %s: relocation overflow\n", me->name);
+ return -ENOEXEC;
+ }
+ ofs = 0;
}
*location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
+ ((ofs + (v >> 2)) & 0x03ffffff);
return 0;
}
@@ -287,11 +529,32 @@ int module_finalize(const Elf_Ehdr *hdr,
list_add(&me->arch.dbe_list, &dbe_list);
spin_unlock_irq(&dbe_lock);
}
+
+ /* Get rid of the fixup trampoline if we're running the module
+ * from physically mapped address space */
+ if (me->arch.phys_plt_offset == 0) {
+ __module_free(me->arch.phys_plt_tbl);
+ me->arch.phys_plt_tbl = NULL;
+ }
+ if (me->arch.virt_plt_offset == 0) {
+ __module_free(me->arch.virt_plt_tbl);
+ me->arch.virt_plt_tbl = NULL;
+ }
+
return 0;
}
void module_arch_cleanup(struct module *mod)
{
+ if (mod->arch.phys_plt_tbl) {
+ __module_free(mod->arch.phys_plt_tbl);
+ mod->arch.phys_plt_tbl = NULL;
+ }
+ if (mod->arch.virt_plt_tbl) {
+ __module_free(mod->arch.virt_plt_tbl);
+ mod->arch.virt_plt_tbl = NULL;
+ }
+
spin_lock_irq(&dbe_lock);
list_del(&mod->arch.dbe_list);
spin_unlock_irq(&dbe_lock);
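The "adjust carry for addiu" step in add_plt_entry_to() deserves a worked example: addiu sign-extends its 16-bit immediate, so when bit 15 of the target's low half is set the immediate acts as a negative offset and the lui value must be one higher to compensate. A minimal user-space sketch of the split, using 0x80249000 purely as an example target address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v = 0x80249000;		/* example jump target */

	/* same as the "adjust carry for addiu" fixup above */
	if (v & 0x00008000)
		v += 0x10000;			/* -> 0x80259000 */

	uint32_t hi = v >> 16;			/* lui   t9, hi                      */
	int32_t  lo = (int16_t)(v & 0xffff);	/* addiu t9, t9, lo (sign-extended)  */

	printf("lui 0x%04x, addiu %d -> 0x%08x\n",
	       (unsigned)hi, (int)lo, (unsigned)((hi << 16) + lo));
	return 0;				/* prints ... -> 0x80249000 */
}

Without the carry fixup, lui would load 0x8024 and the sign-extended addiu immediate would land the trampoline at 0x80239000, 64 KiB short of the real target.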

View File

@ -0,0 +1,83 @@
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -133,11 +133,44 @@ strncmp(__const__ char *__cs, __const__
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+#define memset(__s, __c, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memset((__s), (__c), __len); \
+ else \
+ __ret = __builtin_memset((__s), (__c), __len); \
+ __ret; \
+})
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+#define memcpy(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memcpy((dst), (src), __len); \
+ else \
+ __ret = __builtin_memcpy((dst), (src), __len); \
+ __ret; \
+})
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+#define memmove(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memmove((dst), (src), __len); \
+ else \
+ __ret = __builtin_memmove((dst), (src), __len); \
+ __ret; \
+})
+
+#define __HAVE_ARCH_MEMCMP
+#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
#endif /* _ASM_STRING_H */
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -4,7 +4,7 @@
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strlen_user.o strncpy_user.o \
- strnlen_user.o uncached.o
+ strnlen_user.o uncached.o memcmp.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
--- /dev/null
+++ b/arch/mips/lib/memcmp.c
@@ -0,0 +1,22 @@
+/*
+ * copied from linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#undef memcmp
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+ if ((res = *su1 - *su2) != 0)
+ break;
+ return res;
+}
+EXPORT_SYMBOL(memcmp);
+
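For reference, a quick user-space check of the dispatch condition used by the memset()/memcpy()/memmove() wrappers above: the out-of-line arch routine is only called when the length is a compile-time constant of at least 64 bytes; everything else goes through the compiler builtin, which GCC inlines for small copies and otherwise lowers to an ordinary call. This is only an illustration of the condition, not kernel code:

#include <stdio.h>

#define USES_ARCH_ROUTINE(len) (__builtin_constant_p(len) && (len) >= 64)

int main(int argc, char **argv)
{
	int n = argc * 64;	/* not a compile-time constant */

	printf("len 16:  arch routine? %d\n", USES_ARCH_ROUTINE(16));	/* 0 */
	printf("len 256: arch routine? %d\n", USES_ARCH_ROUTINE(256));	/* 1 */
	printf("len n:   arch routine? %d\n", USES_ARCH_ROUTINE(n));	/* 0 */
	(void)argv;
	return 0;
}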

View File

@ -0,0 +1,32 @@
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -38,6 +38,7 @@ void (*__flush_cache_vunmap)(void);
void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
+EXPORT_SYMBOL(__flush_cache_all);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
/* MIPS specific cache operations */
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -20,6 +20,9 @@
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/aio.h>
+#ifdef CONFIG_MIPS
+#include <asm/cacheflush.h>
+#endif
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -749,6 +752,9 @@ static int fuse_copy_fill(struct fuse_co
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
unsigned ncpy = min(*size, cs->len);
+#ifdef CONFIG_MIPS
+ __flush_cache_all();
+#endif
if (val) {
void *pgaddr = kmap_atomic(cs->pg);
void *buf = pgaddr + cs->offset;

View File

@ -0,0 +1,13 @@
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -83,6 +83,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
return -ENOEXEC;
}
+ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
+ ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+
loc = dstsec->sh_addr + rel->r_offset;
switch (ELF32_R_TYPE(rel->r_info)) {

View File

@ -0,0 +1,31 @@
Upstream doesn't optimize the kernel and bootwrappers for ppc44x because
they still want to support gcc 3.3 -- well, we don't.
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -203,7 +203,8 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
KBUILD_CFLAGS += -mno-sched-epilog
endif
-cpu-as-$(CONFIG_4xx) += -Wa,-m405
+cpu-as-$(CONFIG_40x) += -Wa,-m405
+cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
cpu-as-$(CONFIG_E200) += -Wa,-me200
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -45,10 +45,10 @@ BOOTCFLAGS += -I$(obj) -I$(srctree)/$(ob
DTC_FLAGS ?= -p 1024
$(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
+$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
+$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
+$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405

View File

@ -0,0 +1,10 @@
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -165,7 +165,6 @@ CPP = $(CC) -E $(KBUILD_CFLAGS)
CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
ifeq ($(CONFIG_476FPE_ERR46),y)
KBUILD_LDFLAGS_MODULE += --ppc476-workaround \

View File

@ -0,0 +1,215 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -12,6 +12,28 @@ menuconfig MTD
if MTD
+menu "OpenWrt specific MTD options"
+
+config MTD_ROOTFS_ROOT_DEV
+ bool "Automatically set 'rootfs' partition to be root filesystem"
+ default y
+
+config MTD_SPLIT_FIRMWARE
+ bool "Automatically split firmware partition for kernel+rootfs"
+ default y
+
+config MTD_SPLIT_FIRMWARE_NAME
+ string "Firmware partition name"
+ depends on MTD_SPLIT_FIRMWARE
+ default "firmware"
+
+config MTD_UIMAGE_SPLIT
+ bool "Enable split support for firmware partitions containing a uImage"
+ depends on MTD_SPLIT_FIRMWARE
+ default y
+
+endmenu
+
config MTD_TESTS
tristate "MTD tests support (DANGEROUS)"
depends on m
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -29,9 +29,11 @@
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/magic.h>
#include <linux/err.h>
#include "mtdcore.h"
+#include "mtdsplit.h"
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
@@ -45,13 +47,14 @@ struct mtd_part {
struct list_head list;
};
+static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part);
+
/*
* Given a pointer to the MTD object in the mtd_part structure, we can retrieve
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
-
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
@@ -547,8 +550,10 @@ out_register:
return slave;
}
-int mtd_add_partition(struct mtd_info *master, const char *name,
- long long offset, long long length)
+
+static int
+__mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length, bool dup_check)
{
struct mtd_partition part;
struct mtd_part *p, *new;
@@ -580,21 +585,24 @@ int mtd_add_partition(struct mtd_info *m
end = offset + length;
mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry(p, &mtd_partitions, list)
- if (p->master == master) {
- if ((start >= p->offset) &&
- (start < (p->offset + p->mtd.size)))
- goto err_inv;
-
- if ((end >= p->offset) &&
- (end < (p->offset + p->mtd.size)))
- goto err_inv;
- }
+ if (dup_check) {
+ list_for_each_entry(p, &mtd_partitions, list)
+ if (p->master == master) {
+ if ((start >= p->offset) &&
+ (start < (p->offset + p->mtd.size)))
+ goto err_inv;
+
+ if ((end >= p->offset) &&
+ (end < (p->offset + p->mtd.size)))
+ goto err_inv;
+ }
+ }
list_add(&new->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&new->mtd);
+ mtd_partition_split(master, new);
return ret;
err_inv:
@@ -604,6 +612,12 @@ err_inv:
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
+int mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length)
+{
+ return __mtd_add_partition(master, name, offset, length, true);
+}
+
int mtd_del_partition(struct mtd_info *master, int partno)
{
struct mtd_part *slave, *next;
@@ -627,6 +641,74 @@ int mtd_del_partition(struct mtd_info *m
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
+static inline unsigned long
+mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len)
+{
+ unsigned long mask = mtd->erasesize - 1;
+
+ len += offset & mask;
+ len = (len + mask) & ~mask;
+ len -= offset & mask;
+ return len;
+}
+
+#define UBOOT_MAGIC 0x27051956
+
+static void split_uimage(struct mtd_info *master, struct mtd_part *part)
+{
+ struct {
+ __be32 magic;
+ __be32 pad[2];
+ __be32 size;
+ } hdr;
+ size_t len;
+
+ if (mtd_read(master, part->offset, sizeof(hdr), &len, (void *) &hdr))
+ return;
+
+ if (len != sizeof(hdr) || hdr.magic != cpu_to_be32(UBOOT_MAGIC))
+ return;
+
+ len = be32_to_cpu(hdr.size) + 0x40;
+ len = mtd_pad_erasesize(master, part->offset, len);
+ if (len + master->erasesize > part->mtd.size)
+ return;
+
+ __mtd_add_partition(master, "rootfs", part->offset + len,
+ part->mtd.size - len, false);
+}
+
+#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
+#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
+#else
+#define SPLIT_FIRMWARE_NAME "unused"
+#endif
+
+static void split_firmware(struct mtd_info *master, struct mtd_part *part)
+{
+ if (config_enabled(CONFIG_MTD_UIMAGE_SPLIT))
+ split_uimage(master, part);
+}
+
+void __weak arch_split_mtd_part(struct mtd_info *master, const char *name,
+ int offset, int size)
+{
+}
+
+static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part)
+{
+ static int rootfs_found = 0;
+
+ if (rootfs_found)
+ return;
+
+ if (!strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
+ config_enabled(CONFIG_MTD_SPLIT_FIRMWARE))
+ split_firmware(master, part);
+
+ arch_split_mtd_part(master, part->mtd.name, part->offset,
+ part->mtd.size);
+}
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
@@ -656,6 +738,7 @@ int add_mtd_partitions(struct mtd_info *
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&slave->mtd);
+ mtd_partition_split(master, slave);
cur_offset = slave->offset + slave->mtd.size;
}
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -84,5 +84,7 @@ int mtd_add_partition(struct mtd_info *m
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+extern void __weak arch_split_mtd_part(struct mtd_info *master,
+ const char *name, int offset, int size);
#endif
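A worked example of the offset arithmetic in split_uimage() may help: the "rootfs" partition is created at the first erase-block boundary after the uImage (64-byte header plus hdr.size bytes of data) ends. The numbers below (64 KiB erase blocks, firmware partition at 0x20000, image data size 0x1452a7) are invented for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t erasesize = 0x10000;	/* master->erasesize           */
	uint64_t offset    = 0x20000;	/* firmware partition offset   */
	uint64_t ih_size   = 0x1452a7;	/* hdr.size from uImage header */

	uint64_t len  = ih_size + 0x40;	/* account for the uImage header */
	uint64_t mask = erasesize - 1;

	/* mtd_pad_erasesize(): pad so offset + len lands on an erase
	 * block boundary */
	len += offset & mask;
	len  = (len + mask) & ~mask;
	len -= offset & mask;

	printf("rootfs at 0x%llx, i.e. 0x%llx into the firmware partition\n",
	       (unsigned long long)(offset + len), (unsigned long long)len);
	return 0;	/* 0x170000 and 0x150000 with these numbers */
}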

View File

@ -0,0 +1,113 @@
From 02cff0ccaa6d364f5c1eeea83f47ac80ccc967d4 Mon Sep 17 00:00:00 2001
From: Gabor Juhos <juhosg@openwrt.org>
Date: Tue, 3 Sep 2013 18:11:50 +0200
Subject: [PATCH] mtd: add support for different partition parser types
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
drivers/mtd/mtdpart.c | 56 ++++++++++++++++++++++++++++++++++++++++
include/linux/mtd/partitions.h | 11 ++++++++
2 files changed, 67 insertions(+)
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -768,6 +768,30 @@ static struct mtd_part_parser *get_parti
#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+static struct mtd_part_parser *
+get_partition_parser_by_type(enum mtd_parser_type type,
+ struct mtd_part_parser *start)
+{
+ struct mtd_part_parser *p, *ret = NULL;
+
+ spin_lock(&part_parser_lock);
+
+ p = list_prepare_entry(start, &part_parsers, list);
+ if (start)
+ put_partition_parser(start);
+
+ list_for_each_entry_continue(p, &part_parsers, list) {
+ if (p->type == type && try_module_get(p->owner)) {
+ ret = p;
+ break;
+ }
+ }
+
+ spin_unlock(&part_parser_lock);
+
+ return ret;
+}
+
void register_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
@@ -841,6 +865,38 @@ int parse_mtd_partitions(struct mtd_info
return ret;
}
+int parse_mtd_partitions_by_type(struct mtd_info *master,
+ enum mtd_parser_type type,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_part_parser *prev = NULL;
+ int ret = 0;
+
+ while (1) {
+ struct mtd_part_parser *parser;
+
+ parser = get_partition_parser_by_type(type, prev);
+ if (!parser)
+ break;
+
+ ret = (*parser->parse_fn)(master, pparts, data);
+
+ if (ret > 0) {
+ put_partition_parser(parser);
+ printk(KERN_NOTICE
+ "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
+ break;
+ }
+
+ prev = parser;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(parse_mtd_partitions_by_type);
+
int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -68,12 +68,17 @@ struct mtd_part_parser_data {
* Functions dealing with the various ways of partitioning the space
*/
+enum mtd_parser_type {
+ MTD_PARSER_TYPE_DEVICE = 0,
+};
+
struct mtd_part_parser {
struct list_head list;
struct module *owner;
const char *name;
int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
struct mtd_part_parser_data *);
+ enum mtd_parser_type type;
};
extern void register_mtd_parser(struct mtd_part_parser *parser);
@@ -87,4 +92,9 @@ uint64_t mtd_get_device_size(const struc
extern void __weak arch_split_mtd_part(struct mtd_info *master,
const char *name, int offset, int size);
+int parse_mtd_partitions_by_type(struct mtd_info *master,
+ enum mtd_parser_type type,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
+
#endif
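To show how the new type field is meant to be used, here is a minimal, hypothetical parser skeleton; my_fw_parse() and the "my-fw" name are invented, only the struct fields and register_mtd_parser() come from the patch above. Callers can then iterate over parsers of one kind with parse_mtd_partitions_by_type() instead of selecting them by name.

/* Hypothetical example of registering a typed partition parser */
static int my_fw_parse(struct mtd_info *mtd, struct mtd_partition **pparts,
		       struct mtd_part_parser_data *data)
{
	/* detect the format, allocate and fill *pparts, return the
	 * number of partitions found (or <= 0 on failure) */
	return 0;
}

static struct mtd_part_parser my_fw_parser = {
	.owner		= THIS_MODULE,
	.name		= "my-fw",
	.parse_fn	= my_fw_parse,
	.type		= MTD_PARSER_TYPE_DEVICE,
};

static int __init my_fw_parser_init(void)
{
	register_mtd_parser(&my_fw_parser);
	return 0;
}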

View File

@ -0,0 +1,77 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -641,6 +641,37 @@ int mtd_del_partition(struct mtd_info *m
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
+static int
+run_parsers_by_type(struct mtd_part *slave, enum mtd_parser_type type)
+{
+ struct mtd_partition *parts;
+ int nr_parts;
+ int i;
+
+ nr_parts = parse_mtd_partitions_by_type(&slave->mtd, type, &parts,
+ NULL);
+ if (nr_parts <= 0)
+ return nr_parts;
+
+ if (WARN_ON(!parts))
+ return 0;
+
+ for (i = 0; i < nr_parts; i++) {
+ /* adjust partition offsets */
+ parts[i].offset += slave->offset;
+
+ __mtd_add_partition(slave->master,
+ parts[i].name,
+ parts[i].offset,
+ parts[i].size,
+ false);
+ }
+
+ kfree(parts);
+
+ return nr_parts;
+}
+
static inline unsigned long
mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len)
{
@@ -686,6 +717,12 @@ static void split_uimage(struct mtd_info
static void split_firmware(struct mtd_info *master, struct mtd_part *part)
{
+ int ret;
+
+ ret = run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
+ if (ret > 0)
+ return;
+
if (config_enabled(CONFIG_MTD_UIMAGE_SPLIT))
split_uimage(master, part);
}
@@ -702,6 +739,12 @@ static void mtd_partition_split(struct m
if (rootfs_found)
return;
+ if (!strcmp(part->mtd.name, "rootfs")) {
+ run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);
+
+ rootfs_found = 1;
+ }
+
if (!strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
config_enabled(CONFIG_MTD_SPLIT_FIRMWARE))
split_firmware(master, part);
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -70,6 +70,8 @@ struct mtd_part_parser_data {
enum mtd_parser_type {
MTD_PARSER_TYPE_DEVICE = 0,
+ MTD_PARSER_TYPE_ROOTFS,
+ MTD_PARSER_TYPE_FIRMWARE,
};
struct mtd_part_parser {

View File

@ -0,0 +1,25 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -32,6 +32,11 @@ config MTD_UIMAGE_SPLIT
depends on MTD_SPLIT_FIRMWARE
default y
+config MTD_SPLIT
+ def_bool n
+ help
+ Generic MTD split support.
+
endmenu
config MTD_TESTS
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
+mtd-$(CONFIG_MTD_SPLIT) += mtdsplit.o
+
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o

View File

@ -0,0 +1,83 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -445,14 +445,12 @@ static struct mtd_part *allocate_partiti
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
if (slave->offset == MTDPART_OFS_NXTBLK) {
- slave->offset = cur_offset;
- if (mtd_mod_by_eb(cur_offset, master) != 0) {
- /* Round up to next erasesize */
- slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+ /* Round up to next erasesize */
+ slave->offset = mtd_roundup_to_eb(cur_offset, master);
+ if (slave->offset != cur_offset)
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
(unsigned long long)cur_offset, (unsigned long long)slave->offset);
- }
}
if (slave->offset == MTDPART_OFS_RETAIN) {
slave->offset = cur_offset;
@@ -957,6 +955,24 @@ int mtd_is_partition(const struct mtd_in
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return (struct mtd_info *)mtd;
+
+ return PART(mtd)->master;
+}
+EXPORT_SYMBOL_GPL(mtdpart_get_master);
+
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return 0;
+
+ return PART(mtd)->offset;
+}
+EXPORT_SYMBOL_GPL(mtdpart_get_offset);
+
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -90,6 +90,8 @@ int mtd_is_partition(const struct mtd_in
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd);
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
extern void __weak arch_split_mtd_part(struct mtd_info *master,
const char *name, int offset, int size);
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -333,6 +333,24 @@ static inline uint32_t mtd_mod_by_eb(uin
return do_div(sz, mtd->erasesize);
}
+static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd_mod_by_eb(sz, mtd) == 0)
+ return sz;
+
+ /* Round up to next erase block */
+ return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize;
+}
+
+static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd_mod_by_eb(sz, mtd) == 0)
+ return sz;
+
+ /* Round down to the start of the current erase block */
+ return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize;
+}
+
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)

View File

@ -0,0 +1,24 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -32,6 +32,8 @@ config MTD_UIMAGE_SPLIT
depends on MTD_SPLIT_FIRMWARE
default y
+source "drivers/mtd/mtdsplit/Kconfig"
+
config MTD_SPLIT
def_bool n
help
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -7,6 +7,10 @@ obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
mtd-$(CONFIG_MTD_SPLIT) += mtdsplit.o
+mtd-$(CONFIG_MTD_SPLIT_SEAMA_FW) += mtdsplit_seama.o
+mtd-$(CONFIG_MTD_SPLIT_SQUASHFS_ROOT) += mtdsplit_squashfs.o
+mtd-$(CONFIG_MTD_SPLIT_UIMAGE_FW) += mtdsplit_uimage.o
+mtd-$(CONFIG_MTD_SPLIT_LZMA_FW) += mtdsplit_lzma.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o

View File

@ -0,0 +1,76 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -18,6 +18,11 @@ config MTD_ROOTFS_ROOT_DEV
bool "Automatically set 'rootfs' partition to be root filesystem"
default y
+config MTD_ROOTFS_SPLIT
+ bool "Automatically split 'rootfs' partition for squashfs"
+ select MTD_SPLIT
+ default y
+
config MTD_SPLIT_FIRMWARE
bool "Automatically split firmware partition for kernel+rootfs"
default y
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -681,6 +681,47 @@ mtd_pad_erasesize(struct mtd_info *mtd,
return len;
}
+static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
+{
+ size_t squashfs_len;
+ int len, ret;
+
+ ret = mtd_get_squashfs_len(master, offset, &squashfs_len);
+ if (ret)
+ return ret;
+
+ len = mtd_pad_erasesize(master, offset, squashfs_len);
+ *split_offset = offset + len;
+
+ return 0;
+}
+
+static void split_rootfs_data(struct mtd_info *master, struct mtd_part *part)
+{
+ unsigned int split_offset = 0;
+ unsigned int split_size;
+ int ret;
+
+ ret = split_squashfs(master, part->offset, &split_offset);
+ if (ret)
+ return;
+
+ if (split_offset <= 0)
+ return;
+
+ if (config_enabled(CONFIG_MTD_SPLIT_SQUASHFS_ROOT))
+ pr_err("Dedicated partitioner didn't create \"rootfs_data\" partition, please fill a bug report!\n");
+ else
+ pr_warn("Support for built-in \"rootfs_data\" splitter will be removed, please use CONFIG_MTD_SPLIT_SQUASHFS_ROOT\n");
+
+ split_size = part->mtd.size - (split_offset - part->offset);
+ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=0x%x, len=0x%x\n",
+ ROOTFS_SPLIT_NAME, split_offset, split_size);
+
+ __mtd_add_partition(master, ROOTFS_SPLIT_NAME, split_offset,
+ split_size, false);
+}
+
#define UBOOT_MAGIC 0x27051956
static void split_uimage(struct mtd_info *master, struct mtd_part *part)
@@ -738,7 +779,10 @@ static void mtd_partition_split(struct m
return;
if (!strcmp(part->mtd.name, "rootfs")) {
- run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);
+ int num = run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);
+
+ if (num <= 0 && config_enabled(CONFIG_MTD_ROOTFS_SPLIT))
+ split_rootfs_data(master, part);
rootfs_found = 1;
}

View File

@ -0,0 +1,18 @@
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -35,6 +35,7 @@
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/
+struct mtd_info;
struct mtd_partition {
const char *name; /* identifier string */
@@ -50,7 +51,6 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL (0)
-struct mtd_info;
struct device_node;
/**

View File

@ -0,0 +1,146 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -35,6 +35,8 @@
#include "mtdcore.h"
#include "mtdsplit.h"
+#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
+
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -233,13 +235,61 @@ static int part_erase(struct mtd_info *m
struct mtd_part *part = PART(mtd);
int ret;
+
+ instr->partial_start = false;
+ if (mtd->flags & MTD_ERASE_PARTIAL) {
+ size_t readlen = 0;
+ u64 mtd_ofs;
+
+ instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
+ if (!instr->erase_buf)
+ return -ENOMEM;
+
+ mtd_ofs = part->offset + instr->addr;
+ instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);
+
+ if (instr->erase_buf_ofs > 0) {
+ instr->addr -= instr->erase_buf_ofs;
+ ret = mtd_read(part->master,
+ instr->addr + part->offset,
+ part->master->erasesize,
+ &readlen, instr->erase_buf);
+
+ instr->len += instr->erase_buf_ofs;
+ instr->partial_start = true;
+ } else {
+ mtd_ofs = part->offset + part->mtd.size;
+ instr->erase_buf_ofs = part->master->erasesize -
+ do_div(mtd_ofs, part->master->erasesize);
+
+ if (instr->erase_buf_ofs > 0) {
+ instr->len += instr->erase_buf_ofs;
+ ret = mtd_read(part->master,
+ part->offset + instr->addr +
+ instr->len - part->master->erasesize,
+ part->master->erasesize, &readlen,
+ instr->erase_buf);
+ } else {
+ ret = 0;
+ }
+ }
+ if (ret < 0) {
+ kfree(instr->erase_buf);
+ return ret;
+ }
+
+ }
+
instr->addr += part->offset;
ret = part->master->_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
+ if (mtd->flags & MTD_ERASE_PARTIAL)
+ kfree(instr->erase_buf);
}
+
return ret;
}
@@ -247,7 +297,25 @@ void mtd_erase_callback(struct erase_inf
{
if (instr->mtd->_erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
+ size_t wrlen = 0;
+ if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
+ if (instr->partial_start) {
+ part->master->_write(part->master,
+ instr->addr, instr->erase_buf_ofs,
+ &wrlen, instr->erase_buf);
+ instr->addr += instr->erase_buf_ofs;
+ } else {
+ instr->len -= instr->erase_buf_ofs;
+ part->master->_write(part->master,
+ instr->addr + instr->len,
+ instr->erase_buf_ofs, &wrlen,
+ instr->erase_buf +
+ part->master->erasesize -
+ instr->erase_buf_ofs);
+ }
+ kfree(instr->erase_buf);
+ }
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
@@ -514,18 +582,24 @@ static struct mtd_part *allocate_partiti
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->offset, &slave->mtd)) {
/* Doesn't start on a boundary of major erase size */
- /* FIXME: Let it be writable if it is on a boundary of
- * _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
- part->name);
+ slave->mtd.flags |= MTD_ERASE_PARTIAL;
+ if (((u32) slave->mtd.size) > master->erasesize)
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ else
+ slave->mtd.erasesize = slave->mtd.size;
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
- mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
- part->name);
+ mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) {
+ slave->mtd.flags |= MTD_ERASE_PARTIAL;
+
+ if ((u32) slave->mtd.size > master->erasesize)
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ else
+ slave->mtd.erasesize = slave->mtd.size;
}
+ if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL)
+ printk(KERN_WARNING"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
+ part->name);
slave->mtd.ecclayout = master->ecclayout;
slave->mtd.ecc_step_size = master->ecc_step_size;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -55,6 +55,10 @@ struct erase_info {
u_long priv;
u_char state;
struct erase_info *next;
+
+ u8 *erase_buf;
+ u32 erase_buf_ofs;
+ bool partial_start;
};
struct mtd_erase_region_info {
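The MTD_ERASE_PARTIAL path above is subtle enough to deserve a toy walk-through: for a writable partition that does not start on an erase-block boundary, part_erase() backs up the whole covering block, extends the erase to the block start, and mtd_erase_callback() then writes the saved head (the bytes belonging to the neighbouring partition) back before completing. The sketch below models only that head case with a byte array and shrunken sizes (0x100-byte blocks, partition at offset 0x70), purely for illustration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ERASESIZE 0x100

static uint8_t flash[2 * ERASESIZE];		/* pretend flash */

static void erase_partial(size_t part_off, size_t addr)
{
	uint8_t buf[ERASESIZE];
	size_t block = ((part_off + addr) / ERASESIZE) * ERASESIZE;
	size_t head  = (part_off + addr) - block;	/* == erase_buf_ofs */

	memcpy(buf, &flash[block], ERASESIZE);		/* back up the block */
	memset(&flash[block], 0xff, ERASESIZE);		/* erase it          */
	memcpy(&flash[block], buf, head);		/* restore the head  */
}

int main(void)
{
	memset(flash, 0xAA, sizeof(flash));
	erase_partial(0x70, 0);		/* partition starts 0x70 into block 0 */
	printf("flash[0x6f]=0x%02x flash[0x70]=0x%02x\n",
	       flash[0x6f], flash[0x70]);	/* 0xaa (preserved), 0xff (erased) */
	return 0;
}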

View File

@ -0,0 +1,18 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -334,7 +334,14 @@ static int part_lock(struct mtd_info *mt
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->_unlock(part->master, ofs + part->offset, len);
+
+ ofs += part->offset;
+ if (mtd->flags & MTD_ERASE_PARTIAL) {
+ /* round up len to next erasesize and round down offset to prev block */
+ len = (mtd_div_by_eb(len, part->master) + 1) * part->master->erasesize;
+ ofs &= ~(part->master->erasesize - 1);
+ }
+ return part->master->_unlock(part->master, ofs, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)

View File

@ -0,0 +1,30 @@
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -265,14 +265,21 @@ static int parse_redboot_partitions(stru
#endif
names += strlen(names)+1;
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
- i++;
- parts[i].offset = parts[i-1].size + parts[i-1].offset;
- parts[i].size = fl->next->img->flash_base - parts[i].offset;
- parts[i].name = nullname;
- }
+ if (!strcmp(parts[i].name, "rootfs")) {
+ parts[i].size = fl->next->img->flash_base;
+ parts[i].size &= ~(master->erasesize - 1);
+ parts[i].size -= parts[i].offset;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ nrparts--;
+ } else {
+ i++;
+ parts[i].offset = parts[i-1].size + parts[i-1].offset;
+ parts[i].size = fl->next->img->flash_base - parts[i].offset;
+ parts[i].name = nullname;
#endif
+ }
+ }
tmp_fl = fl;
fl = fl->next;
kfree(tmp_fl);

View File

@ -0,0 +1,35 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -189,6 +189,22 @@ config MTD_BCM47XX_PARTS
This provides partitions parser for devices based on BCM47xx
boards.
+config MTD_MYLOADER_PARTS
+ tristate "MyLoader partition parsing"
+ depends on ADM5120 || ATHEROS_AR231X || ATHEROS_AR71XX || ATH79
+ ---help---
+ MyLoader is a bootloader which allows the user to define partitions
+ in flash devices, by putting a table in the second erase block
+ on the device, similar to a partition table. This table gives the
+ offsets and lengths of the user defined partitions.
+
+ If you need code which can detect and parse these tables, and
+ register MTD 'partitions' corresponding to each image detected,
+ enable this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically.
+
comment "User Modules And Translation Layers"
#
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
+obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o

View File

@ -0,0 +1,66 @@
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -36,6 +36,7 @@
#define ML_MAGIC2 0x26594131
#define TRX_MAGIC 0x30524448
#define SQSH_MAGIC 0x71736873 /* shsq */
+#define UBI_EC_MAGIC 0x23494255 /* UBI# */
struct trx_header {
uint32_t magic;
@@ -46,7 +47,7 @@ struct trx_header {
uint32_t offset[3];
} __packed;
-static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
+static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name,
u64 offset, uint32_t mask_flags)
{
part->name = name;
@@ -54,6 +55,26 @@ static void bcm47xxpart_add_part(struct
part->mask_flags = mask_flags;
}
+static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
+ size_t offset)
+{
+ uint32_t buf;
+ size_t bytes_read;
+
+ if (mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ goto out_default;
+ }
+
+ if (buf == UBI_EC_MAGIC)
+ return "ubi";
+
+out_default:
+ return "rootfs";
+}
+
static int bcm47xxpart_parse(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
@@ -147,6 +168,8 @@ static int bcm47xxpart_parse(struct mtd_
/* TRX */
if (buf[0x000 / 4] == TRX_MAGIC) {
+ const char *name;
+
if (BCM47XXPART_MAX_PARTS - curr_part < 4) {
pr_warn("Not enough partitions left to register trx, scanning stopped!\n");
break;
@@ -177,7 +200,9 @@ static int bcm47xxpart_parse(struct mtd_
* trx->length - trx->offset[i]. We don't fill it as
* we want to have jffs2 (overlay) in the same mtd.
*/
- bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+ name = bcm47xxpart_trx_data_part_name(master,
+ offset + trx->offset[i]);
+ bcm47xxpart_add_part(&parts[curr_part++], name,
offset + trx->offset[i], 0);
i++;

View File

@ -0,0 +1,107 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -17,6 +17,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
@@ -210,11 +211,12 @@ static void block2mtd_free_device(struct
/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size)
+static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
{
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
struct block_device *bdev;
struct block2mtd_dev *dev;
+ struct mtd_partition *part;
char *name;
if (!devname)
@@ -253,13 +255,16 @@ static struct block2mtd_dev *add_device(
/* Setup the MTD structure */
/* make the name contain the block device in */
- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
+ if (!mtdname)
+ mtdname = devname;
+ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
if (!name)
goto devinit_err;
+ strcpy(name, mtdname);
dev->mtd.name = name;
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
@@ -272,15 +277,18 @@ static struct block2mtd_dev *add_device(
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
- if (mtd_device_register(&dev->mtd, NULL, 0)) {
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = name;
+ part->offset = 0;
+ part->size = dev->mtd.size;
+ if (mtd_device_register(&dev->mtd, part, 1)) {
/* Device didn't get added, so free the entry */
goto devinit_err;
}
list_add(&dev->list, &blkmtd_device_list);
pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
dev->mtd.index,
- dev->mtd.name + strlen("block2mtd: "),
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
devinit_err:
@@ -347,9 +355,9 @@ static char block2mtd_paramline[80 + 12]
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
+ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
char *str = buf;
- char *token[2];
+ char *token[3];
char *name;
size_t erase_size = PAGE_SIZE;
int i, ret;
@@ -362,7 +370,7 @@ static int block2mtd_setup2(const char *
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 3; i++)
token[i] = strsep(&str, ",");
if (str) {
@@ -388,8 +396,10 @@ static int block2mtd_setup2(const char *
return 0;
}
}
+ if (token[2] && (strlen(token[2]) + 1 > 80))
+ pr_err("mtd device name too long\n");
- add_device(name, erase_size);
+ add_device(name, erase_size, token[2]);
return 0;
}
@@ -423,7 +433,7 @@ static int block2mtd_setup(const char *v
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
static int __init block2mtd_init(void)
{

View File

@ -0,0 +1,108 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
@@ -211,13 +212,14 @@ static void block2mtd_free_device(struct
/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
+static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname, int timeout)
{
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
- struct block_device *bdev;
+ struct block_device *bdev = ERR_PTR(-ENODEV);
struct block2mtd_dev *dev;
struct mtd_partition *part;
char *name;
+ int i;
if (!devname)
return NULL;
@@ -228,15 +230,20 @@ static struct block2mtd_dev *add_device(
/* Get a handle on the device */
bdev = blkdev_get_by_path(devname, mode, dev);
+
#ifndef MODULE
- if (IS_ERR(bdev)) {
+ for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
+ dev_t devt;
- /* We might not have rootfs mounted at this point. Try
- to resolve the device name by other means. */
+ if (i)
+ msleep(1000);
+ wait_for_device_probe();
+
+ devt = name_to_dev_t(devname);
+ if (!devt)
+ continue;
- dev_t devt = name_to_dev_t(devname);
- if (devt)
- bdev = blkdev_get_by_dev(devt, mode, dev);
+ bdev = blkdev_get_by_dev(devt, mode, dev);
}
#endif
@@ -355,11 +362,12 @@ static char block2mtd_paramline[80 + 12]
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
+ char buf[80 + 12 + 80 + 8]; /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
char *str = buf;
- char *token[3];
+ char *token[4];
char *name;
size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = 0;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
@@ -370,7 +378,7 @@ static int block2mtd_setup2(const char *
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 3; i++)
+ for (i = 0; i < 4; i++)
token[i] = strsep(&str, ",");
if (str) {
@@ -399,7 +407,10 @@ static int block2mtd_setup2(const char *
if (token[2] && (strlen(token[2]) + 1 > 80))
pr_err("mtd device name too long\n");
- add_device(name, erase_size, token[2]);
+ if (token[3] && kstrtoul(token[3], 0, &timeout))
+ pr_err("invalid timeout\n");
+
+ add_device(name, erase_size, token[2], timeout);
return 0;
}
@@ -433,7 +444,7 @@ static int block2mtd_setup(const char *v
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>[,<timeout>]]]\"");
static int __init block2mtd_init(void)
{
@@ -467,7 +478,7 @@ static void block2mtd_exit(void)
}
-module_init(block2mtd_init);
+late_initcall(block2mtd_init);
module_exit(block2mtd_exit);
MODULE_LICENSE("GPL");
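
With the extended parameter format above, a boot-time invocation could look roughly like the following (device path, MTD name and timeout are made-up values; the last field is the number of seconds add_device() keeps retrying blkdev_get_by_path() before giving up):

    block2mtd.block2mtd=/dev/mmcblk0p2,65536,rootfs_data,30

i.e. block device, erase size in bytes, MTD device name, probe timeout.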

View File

@ -0,0 +1,37 @@
---
drivers/mtd/nand/plat_nand.c | 13 ++++++++++++-
include/linux/mtd/nand.h | 1 +
2 files changed, 13 insertions(+), 1 deletion(-)
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -851,6 +851,7 @@ struct platform_nand_chip {
unsigned int options;
unsigned int bbt_options;
const char **part_probe_types;
+ int (*chip_fixup)(struct mtd_info *mtd);
};
/* Keep gcc happy */
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -90,7 +90,18 @@ static int plat_nand_probe(struct platfo
}
/* Scan to find existence of the device */
- if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
+ if (nand_scan_ident(&data->mtd, pdata->chip.nr_chips, NULL)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (pdata->chip.chip_fixup) {
+ err = pdata->chip.chip_fixup(&data->mtd);
+ if (err)
+ goto out;
+ }
+
+ if (nand_scan_tail(&data->mtd)) {
err = -ENXIO;
goto out;
}
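
The chip_fixup hook added above is meant to be supplied by board support code and runs between nand_scan_ident() and nand_scan_tail(). A minimal sketch of how a hypothetical board might use it (names and the adjustment itself are illustrative only, not taken from any in-tree board):

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>
    #include <linux/sizes.h>

    /* hypothetical board code: trim the detected chip before nand_scan_tail() */
    static int myboard_nand_fixup(struct mtd_info *mtd)
    {
        struct nand_chip *chip = mtd->priv;

        chip->chipsize = min_t(uint64_t, chip->chipsize, SZ_64M);
        return 0;
    }

    static struct platform_nand_data myboard_nand_data = {
        .chip = {
            .nr_chips   = 1,
            .chip_fixup = myboard_nand_fixup,
        },
    };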

View File

@ -0,0 +1,11 @@
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -507,7 +507,7 @@ int __nand_correct_data(unsigned char *b
return 1; /* error in ECC data; no action needed */
pr_err("%s: uncorrectable ECC error\n", __func__);
- return -1;
+ return -EBADMSG;
}
EXPORT_SYMBOL(__nand_correct_data);

View File

@ -0,0 +1,11 @@
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -809,7 +809,7 @@ static int get_chip(struct map_info *map
return 0;
case FL_ERASING:
- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
!(mode == FL_READY || mode == FL_POINT ||
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;

View File

@ -0,0 +1,18 @@
From: George Kashperko <george@znau.edu.ua>
Issue a map read after the Write Buffer Load command to ensure the chip
is ready to receive data.
Signed-off-by: George Kashperko <george@znau.edu.ua>
---
drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1830,6 +1830,7 @@ static int __xipram do_write_buffer(stru
/* Write Buffer Load */
map_write(map, CMD(0x25), cmd_adr);
+ (void) map_read(map, cmd_adr);
chip->state = FL_WRITING_TO_BUFFER;

View File

@ -0,0 +1,20 @@
From eef9dfc4e821408af1af13aa0cc707fc496fb7c6 Mon Sep 17 00:00:00 2001
From: Gabor Juhos <juhosg@openwrt.org>
Date: Wed, 11 Dec 2013 19:05:59 +0100
Subject: [PATCH] m25p80: add support for the Winbond W25X05 flash
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
drivers/mtd/devices/m25p80.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -614,6 +614,7 @@ const struct spi_device_id spi_nor_ids[]
{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },

View File

@ -0,0 +1,26 @@
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -37,6 +37,7 @@
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/root_dev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -456,6 +457,15 @@ int add_mtd_device(struct mtd_info *mtd)
of this try_ nonsense, and no bitching about it
either. :) */
__module_get(THIS_MODULE);
+
+ if (!strcmp(mtd->name, "rootfs") &&
+ config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+ ROOT_DEV == 0) {
+ pr_notice("mtd: device %d (%s) set to be root filesystem\n",
+ mtd->index, mtd->name);
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ }
+
return 0;
fail_added:

View File

@ -0,0 +1,63 @@
From 8a52e4100d7c3a4a1dfddfa02b8864a9b0068c13 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Sat, 17 May 2014 03:36:18 +0200
Subject: [PATCH 1/5] ubi: auto-attach mtd device named "ubi" or "data" on boot
To: openwrt-devel@lists.openwrt.org
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/mtd/ubi/build.c | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1209,6 +1209,36 @@ static struct mtd_info * __init open_mtd
return mtd;
}
+/*
+ * This function tries attaching mtd partitions named either "ubi" or "data"
+ * during boot.
+ */
+static void __init ubi_auto_attach(void)
+{
+ int err;
+ struct mtd_info *mtd;
+ /* try attaching mtd device named "ubi" or "data" */
+ mtd = open_mtd_device("ubi");
+ if (IS_ERR(mtd))
+ mtd = open_mtd_device("data");
+
+ if (!IS_ERR(mtd)) {
+ /* auto-add only media types where UBI makes sense */
+ if (mtd->type == MTD_NANDFLASH ||
+ mtd->type == MTD_DATAFLASH ||
+ mtd->type == MTD_MLCNANDFLASH) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_msg("auto-attach mtd%d", mtd->index);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0) {
+ ubi_err("cannot attach mtd%d", mtd->index);
+ put_mtd_device(mtd);
+ }
+ }
+ }
+}
+
static int __init ubi_init(void)
{
int err, i, k;
@@ -1298,6 +1328,12 @@ static int __init ubi_init(void)
}
}
+ /* auto-attach mtd devices only if built-in to the kernel and no ubi.mtd
+ * parameter was given */
+ if (config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+ !ubi_is_module() && !mtd_devs)
+ ubi_auto_attach();
+
err = ubiblock_init();
if (err) {
ubi_err("block: cannot initialize, error %d", err);

View File

@ -0,0 +1,69 @@
From 0f3966579815f889bb2fcb4846152c35f65e79c4 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 15 May 2014 21:06:33 +0200
Subject: [PATCH 2/5] ubi: auto-create ubiblock device for rootfs
To: openwrt-devel@lists.openwrt.org
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/mtd/ubi/block.c | 42 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -615,6 +615,44 @@ static int __init ubiblock_create_from_p
return ret;
}
+#define UBIFS_NODE_MAGIC 0x06101831
+static inline int ubi_vol_is_ubifs(struct ubi_volume_desc *desc)
+{
+ int ret;
+ uint32_t magic_of, magic;
+ ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4);
+ if (ret)
+ return 0;
+ magic = le32_to_cpu(magic_of);
+ return magic == UBIFS_NODE_MAGIC;
+}
+
+static void __init ubiblock_create_auto_rootfs(void)
+{
+ int ubi_num, ret, is_ubifs;
+ struct ubi_volume_desc *desc;
+ struct ubi_volume_info vi;
+
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) {
+ desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY);
+ if (IS_ERR(desc))
+ continue;
+
+ ubi_get_volume_info(desc, &vi);
+ is_ubifs = ubi_vol_is_ubifs(desc);
+ ubi_close_volume(desc);
+ if (is_ubifs)
+ break;
+
+ ret = ubiblock_create(&vi);
+ if (ret)
+ ubi_err("block: can't add '%s' volume, err=%d\n",
+ vi.name, ret);
+ /* always break if we get here */
+ break;
+ }
+}
+
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -645,6 +683,10 @@ int __init ubiblock_init(void)
if (ret)
goto err_remove;
+ /* auto-attach "rootfs" volume if existing and non-ubifs */
+ if (config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV))
+ ubiblock_create_auto_rootfs();
+
/*
* Block devices are only created upon user requests, so we ignore
* existing volumes.

View File

@ -0,0 +1,53 @@
From eea9e1785e4c05c2a3444506aabafa0ae958538f Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Sat, 17 May 2014 03:35:02 +0200
Subject: [PATCH 4/5] try auto-mounting ubi0:rootfs in init/do_mounts.c
To: openwrt-devel@lists.openwrt.org
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
init/do_mounts.c | 26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -433,7 +433,27 @@ retry:
out:
put_page(page);
}
-
+
+static int __init mount_ubi_rootfs(void)
+{
+ int flags = MS_SILENT;
+ int err, tried = 0;
+
+ while (tried < 2) {
+ err = do_mount_root("ubi0:rootfs", "ubifs", flags, \
+ root_mount_data);
+ switch (err) {
+ case -EACCES:
+ flags |= MS_RDONLY;
+ tried++;
+ default:
+ return err;
+ }
+ }
+
+ return -EINVAL;
+}
+
#ifdef CONFIG_ROOT_NFS
#define NFSROOT_TIMEOUT_MIN 5
@@ -527,6 +547,10 @@ void __init mount_root(void)
change_floppy("root floppy");
}
#endif
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+ if (!mount_ubi_rootfs())
+ return;
+#endif
#ifdef CONFIG_BLOCK
create_dev("/dev/root", ROOT_DEV);
mount_block_root("/dev/root", root_mountflags);

View File

@ -0,0 +1,37 @@
From cd68d1b12b5ea4c01a664c064179ada42bf55d3d Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 15 May 2014 20:55:42 +0200
Subject: [PATCH 5/5] ubi: set ROOT_DEV to ubiblock "rootfs" if unset
To: openwrt-devel@lists.openwrt.org
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/mtd/ubi/block.c | 10 ++++++++++
1 file changed, 10 insertions(+)
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -48,6 +48,7 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <asm/div64.h>
+#include <linux/root_dev.h>
#include "ubi-media.h"
#include "ubi.h"
@@ -448,6 +449,15 @@ int ubiblock_create(struct ubi_volume_in
add_disk(dev->gd);
ubi_msg("%s created from ubi%d:%d(%s)",
dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name);
+
+ if (!strcmp(vi->name, "rootfs") &&
+ config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+ ROOT_DEV == 0) {
+ pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n",
+ dev->ubi_num, dev->vol_id, vi->name);
+ ROOT_DEV = MKDEV(gd->major, gd->first_minor);
+ }
+
return 0;
out_free_queue:

View File

@ -0,0 +1,18 @@
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -39,6 +39,7 @@ source "fs/gfs2/Kconfig"
source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
+source "fs/yaffs2/Kconfig"
endif # BLOCK
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -126,3 +126,5 @@ obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
+

View File

@ -0,0 +1,155 @@
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -329,6 +329,33 @@ static int yaffs_readpage(struct file *f
return ret;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#define YCRED_FSUID() from_kuid(&init_user_ns, current_fsuid())
+#define YCRED_FSGID() from_kgid(&init_user_ns, current_fsgid())
+#else
+#define YCRED_FSUID() YCRED(current)->fsuid
+#define YCRED_FSGID() YCRED(current)->fsgid
+
+static inline uid_t i_uid_read(const struct inode *inode)
+{
+ return inode->i_uid;
+}
+
+static inline gid_t i_gid_read(const struct inode *inode)
+{
+ return inode->i_gid;
+}
+
+static inline void i_uid_write(struct inode *inode, uid_t uid)
+{
+ inode->i_uid = uid;
+}
+
+static inline void i_gid_write(struct inode *inode, gid_t gid)
+{
+ inode->i_gid = gid;
+}
+#endif
static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val)
{
@@ -1225,9 +1252,9 @@ static int yaffs_mknod(struct inode *dir
struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
int error = -ENOSPC;
- uid_t uid = YCRED(current)->fsuid;
+ uid_t uid = YCRED_FSUID();
gid_t gid =
- (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+ (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
mode |= S_ISGID;
@@ -1424,9 +1451,9 @@ static int yaffs_symlink(struct inode *d
{
struct yaffs_obj *obj;
struct yaffs_dev *dev;
- uid_t uid = YCRED(current)->fsuid;
+ uid_t uid = YCRED_FSUID();
gid_t gid =
- (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+ (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
@@ -1829,8 +1856,8 @@ static void yaffs_fill_inode_from_obj(st
inode->i_ino = obj->obj_id;
inode->i_mode = obj->yst_mode;
- inode->i_uid = obj->yst_uid;
- inode->i_gid = obj->yst_gid;
+ i_uid_write(inode, obj->yst_uid);
+ i_gid_write(inode, obj->yst_gid);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
inode->i_blksize = inode->i_sb->s_blocksize;
#endif
@@ -1856,7 +1883,7 @@ static void yaffs_fill_inode_from_obj(st
yaffs_trace(YAFFS_TRACE_OS,
"yaffs_fill_inode mode %x uid %d gid %d size %lld count %d",
- inode->i_mode, inode->i_uid, inode->i_gid,
+ inode->i_mode, i_uid_read(inode), i_gid_read(inode),
inode->i_size, atomic_read(&inode->i_count));
switch (obj->yst_mode & S_IFMT) {
--- a/fs/yaffs2/yaffs_attribs.c
+++ b/fs/yaffs2/yaffs_attribs.c
@@ -14,6 +14,48 @@
#include "yaffs_guts.h"
#include "yaffs_attribs.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+static inline uid_t ia_uid_read(const struct iattr *iattr)
+{
+ return from_kuid(&init_user_ns, iattr->ia_uid);
+}
+
+static inline gid_t ia_gid_read(const struct iattr *iattr)
+{
+ return from_kgid(&init_user_ns, iattr->ia_gid);
+}
+
+static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
+{
+ iattr->ia_uid = make_kuid(&init_user_ns, uid);
+}
+
+static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
+{
+ iattr->ia_gid = make_kgid(&init_user_ns, gid);
+}
+#else
+static inline uid_t ia_uid_read(const struct iattr *iattr)
+{
+ return iattr->ia_uid;
+}
+
+static inline gid_t ia_gid_read(const struct iattr *iattr)
+{
+ return iattr->ia_gid;
+}
+
+static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
+{
+ iattr->ia_uid = uid;
+}
+
+static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
+{
+ iattr->ia_gid = gid;
+}
+#endif
+
void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
{
obj->yst_uid = oh->yst_uid;
@@ -77,9 +119,9 @@ int yaffs_set_attribs(struct yaffs_obj *
if (valid & ATTR_MODE)
obj->yst_mode = attr->ia_mode;
if (valid & ATTR_UID)
- obj->yst_uid = attr->ia_uid;
+ obj->yst_uid = ia_uid_read(attr);
if (valid & ATTR_GID)
- obj->yst_gid = attr->ia_gid;
+ obj->yst_gid = ia_gid_read(attr);
if (valid & ATTR_ATIME)
obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
@@ -103,9 +145,9 @@ int yaffs_get_attribs(struct yaffs_obj *
attr->ia_mode = obj->yst_mode;
valid |= ATTR_MODE;
- attr->ia_uid = obj->yst_uid;
+ ia_uid_write(attr, obj->yst_uid);
valid |= ATTR_UID;
- attr->ia_gid = obj->yst_gid;
+ ia_gid_write(attr, obj->yst_gid);
valid |= ATTR_GID;
Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;

View File

@ -0,0 +1,44 @@
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -3025,6 +3025,7 @@ static DECLARE_FSTYPE(yaffs2_fs_type, "y
#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
static struct proc_dir_entry *my_proc_entry;
static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
@@ -3398,6 +3399,7 @@ static int yaffs_proc_write(struct file
return yaffs_proc_debug_write(file, buf, count, data);
return yaffs_proc_write_trace_options(file, buf, count, data);
}
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
/* Stuff to handle installation of file systems */
struct file_system_to_install {
@@ -3421,6 +3423,7 @@ static int __init init_yaffs_fs(void)
mutex_init(&yaffs_context_lock);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
/* Install the proc_fs entries */
my_proc_entry = create_proc_entry("yaffs",
S_IRUGO | S_IFREG, YPROC_ROOT);
@@ -3432,6 +3435,7 @@ static int __init init_yaffs_fs(void)
} else {
return -ENOMEM;
}
+#endif
/* Now add the file system entries */
@@ -3468,7 +3472,9 @@ static void __exit exit_yaffs_fs(void)
yaffs_trace(YAFFS_TRACE_ALWAYS,
"yaffs built " __DATE__ " " __TIME__ " removing.");
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
remove_proc_entry("yaffs", YPROC_ROOT);
+#endif
fsinst = fs_to_install;

View File

@ -0,0 +1,129 @@
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -1701,6 +1701,110 @@ static void yaffs_remove_obj_callback(st
/*-----------------------------------------------------------------*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+static int yaffs_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ struct inode *inode = file->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_dentry_to_obj(file->f_dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ offset = ctx->pos;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: starting at %d", (int)offset);
+
+ if (offset == 0) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry . ino %d",
+ (int)inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (!dir_emit_dot(file, ctx)) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ ctx->pos++;
+ }
+ if (offset == 1) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry .. ino %d",
+ (int)file->f_dentry->d_parent->d_inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (!dir_emit_dotdot(file, ctx)) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ ctx->pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+ if (file->f_version != inode->i_version) {
+ offset = 2;
+ ctx->pos = offset;
+ file->f_version = inode->i_version;
+ }
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= offset) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ yaffs_gross_unlock(dev);
+
+ if (!dir_emit(ctx, name, strlen(name),
+ this_inode, this_type)) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ offset++;
+ ctx->pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+#else
static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
{
struct yaffs_obj *obj;
@@ -1807,10 +1911,15 @@ out:
return ret_val;
}
+#endif
static const struct file_operations yaffs_dir_operations = {
.read = generic_read_dir,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ .iterate = yaffs_readdir,
+#else
.readdir = yaffs_readdir,
+#endif
.fsync = yaffs_sync_object,
.llseek = generic_file_llseek,
};

View File

@ -0,0 +1,123 @@
Subject: yaffs: add support for tags-9bytes mount option
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -2634,6 +2634,7 @@ static const struct super_operations yaf
struct yaffs_options {
int inband_tags;
+ int tags_9bytes;
int skip_checkpoint_read;
int skip_checkpoint_write;
int no_cache;
@@ -2673,6 +2674,8 @@ static int yaffs_parse_options(struct ya
if (!strcmp(cur_opt, "inband-tags")) {
options->inband_tags = 1;
+ } else if (!strcmp(cur_opt, "tags-9bytes")) {
+ options->tags_9bytes = 1;
} else if (!strcmp(cur_opt, "tags-ecc-off")) {
options->tags_ecc_on = 0;
options->tags_ecc_overridden = 1;
@@ -2746,7 +2749,6 @@ static struct super_block *yaffs_interna
struct yaffs_param *param;
int read_only = 0;
- int inband_tags = 0;
struct yaffs_options options;
@@ -2786,6 +2788,9 @@ static struct super_block *yaffs_interna
memset(&options, 0, sizeof(options));
+ if (IS_ENABLED(CONFIG_YAFFS_9BYTE_TAGS))
+ options.tags_9bytes = 1;
+
if (yaffs_parse_options(&options, data_str)) {
/* Option parsing failed */
return NULL;
@@ -2819,17 +2824,22 @@ static struct super_block *yaffs_interna
}
/* Added NCB 26/5/2006 for completeness */
- if (yaffs_version == 2 && !options.inband_tags
- && WRITE_SIZE(mtd) == 512) {
+ if (yaffs_version == 2 &&
+ (!options.inband_tags || options.tags_9bytes) &&
+ WRITE_SIZE(mtd) == 512) {
yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
yaffs_version = 1;
}
- if (mtd->oobavail < sizeof(struct yaffs_packed_tags2) ||
- options.inband_tags)
- inband_tags = 1;
+ if (yaffs_version == 2 &&
+ mtd->oobavail < sizeof(struct yaffs_packed_tags2)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting inband tags");
+ options.inband_tags = 1;
+ }
- if(yaffs_verify_mtd(mtd, yaffs_version, inband_tags) < 0)
+ err = yaffs_verify_mtd(mtd, yaffs_version, options.inband_tags,
+ options.tags_9bytes);
+ if (err < 0)
return NULL;
/* OK, so if we got here, we have an MTD that's NAND and looks
@@ -2890,7 +2900,8 @@ static struct super_block *yaffs_interna
param->n_reserved_blocks = 5;
param->n_caches = (options.no_cache) ? 0 : 10;
- param->inband_tags = inband_tags;
+ param->inband_tags = options.inband_tags;
+ param->tags_9bytes = options.tags_9bytes;
param->enable_xattr = 1;
if (options.lazy_loading_overridden)
--- a/fs/yaffs2/yaffs_mtdif.c
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -16,6 +16,7 @@
#include "yaffs_mtdif.h"
#include "linux/mtd/mtd.h"
+#include "uapi/linux/major.h"
#include "linux/types.h"
#include "linux/time.h"
#include "linux/mtd/nand.h"
@@ -276,7 +277,8 @@ struct mtd_info * yaffs_get_mtd_device(d
return mtd;
}
-int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags)
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags,
+ int tags_9bytes)
{
if (yaffs_version == 2) {
if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
@@ -295,6 +297,12 @@ int yaffs_verify_mtd(struct mtd_info *mt
);
return -1;
}
+
+ if (tags_9bytes && mtd->oobavail < 9) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not support 9-byte tags");
+ return -1;
+ }
}
return 0;
--- a/fs/yaffs2/yaffs_mtdif.h
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -21,5 +21,6 @@
void yaffs_mtd_drv_install(struct yaffs_dev *dev);
struct mtd_info * yaffs_get_mtd_device(dev_t sdev);
void yaffs_put_mtd_device(struct mtd_info *mtd);
-int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags);
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags,
+ int tags_9bytes);
#endif
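
With the option plumbed through as above, it is requested at mount time like any other yaffs option; a hypothetical invocation (device node made up, and only meaningful on NAND whose driver exposes at least 9 free OOB bytes, which is what the yaffs_verify_mtd() check enforces) might be:

    mount -t yaffs -o tags-9bytes /dev/mtdblock3 /mnt/flash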

View File

@ -0,0 +1,239 @@
Subject: yaffs: fix compat tags handling
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
--- a/fs/yaffs2/yaffs_tagscompat.c
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -17,7 +17,9 @@
#include "yaffs_getblockinfo.h"
#include "yaffs_trace.h"
+#if 0
static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+#endif
/********** Tags ECC calculations *********/
@@ -71,6 +73,7 @@ int yaffs_check_tags_ecc(struct yaffs_ta
return 0;
}
+#if 0
/********** Tags **********/
static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
@@ -379,3 +382,214 @@ void yaffs_tags_compat_install(struct ya
if(!dev->tagger.mark_bad_fn)
dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
}
+#else
+
+#include "yaffs_packedtags1.h"
+
+static int yaffs_tags_compat_write(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags1 pt1;
+ u8 tag_buf[9];
+ int retval;
+
+ /* we assume that yaffs_packed_tags1 and yaffs_tags are compatible */
+ compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12);
+ compile_time_assertion(sizeof(struct yaffs_tags) == 8);
+
+ yaffs_pack_tags1(&pt1, tags);
+ yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+ * tags, one with is_deleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+ if (!dev->param.tags_9bytes) {
+ if (tags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+ memcpy(tag_buf, &pt1, 8);
+ } else {
+ if (tags->is_deleted) {
+ memset(tag_buf, 0xff, 8);
+ tag_buf[8] = 0;
+ } else {
+ memcpy(tag_buf, &pt1, 8);
+ tag_buf[8] = 0xff;
+ }
+ }
+
+ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data,
+ (data) ? dev->data_bytes_per_chunk : 0,
+ tag_buf,
+ (dev->param.tags_9bytes) ? 9 : 8);
+
+ return retval;
+}
+
+/* Return with empty extended tags but add ecc_result.
+ */
+static int return_empty_tags(struct yaffs_ext_tags *tags,
+ enum yaffs_ecc_result ecc_result,
+ int retval)
+{
+ if (tags) {
+ memset(tags, 0, sizeof(*tags));
+ tags->ecc_result = ecc_result;
+ }
+
+ return retval;
+}
+
+static int yaffs_tags_compat_read(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags1 pt1;
+ enum yaffs_ecc_result ecc_result;
+ int retval;
+ int deleted;
+ u8 tag_buf[9];
+
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ tag_buf,
+ (dev->param.tags_9bytes) ? 9 : 8,
+ &ecc_result);
+
+ switch (ecc_result) {
+ case YAFFS_ECC_RESULT_NO_ERROR:
+ case YAFFS_ECC_RESULT_FIXED:
+ break;
+
+ case YAFFS_ECC_RESULT_UNFIXED:
+ default:
+ return_empty_tags(tags, YAFFS_ECC_RESULT_UNFIXED, 0);
+ tags->block_bad = dev->drv.drv_check_bad_fn(dev, nand_chunk);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk. */
+ if (yaffs_check_ff(tag_buf, 8)) {
+ /* when blank, upper layers want ecc_result to be <= NO_ERROR */
+ return return_empty_tags(tags, YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_OK);
+ }
+
+ memcpy(&pt1, tag_buf, 8);
+
+ if (!dev->param.tags_9bytes) {
+ /* Read deleted status (bit) then return it to it's non-deleted
+ * state before performing tags mini-ECC check. pt1.deleted is
+ * inverted.
+ */
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+ } else {
+ deleted = (hweight8(tag_buf[8]) < 7) ? 1 : 0;
+ }
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible. */
+ retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+ dev->n_tags_ecc_fixed++;
+ if (ecc_result == YAFFS_ECC_RESULT_NO_ERROR)
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+ dev->n_tags_ecc_unfixed++;
+ return return_empty_tags(tags, YAFFS_ECC_RESULT_UNFIXED,
+ YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+ * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
+ */
+ pt1.should_be_ff = 0xffffffff;
+ yaffs_unpack_tags1(tags, &pt1);
+ tags->ecc_result = ecc_result;
+
+ /* Set deleted state */
+ tags->is_deleted = deleted;
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ return dev->drv.drv_mark_bad_fn(dev, block_no);
+}
+
+static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ struct yaffs_ext_tags tags;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "%s %d", __func__, block_no);
+
+ *seq_number = 0;
+
+ retval = dev->drv.drv_check_bad_fn(dev, block_no);
+ if (retval == YAFFS_FAIL) {
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ goto out;
+ }
+
+ yaffs_tags_compat_read(dev, block_no * dev->param.chunks_per_block,
+ NULL, &tags);
+
+ if (tags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block %d is marked bad",
+ block_no);
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else if (tags.chunk_used) {
+ *seq_number = tags.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else {
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+ retval = YAFFS_OK;
+
+out:
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block query returns seq %u state %d",
+ *seq_number, *state);
+
+ return retval;
+}
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev)
+{
+ if (dev->param.is_yaffs2)
+ return;
+
+ if (!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_write;
+
+ if (!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_read;
+
+ if (!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
+
+ if (!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
+}
+#endif

View File

@ -0,0 +1,25 @@
From f31b7c0efa255dd17a5f584022a319387f09b0d8 Mon Sep 17 00:00:00 2001
From: Jonas Gorski <jonas.gorski@gmail.com>
Date: Tue, 12 Apr 2011 19:55:41 +0200
Subject: [PATCH] squashfs: update xz compressor options struct.
Update the xz compressor options struct to match the squashfs userspace
one.
---
fs/squashfs/xz_wrapper.c | 4 +++-
1 files changed, 3 insertions(+), 1 deletions(-)
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -40,8 +40,10 @@ struct squashfs_xz {
};
struct disk_comp_opts {
- __le32 dictionary_size;
__le32 flags;
+ __le16 bit_opts;
+ __le16 fb;
+ __le32 dictionary_size;
};
struct comp_opts {

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,56 @@
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -114,6 +114,16 @@ static int jffs2_build_filesystem(struct
dbg_fsbuild("scanned flash completely\n");
jffs2_dbg_dump_block_lists_nolock(c);
+ if (c->flags & (1 << 7)) {
+ printk("%s(): unlocking the mtd device... ", __func__);
+ mtd_unlock(c->mtd, 0, c->mtd->size);
+ printk("done.\n");
+
+ printk("%s(): erasing all blocks after the end marker... ", __func__);
+ jffs2_erase_pending_blocks(c, -1);
+ printk("done.\n");
+ }
+
dbg_fsbuild("pass 1 starting\n");
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -148,8 +148,14 @@ int jffs2_scan_medium(struct jffs2_sb_in
/* reset summary info for next eraseblock scan */
jffs2_sum_reset_collected(s);
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
- buf_size, s);
+ if (c->flags & (1 << 7)) {
+ if (mtd_block_isbad(c->mtd, jeb->offset))
+ ret = BLK_STATE_BADBLOCK;
+ else
+ ret = BLK_STATE_ALLFF;
+ } else
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+ buf_size, s);
if (ret < 0)
goto out;
@@ -556,6 +562,17 @@ static int jffs2_scan_eraseblock (struct
return err;
}
+ if ((buf[0] == 0xde) &&
+ (buf[1] == 0xad) &&
+ (buf[2] == 0xc0) &&
+ (buf[3] == 0xde)) {
+ /* end of filesystem. erase everything after this point */
+ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
+ c->flags |= (1 << 7);
+
+ return BLK_STATE_ALLFF;
+ }
+
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
ofs = 0;
max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
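
For reference, the end-of-filesystem marker the scan looks for is simply the four bytes de ad c0 de at the start of an erase block; everything from that block onward is unlocked and erased when the filesystem is first built. As a rough sketch (assuming the image is already padded to an erase-block boundary; real image generation handles the padding), the marker could be appended to a rootfs image with:

    printf '\xde\xad\xc0\xde' >> root.jffs2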

View File

@ -0,0 +1,146 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1437,6 +1437,13 @@ config CRYPTO_LZ4HC
help
This is the LZ4 high compression mode algorithm.
+config CRYPTO_XZ
+ tristate "XZ compression algorithm"
+ select CRYPTO_ALGAPI
+ select XZ_DEC
+ help
+ This is the XZ algorithm. Only decompression is supported for now.
+
comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
+obj-$(CONFIG_CRYPTO_XZ) += xz.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
--- /dev/null
+++ b/crypto/xz.c
@@ -0,0 +1,117 @@
+/*
+ * Cryptographic API.
+ *
+ * XZ decompression support.
+ *
+ * Copyright (c) 2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xz.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+
+struct xz_comp_ctx {
+ struct xz_dec *decomp_state;
+ struct xz_buf decomp_buf;
+};
+
+static int crypto_xz_decomp_init(struct xz_comp_ctx *ctx)
+{
+ ctx->decomp_state = xz_dec_init(XZ_SINGLE, 0);
+ if (!ctx->decomp_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void crypto_xz_decomp_exit(struct xz_comp_ctx *ctx)
+{
+ xz_dec_end(ctx->decomp_state);
+}
+
+static int crypto_xz_init(struct crypto_tfm *tfm)
+{
+ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return crypto_xz_decomp_init(ctx);
+}
+
+static void crypto_xz_exit(struct crypto_tfm *tfm)
+{
+ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_xz_decomp_exit(ctx);
+}
+
+static int crypto_xz_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ return -EOPNOTSUPP;
+}
+
+static int crypto_xz_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct xz_comp_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct xz_buf *xz_buf = &dctx->decomp_buf;
+ int ret;
+
+ memset(xz_buf, '\0', sizeof(struct xz_buf));
+
+ xz_buf->in = (u8 *) src;
+ xz_buf->in_pos = 0;
+ xz_buf->in_size = slen;
+ xz_buf->out = (u8 *) dst;
+ xz_buf->out_pos = 0;
+ xz_buf->out_size = *dlen;
+
+ ret = xz_dec_run(dctx->decomp_state, xz_buf);
+ if (ret != XZ_STREAM_END) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *dlen = xz_buf->out_pos;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static struct crypto_alg crypto_xz_alg = {
+ .cra_name = "xz",
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct xz_comp_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(crypto_xz_alg.cra_list),
+ .cra_init = crypto_xz_init,
+ .cra_exit = crypto_xz_exit,
+ .cra_u = { .compress = {
+ .coa_compress = crypto_xz_compress,
+ .coa_decompress = crypto_xz_decompress } }
+};
+
+static int __init crypto_xz_mod_init(void)
+{
+ return crypto_register_alg(&crypto_xz_alg);
+}
+
+static void __exit crypto_xz_mod_exit(void)
+{
+ crypto_unregister_alg(&crypto_xz_alg);
+}
+
+module_init(crypto_xz_mod_init);
+module_exit(crypto_xz_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto XZ decompression support");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");

View File

@ -0,0 +1,92 @@
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -5,8 +5,10 @@ config UBIFS_FS
select CRYPTO if UBIFS_FS_ADVANCED_COMPR
select CRYPTO if UBIFS_FS_LZO
select CRYPTO if UBIFS_FS_ZLIB
+ select CRYPTO if UBIFS_FS_XZ
select CRYPTO_LZO if UBIFS_FS_LZO
select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
+ select CRYPTO_XZ if UBIFS_FS_XZ
depends on MTD_UBI
help
UBIFS is a file system for flash devices which works on top of UBI.
@@ -35,3 +37,12 @@ config UBIFS_FS_ZLIB
default y
help
Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
+
+config UBIFS_FS_XZ
+ bool "XZ decompression support" if UBIFS_FS_ADVANCED_COMPR
+ depends on UBIFS_FS
+ default y
+ help
+ XZ compresses better than ZLIB but it is slower.
+ Say 'Y' if unsure.
+
--- a/fs/ubifs/compress.c
+++ b/fs/ubifs/compress.c
@@ -71,6 +71,24 @@ static struct ubifs_compressor zlib_comp
};
#endif
+#ifdef CONFIG_UBIFS_FS_XZ
+static DEFINE_MUTEX(xz_enc_mutex);
+static DEFINE_MUTEX(xz_dec_mutex);
+
+static struct ubifs_compressor xz_compr = {
+ .compr_type = UBIFS_COMPR_XZ,
+ .comp_mutex = &xz_enc_mutex,
+ .decomp_mutex = &xz_dec_mutex,
+ .name = "xz",
+ .capi_name = "xz",
+};
+#else
+static struct ubifs_compressor xz_compr = {
+ .compr_type = UBIFS_COMPR_XZ,
+ .name = "xz",
+};
+#endif
+
/* All UBIFS compressors */
struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
@@ -232,9 +250,15 @@ int __init ubifs_compressors_init(void)
if (err)
goto out_lzo;
+ err = compr_init(&xz_compr);
+ if (err)
+ goto out_zlib;
+
ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr;
return 0;
+out_zlib:
+ compr_exit(&zlib_compr);
out_lzo:
compr_exit(&lzo_compr);
return err;
@@ -247,4 +271,5 @@ void ubifs_compressors_exit(void)
{
compr_exit(&lzo_compr);
compr_exit(&zlib_compr);
+ compr_exit(&xz_compr);
}
--- a/fs/ubifs/ubifs-media.h
+++ b/fs/ubifs/ubifs-media.h
@@ -332,12 +332,14 @@ enum {
* UBIFS_COMPR_NONE: no compression
* UBIFS_COMPR_LZO: LZO compression
* UBIFS_COMPR_ZLIB: ZLIB compression
+ * UBIFS_COMPR_XZ: XZ compression
* UBIFS_COMPR_TYPES_CNT: count of supported compression types
*/
enum {
UBIFS_COMPR_NONE,
UBIFS_COMPR_LZO,
UBIFS_COMPR_ZLIB,
+ UBIFS_COMPR_XZ,
UBIFS_COMPR_TYPES_CNT,
};
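
Note that the crypto glue registered earlier only implements decompression, so the practical use of UBIFS_COMPR_XZ is mounting volumes whose data was xz-compressed at image-creation time (e.g. by a mkfs.ubifs build that supports xz); at runtime nothing special is needed, since the decompressor is selected per node from the on-flash compression type, e.g.:

    mount -t ubifs -o ro ubi0:rootfs /mnt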

View File

@ -0,0 +1,65 @@
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1576,6 +1576,10 @@ const struct inode_operations ubifs_syml
.follow_link = ubifs_follow_link,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
+ .setxattr = ubifs_setxattr,
+ .getxattr = ubifs_getxattr,
+ .listxattr = ubifs_listxattr,
+ .removexattr = ubifs_removexattr,
};
const struct file_operations ubifs_file_operations = {
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -553,7 +553,8 @@ int ubifs_jnl_update(struct ubifs_info *
dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
- ubifs_assert(dir_ui->data_len == 0);
+ if (!xent)
+ ubifs_assert(dir_ui->data_len == 0);
ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex));
dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
@@ -573,6 +574,13 @@ int ubifs_jnl_update(struct ubifs_info *
aligned_dlen = ALIGN(dlen, 8);
aligned_ilen = ALIGN(ilen, 8);
len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
+ if (xent) {
+ /*
+ * Make sure to account for dir_ui->data_len in
+ * length calculation in case there is extended attribute.
+ */
+ len += dir_ui->data_len;
+ }
dent = kmalloc(len, GFP_NOFS);
if (!dent)
return -ENOMEM;
@@ -649,7 +657,8 @@ int ubifs_jnl_update(struct ubifs_info *
ino_key_init(c, &ino_key, dir->i_ino);
ino_offs += aligned_ilen;
- err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
+ err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
+ UBIFS_INO_NODE_SZ + dir_ui->data_len);
if (err)
goto out_ro;
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -209,12 +209,12 @@ static int change_xattr(struct ubifs_inf
goto out_free;
}
inode->i_size = ui->ui_size = size;
- ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
host->i_ctime = ubifs_current_time(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
+ ui->data_len = size;
/*
* It is important to write the host inode after the xattr inode

View File

@ -0,0 +1,29 @@
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -63,6 +63,17 @@
/* Default time granularity in nanoseconds */
#define DEFAULT_TIME_GRAN 1000000000
+static int get_default_compressor(void)
+{
+ if (ubifs_compr_present(UBIFS_COMPR_LZO))
+ return UBIFS_COMPR_LZO;
+
+ if (ubifs_compr_present(UBIFS_COMPR_ZLIB))
+ return UBIFS_COMPR_ZLIB;
+
+ return UBIFS_COMPR_NONE;
+}
+
/**
* create_default_filesystem - format empty UBI volume.
* @c: UBIFS file-system description object
@@ -183,7 +194,7 @@ static int create_default_filesystem(str
if (c->mount_opts.override_compr)
sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
else
- sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
+ sup->default_compr = cpu_to_le16(get_default_compressor());
generate_random_uuid(sup->uuid);

View File

@ -0,0 +1,122 @@
From 90bea5a3f0bf680b87b90516f3c231997f4b8f3b Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Mon, 2 Jun 2014 15:51:10 +0200
X-Git-Url: http://git.infradead.org/linux-ubifs.git/commitdiff_plain/90bea5a3f0bf680b87b90516f3c231997f4b8f3b
X-Git-Url: https://git.kernel.org/cgit/linux/kernel/git/next/linux-next.git/commit/?id=90bea5a3f0bf680b87b90516f3c231997f4b8f3b
Subject: UBIFS: respect MS_SILENT mount flag
When attempting to mount a non-ubifs formatted volume, lots of error
messages (including a stack dump) are thrown to the kernel log even if
the MS_SILENT mount flag is set.
Fix this by adding an additional state variable in
struct ubifs_info and suppressing error messages in ubifs_read_node if
MS_SILENT is set.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
---
fs/ubifs/io.c | 18 ++++++++++--------
fs/ubifs/super.c | 5 +++++
fs/ubifs/ubifs.h | 11 +++++++++++
3 files changed, 26 insertions(+), 8 deletions(-)
---
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -988,30 +988,32 @@ int ubifs_read_node(const struct ubifs_i
return err;
if (type != ch->node_type) {
- ubifs_err("bad node type (%d but expected %d)",
- ch->node_type, type);
+ ubifs_errc(c, "bad node type (%d but expected %d)",
+ ch->node_type, type);
goto out;
}
err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
if (err) {
- ubifs_err("expected node type %d", type);
+ ubifs_errc(c, "expected node type %d", type);
return err;
}
l = le32_to_cpu(ch->len);
if (l != len) {
- ubifs_err("bad node length %d, expected %d", l, len);
+ ubifs_errc(c, "bad node length %d, expected %d", l, len);
goto out;
}
return 0;
out:
- ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
- ubi_is_mapped(c->ubi, lnum));
- ubifs_dump_node(c, buf);
- dump_stack();
+ ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
+ offs, ubi_is_mapped(c->ubi, lnum));
+ if (!c->probing) {
+ ubifs_dump_node(c, buf);
+ dump_stack();
+ }
return -EINVAL;
}
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1149,6 +1149,9 @@ static int mount_ubifs(struct ubifs_info
size_t sz;
c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
+ /* Suppress error messages while probing if MS_SILENT is set */
+ c->probing = !!(c->vfs_sb->s_flags & MS_SILENT);
+
err = init_constants_early(c);
if (err)
return err;
@@ -1214,6 +1217,8 @@ static int mount_ubifs(struct ubifs_info
if (err)
goto out_free;
+ c->probing = 0;
+
/*
* Make sure the compressor which is set as default in the superblock
* or overridden by mount options is actually compiled in.
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -51,6 +51,15 @@
#define ubifs_warn(fmt, ...) \
pr_warn("UBIFS warning (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
+/*
+ * A variant of 'ubifs_err()' which takes the UBIFS file-system description
+ * object as an argument.
+ */
+#define ubifs_errc(c, fmt, ...) \
+ do { \
+ if (!(c)->probing) \
+ ubifs_err(fmt, ##__VA_ARGS__); \
+ } while (0)
/* UBIFS file system VFS magic number */
#define UBIFS_SUPER_MAGIC 0x24051905
@@ -1209,6 +1218,7 @@ struct ubifs_debug_info;
* @need_recovery: %1 if the file-system needs recovery
* @replaying: %1 during journal replay
* @mounting: %1 while mounting
+ * @probing: %1 while attempting to mount if MS_SILENT mount flag is set
* @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
* @replay_list: temporary list used during journal replay
* @replay_buds: list of buds to replay
@@ -1441,6 +1451,7 @@ struct ubifs_info {
unsigned int replaying:1;
unsigned int mounting:1;
unsigned int remounting_rw:1;
+ unsigned int probing:1;
struct list_head replay_list;
struct list_head replay_buds;
unsigned long long cs_sqnum;

File diff suppressed because it is too large

View File

@ -0,0 +1,108 @@
--- a/include/linux/netfilter/xt_layer7.h
+++ b/include/linux/netfilter/xt_layer7.h
@@ -8,6 +8,7 @@ struct xt_layer7_info {
char protocol[MAX_PROTOCOL_LEN];
char pattern[MAX_PATTERN_LEN];
u_int8_t invert;
+ u_int8_t pkt;
};
#endif /* _XT_LAYER7_H */
--- a/net/netfilter/xt_layer7.c
+++ b/net/netfilter/xt_layer7.c
@@ -314,33 +314,35 @@ static int match_no_append(struct nf_con
}
/* add the new app data to the conntrack. Return number of bytes added. */
-static int add_data(struct nf_conn * master_conntrack,
- char * app_data, int appdatalen)
+static int add_datastr(char *target, int offset, char *app_data, int len)
{
int length = 0, i;
- int oldlength = master_conntrack->layer7.app_data_len;
-
- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
- clear on whether the race condition exists or whether this really
- fixes it. I might just be being dense... Anyway, if it's not really
- a fix, all it does is waste a very small amount of time. */
- if(!master_conntrack->layer7.app_data) return 0;
+ if (!target) return 0;
/* Strip nulls. Make everything lower case (our regex lib doesn't
do case insensitivity). Add it to the end of the current data. */
- for(i = 0; i < maxdatalen-oldlength-1 &&
- i < appdatalen; i++) {
+ for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
if(app_data[i] != '\0') {
/* the kernel version of tolower mungs 'upper ascii' */
- master_conntrack->layer7.app_data[length+oldlength] =
+ target[length+offset] =
isascii(app_data[i])?
tolower(app_data[i]) : app_data[i];
length++;
}
}
+ target[length+offset] = '\0';
+
+ return length;
+}
+
+/* add the new app data to the conntrack. Return number of bytes added. */
+static int add_data(struct nf_conn * master_conntrack,
+ char * app_data, int appdatalen)
+{
+ int length;
- master_conntrack->layer7.app_data[length+oldlength] = '\0';
- master_conntrack->layer7.app_data_len = length + oldlength;
+ length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
+ master_conntrack->layer7.app_data_len += length;
return length;
}
@@ -438,7 +440,7 @@ match(const struct sk_buff *skbin,
enum ip_conntrack_info master_ctinfo, ctinfo;
struct nf_conn *master_conntrack, *conntrack;
- unsigned char * app_data;
+ unsigned char *app_data, *tmp_data;
unsigned int pattern_result, appdatalen;
regexp * comppattern;
@@ -466,8 +468,8 @@ match(const struct sk_buff *skbin,
master_conntrack = master_ct(master_conntrack);
/* if we've classified it or seen too many packets */
- if(total_acct_packets(master_conntrack) > num_packets ||
- master_conntrack->layer7.app_proto) {
+ if(!info->pkt && (total_acct_packets(master_conntrack) > num_packets ||
+ master_conntrack->layer7.app_proto)) {
pattern_result = match_no_append(conntrack, master_conntrack,
ctinfo, master_ctinfo, info);
@@ -500,6 +502,25 @@ match(const struct sk_buff *skbin,
/* the return value gets checked later, when we're ready to use it */
comppattern = compile_and_cache(info->pattern, info->protocol);
+ if (info->pkt) {
+ tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
+ if(!tmp_data){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
+ return info->invert;
+ }
+
+ tmp_data[0] = '\0';
+ add_datastr(tmp_data, 0, app_data, appdatalen);
+ pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
+
+ kfree(tmp_data);
+ tmp_data = NULL;
+ spin_unlock_bh(&l7_lock);
+
+ return (pattern_result ^ info->invert);
+ }
+
/* On the first packet of a connection, allocate space for app data */
if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] &&
!master_conntrack->layer7.app_data){

View File

@ -0,0 +1,51 @@
--- a/net/netfilter/xt_layer7.c
+++ b/net/netfilter/xt_layer7.c
@@ -415,7 +415,9 @@ static int layer7_write_proc(struct file
}
static bool
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+match(const struct sk_buff *skbin, struct xt_action_param *par)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
match(const struct sk_buff *skbin, const struct xt_match_param *par)
#else
match(const struct sk_buff *skbin,
@@ -597,14 +599,19 @@ match(const struct sk_buff *skbin,
}
// load nf_conntrack_ipv4
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+static int
+#else
+static bool
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
-static bool check(const struct xt_mtchk_param *par)
+check(const struct xt_mtchk_param *par)
{
if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
printk(KERN_WARNING "can't load conntrack support for "
"proto=%d\n", par->match->family);
#else
-static bool check(const char *tablename, const void *inf,
+check(const char *tablename, const void *inf,
const struct xt_match *match, void *matchinfo,
unsigned int hook_mask)
{
@@ -612,9 +619,15 @@ static bool check(const char *tablename,
printk(KERN_WARNING "can't load conntrack support for "
"proto=%d\n", match->family);
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ return -EINVAL;
+ }
+ return 0;
+#else
return 0;
}
return 1;
+#endif
}

View File

@ -0,0 +1,61 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1175,6 +1175,27 @@ config NETFILTER_XT_MATCH_L2TP
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_LAYER7
+ tristate '"layer7" match support'
+ depends on EXPERIMENTAL
+ depends on NETFILTER_XTABLES
+ depends on NETFILTER_ADVANCED
+ depends on NF_CONNTRACK
+ help
+ Say Y if you want to be able to classify connections (and their
+ packets) based on regular expression matching of their application
+ layer data. This is one way to classify applications such as
+ peer-to-peer filesharing systems that do not always use the same
+ port.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_LAYER7_DEBUG
+ bool 'Layer 7 debugging output'
+ depends on NETFILTER_XT_MATCH_LAYER7
+ help
+ Say Y to get lots of debugging output.
+
config NETFILTER_XT_MATCH_LENGTH
tristate '"length" match support'
depends on NETFILTER_ADVANCED
@@ -1369,26 +1390,11 @@ config NETFILTER_XT_MATCH_STATE
To compile it as a module, choose M here. If unsure, say N.
-config NETFILTER_XT_MATCH_LAYER7
- tristate '"layer7" match support'
- depends on NETFILTER_XTABLES
- depends on EXPERIMENTAL && (IP_NF_CONNTRACK || NF_CONNTRACK)
- depends on NETFILTER_ADVANCED
- help
- Say Y if you want to be able to classify connections (and their
- packets) based on regular expression matching of their application
- layer data. This is one way to classify applications such as
- peer-to-peer filesharing systems that do not always use the same
- port.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config NETFILTER_XT_MATCH_LAYER7_DEBUG
- bool 'Layer 7 debugging output'
- depends on NETFILTER_XT_MATCH_LAYER7
- help
- Say Y to get lots of debugging output.
-
+ bool 'Layer 7 debugging output'
+ depends on NETFILTER_XT_MATCH_LAYER7
+ help
+ Say Y to get lots of debugging output.
config NETFILTER_XT_MATCH_STATISTIC
tristate '"statistic" match support'

View File

@ -0,0 +1,86 @@
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -17,6 +17,7 @@
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <linux/security.h>
+#include <linux/inet.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
@@ -268,10 +269,66 @@ static int ct_open(struct inode *inode,
sizeof(struct ct_iter_state));
}
+struct kill_request {
+ u16 family;
+ union nf_inet_addr addr;
+};
+
+static int kill_matching(struct nf_conn *i, void *data)
+{
+ struct kill_request *kr = data;
+ struct nf_conntrack_tuple *t1 = &i->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ struct nf_conntrack_tuple *t2 = &i->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ if (!kr->family)
+ return 1;
+
+ if (t1->src.l3num != kr->family)
+ return 0;
+
+ return (nf_inet_addr_cmp(&kr->addr, &t1->src.u3) ||
+ nf_inet_addr_cmp(&kr->addr, &t1->dst.u3) ||
+ nf_inet_addr_cmp(&kr->addr, &t2->src.u3) ||
+ nf_inet_addr_cmp(&kr->addr, &t2->dst.u3));
+}
+
+static ssize_t ct_file_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = file->private_data;
+ struct net *net = seq_file_net(seq);
+ struct kill_request kr = { };
+ char req[INET6_ADDRSTRLEN] = { };
+
+ if (count == 0)
+ return 0;
+
+ if (count >= INET6_ADDRSTRLEN)
+ count = INET6_ADDRSTRLEN - 1;
+
+ if (copy_from_user(req, buf, count))
+ return -EFAULT;
+
+ if (strnchr(req, count, ':')) {
+ kr.family = AF_INET6;
+ if (!in6_pton(req, count, (void *)&kr.addr, '\n', NULL))
+ return -EINVAL;
+ } else if (strnchr(req, count, '.')) {
+ kr.family = AF_INET;
+ if (!in4_pton(req, count, (void *)&kr.addr, '\n', NULL))
+ return -EINVAL;
+ }
+
+ nf_ct_iterate_cleanup(net, kill_matching, &kr, 0, 0);
+
+ return count;
+}
+
static const struct file_operations ct_file_ops = {
.owner = THIS_MODULE,
.open = ct_open,
.read = seq_read,
+ .write = ct_file_write,
.llseek = seq_lseek,
.release = seq_release_net,
};
@@ -373,7 +430,7 @@ static int nf_conntrack_standalone_init_
{
struct proc_dir_entry *pde;
- pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
+ pde = proc_create("nf_conntrack", 0660, net->proc_net, &ct_file_ops);
if (!pde)
goto out_nf_conntrack;

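The write handler added above accepts a single IPv4 or IPv6 address and
flushes every conntrack entry whose original or reply tuple references it.
A minimal userspace sketch, assuming the patched kernel and root
privileges (the address is only a placeholder):

/* Sketch: flush all tracked connections involving 192.0.2.1 by writing
 * the address to the proc file opened read-write above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char addr[] = "192.0.2.1\n";
        int fd = open("/proc/net/nf_conntrack", O_WRONLY);

        if (fd < 0 || write(fd, addr, strlen(addr)) < 0)
                perror("/proc/net/nf_conntrack");
        if (fd >= 0)
                close(fd);
        return 0;
}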
View File

@ -0,0 +1,93 @@
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
@@ -87,6 +87,7 @@ struct ipt_ip {
#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
#define IPT_F_GOTO 0x02 /* Set if jump is a goto */
#define IPT_F_MASK 0x03 /* All possible flag bits mask. */
+#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
/* Values for "inv" field in struct ipt_ip. */
#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -82,6 +82,9 @@ ip_packet_match(const struct iphdr *ip,
#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+ return true;
+
if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
IPT_INV_SRCIP) ||
FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
@@ -135,6 +138,29 @@ ip_packet_match(const struct iphdr *ip,
return true;
}
+static void
+ip_checkdefault(struct ipt_ip *ip)
+{
+ static const char iface_mask[IFNAMSIZ] = {};
+
+ if (ip->invflags || ip->flags & IPT_F_FRAG)
+ return;
+
+ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
+ return;
+
+ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
+ return;
+
+ if (ip->smsk.s_addr || ip->dmsk.s_addr)
+ return;
+
+ if (ip->proto)
+ return;
+
+ ip->flags |= IPT_F_NO_DEF_MATCH;
+}
+
static bool
ip_checkentry(const struct ipt_ip *ip)
{
@@ -565,7 +591,7 @@ static void cleanup_match(struct xt_entr
}
static int
-check_entry(const struct ipt_entry *e, const char *name)
+check_entry(struct ipt_entry *e, const char *name)
{
const struct xt_entry_target *t;
@@ -574,6 +600,8 @@ check_entry(const struct ipt_entry *e, c
return -EINVAL;
}
+ ip_checkdefault(&e->ip);
+
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
return -EINVAL;
@@ -935,6 +963,7 @@ copy_entries_to_user(unsigned int total_
const struct xt_table_info *private = table->private;
int ret = 0;
const void *loc_cpu_entry;
+ u8 flags;
counters = alloc_counters(table);
if (IS_ERR(counters))
@@ -965,6 +994,14 @@ copy_entries_to_user(unsigned int total_
ret = -EFAULT;
goto free_counters;
}
+
+ flags = e->ip.flags & IPT_F_MASK;
+ if (copy_to_user(userptr + off
+ + offsetof(struct ipt_entry, ip.flags),
+ &flags, sizeof(flags)) != 0) {
+ ret = -EFAULT;
+ goto free_counters;
+ }
for (i = sizeof(struct ipt_entry);
i < e->target_offset;

View File

@ -0,0 +1,94 @@
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -310,6 +310,33 @@ struct ipt_entry *ipt_next_entry(const s
return (void *)entry + entry->next_offset;
}
+static bool
+ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
+{
+ struct xt_entry_target *t;
+ struct xt_standard_target *st;
+
+ if (e->target_offset != sizeof(struct ipt_entry))
+ return false;
+
+ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
+ return false;
+
+ t = ipt_get_target(e);
+ if (t->u.kernel.target->target)
+ return false;
+
+ st = (struct xt_standard_target *) t;
+ if (st->verdict == XT_RETURN)
+ return false;
+
+ if (st->verdict >= 0)
+ return false;
+
+ *verdict = (unsigned)(-st->verdict) - 1;
+ return true;
+}
+
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
@@ -331,9 +358,33 @@ ipt_do_table(struct sk_buff *skb,
unsigned int addend;
/* Initialization */
+ IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+ local_bh_disable();
+ private = table->private;
+ cpu = smp_processor_id();
+ /*
+ * Ensure we load private-> members after we've fetched the base
+ * pointer.
+ */
+ smp_read_barrier_depends();
+ table_base = private->entries[cpu];
+
+ e = get_entry(table_base, private->hook_entry[hook]);
+ if (ipt_handle_default_rule(e, &verdict)) {
+ ADD_COUNTER(e->counters, skb->len, 1);
+ local_bh_enable();
+ return verdict;
+ }
+
ip = ip_hdr(skb);
indev = in ? in->name : nulldevname;
outdev = out ? out->name : nulldevname;
+
+ addend = xt_write_recseq_begin();
+ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+ stackptr = per_cpu_ptr(private->stackptr, cpu);
+ origptr = *stackptr;
+
/* We handle fragments by dealing with the first fragment as
* if it was a normal packet. All other fragments are treated
* normally, except that they will NEVER match rules that ask
@@ -348,23 +399,6 @@ ipt_do_table(struct sk_buff *skb,
acpar.family = NFPROTO_IPV4;
acpar.hooknum = hook;
- IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- local_bh_disable();
- addend = xt_write_recseq_begin();
- private = table->private;
- cpu = smp_processor_id();
- /*
- * Ensure we load private-> members after we've fetched the base
- * pointer.
- */
- smp_read_barrier_depends();
- table_base = private->entries[cpu];
- jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
- stackptr = per_cpu_ptr(private->stackptr, cpu);
- origptr = *stackptr;
-
- e = get_entry(table_base, private->hook_entry[hook]);
-
pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
table->name, hook, origptr,
get_entry(table_base, private->underflow[hook]));

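ipt_handle_default_rule() above relies on the xtables convention that a
standard target stores an absolute verdict as -(verdict) - 1; jump
offsets (>= 0) and XT_RETURN are left to the normal table walk. A small
standalone sketch of that decode step, with the NF_* values copied from
the uapi headers:

#include <assert.h>

/* Verdict values as in <linux/netfilter.h>. */
#define NF_DROP   0
#define NF_ACCEPT 1

/* Same decode as ipt_handle_default_rule(): stored = -(verdict) - 1. */
static unsigned int decode_verdict(int stored)
{
        return (unsigned int)(-stored) - 1;
}

int main(void)
{
        assert(decode_verdict(-NF_ACCEPT - 1) == NF_ACCEPT);   /* -2 -> ACCEPT */
        assert(decode_verdict(-NF_DROP - 1) == NF_DROP);       /* -1 -> DROP */
        return 0;
}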
View File

@ -0,0 +1,16 @@
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -85,9 +85,11 @@ ip_packet_match(const struct iphdr *ip,
if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
return true;
- if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
+ if (FWINV(ipinfo->smsk.s_addr &&
+ (ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
IPT_INV_SRCIP) ||
- FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
+ FWINV(ipinfo->dmsk.s_addr &&
+ (ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
IPT_INV_DSTIP)) {
dprintf("Source or dest mismatch.\n");

View File

@ -0,0 +1,36 @@
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -33,6 +33,9 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+/* Do not check the TCP window for incoming packets */
+static int nf_ct_tcp_no_window_check __read_mostly = 1;
+
/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
@@ -515,6 +518,9 @@ static bool tcp_in_window(const struct n
s32 receiver_offset;
bool res, in_recv_win;
+ if (nf_ct_tcp_no_window_check)
+ return true;
+
/*
* Get the required data from the packet.
*/
@@ -1452,6 +1458,13 @@ static struct ctl_table tcp_sysctl_table
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "nf_conntrack_tcp_no_window_check",
+ .data = &nf_ct_tcp_no_window_check,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{ }
};

View File

@ -0,0 +1,95 @@
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -55,6 +55,7 @@ header-y += xt_ecn.h
header-y += xt_esp.h
header-y += xt_hashlimit.h
header-y += xt_helper.h
+header-y += xt_id.h
header-y += xt_ipcomp.h
header-y += xt_iprange.h
header-y += xt_ipvs.h
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_id.h
@@ -0,0 +1,8 @@
+#ifndef _XT_ID_H
+#define _XT_ID_H
+
+struct xt_id_info {
+ u32 id;
+};
+
+#endif /* XT_ID_H */
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1145,6 +1145,13 @@ config NETFILTER_XT_MATCH_IPCOMP
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_ID
+ tristate '"id" match support'
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds an `id' dummy-match, which allows you to put
+ numeric IDs into your iptables ruleset.
+
config NETFILTER_XT_MATCH_IPRANGE
tristate '"iprange" address range match support'
depends on NETFILTER_ADVANCED
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -140,6 +140,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) +=
obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_ID) += xt_id.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPCOMP) += xt_ipcomp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o
--- /dev/null
+++ b/net/netfilter/xt_id.c
@@ -0,0 +1,45 @@
+/*
+ * Implements a dummy match to allow attaching IDs to rules
+ *
+ * 2014-08-01 Jo-Philipp Wich <jow@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_id.h>
+
+MODULE_AUTHOR("Jo-Philipp Wich <jow@openwrt.org>");
+MODULE_DESCRIPTION("Xtables: No-op match which can be tagged with a 32bit ID");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_id");
+MODULE_ALIAS("ip6t_id");
+
+static bool
+id_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ /* We always match */
+ return true;
+}
+
+static struct xt_match id_mt_reg __read_mostly = {
+ .name = "id",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .match = id_mt,
+ .matchsize = sizeof(struct xt_id_info),
+ .me = THIS_MODULE,
+};
+
+static int __init id_mt_init(void)
+{
+ return xt_register_match(&id_mt_reg);
+}
+
+static void __exit id_mt_exit(void)
+{
+ xt_unregister_match(&id_mt_reg);
+}
+
+module_init(id_mt_init);
+module_exit(id_mt_exit);

View File

@ -0,0 +1,12 @@
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -90,6 +90,9 @@ int nf_xfrm_me_harder(struct sk_buff *sk
struct dst_entry *dst;
int err;
+ if (!dev_net(skb->dev)->xfrm.policy_count[XFRM_POLICY_OUT])
+ return 0;
+
err = xfrm_decode_session(skb, &fl, family);
if (err < 0)
return err;

View File

@ -0,0 +1,87 @@
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -86,6 +86,7 @@ struct netns_ct {
struct ctl_table_header *helper_sysctl_header;
#endif
char *slabname;
+ int skip_filter;
unsigned int sysctl_log_invalid; /* Log invalid packets */
int sysctl_events;
int sysctl_acct;
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -15,6 +15,7 @@
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/slab.h>
#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -37,6 +38,7 @@ iptable_filter_hook(const struct nf_hook
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+ enum ip_conntrack_info ctinfo;
const struct net *net;
if (ops->hooknum == NF_INET_LOCAL_OUT &&
@@ -46,6 +48,11 @@ iptable_filter_hook(const struct nf_hook
return NF_ACCEPT;
net = dev_net((in != NULL) ? in : out);
+ nf_ct_get(skb, &ctinfo);
+ if ((ctinfo == IP_CT_ESTABLISHED_REPLY || ctinfo == IP_CT_ESTABLISHED) &&
+ net->ct.skip_filter)
+ return NF_ACCEPT;
+
return ipt_do_table(skb, ops->hooknum, in, out,
net->ipv4.iptable_filter);
}
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -13,6 +13,7 @@
#include <linux/moduleparam.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>
+#include <net/netfilter/nf_conntrack.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -37,6 +38,12 @@ ip6table_filter_hook(const struct nf_hoo
int (*okfn)(struct sk_buff *))
{
const struct net *net = dev_net((in != NULL) ? in : out);
+ enum ip_conntrack_info ctinfo;
+
+ nf_ct_get(skb, &ctinfo);
+ if ((ctinfo == IP_CT_ESTABLISHED_REPLY || ctinfo == IP_CT_ESTABLISHED) &&
+ net->ct.skip_filter)
+ return NF_ACCEPT;
return ip6t_do_table(skb, ops->hooknum, in, out,
net->ipv6.ip6table_filter);
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -510,6 +510,13 @@ static struct ctl_table nf_ct_sysctl_tab
.extra2 = &log_invalid_proto_max,
},
{
+ .procname = "nf_conntrack_skip_filter",
+ .data = &init_net.ct.skip_filter,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "nf_conntrack_expect_max",
.data = &nf_ct_expect_max,
.maxlen = sizeof(int),
@@ -545,6 +552,7 @@ static int nf_conntrack_standalone_init_
table[2].data = &net->ct.htable_size;
table[3].data = &net->ct.sysctl_checksum;
table[4].data = &net->ct.sysctl_log_invalid;
+ table[5].data = &net->ct.skip_filter;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)

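With the hunks above, setting the new nf_conntrack_skip_filter sysctl lets
packets of already established connections bypass the IPv4 and IPv6 filter
tables. A hedged sketch of enabling it from C by writing the proc/sys
file, equivalent to "sysctl -w net.netfilter.nf_conntrack_skip_filter=1":

/* Sketch: enable the conntrack fast path added above. Needs the patched
 * kernel and root privileges. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/netfilter/nf_conntrack_skip_filter", "w");

        if (!f) {
                perror("nf_conntrack_skip_filter");
                return 1;
        }
        fprintf(f, "1\n");
        return fclose(f) ? 1 : 0;
}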
View File

@ -0,0 +1,791 @@
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -226,6 +226,33 @@ struct tc_sfq_xstats {
__s32 allot;
};
+/* ESFQ section */
+
+enum
+{
+ /* traditional */
+ TCA_SFQ_HASH_CLASSIC,
+ TCA_SFQ_HASH_DST,
+ TCA_SFQ_HASH_SRC,
+ TCA_SFQ_HASH_FWMARK,
+ /* conntrack */
+ TCA_SFQ_HASH_CTORIGDST,
+ TCA_SFQ_HASH_CTORIGSRC,
+ TCA_SFQ_HASH_CTREPLDST,
+ TCA_SFQ_HASH_CTREPLSRC,
+ TCA_SFQ_HASH_CTNATCHG,
+};
+
+struct tc_esfq_qopt
+{
+ unsigned quantum; /* Bytes per round allocated to flow */
+ int perturb_period; /* Period of hash perturbation */
+ __u32 limit; /* Maximal packets in queue */
+ unsigned divisor; /* Hash divisor */
+ unsigned flows; /* Maximal number of flows */
+ unsigned hash_kind; /* Hash function to use for flow identification */
+};
+
/* RED section */
enum {
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -148,6 +148,37 @@ config NET_SCH_SFQ
To compile this code as a module, choose M here: the
module will be called sch_sfq.
+config NET_SCH_ESFQ
+ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
+ ---help---
+ Say Y here if you want to use the Enhanced Stochastic Fairness
+ Queueing (ESFQ) packet scheduling algorithm for some of your network
+ devices or as a leaf discipline for a classful qdisc such as HTB or
+ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
+ references to the SFQ algorithm).
+
+ This is an enhanced SFQ version which allows you to control some
+ hardcoded values in the SFQ scheduler.
+
+ ESFQ also adds control of the hash function used to identify packet
+ flows. The original SFQ discipline hashes by connection; ESFQ adds
+ several other hashing methods, such as by src IP or by dst IP, which
+ can be more fair to users in some networking situations.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_esfq.
+
+config NET_SCH_ESFQ_NFCT
+ bool "Connection Tracking Hash Types"
+ depends on NET_SCH_ESFQ && NF_CONNTRACK
+ ---help---
+ Say Y here to enable support for hashing based on netfilter connection
+ tracking information. This is useful for a router that is also using
+ NAT to connect privately-addressed hosts to the Internet. If you want
+ to provide fair distribution of upstream bandwidth, ESFQ must use
+ connection tracking information, since all outgoing packets will share
+ the same source address.
+
config NET_SCH_TEQL
tristate "True Link Equalizer (TEQL)"
---help---
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_NET_SCH_INGRESS) += sch_ing
obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
--- /dev/null
+++ b/net/sched/sch_esfq.c
@@ -0,0 +1,702 @@
+/*
+ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes: Alexander Atanasov, <alex@ssi.bg>
+ * Added dynamic depth,limit,divisor,hash_kind options.
+ * Added dst and src hashes.
+ *
+ * Alexander Clouter, <alex@digriz.org.uk>
+ * Ported ESFQ to Linux 2.6.
+ *
+ * Corey Hickey, <bugfood-c@fatooh.org>
+ * Maintenance of the Linux 2.6 port.
+ * Added fwmark hash (thanks to Robert Kurjata).
+ * Added usage of jhash.
+ * Added conntrack support.
+ * Added ctnatchg hash (thanks to Ben Pfountz).
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <net/ip.h>
+#include <net/netlink.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <linux/jhash.h>
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+/* Stochastic Fairness Queuing algorithm.
+ For more comments look at sch_sfq.c.
+ The difference is that you can change limit, depth,
+ hash table size and choose alternate hash types.
+
+ classic: same as in sch_sfq.c
+ dst: destination IP address
+ src: source IP address
+ fwmark: netfilter mark value
+ ctorigdst: original destination IP address
+ ctorigsrc: original source IP address
+ ctrepldst: reply destination IP address
+ ctreplsrc: reply source IP
+
+*/
+
+#define ESFQ_HEAD 0
+#define ESFQ_TAIL 1
+
+/* This type should contain at least SFQ_DEPTH*2 values */
+typedef unsigned int esfq_index;
+
+struct esfq_head
+{
+ esfq_index next;
+ esfq_index prev;
+};
+
+struct esfq_sched_data
+{
+/* Parameters */
+ int perturb_period;
+ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
+ int limit;
+ unsigned depth;
+ unsigned hash_divisor;
+ unsigned hash_kind;
+/* Variables */
+ struct timer_list perturb_timer;
+ int perturbation;
+ esfq_index tail; /* Index of current slot in round */
+ esfq_index max_depth; /* Maximal depth */
+
+ esfq_index *ht; /* Hash table */
+ esfq_index *next; /* Active slots link */
+ short *allot; /* Current allotment per slot */
+ unsigned short *hash; /* Hash value indexed by slots */
+ struct sk_buff_head *qs; /* Slot queue */
+ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
+};
+
+/* This contains the info we will hash. */
+struct esfq_packet_info
+{
+ u32 proto; /* protocol or port */
+ u32 src; /* source from packet header */
+ u32 dst; /* destination from packet header */
+ u32 ctorigsrc; /* original source from conntrack */
+ u32 ctorigdst; /* original destination from conntrack */
+ u32 ctreplsrc; /* reply source from conntrack */
+ u32 ctrepldst; /* reply destination from conntrack */
+ u32 mark; /* netfilter mark (fwmark) */
+};
+
+static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
+{
+ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
+{
+ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
+{
+ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
+}
+
+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
+{
+ struct esfq_packet_info info;
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+#endif
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ {
+ struct iphdr *iph = ip_hdr(skb);
+ info.dst = iph->daddr;
+ info.src = iph->saddr;
+ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
+ (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP ||
+ iph->protocol == IPPROTO_SCTP ||
+ iph->protocol == IPPROTO_DCCP ||
+ iph->protocol == IPPROTO_ESP))
+ info.proto = *(((u32*)iph) + iph->ihl);
+ else
+ info.proto = iph->protocol;
+ break;
+ }
+ case __constant_htons(ETH_P_IPV6):
+ {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ /* Hash ipv6 addresses into a u32. This isn't ideal,
+ * but the code is simple. */
+ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
+ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
+ if (iph->nexthdr == IPPROTO_TCP ||
+ iph->nexthdr == IPPROTO_UDP ||
+ iph->nexthdr == IPPROTO_SCTP ||
+ iph->nexthdr == IPPROTO_DCCP ||
+ iph->nexthdr == IPPROTO_ESP)
+ info.proto = *(u32*)&iph[1];
+ else
+ info.proto = iph->nexthdr;
+ break;
+ }
+ default:
+ info.dst = (u32)(unsigned long)skb_dst(skb);
+ info.src = (u32)(unsigned long)skb->sk;
+ info.proto = skb->protocol;
+ }
+
+ info.mark = skb->mark;
+
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ /* defaults if there is no conntrack info */
+ info.ctorigsrc = info.src;
+ info.ctorigdst = info.dst;
+ info.ctreplsrc = info.dst;
+ info.ctrepldst = info.src;
+ /* collect conntrack info */
+ if (ct && ct != &nf_conntrack_untracked) {
+ if (skb->protocol == __constant_htons(ETH_P_IP)) {
+ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
+ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
+ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
+ }
+ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
+ /* Again, hash ipv6 addresses into a single u32. */
+ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
+ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
+ }
+
+ }
+#endif
+
+ switch(q->hash_kind) {
+ case TCA_SFQ_HASH_CLASSIC:
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+ case TCA_SFQ_HASH_DST:
+ return esfq_jhash_1word(q, info.dst);
+ case TCA_SFQ_HASH_SRC:
+ return esfq_jhash_1word(q, info.src);
+ case TCA_SFQ_HASH_FWMARK:
+ return esfq_jhash_1word(q, info.mark);
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ case TCA_SFQ_HASH_CTORIGDST:
+ return esfq_jhash_1word(q, info.ctorigdst);
+ case TCA_SFQ_HASH_CTORIGSRC:
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ case TCA_SFQ_HASH_CTREPLDST:
+ return esfq_jhash_1word(q, info.ctrepldst);
+ case TCA_SFQ_HASH_CTREPLSRC:
+ return esfq_jhash_1word(q, info.ctreplsrc);
+ case TCA_SFQ_HASH_CTNATCHG:
+ {
+ if (info.ctorigdst == info.ctreplsrc)
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ return esfq_jhash_1word(q, info.ctreplsrc);
+ }
+#endif
+ default:
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
+ }
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+}
+
+static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d = q->qs[x].qlen + q->depth;
+
+ p = d;
+ n = q->dep[d].next;
+ q->dep[x].next = n;
+ q->dep[x].prev = p;
+ q->dep[p].next = q->dep[n].prev = x;
+}
+
+static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+
+ if (n == p && q->max_depth == q->qs[x].qlen + 1)
+ q->max_depth--;
+
+ esfq_link(q, x);
+}
+
+static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+ d = q->qs[x].qlen;
+ if (q->max_depth < d)
+ q->max_depth = d;
+
+ esfq_link(q, x);
+}
+
+static unsigned int esfq_drop(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_index d = q->max_depth;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ /* Queue is full! Find the longest slot and
+ drop a packet from it */
+
+ if (d > 1) {
+ esfq_index x = q->dep[d+q->depth].next;
+ skb = q->qs[x].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[x]);
+ kfree_skb(skb);
+ esfq_dec(q, x);
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ if (d == 1) {
+ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
+ d = q->next[q->tail];
+ q->next[q->tail] = q->next[d];
+ q->allot[q->next[d]] += q->quantum;
+ skb = q->qs[d].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[d]);
+ kfree_skb(skb);
+ esfq_dec(q, d);
+ sch->q.qlen--;
+ q->ht[q->hash[d]] = q->depth;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ return 0;
+}
+
+static void esfq_q_enqueue(struct sk_buff *skb, struct esfq_sched_data *q, unsigned int end)
+{
+ unsigned hash = esfq_hash(q, skb);
+ unsigned depth = q->depth;
+ esfq_index x;
+
+ x = q->ht[hash];
+ if (x == depth) {
+ q->ht[hash] = x = q->dep[depth].next;
+ q->hash[x] = hash;
+ }
+
+ if (end == ESFQ_TAIL)
+ __skb_queue_tail(&q->qs[x], skb);
+ else
+ __skb_queue_head(&q->qs[x], skb);
+
+ esfq_inc(q, x);
+ if (q->qs[x].qlen == 1) { /* The flow is new */
+ if (q->tail == depth) { /* It is the first flow */
+ q->tail = x;
+ q->next[x] = x;
+ q->allot[x] = q->quantum;
+ } else {
+ q->next[x] = q->next[q->tail];
+ q->next[q->tail] = x;
+ q->tail = x;
+ }
+ }
+}
+
+static int esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_enqueue(skb, q, ESFQ_TAIL);
+ sch->qstats.backlog += skb->len;
+ if (++sch->q.qlen < q->limit-1) {
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
+ return 0;
+ }
+
+ sch->qstats.drops++;
+ esfq_drop(sch);
+ return NET_XMIT_CN;
+}
+
+static struct sk_buff *esfq_peek(struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_index a;
+
+ /* No active slots */
+ if (q->tail == q->depth)
+ return NULL;
+
+ a = q->next[q->tail];
+ return skb_peek(&q->qs[a]);
+}
+
+static struct sk_buff *esfq_q_dequeue(struct esfq_sched_data *q)
+{
+ struct sk_buff *skb;
+ unsigned depth = q->depth;
+ esfq_index a, old_a;
+
+ /* No active slots */
+ if (q->tail == depth)
+ return NULL;
+
+ a = old_a = q->next[q->tail];
+
+ /* Grab packet */
+ skb = __skb_dequeue(&q->qs[a]);
+ esfq_dec(q, a);
+
+ /* Is the slot empty? */
+ if (q->qs[a].qlen == 0) {
+ q->ht[q->hash[a]] = depth;
+ a = q->next[a];
+ if (a == old_a) {
+ q->tail = depth;
+ return skb;
+ }
+ q->next[q->tail] = a;
+ q->allot[a] += q->quantum;
+ } else if ((q->allot[a] -= skb->len) <= 0) {
+ q->tail = a;
+ a = q->next[a];
+ q->allot[a] += q->quantum;
+ }
+
+ return skb;
+}
+
+static struct sk_buff *esfq_dequeue(struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = esfq_q_dequeue(q);
+ if (skb == NULL)
+ return NULL;
+ sch->q.qlen--;
+ sch->qstats.backlog -= skb->len;
+ return skb;
+}
+
+static void esfq_q_destroy(struct esfq_sched_data *q)
+{
+ del_timer(&q->perturb_timer);
+ if(q->ht)
+ kfree(q->ht);
+ if(q->dep)
+ kfree(q->dep);
+ if(q->next)
+ kfree(q->next);
+ if(q->allot)
+ kfree(q->allot);
+ if(q->hash)
+ kfree(q->hash);
+ if(q->qs)
+ kfree(q->qs);
+}
+
+static void esfq_destroy(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_destroy(q);
+}
+
+
+static void esfq_reset(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = esfq_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static void esfq_perturbation(unsigned long arg)
+{
+ struct Qdisc *sch = (struct Qdisc*)arg;
+ struct esfq_sched_data *q = qdisc_priv(sch);
+
+ q->perturbation = prandom_u32()&0x1F;
+
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+}
+
+static unsigned int esfq_check_hash(unsigned int kind)
+{
+ switch (kind) {
+ case TCA_SFQ_HASH_CTORIGDST:
+ case TCA_SFQ_HASH_CTORIGSRC:
+ case TCA_SFQ_HASH_CTREPLDST:
+ case TCA_SFQ_HASH_CTREPLSRC:
+ case TCA_SFQ_HASH_CTNATCHG:
+#ifndef CONFIG_NET_SCH_ESFQ_NFCT
+ {
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Conntrack hash types disabled in kernel config. Falling back to classic.\n");
+ return TCA_SFQ_HASH_CLASSIC;
+ }
+#endif
+ case TCA_SFQ_HASH_CLASSIC:
+ case TCA_SFQ_HASH_DST:
+ case TCA_SFQ_HASH_SRC:
+ case TCA_SFQ_HASH_FWMARK:
+ return kind;
+ default:
+ {
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash type. Falling back to classic.\n");
+ return TCA_SFQ_HASH_CLASSIC;
+ }
+ }
+}
+
+static int esfq_q_init(struct esfq_sched_data *q, struct nlattr *opt)
+{
+ struct tc_esfq_qopt *ctl = nla_data(opt);
+ esfq_index p = ~0U/2;
+ int i;
+
+ if (opt && opt->nla_len < nla_attr_size(sizeof(*ctl)))
+ return -EINVAL;
+
+ q->perturbation = 0;
+ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
+ q->max_depth = 0;
+ if (opt == NULL) {
+ q->perturb_period = 0;
+ q->hash_divisor = 1024;
+ q->tail = q->limit = q->depth = 128;
+
+ } else {
+ struct tc_esfq_qopt *ctl = nla_data(opt);
+ if (ctl->quantum)
+ q->quantum = ctl->quantum;
+ q->perturb_period = ctl->perturb_period*HZ;
+ q->hash_divisor = ctl->divisor ? : 1024;
+ q->tail = q->limit = q->depth = ctl->flows ? : 128;
+
+ if ( q->depth > p - 1 )
+ return -EINVAL;
+
+ if (ctl->limit)
+ q->limit = min_t(u32, ctl->limit, q->depth);
+
+ if (ctl->hash_kind) {
+ q->hash_kind = esfq_check_hash(ctl->hash_kind);
+ }
+ }
+
+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->ht)
+ goto err_case;
+ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
+ if (!q->dep)
+ goto err_case;
+ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->next)
+ goto err_case;
+ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
+ if (!q->allot)
+ goto err_case;
+ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
+ if (!q->hash)
+ goto err_case;
+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!q->qs)
+ goto err_case;
+
+ for (i=0; i< q->hash_divisor; i++)
+ q->ht[i] = q->depth;
+ for (i=0; i<q->depth; i++) {
+ skb_queue_head_init(&q->qs[i]);
+ q->dep[i+q->depth].next = i+q->depth;
+ q->dep[i+q->depth].prev = i+q->depth;
+ }
+
+ for (i=0; i<q->depth; i++)
+ esfq_link(q, i);
+ return 0;
+err_case:
+ esfq_q_destroy(q);
+ return -ENOBUFS;
+}
+
+static int esfq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ q->quantum = psched_mtu(qdisc_dev(sch)); /* default */
+ if ((err = esfq_q_init(q, opt)))
+ return err;
+
+ init_timer(&q->perturb_timer);
+ q->perturb_timer.data = (unsigned long)sch;
+ q->perturb_timer.function = esfq_perturbation;
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+
+ return 0;
+}
+
+static int esfq_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct esfq_sched_data new;
+ struct sk_buff *skb;
+ int err;
+
+ /* set up new queue */
+ memset(&new, 0, sizeof(struct esfq_sched_data));
+ new.quantum = psched_mtu(qdisc_dev(sch)); /* default */
+ if ((err = esfq_q_init(&new, opt)))
+ return err;
+
+ /* copy all packets from the old queue to the new queue */
+ sch_tree_lock(sch);
+ while ((skb = esfq_q_dequeue(q)) != NULL)
+ esfq_q_enqueue(skb, &new, ESFQ_TAIL);
+
+ /* clean up the old queue */
+ esfq_q_destroy(q);
+
+ /* copy elements of the new queue into the old queue */
+ q->perturb_period = new.perturb_period;
+ q->quantum = new.quantum;
+ q->limit = new.limit;
+ q->depth = new.depth;
+ q->hash_divisor = new.hash_divisor;
+ q->hash_kind = new.hash_kind;
+ q->tail = new.tail;
+ q->max_depth = new.max_depth;
+ q->ht = new.ht;
+ q->dep = new.dep;
+ q->next = new.next;
+ q->allot = new.allot;
+ q->hash = new.hash;
+ q->qs = new.qs;
+
+ /* finish up */
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ } else {
+ q->perturbation = 0;
+ }
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_esfq_qopt opt;
+
+ opt.quantum = q->quantum;
+ opt.perturb_period = q->perturb_period/HZ;
+
+ opt.limit = q->limit;
+ opt.divisor = q->hash_divisor;
+ opt.flows = q->depth;
+ opt.hash_kind = q->hash_kind;
+
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct Qdisc_ops esfq_qdisc_ops =
+{
+ .next = NULL,
+ .cl_ops = NULL,
+ .id = "esfq",
+ .priv_size = sizeof(struct esfq_sched_data),
+ .enqueue = esfq_enqueue,
+ .dequeue = esfq_dequeue,
+ .peek = esfq_peek,
+ .drop = esfq_drop,
+ .init = esfq_init,
+ .reset = esfq_reset,
+ .destroy = esfq_destroy,
+ .change = esfq_change,
+ .dump = esfq_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init esfq_module_init(void)
+{
+ return register_qdisc(&esfq_qdisc_ops);
+}
+static void __exit esfq_module_exit(void)
+{
+ unregister_qdisc(&esfq_qdisc_ops);
+}
+module_init(esfq_module_init)
+module_exit(esfq_module_exit)
+MODULE_LICENSE("GPL");

View File

@ -0,0 +1,179 @@
--- /dev/null
+++ b/net/sched/act_connmark.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2011 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_cls.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/act_api.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+
+#define TCA_ACT_CONNMARK 20
+
+#define CONNMARK_TAB_MASK 3
+
+static struct tcf_hashinfo connmark_hash_info;
+
+static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct nf_conn *c;
+ enum ip_conntrack_info ctinfo;
+ int proto;
+ int r;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->len < sizeof(struct iphdr))
+ goto out;
+ proto = PF_INET;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (skb->len < sizeof(struct ipv6hdr))
+ goto out;
+ proto = PF_INET6;
+ } else
+ goto out;
+
+ r = nf_conntrack_in(dev_net(skb->dev), proto, NF_INET_PRE_ROUTING, skb);
+ if (r != NF_ACCEPT)
+ goto out;
+
+ c = nf_ct_get(skb, &ctinfo);
+ if (!c)
+ goto out;
+
+ skb->mark = c->mark;
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = NULL;
+
+out:
+ return TC_ACT_PIPE;
+}
+
+static int tcf_connmark_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
+{
+ struct tcf_common *pc;
+ int ret = 0;
+
+ pc = tcf_hash_check(0, a, bind);
+ if (!pc) {
+ pc = tcf_hash_create(0, est, a, sizeof(*pc), bind);
+ if (IS_ERR(pc))
+ return PTR_ERR(pc);
+
+ tcf_hash_insert(pc, &connmark_hash_info);
+ ret = ACT_P_CREATED;
+ } else {
+ if (!ovr) {
+ tcf_hash_release(pc, bind, &connmark_hash_info);
+ return -EEXIST;
+ }
+ }
+
+ return ret;
+}
+
+static inline int tcf_connmark_cleanup(struct tc_action *a, int bind)
+{
+ if (a->priv)
+ return tcf_hash_release(a->priv, bind, &connmark_hash_info);
+ return 0;
+}
+
+static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ return skb->len;
+}
+
+static struct tc_action_ops act_connmark_ops = {
+ .kind = "connmark",
+ .hinfo = &connmark_hash_info,
+ .type = TCA_ACT_CONNMARK,
+ .owner = THIS_MODULE,
+ .act = tcf_connmark,
+ .dump = tcf_connmark_dump,
+ .cleanup = tcf_connmark_cleanup,
+ .init = tcf_connmark_init,
+};
+
+MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
+MODULE_DESCRIPTION("Connection tracking mark restoring");
+MODULE_LICENSE("GPL");
+
+static int __init connmark_init_module(void)
+{
+ int ret;
+
+ ret = tcf_hashinfo_init(&connmark_hash_info, CONNMARK_TAB_MASK);
+ if (ret)
+ return ret;
+
+ return tcf_register_action(&act_connmark_ops);
+}
+
+static void __exit connmark_cleanup_module(void)
+{
+ tcf_unregister_action(&act_connmark_ops);
+}
+
+module_init(connmark_init_module);
+module_exit(connmark_cleanup_module);
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -717,6 +717,19 @@ config NET_ACT_CSUM
To compile this code as a module, choose M here: the
module will be called act_csum.
+config NET_ACT_CONNMARK
+ tristate "Connection Tracking Marking"
+ depends on NET_CLS_ACT
+ depends on NF_CONNTRACK
+ depends on NF_CONNTRACK_MARK
+ ---help---
+ Say Y here to restore the connmark from a scheduler action.
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_connmark.
+
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit
obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
+obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o

View File

@ -0,0 +1,134 @@
This patch allows the user to specify desired packet types (outgoing,
broadcast, unicast, etc.) on packet sockets via setsockopt.
This can reduce the load in situations where only a limited number
of packet types are necessary.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -31,6 +31,8 @@ struct sockaddr_ll {
#define PACKET_KERNEL 7 /* To kernel space */
/* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */
#define PACKET_FASTROUTE 6 /* Fastrouted frame */
+#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */
+
/* Packet socket options */
@@ -54,6 +56,7 @@ struct sockaddr_ll {
#define PACKET_FANOUT 18
#define PACKET_TX_HAS_OFF 19
#define PACKET_QDISC_BYPASS 20
+#define PACKET_RECV_TYPE 21
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1530,6 +1530,7 @@ static int packet_rcv_spkt(struct sk_buf
{
struct sock *sk;
struct sockaddr_pkt *spkt;
+ struct packet_sock *po;
/*
* When we registered the protocol we saved the socket in the data
@@ -1537,6 +1538,7 @@ static int packet_rcv_spkt(struct sk_buf
*/
sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
/*
* Yank back the headers [hope the device set this
@@ -1549,7 +1551,7 @@ static int packet_rcv_spkt(struct sk_buf
* so that this procedure is noop.
*/
- if (skb->pkt_type == PACKET_LOOPBACK)
+ if (!(po->pkt_type & (1 << skb->pkt_type)))
goto out;
if (!net_eq(dev_net(dev), sock_net(sk)))
@@ -1756,12 +1758,12 @@ static int packet_rcv(struct sk_buff *sk
int skb_len = skb->len;
unsigned int snaplen, res;
- if (skb->pkt_type == PACKET_LOOPBACK)
- goto drop;
-
sk = pt->af_packet_priv;
po = pkt_sk(sk);
+ if (!(po->pkt_type & (1 << skb->pkt_type)))
+ goto drop;
+
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
@@ -1881,12 +1883,12 @@ static int tpacket_rcv(struct sk_buff *s
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
- if (skb->pkt_type == PACKET_LOOPBACK)
- goto drop;
-
sk = pt->af_packet_priv;
po = pkt_sk(sk);
+ if (!(po->pkt_type & (1 << skb->pkt_type)))
+ goto drop;
+
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
@@ -2823,6 +2825,7 @@ static int packet_create(struct net *net
spin_lock_init(&po->bind_lock);
mutex_init(&po->pg_vec_lock);
po->prot_hook.func = packet_rcv;
+ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK);
if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;
@@ -3403,6 +3406,16 @@ packet_setsockopt(struct socket *sock, i
po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
return 0;
}
+ case PACKET_RECV_TYPE:
+ {
+ unsigned int val;
+ if (optlen != sizeof(val))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+ po->pkt_type = val & ~BIT(PACKET_LOOPBACK);
+ return 0;
+ }
default:
return -ENOPROTOOPT;
}
@@ -3454,6 +3467,13 @@ static int packet_getsockopt(struct sock
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
break;
+ case PACKET_RECV_TYPE:
+ if (len > sizeof(unsigned int))
+ len = sizeof(unsigned int);
+ val = po->pkt_type;
+
+ data = &val;
+ break;
case PACKET_VERSION:
val = po->tp_version;
break;
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -117,6 +117,7 @@ struct packet_sock {
struct net_device __rcu *cached_dev;
int (*xmit)(struct sk_buff *skb);
struct packet_type prot_hook ____cacheline_aligned_in_smp;
+ unsigned int pkt_type;
};
static struct packet_sock *pkt_sk(struct sock *sk)

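A minimal userspace sketch of the PACKET_RECV_TYPE option this patch
introduces, limiting a packet socket to host-directed and broadcast
frames. The option number is taken from the if_packet.h change above,
since stock libc headers do not carry it; opening the socket needs
CAP_NET_RAW.

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifndef PACKET_RECV_TYPE
#define PACKET_RECV_TYPE 21     /* value added to if_packet.h above */
#endif

int main(void)
{
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        /* Deliver only frames addressed to this host or broadcast. */
        unsigned int mask = (1 << PACKET_HOST) | (1 << PACKET_BROADCAST);

        if (fd < 0 || setsockopt(fd, SOL_PACKET, PACKET_RECV_TYPE,
                                 &mask, sizeof(mask)) < 0) {
                perror("PACKET_RECV_TYPE");
                return 1;
        }
        /* ... recvfrom(fd, ...) as usual ... */
        return 0;
}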
View File

@ -0,0 +1,15 @@
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -97,7 +97,11 @@ int br_handle_frame_finish(struct sk_buf
dst = NULL;
- if (is_broadcast_ether_addr(dest)) {
+ if (skb->protocol == htons(ETH_P_PAE)) {
+ skb2 = skb;
+ /* Do not forward 802.1x/EAP frames */
+
+ } else if (is_broadcast_ether_addr(dest)) {
skb2 = skb;
unicast = false;
} else if (is_multicast_ether_addr(dest)) {

View File

@ -0,0 +1,11 @@
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -84,7 +84,7 @@ int br_handle_frame_finish(struct sk_buf
br_multicast_rcv(br, p, skb, vid))
goto drop;
- if (p->state == BR_STATE_LEARNING)
+ if ((p->state == BR_STATE_LEARNING) && skb->protocol != htons(ETH_P_PAE))
goto drop;
BR_INPUT_SKB_CB(skb)->brdev = br->dev;

View File

@ -0,0 +1,102 @@
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -174,6 +174,7 @@ struct net_bridge_port
#define BR_ADMIN_COST 0x00000010
#define BR_LEARNING 0x00000020
#define BR_FLOOD 0x00000040
+#define BR_ISOLATE_MODE 0x00000080
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct bridge_mcast_query ip4_query;
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -161,6 +161,22 @@ BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLO
BRPORT_ATTR_FLAG(learning, BR_LEARNING);
BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
+static ssize_t show_isolate_mode(struct net_bridge_port *p, char *buf)
+{
+ int isolate_mode = (p->flags & BR_ISOLATE_MODE) ? 1 : 0;
+ return sprintf(buf, "%d\n", isolate_mode);
+}
+static ssize_t store_isolate_mode(struct net_bridge_port *p, unsigned long v)
+{
+ if (v)
+ p->flags |= BR_ISOLATE_MODE;
+ else
+ p->flags &= ~BR_ISOLATE_MODE;
+ return 0;
+}
+static BRPORT_ATTR(isolate_mode, S_IRUGO | S_IWUSR,
+ show_isolate_mode, store_isolate_mode);
+
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
{
@@ -203,6 +219,7 @@ static const struct brport_attribute *br
&brport_attr_multicast_router,
&brport_attr_multicast_fast_leave,
#endif
+ &brport_attr_isolate_mode,
NULL
};
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -120,8 +120,8 @@ int br_handle_frame_finish(struct sk_buf
unicast = false;
br->dev->stats.multicast++;
- } else if ((dst = __br_fdb_get(br, dest, vid)) &&
- dst->is_local) {
+ } else if ((p->flags & BR_ISOLATE_MODE) ||
+ ((dst = __br_fdb_get(br, dest, vid)) && dst->is_local)) {
skb2 = skb;
/* Do not forward the packet since it's local. */
skb = NULL;
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -119,7 +119,7 @@ void br_deliver(const struct net_bridge_
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
- if (should_deliver(to, skb)) {
+ if (should_deliver(to, skb) && !(to->flags & BR_ISOLATE_MODE)) {
if (skb0)
deliver_clone(to, skb, __br_forward);
else
@@ -175,7 +175,7 @@ static void br_flood(struct net_bridge *
struct sk_buff *skb0,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb),
- bool unicast)
+ bool unicast, bool forward)
{
struct net_bridge_port *p;
struct net_bridge_port *prev;
@@ -183,6 +183,8 @@ static void br_flood(struct net_bridge *
prev = NULL;
list_for_each_entry_rcu(p, &br->port_list, list) {
+ if (forward && (p->flags & BR_ISOLATE_MODE))
+ continue;
/* Do not flood unicast traffic to ports that turn it off */
if (unicast && !(p->flags & BR_FLOOD))
continue;
@@ -209,14 +211,14 @@ out:
/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
- br_flood(br, skb, NULL, __br_deliver, unicast);
+ br_flood(br, skb, NULL, __br_deliver, unicast, false);
}
/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
struct sk_buff *skb2, bool unicast)
{
- br_flood(br, skb, skb2, __br_forward, unicast);
+ br_flood(br, skb, skb2, __br_forward, unicast, true);
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING

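The new flag is toggled per bridge port through the brport sysfs attribute
added above. A hedged sketch of switching it on from C; the port name
wlan0 is only a placeholder:

/* Sketch: put one bridge port into isolate mode via sysfs. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/class/net/wlan0/brport/isolate_mode";
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, "1", 1) < 0)
                perror(path);
        if (fd >= 0)
                close(fd);
        return 0;
}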
View File

@ -0,0 +1,110 @@
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -88,6 +88,12 @@ int ipv6_rcv_saddr_equal(const struct so
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
+extern int (*ipv6_dev_get_saddr_hook)(struct net *net,
+ const struct net_device *dev,
+ const struct in6_addr *daddr,
+ unsigned int prefs,
+ struct in6_addr *saddr);
+
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
{
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -6,7 +6,6 @@ config BRIDGE
tristate "802.1d Ethernet Bridging"
select LLC
select STP
- depends on IPV6 || IPV6=n
---help---
If you say Y here, then your Linux box will be able to act as an
Ethernet bridge, which means that the different Ethernet segments it
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -45,6 +45,7 @@ obj-y += addrconf_core.o exthdrs_core.o
obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
+obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_stubs.o
ifneq ($(CONFIG_IPV6),)
obj-$(CONFIG_NET_UDP_TUNNEL) += ip6_udp_tunnel.o
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1317,7 +1317,7 @@ out:
return ret;
}
-int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
+static int __ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
const struct in6_addr *daddr, unsigned int prefs,
struct in6_addr *saddr)
{
@@ -1442,7 +1442,6 @@ try_nextdev:
in6_ifa_put(hiscore->ifa);
return 0;
}
-EXPORT_SYMBOL(ipv6_dev_get_saddr);
int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
u32 banned_flags)
@@ -5433,6 +5432,9 @@ int __init addrconf_init(void)
ipv6_addr_label_rtnl_register();
+ BUG_ON(ipv6_dev_get_saddr_hook != NULL);
+ rcu_assign_pointer(ipv6_dev_get_saddr_hook, __ipv6_dev_get_saddr);
+
return 0;
errout:
rtnl_af_unregister(&inet6_ops);
@@ -5452,6 +5454,9 @@ void addrconf_cleanup(void)
struct net_device *dev;
int i;
+ rcu_assign_pointer(ipv6_dev_get_saddr_hook, NULL);
+ synchronize_rcu();
+
unregister_netdevice_notifier(&ipv6_dev_notf);
unregister_pernet_subsys(&addrconf_ops);
ipv6_addr_label_cleanup();
--- /dev/null
+++ b/net/ipv6/inet6_stubs.c
@@ -0,0 +1,33 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/export.h>
+#include <net/ipv6.h>
+
+int (*ipv6_dev_get_saddr_hook)(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int prefs,
+ struct in6_addr *saddr);
+
+EXPORT_SYMBOL(ipv6_dev_get_saddr_hook);
+
+int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
+ const struct in6_addr *daddr, unsigned int prefs,
+ struct in6_addr *saddr)
+{
+ int ret = -EADDRNOTAVAIL;
+ typeof(ipv6_dev_get_saddr_hook) dev_get_saddr;
+
+ rcu_read_lock();
+ dev_get_saddr = rcu_dereference(ipv6_dev_get_saddr_hook);
+
+ if (dev_get_saddr)
+ ret = dev_get_saddr(net, dst_dev, daddr, prefs, saddr);
+
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL(ipv6_dev_get_saddr);
+

View File

@ -0,0 +1,155 @@
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -57,7 +57,7 @@ int br_dev_queue_push_xmit(struct sk_buf
int br_forward_finish(struct sk_buff *skb)
{
- return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+ return BR_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
}
@@ -80,7 +80,7 @@ static void __br_deliver(const struct ne
return;
}
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
br_forward_finish);
}
@@ -101,7 +101,7 @@ static void __br_forward(const struct ne
skb->dev = to->dev;
skb_forward_csum(skb);
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+ BR_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
br_forward_finish);
}
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -53,7 +53,7 @@ static int br_pass_frame_up(struct sk_bu
if (!skb)
return NET_RX_DROP;
- return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
+ return BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
netif_receive_skb);
}
@@ -214,7 +214,7 @@ rx_handler_result_t br_handle_frame(stru
}
/* Deliver packet to local host only */
- if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
+ if (BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
NULL, br_handle_local_finish)) {
return RX_HANDLER_CONSUMED; /* consumed by filter */
} else {
@@ -229,7 +229,7 @@ forward:
if (ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
- if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ if (BR_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_local_finish))
break;
@@ -251,7 +251,7 @@ forward:
if (ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ BR_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish);
break;
default:
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -802,7 +802,7 @@ static void __br_multicast_send_query(st
if (port) {
__skb_push(skb, sizeof(struct ethhdr));
skb->dev = port->dev;
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
} else
netif_rx(skb);
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -73,6 +73,15 @@ static int brnf_pass_vlan_indev __read_m
#define IS_ARP(skb) \
(!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+int brnf_call_ebtables __read_mostly = 0;
+EXPORT_SYMBOL_GPL(brnf_call_ebtables);
+
+bool br_netfilter_run_hooks(void)
+{
+ return brnf_call_iptables | brnf_call_ip6tables | brnf_call_arptables |
+ brnf_call_ebtables;
+}
+
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
if (vlan_tx_tag_present(skb))
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -724,15 +724,29 @@ static inline u16 br_get_pvid(const stru
/* br_netfilter.c */
#ifdef CONFIG_BRIDGE_NETFILTER
+extern int brnf_call_ebtables;
int br_netfilter_init(void);
void br_netfilter_fini(void);
void br_netfilter_rtable_init(struct net_bridge *);
+bool br_netfilter_run_hooks(void);
#else
#define br_netfilter_init() (0)
#define br_netfilter_fini() do { } while (0)
#define br_netfilter_rtable_init(x)
+#define br_netfilter_run_hooks() false
#endif
+static inline int
+BR_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb,
+ struct net_device *in, struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ if (!br_netfilter_run_hooks())
+ return okfn(skb);
+
+ return NF_HOOK(pf, hook, skb, in, out, okfn);
+}
+
/* br_stp.c */
void br_log_state(const struct net_bridge_port *p);
struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no);
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -54,7 +54,7 @@ static void br_send_bpdu(struct net_brid
skb_reset_mac_header(skb);
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
}
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2405,11 +2405,13 @@ static int __init ebtables_init(void)
}
printk(KERN_INFO "Ebtables v2.0 registered\n");
+ brnf_call_ebtables = 1;
return 0;
}
static void __exit ebtables_fini(void)
{
+ brnf_call_ebtables = 0;
nf_unregister_sockopt(&ebt_sockopts);
xt_unregister_target(&ebt_standard_target);
printk(KERN_INFO "Ebtables v2.0 unregistered\n");
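BR_HOOK() above calls okfn() directly whenever none of the brnf_call_* flags is set, so bridged frames skip the netfilter hook traversal entirely until ebtables, iptables, ip6tables or arptables bridge filtering is actually in use; the ebtables init/exit change shows the intended registration pattern. A sketch of the same pattern for an out-of-tree consumer follows, with hypothetical names, assuming only the brnf_call_ebtables symbol exported above (note it is a bare int, not a refcount, so it suits a single owner such as ebtables itself):

	#include <linux/module.h>

	extern int brnf_call_ebtables;	/* exported above via EXPORT_SYMBOL_GPL */

	static int __init my_br_filter_init(void)
	{
		brnf_call_ebtables = 1;	/* BR_HOOK() now falls through to NF_HOOK() */
		return 0;
	}

	static void __exit my_br_filter_exit(void)
	{
		brnf_call_ebtables = 0;	/* bridge path bypasses netfilter again */
	}

	module_init(my_br_filter_init);
	module_exit(my_br_filter_exit);
	MODULE_LICENSE("GPL");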

View File

@ -0,0 +1,375 @@
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -634,7 +634,8 @@ struct net_bridge_port_group *br_multica
struct net_bridge_port *port,
struct br_ip *group,
struct net_bridge_port_group __rcu *next,
- unsigned char state)
+ unsigned char state,
+ const unsigned char *src)
{
struct net_bridge_port_group *p;
@@ -649,12 +650,33 @@ struct net_bridge_port_group *br_multica
hlist_add_head(&p->mglist, &port->mglist);
setup_timer(&p->timer, br_multicast_port_group_expired,
(unsigned long)p);
+ if ((port->flags & BR_MULTICAST_TO_UCAST) && src) {
+ memcpy(p->eth_addr, src, ETH_ALEN);
+ p->unicast = true;
+ }
return p;
}
+static bool br_port_group_equal(struct net_bridge_port_group *p,
+ struct net_bridge_port *port,
+ const unsigned char *src)
+{
+ if (p->port != port)
+ return false;
+
+ if (!p->unicast)
+ return true;
+
+ if (!src)
+ return false;
+
+ return ether_addr_equal(src, p->eth_addr);
+}
+
static int br_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
- struct br_ip *group)
+ struct br_ip *group,
+ const unsigned char *src)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
@@ -681,13 +703,13 @@ static int br_multicast_add_group(struct
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (p->port == port)
+ if (br_port_group_equal(p, port, src))
goto found;
if ((unsigned long)p->port < (unsigned long)port)
break;
}
- p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
+ p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY, src);
if (unlikely(!p))
goto err;
rcu_assign_pointer(*pp, p);
@@ -706,7 +728,7 @@ err:
static int br_ip4_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
__be32 group,
- __u16 vid)
+ __u16 vid, const unsigned char *src)
{
struct br_ip br_group;
@@ -717,14 +739,14 @@ static int br_ip4_multicast_add_group(st
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
- return br_multicast_add_group(br, port, &br_group);
+ return br_multicast_add_group(br, port, &br_group, src);
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group,
- __u16 vid)
+ __u16 vid, const unsigned char *src)
{
struct br_ip br_group;
@@ -735,7 +757,7 @@ static int br_ip6_multicast_add_group(st
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
- return br_multicast_add_group(br, port, &br_group);
+ return br_multicast_add_group(br, port, &br_group, src);
}
#endif
@@ -950,6 +972,7 @@ static int br_ip4_multicast_igmp3_report
struct sk_buff *skb,
u16 vid)
{
+ const unsigned char *src = eth_hdr(skb)->h_source;
struct igmpv3_report *ih;
struct igmpv3_grec *grec;
int i;
@@ -993,7 +1016,7 @@ static int br_ip4_multicast_igmp3_report
continue;
}
- err = br_ip4_multicast_add_group(br, port, group, vid);
+ err = br_ip4_multicast_add_group(br, port, group, vid, src);
if (err)
break;
}
@@ -1007,6 +1030,7 @@ static int br_ip6_multicast_mld2_report(
struct sk_buff *skb,
u16 vid)
{
+ const unsigned char *src = eth_hdr(skb)->h_source;
struct icmp6hdr *icmp6h;
struct mld2_grec *grec;
int i;
@@ -1055,7 +1079,7 @@ static int br_ip6_multicast_mld2_report(
}
err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
- vid);
+ vid, src);
if (!err)
break;
}
@@ -1319,7 +1343,8 @@ static void br_multicast_leave_group(str
struct net_bridge_port *port,
struct br_ip *group,
struct bridge_mcast_querier *querier,
- struct bridge_mcast_query *query)
+ struct bridge_mcast_query *query,
+ const unsigned char *src)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
@@ -1369,7 +1394,7 @@ static void br_multicast_leave_group(str
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (p->port != port)
+ if (!br_port_group_equal(p, port, src))
continue;
rcu_assign_pointer(*pp, p->next);
@@ -1403,7 +1428,7 @@ static void br_multicast_leave_group(str
for (p = mlock_dereference(mp->ports, br);
p != NULL;
p = mlock_dereference(p->next, br)) {
- if (p->port != port)
+ if (!br_port_group_equal(p, port, src))
continue;
if (!hlist_unhashed(&p->mglist) &&
@@ -1421,8 +1446,8 @@ out:
static void br_ip4_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
- __be32 group,
- __u16 vid)
+ __be32 group, __u16 vid,
+ const unsigned char *src)
{
struct br_ip br_group;
struct bridge_mcast_query *query = port ? &port->ip4_query :
@@ -1435,14 +1460,15 @@ static void br_ip4_multicast_leave_group
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
- br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
+ br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query,
+ src);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group,
- __u16 vid)
+ __u16 vid, const unsigned char *src)
{
struct br_ip br_group;
struct bridge_mcast_query *query = port ? &port->ip6_query :
@@ -1456,7 +1482,8 @@ static void br_ip6_multicast_leave_group
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
- br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
+ br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query,
+ src);
}
#endif
@@ -1465,6 +1492,7 @@ static int br_multicast_ipv4_rcv(struct
struct sk_buff *skb,
u16 vid)
{
+ const unsigned char *src = eth_hdr(skb)->h_source;
struct sk_buff *skb2 = skb;
const struct iphdr *iph;
struct igmphdr *ih;
@@ -1538,7 +1566,7 @@ static int br_multicast_ipv4_rcv(struct
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
- err = br_ip4_multicast_add_group(br, port, ih->group, vid);
+ err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
@@ -1547,7 +1575,7 @@ static int br_multicast_ipv4_rcv(struct
err = br_ip4_multicast_query(br, port, skb2, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
- br_ip4_multicast_leave_group(br, port, ih->group, vid);
+ br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
break;
}
@@ -1565,6 +1593,7 @@ static int br_multicast_ipv6_rcv(struct
struct sk_buff *skb,
u16 vid)
{
+ const unsigned char *src = eth_hdr(skb)->h_source;
struct sk_buff *skb2;
const struct ipv6hdr *ip6h;
u8 icmp6_type;
@@ -1674,7 +1703,8 @@ static int br_multicast_ipv6_rcv(struct
}
mld = (struct mld_msg *)skb_transport_header(skb2);
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
- err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
+ err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
+ src);
break;
}
case ICMPV6_MLD2_REPORT:
@@ -1691,7 +1721,7 @@ static int br_multicast_ipv6_rcv(struct
goto out;
}
mld = (struct mld_msg *)skb_transport_header(skb2);
- br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
+ br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
}
}
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -116,6 +116,9 @@ struct net_bridge_port_group {
struct timer_list timer;
struct br_ip addr;
unsigned char state;
+
+ unsigned char eth_addr[ETH_ALEN];
+ bool unicast;
};
struct net_bridge_mdb_entry
@@ -175,6 +178,7 @@ struct net_bridge_port
#define BR_LEARNING 0x00000020
#define BR_FLOOD 0x00000040
#define BR_ISOLATE_MODE 0x00000080
+#define BR_MULTICAST_TO_UCAST 0x00000100
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct bridge_mcast_query ip4_query;
@@ -468,7 +472,8 @@ void br_multicast_free_pg(struct rcu_hea
struct net_bridge_port_group *
br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
struct net_bridge_port_group __rcu *next,
- unsigned char state);
+ unsigned char state,
+ const unsigned char *src);
void br_mdb_init(void);
void br_mdb_uninit(void);
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -342,7 +342,7 @@ static int br_mdb_add_group(struct net_b
break;
}
- p = br_multicast_new_port_group(port, group, *pp, state);
+ p = br_multicast_new_port_group(port, group, *pp, state, NULL);
if (unlikely(!p))
return -ENOMEM;
rcu_assign_pointer(*pp, p);
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -170,6 +170,29 @@ out:
return p;
}
+static struct net_bridge_port *maybe_deliver_addr(
+ struct net_bridge_port *prev, struct net_bridge_port *p,
+ struct sk_buff *skb, const unsigned char *addr,
+ void (*__packet_hook)(const struct net_bridge_port *p,
+ struct sk_buff *skb))
+{
+ struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
+ if (!should_deliver(p, skb))
+ return prev;
+
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb) {
+ dev->stats.tx_dropped++;
+ return prev;
+ }
+
+ memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
+ __packet_hook(p, skb);
+
+ return prev;
+}
+
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
struct sk_buff *skb0,
@@ -234,6 +257,7 @@ static void br_multicast_flood(struct ne
struct net_bridge_port *prev = NULL;
struct net_bridge_port_group *p;
struct hlist_node *rp;
+ const unsigned char *addr;
rp = rcu_dereference(hlist_first_rcu(&br->router_list));
p = mdst ? rcu_dereference(mdst->ports) : NULL;
@@ -244,10 +268,19 @@ static void br_multicast_flood(struct ne
rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
NULL;
- port = (unsigned long)lport > (unsigned long)rport ?
- lport : rport;
-
- prev = maybe_deliver(prev, port, skb, __packet_hook);
+ if ((unsigned long)lport > (unsigned long)rport) {
+ port = lport;
+ addr = p->unicast ? p->eth_addr : NULL;
+ } else {
+ port = rport;
+ addr = NULL;
+ }
+
+ if (addr)
+ prev = maybe_deliver_addr(prev, port, skb, addr,
+ __packet_hook);
+ else
+ prev = maybe_deliver(prev, port, skb, __packet_hook);
if (IS_ERR(prev))
goto out;
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -192,6 +192,7 @@ static BRPORT_ATTR(multicast_router, S_I
store_multicast_router);
BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE);
+BRPORT_ATTR_FLAG(multicast_to_unicast, BR_MULTICAST_TO_UCAST);
#endif
static const struct brport_attribute *brport_attrs[] = {
@@ -218,6 +219,7 @@ static const struct brport_attribute *br
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
&brport_attr_multicast_router,
&brport_attr_multicast_fast_leave,
+ &brport_attr_multicast_to_unicast,
#endif
&brport_attr_isolate_mode,
NULL
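The patch above adds a per-port BR_MULTICAST_TO_UCAST flag: when it is set, the bridge records the MAC address of each IGMP/MLD reporter in the port group entry and, on delivery, copies the frame once per subscriber with the destination MAC rewritten to that host (maybe_deliver_addr()). The usual motivation is Wi-Fi, where unicast frames are acknowledged and sent at higher rates than multicast. The flag is exposed through the multicast_to_unicast brport attribute added in br_sysfs_if.c; a small userspace sketch for toggling it is below, assuming the standard brport sysfs layout, with "wlan0" as an example port name and set_multicast_to_unicast() as a hypothetical helper.

	#include <stdio.h>

	/* Write 0/1 to /sys/class/net/<port>/brport/multicast_to_unicast. */
	static int set_multicast_to_unicast(const char *port, int enable)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/net/%s/brport/multicast_to_unicast", port);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%d\n", enable);
		return fclose(f);
	}

	int main(void)
	{
		return set_multicast_to_unicast("wlan0", 1) ? 1 : 0;
	}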

View File

@ -0,0 +1,20 @@
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -850,7 +850,7 @@ static int pppoe_sendmsg(struct kiocb *i
goto end;
- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
+ skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32 + NET_SKB_PAD,
0, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
@@ -858,7 +858,7 @@ static int pppoe_sendmsg(struct kiocb *i
}
/* Reserve space for headers. */
- skb_reserve(skb, dev->hard_header_len);
+ skb_reserve(skb, dev->hard_header_len + NET_SKB_PAD);
skb_reset_network_header(skb);
skb->dev = dev;

View File

@ -0,0 +1,11 @@
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -130,7 +130,7 @@ static inline bool dev_xmit_complete(int
*/
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
-# if defined(CONFIG_MAC80211_MESH)
+# if 1 || defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96

Some files were not shown because too many files have changed in this diff.