openwrt-owl/target/linux/layerscape/patches-4.4/7016-dpa-add-dpaa_eth-drive...

From 2af9b49c7e6bad2dee75960ddf61fd52a4d3748f Mon Sep 17 00:00:00 2001
From: Zhao Qiang <qiang.zhao@nxp.com>
Date: Wed, 16 Dec 2015 22:00:36 +0200
Subject: [PATCH 16/70] dpa: add dpaa_eth driver

DPAA is the Data Path Acceleration Architecture; this architecture
provides the infrastructure to support simplified sharing of networking
interfaces and accelerators by multiple CPUs.

Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
Signed-off-by: Camelia Groza <camelia.groza@freescale.com>
Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
---
drivers/net/ethernet/freescale/Kconfig | 2 +
drivers/net/ethernet/freescale/Makefile | 1 +
drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 187 ++
drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 59 +
.../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++++++
.../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 ++
.../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 ++
.../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1183 +++++++++++
drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 695 +++++++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 263 +++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 50 +
.../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1719 ++++++++++++++++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 230 +++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1787 ++++++++++++++++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 227 +++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c | 1735 ++++++++++++++++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h | 90 +
.../freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c | 201 ++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c | 499 +++++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c | 2156 ++++++++++++++++++++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h | 294 +++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 ++++
.../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1128 ++++++++++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c | 914 +++++++++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +++
.../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 ++
.../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 +++++
.../freescale/sdk_dpaa/dpaa_generic_ethtool.c | 286 +++
.../freescale/sdk_dpaa/dpaa_macsec_ethtool.c | 250 +++
drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 287 +++
drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 915 +++++++++
drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 470 +++++
drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 134 ++
.../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 ++++++++
.../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
36 files changed, 18957 insertions(+)
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -93,4 +93,6 @@ config GIANFAR
on the 8540.
source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
+source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig"
+
endif # NET_VENDOR_FREESCALE
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -18,3 +18,4 @@ gianfar_driver-objs := gianfar.o \
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
+obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
@@ -0,0 +1,187 @@
+menuconfig FSL_SDK_DPAA_ETH
+ tristate "DPAA Ethernet"
+ depends on (FSL_SOC || ARM64 || ARM) && FSL_BMAN && FSL_QMAN && FSL_SDK_FMAN
+ select PHYLIB
+ ---help---
+ Data Path Acceleration Architecture Ethernet driver,
+ supporting the Freescale QorIQ chips.
+ Depends on Freescale Buffer Manager and Queue Manager
+ driver and Frame Manager Driver.
+
+if FSL_SDK_DPAA_ETH
+
+config FSL_DPAA_HOOKS
+ bool "DPAA Ethernet driver hooks"
+
+config FSL_DPAA_MACSEC
+ tristate "DPAA MACSEC"
+ select FSL_DPAA_HOOKS
+ ---help---
+ Enable MACSEC support in DPAA.
+
+config FSL_DPAA_CEETM
+ bool "DPAA CEETM QoS"
+ select NET_SCHED
+ default n
+ ---help---
+ Enable QoS offloading support through the CEETM hardware block.
+
+config FSL_DPAA_OFFLINE_PORTS
+ bool "Offline Ports support"
+ depends on FSL_SDK_DPAA_ETH
+ default y
+ ---help---
+ The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
+ most of the functionality of the regular, online ports, except they receive their
+ frames from a core or an accelerator on the SoC, via QMan frame queues,
+ rather than directly from the network.
+ Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
+ any online FMan port. They deliver the processed frames to frame queues, according
+ to the applied PCD configurations.
+
+ Choosing this feature will not impact the functionality or performance of the system,
+ so it is safe to enable.
+
+config FSL_DPAA_ADVANCED_DRIVERS
+ bool "Advanced DPAA Ethernet drivers"
+ depends on FSL_SDK_DPAA_ETH
+ default y
+ ---help---
+ Besides the standard DPAA Ethernet driver, other flavours of DPAA drivers
+ are available that support advanced scenarios:
+ - DPAA Shared MAC driver
+ - DPAA MAC-less driver
+ - DPAA Proxy initialization driver (for USDPAA)
+ Select this to also build the advanced drivers.
+
+config FSL_DPAA_GENERIC_DRIVER
+ bool "Generic DPAA Ethernet driver"
+ depends on FSL_SDK_DPAA_ETH
+ default y
+ ---help---
+ This enables the DPAA Generic driver (oNIC).
+
+config FSL_DPAA_ETH_JUMBO_FRAME
+ bool "Optimize for jumbo frames"
+ depends on !ARM64 && !ARM
+ default n
+ ---help---
+ Optimize the DPAA Ethernet driver throughput for large-frame
+ termination traffic (e.g. 4K and above).
+ NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
+ is set to 9600 bytes.
+ Using this option in combination with small frames significantly
+ increases the driver's memory footprint and may even deplete
+ system memory.
+ This option is not available on LS1043.
+
+config FSL_DPAA_TS
+ bool "Linux compliant timestamping"
+ depends on FSL_SDK_DPAA_ETH
+ default n
+ ---help---
+ Enable Linux API compliant timestamping support.
+
+config FSL_DPAA_1588
+ bool "IEEE 1588-compliant timestamping"
+ depends on FSL_SDK_DPAA_ETH
+ select FSL_DPAA_TS
+ default n
+ ---help---
+ Enable IEEE1588 support code.
+
+config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ bool "Use driver's Tx queue selection mechanism"
+ default y
+ depends on FSL_SDK_DPAA_ETH
+ ---help---
+ The DPAA-Ethernet driver defines an ndo_select_queue() callback for optimal selection
+ of the egress FQ. That will override the XPS support for this netdevice.
+ If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
+ or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
+ and use the standard XPS support instead.
+
+config FSL_DPAA_ETH_MAX_BUF_COUNT
+ int "Maximum nuber of buffers in private bpool"
+ depends on FSL_SDK_DPAA_ETH
+ range 64 2048
+ default "128"
+ ---help---
+ The maximum number of buffers to be allocated by default in the DPAA-Ethernet private port's
+ buffer pool. One needn't normally modify this, as it has probably been tuned for performance
+ already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
+
+config FSL_DPAA_ETH_REFILL_THRESHOLD
+ int "Private bpool refill threshold"
+ depends on FSL_SDK_DPAA_ETH
+ range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
+ default "80"
+ ---help---
+ The DPAA-Ethernet driver will start replenishing buffer pools whose count
+ falls below this threshold. This must not exceed DPAA_ETH_MAX_BUF_COUNT. One needn't normally
+ modify this value unless one has very specific performance reasons.
+
+config FSL_DPAA_CS_THRESHOLD_1G
+ hex "Egress congestion threshold on 1G ports"
+ depends on FSL_SDK_DPAA_ETH
+ range 0x1000 0x10000000
+ default "0x06000000"
+ ---help---
+ The size in bytes of the egress Congestion State notification threshold on 1G ports.
+ The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
+ (e.g. by sending UDP datagrams at "while(1) speed"),
+ and the larger the frame size, the more acute the problem.
+ So we have to find a balance between these factors:
+ - avoiding the device staying congested for a prolonged time (risking
+ the netdev watchdog firing - see also the tx_timeout module param);
+ - affecting performance of protocols such as TCP, which otherwise
+ behave well under the congestion notification mechanism;
+ - preventing the Tx cores from tightly-looping (as if the congestion
+ threshold was too low to be effective);
+ - running out of memory if the CS threshold is set too high.
+
+config FSL_DPAA_CS_THRESHOLD_10G
+ hex "Egress congestion threshold on 10G ports"
+ depends on FSL_SDK_DPAA_ETH
+ range 0x1000 0x20000000
+ default "0x10000000"
+
+config FSL_DPAA_INGRESS_CS_THRESHOLD
+ hex "Ingress congestion threshold on FMan ports"
+ depends on FSL_SDK_DPAA_ETH
+ default "0x10000000"
+ ---help---
+ The size in bytes of the ingress tail-drop threshold on FMan ports.
+ Traffic piling up above this value will be rejected by QMan and discarded by FMan.
+
+config FSL_DPAA_ETH_DEBUGFS
+ bool "DPAA Ethernet debugfs interface"
+ depends on DEBUG_FS && FSL_SDK_DPAA_ETH
+ default y
+ ---help---
+ This option compiles debugfs code for the DPAA Ethernet driver.
+
+config FSL_DPAA_ETH_DEBUG
+ bool "DPAA Ethernet Debug Support"
+ depends on FSL_SDK_DPAA_ETH
+ default n
+ ---help---
+ This option compiles debug code for the DPAA Ethernet driver.
+
+config FSL_DPAA_DBG_LOOP
+ bool "DPAA Ethernet Debug loopback"
+ depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ default n
+ ---help---
+ This option allows diverting all traffic received on a certain interface A towards a
+ selected interface B. This option is used to benchmark the HW + Ethernet driver in
+ isolation from the Linux networking stack. The loops are controlled by debugfs entries,
+ one for each interface. By default all loops are disabled (target value is -1). E.g. to
+ change the loop setting for interface 4 and divert all its received traffic to interface 5,
+ write the Tx interface number to the receiving interface's debugfs file:
+ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
+ 4->-1
+ # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
+ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
+ 4->5
+endif # FSL_SDK_DPAA_ETH
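
The FSL_DPAA_ETH_MAX_BUF_COUNT and FSL_DPAA_ETH_REFILL_THRESHOLD options above work as a pair: the first is the fill level of a private buffer pool, the second the low watermark below which the driver replenishes it. A minimal C sketch of that replenish-below-watermark policy; the names are illustrative, not the driver's actual helpers:

    #include <stdio.h>

    #define MAX_BUF_COUNT     128  /* default of FSL_DPAA_ETH_MAX_BUF_COUNT */
    #define REFILL_THRESHOLD   80  /* default of FSL_DPAA_ETH_REFILL_THRESHOLD */

    /* Illustrative stand-in for the driver's refill logic: top the pool
     * back up to MAX_BUF_COUNT once it drains below REFILL_THRESHOLD.
     */
    static int pool_maybe_refill(int *count)
    {
            int added = 0;

            if (*count < REFILL_THRESHOLD) {
                    added = MAX_BUF_COUNT - *count;
                    *count = MAX_BUF_COUNT;
            }
            return added;
    }

    int main(void)
    {
            int count = 64; /* below the watermark */

            printf("refilled %d buffers, now %d\n",
                   pool_maybe_refill(&count), count);
            return 0;
    }
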
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
@@ -0,0 +1,59 @@
+#
+# Makefile for the Freescale Ethernet controllers
+#
+ccflags-y += -DVERSION=\"\"
+#
+# Include netcomm SW specific definitions
+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
+
+ccflags-y += -I$(NET_DPA)
+
+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
+obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
+
+fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
+ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
+fsl_dpa-objs += dpaa_debugfs.o
+endif
+ifeq ($(CONFIG_FSL_DPAA_1588),y)
+fsl_dpa-objs += dpaa_1588.o
+endif
+ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
+ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
+fsl_dpa-objs += dpaa_eth_ceetm.o
+endif
+
+fsl_mac-objs += mac.o mac-api.o
+
+# Advanced drivers
+ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_shared.o
+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_macless.o
+obj-$(CONFIG_FSL_DPAA_MACSEC) += fsl_dpa_macsec.o
+
+fsl_advanced-objs += dpaa_eth_base.o
+# support for multiple drivers per kernel module comes in kernel 3.14
+# so we are forced to generate several modules for the advanced drivers
+fsl_proxy-objs += dpaa_eth_proxy.o
+fsl_dpa_shared-objs += dpaa_eth_shared.o
+fsl_dpa_macless-objs += dpaa_eth_macless.o
+fsl_dpa_macsec-objs += dpaa_eth_macsec.o dpaa_macsec_ethtool.o
+
+ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
+
+fsl_oh-objs += offline_port.o
+endif
+endif
+
+# Generic driver
+ifeq ($(CONFIG_FSL_DPAA_GENERIC_DRIVER),y)
+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_generic.o
+
+fsl_generic-objs += dpaa_eth_generic.o dpaa_eth_generic_sysfs.o dpaa_generic_ethtool.o
+endif
+
+# Needed by the tracing framework
+CFLAGS_dpaa_eth.o := -I$(src)
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
@@ -0,0 +1,580 @@
+/* Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2009 IXXAT Automation, GmbH
+ *
+ * DPAA Ethernet Driver -- IEEE 1588 interface functionality
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <asm/div64.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_1588.h"
+#include "mac.h"
+
+static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
+{
+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
+
+ circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
+ if (!circ_buf->buf)
+ return 1;
+
+ circ_buf->head = 0;
+ circ_buf->tail = 0;
+ ptp_buf->size = size;
+ spin_lock_init(&ptp_buf->ptp_lock);
+
+ return 0;
+}
+
+static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
+{
+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
+
+ circ_buf->head = 0;
+ circ_buf->tail = 0;
+ ptp_buf->size = size;
+}
+
+static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
+ struct dpa_ptp_data *data)
+{
+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
+ int size = ptp_buf->size;
+ struct dpa_ptp_data *tmp;
+ unsigned long flags;
+ int head, tail;
+
+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
+
+ head = circ_buf->head;
+ tail = circ_buf->tail;
+
+ if (CIRC_SPACE(head, tail, size) <= 0)
+ circ_buf->tail = (tail + 1) & (size - 1);
+
+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
+ memcpy(tmp, data, sizeof(struct dpa_ptp_data));
+
+ circ_buf->head = (head + 1) & (size - 1);
+
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+
+ return 0;
+}
+
+static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
+ struct dpa_ptp_ident *src)
+{
+ int ret;
+
+ if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
+ return 0;
+
+ if ((dst->netw_prot == src->netw_prot)
+ || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
+ if (dst->seq_id != src->seq_id)
+ return 0;
+
+ ret = memcmp(dst->snd_port_id, src->snd_port_id,
+ DPA_PTP_SOURCE_PORT_LENGTH);
+ if (ret)
+ return 0;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
+static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
+ struct dpa_ptp_ident *ident,
+ struct dpa_ptp_time *ts)
+{
+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
+ int size = ptp_buf->size;
+ int head, tail, idx;
+ unsigned long flags;
+ struct dpa_ptp_data *tmp, *tmp2;
+ struct dpa_ptp_ident *tmp_ident;
+
+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
+
+ head = circ_buf->head;
+ tail = idx = circ_buf->tail;
+
+ if (CIRC_CNT(head, tail, size) == 0) {
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+ return 1;
+ }
+
+ while (idx != head) {
+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
+ tmp_ident = &tmp->ident;
+ if (dpa_ptp_is_ident_match(tmp_ident, ident))
+ break;
+ idx = (idx + 1) & (size - 1);
+ }
+
+ if (idx == head) {
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+ return 1;
+ }
+
+ ts->sec = tmp->ts.sec;
+ ts->nsec = tmp->ts.nsec;
+
+ if (idx != tail) {
+ if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
+ tail = circ_buf->tail =
+ (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
+ }
+
+ while (CIRC_CNT(idx, tail, size) > 0) {
+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
+ idx = (idx - 1) & (size - 1);
+ tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
+ *tmp = *tmp2;
+ }
+ }
+ circ_buf->tail = (tail + 1) & (size - 1);
+
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+
+ return 0;
+}
+
+/* Parse the PTP packets
+ *
+ * The PTP header can be found in an IPv4 packet, an IPv6 packet or in
+ * an IEEE 802.3 Ethernet frame. This function returns the position of
+ * the PTP header, or NULL if none is found
+ */
+static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
+{
+ u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
+ u8 *ptp_loc = NULL;
+ u8 msg_type;
+ u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
+ struct iphdr *iph;
+ struct udphdr *udph;
+ struct ipv6hdr *ipv6h;
+
+ /* since we can receive S/G frames, we need to check that the data we
+ * want to access is in the linear part of the skb buffer
+ */
+ if (!pskb_may_pull(skb, access_len))
+ return NULL;
+
+ *eth_type = ntohs(*((u16 *)pos)); /* frame data is big-endian */
+
+ /* Check if inner tag is here */
+ if (*eth_type == ETH_P_8021Q) {
+ access_len += DPA_VLAN_TAG_LEN;
+
+ if (!pskb_may_pull(skb, access_len))
+ return NULL;
+
+ pos += DPA_VLAN_TAG_LEN;
+ *eth_type = ntohs(*((u16 *)pos));
+ }
+
+ pos += DPA_ETYPE_LEN;
+
+ switch (*eth_type) {
+ /* Transport of PTP over Ethernet */
+ case ETH_P_1588:
+ ptp_loc = pos;
+
+ if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
+ return NULL;
+
+ msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
+ if ((msg_type == PTP_MSGTYPE_SYNC)
+ || (msg_type == PTP_MSGTYPE_DELREQ)
+ || (msg_type == PTP_MSGTYPE_PDELREQ)
+ || (msg_type == PTP_MSGTYPE_PDELRESP))
+ return ptp_loc;
+ break;
+ /* Transport of PTP over IPv4 */
+ case ETH_P_IP:
+ iph = (struct iphdr *)pos;
+ access_len += sizeof(struct iphdr);
+
+ if (!pskb_may_pull(skb, access_len))
+ return NULL;
+
+ /* protocol is a single byte, no byte-order conversion needed */
+ if (iph->protocol != IPPROTO_UDP)
+ return NULL;
+
+ access_len += iph->ihl * 4 - sizeof(struct iphdr) +
+ sizeof(struct udphdr);
+
+ if (!pskb_may_pull(skb, access_len))
+ return NULL;
+
+ pos += iph->ihl * 4;
+ udph = (struct udphdr *)pos;
+ if (ntohs(udph->dest) != 319)
+ return NULL;
+ ptp_loc = pos + sizeof(struct udphdr);
+ break;
+ /* Transport of PTP over IPv6 */
+ case ETH_P_IPV6:
+ ipv6h = (struct ipv6hdr *)pos;
+
+ access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
+
+ if (!pskb_may_pull(skb, access_len))
+ return NULL;
+
+ if (ipv6h->nexthdr != IPPROTO_UDP)
+ return NULL;
+
+ pos += sizeof(struct ipv6hdr);
+ udph = (struct udphdr *)pos;
+ if (ntohs(udph->dest) != 319)
+ return NULL;
+ ptp_loc = pos + sizeof(struct udphdr);
+ break;
+ default:
+ break;
+ }
+
+ return ptp_loc;
+}
+
+static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
+ struct sk_buff *skb, void *data, enum port_type rx_tx,
+ struct dpa_ptp_data *ptp_data)
+{
+ u64 nsec;
+ u32 mod;
+ u8 *ptp_loc;
+ u16 eth_type;
+
+ ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
+ if (!ptp_loc)
+ return -EINVAL;
+
+ switch (eth_type) {
+ case ETH_P_IP:
+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
+ break;
+ case ETH_P_IPV6:
+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
+ break;
+ case ETH_P_1588:
+ ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
+ return -EINVAL;
+
+ ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
+ ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
+ ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
+ memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
+ DPA_PTP_SOURCE_PORT_LENGTH);
+
+ nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
+ mod = do_div(nsec, NANOSEC_PER_SECOND);
+ ptp_data->ts.sec = nsec;
+ ptp_data->ts.nsec = mod;
+
+ return 0;
+}
+
+void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
+ struct sk_buff *skb, void *data)
+{
+ struct dpa_ptp_tsu *tsu = priv->tsu;
+ struct dpa_ptp_data ptp_tx_data;
+
+ if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
+ return;
+
+ dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
+}
+
+void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
+ struct sk_buff *skb, void *data)
+{
+ struct dpa_ptp_tsu *tsu = priv->tsu;
+ struct dpa_ptp_data ptp_rx_data;
+
+ if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
+ return;
+
+ dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
+}
+
+static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
+ struct dpa_ptp_ident *ident,
+ struct dpa_ptp_time *ts)
+{
+ struct dpa_ptp_tsu *tsu = ptp_tsu;
+ struct dpa_ptp_time tmp;
+ int flag;
+
+ flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
+ if (!flag) {
+ ts->sec = tmp.sec;
+ ts->nsec = tmp.nsec;
+ return 0;
+ }
+
+ return -1;
+}
+
+static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
+ struct dpa_ptp_ident *ident,
+ struct dpa_ptp_time *ts)
+{
+ struct dpa_ptp_tsu *tsu = ptp_tsu;
+ struct dpa_ptp_time tmp;
+ int flag;
+
+ flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
+ if (!flag) {
+ ts->sec = tmp.sec;
+ ts->nsec = tmp.nsec;
+ return 0;
+ }
+
+ return -1;
+}
+
+static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
+ struct dpa_ptp_time *cnt_time)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+ u64 tmp, fiper;
+
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
+
+ /* TMR_FIPER1 will pulse every second once ALARM1 expires */
+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
+ fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
+ if (mac_dev->fm_rtc_set_alarm)
+ mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
+ 0, tmp);
+ if (mac_dev->fm_rtc_set_fiper)
+ mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
+ 0, fiper);
+
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
+}
+
+static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
+ struct dpa_ptp_time *curr_time)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+ u64 tmp;
+ u32 mod;
+
+ if (mac_dev->fm_rtc_get_cnt)
+ mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
+ &tmp);
+
+ mod = do_div(tmp, NANOSEC_PER_SECOND);
+ curr_time->sec = (u32)tmp;
+ curr_time->nsec = mod;
+}
+
+static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
+ struct dpa_ptp_time *cnt_time)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+ u64 tmp;
+
+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
+
+ if (mac_dev->fm_rtc_set_cnt)
+ mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
+ tmp);
+
+ /* Restart fiper two seconds later */
+ cnt_time->sec += 2;
+ cnt_time->nsec = 0;
+ dpa_set_fiper_alarm(tsu, cnt_time);
+}
+
+static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+ u32 drift;
+
+ if (mac_dev->fm_rtc_get_drift)
+ mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
+ &drift);
+
+ *addend = drift;
+}
+
+static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+
+ if (mac_dev->fm_rtc_set_drift)
+ mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
+ addend);
+}
+
+static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
+{
+ dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
+ dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
+}
+
+int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct dpa_ptp_tsu *tsu = priv->tsu;
+ struct mac_device *mac_dev = priv->mac_dev;
+ struct dpa_ptp_data ptp_data;
+ struct dpa_ptp_data *ptp_data_user;
+ struct dpa_ptp_time act_time;
+ u32 addend;
+ int retval = 0;
+
+ if (!tsu || !tsu->valid)
+ return -ENODEV;
+
+ switch (cmd) {
+ case PTP_ENBL_TXTS_IOCTL:
+ tsu->hwts_tx_en_ioctl = 1;
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
+ if (mac_dev->ptp_enable)
+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
+ break;
+ case PTP_DSBL_TXTS_IOCTL:
+ tsu->hwts_tx_en_ioctl = 0;
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
+ if (mac_dev->ptp_disable)
+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
+ break;
+ case PTP_ENBL_RXTS_IOCTL:
+ tsu->hwts_rx_en_ioctl = 1;
+ break;
+ case PTP_DSBL_RXTS_IOCTL:
+ tsu->hwts_rx_en_ioctl = 0;
+ break;
+ case PTP_GET_RX_TIMESTAMP:
+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
+ if (copy_from_user(&ptp_data.ident,
+ &ptp_data_user->ident, sizeof(ptp_data.ident)))
+ return -EINVAL;
+
+ if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
+ return -EAGAIN;
+
+ if (copy_to_user((void __user *)&ptp_data_user->ts,
+ &ptp_data.ts, sizeof(ptp_data.ts)))
+ return -EFAULT;
+ break;
+ case PTP_GET_TX_TIMESTAMP:
+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
+ if (copy_from_user(&ptp_data.ident,
+ &ptp_data_user->ident, sizeof(ptp_data.ident)))
+ return -EINVAL;
+
+ if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
+ return -EAGAIN;
+
+ if (copy_to_user((void __user *)&ptp_data_user->ts,
+ &ptp_data.ts, sizeof(ptp_data.ts)))
+ return -EFAULT;
+ break;
+ case PTP_GET_TIME:
+ dpa_get_curr_cnt(tsu, &act_time);
+ if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
+ return -EFAULT;
+ break;
+ case PTP_SET_TIME:
+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
+ return -EINVAL;
+ dpa_set_1588cnt(tsu, &act_time);
+ break;
+ case PTP_GET_ADJ:
+ dpa_get_drift(tsu, &addend);
+ if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
+ return -EFAULT;
+ break;
+ case PTP_SET_ADJ:
+ if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
+ return -EINVAL;
+ dpa_set_drift(tsu, addend);
+ break;
+ case PTP_SET_FIPER_ALARM:
+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
+ return -EINVAL;
+ dpa_set_fiper_alarm(tsu, &act_time);
+ break;
+ case PTP_CLEANUP_TS:
+ dpa_flush_timestamp(tsu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return retval;
+}
+
+int dpa_ptp_init(struct dpa_priv_s *priv)
+{
+ struct dpa_ptp_tsu *tsu;
+
+ /* Allocate memory for PTP structure */
+ tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
+ if (!tsu)
+ return -ENOMEM;
+
+ tsu->valid = TRUE;
+ tsu->dpa_priv = priv;
+
+ if (dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ) ||
+ dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ)) {
+ vfree(tsu->rx_timestamps.circ_buf.buf);
+ kfree(tsu);
+ return -ENOMEM;
+ }
+
+ priv->tsu = tsu;
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_ptp_init);
+
+void dpa_ptp_cleanup(struct dpa_priv_s *priv)
+{
+ struct dpa_ptp_tsu *tsu = priv->tsu;
+
+ tsu->valid = FALSE;
+ vfree(tsu->rx_timestamps.circ_buf.buf);
+ vfree(tsu->tx_timestamps.circ_buf.buf);
+
+ kfree(tsu);
+}
+EXPORT_SYMBOL(dpa_ptp_cleanup);
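
dpa_ioctl_1588() above is reached from user space through the SIOCDEVPRIVATE-based ioctl numbers defined in dpaa_1588.h (added next). A minimal user-space sketch of querying the current 1588 counter with PTP_GET_TIME; the interface name is illustrative, and the local struct simply mirrors the layout from dpaa_1588.h:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>

    #define PTP_GET_TIME (SIOCDEVPRIVATE + 7) /* mirrors dpaa_1588.h */

    struct dpa_ptp_time {            /* mirrors dpaa_1588.h */
            unsigned long long sec;  /* only 48 bits are used */
            unsigned int nsec;
    };

    int main(void)
    {
            struct dpa_ptp_time t = { 0 };
            struct ifreq ifr;
            int sock = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* illustrative name */
            ifr.ifr_data = (char *)&t; /* the driver copies the time here */

            if (sock < 0 || ioctl(sock, PTP_GET_TIME, &ifr) < 0) {
                    perror("PTP_GET_TIME");
                    return 1;
            }
            printf("1588 time: %llu.%09u\n", t.sec, t.nsec);
            return 0;
    }
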
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
@@ -0,0 +1,138 @@
+/* Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef __DPAA_1588_H__
+#define __DPAA_1588_H__
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/circ_buf.h>
+#include <linux/fsl_qman.h>
+
+#define DEFAULT_PTP_RX_BUF_SZ 256
+#define DEFAULT_PTP_TX_BUF_SZ 256
+
+/* 1588 private ioctl calls */
+#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
+#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
+#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
+#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
+#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
+#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
+#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
+#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
+#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
+#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
+#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
+#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
+
+/* PTP V2 message type */
+enum {
+ PTP_MSGTYPE_SYNC = 0x0,
+ PTP_MSGTYPE_DELREQ = 0x1,
+ PTP_MSGTYPE_PDELREQ = 0x2,
+ PTP_MSGTYPE_PDELRESP = 0x3,
+ PTP_MSGTYPE_FLWUP = 0x8,
+ PTP_MSGTYPE_DELRESP = 0x9,
+ PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
+ PTP_MSGTYPE_ANNOUNCE = 0xB,
+ PTP_MSGTYPE_SGNLNG = 0xC,
+ PTP_MSGTYPE_MNGMNT = 0xD,
+};
+
+/* Byte offset of data in the PTP V2 headers */
+#define PTP_OFFS_MSG_TYPE 0
+#define PTP_OFFS_VER_PTP 1
+#define PTP_OFFS_MSG_LEN 2
+#define PTP_OFFS_DOM_NMB 4
+#define PTP_OFFS_FLAGS 6
+#define PTP_OFFS_CORFIELD 8
+#define PTP_OFFS_SRCPRTID 20
+#define PTP_OFFS_SEQ_ID 30
+#define PTP_OFFS_CTRL 32
+#define PTP_OFFS_LOGMEAN 33
+
+#define PTP_IP_OFFS 14
+#define PTP_UDP_OFFS 34
+#define PTP_HEADER_OFFS 42
+#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
+#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
+#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
+#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
+
+/* 1588-2008 network protocol enumeration values */
+#define DPA_PTP_PROT_IPV4 1
+#define DPA_PTP_PROT_IPV6 2
+#define DPA_PTP_PROT_802_3 3
+#define DPA_PTP_PROT_DONTCARE 0xFFFF
+
+#define DPA_PTP_SOURCE_PORT_LENGTH 10
+#define DPA_PTP_HEADER_SZE 34
+#define DPA_ETYPE_LEN 2
+#define DPA_VLAN_TAG_LEN 4
+#define NANOSEC_PER_SECOND 1000000000
+
+/* Maximum number of entries kept between the currently found timestamp
+ * and the oldest one
+ */
+#define TS_ACCUMULATION_THRESHOLD 50
+
+/* Struct needed to identify a timestamp */
+struct dpa_ptp_ident {
+ u8 version;
+ u8 msg_type;
+ u16 netw_prot;
+ u16 seq_id;
+ u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
+};
+
+/* Timestamp format in 1588-2008 */
+struct dpa_ptp_time {
+ u64 sec; /* only 48 bits are used */
+ u32 nsec;
+};
+
+/* needed for timestamp data over ioctl */
+struct dpa_ptp_data {
+ struct dpa_ptp_ident ident;
+ struct dpa_ptp_time ts;
+};
+
+struct dpa_ptp_circ_buf {
+ struct circ_buf circ_buf;
+ u32 size;
+ spinlock_t ptp_lock;
+};
+
+/* PTP TSU control structure */
+struct dpa_ptp_tsu {
+ struct dpa_priv_s *dpa_priv;
+ bool valid;
+ struct dpa_ptp_circ_buf rx_timestamps;
+ struct dpa_ptp_circ_buf tx_timestamps;
+
+ /* HW timestamping over ioctl enabled flag */
+ int hwts_tx_en_ioctl;
+ int hwts_rx_en_ioctl;
+};
+
+extern int dpa_ptp_init(struct dpa_priv_s *priv);
+extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
+extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
+ struct sk_buff *skb, void *data);
+extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
+ struct sk_buff *skb, void *data);
+extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
+#endif
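
All index arithmetic on these timestamp buffers in dpaa_1588.c wraps with expressions of the form (idx + 1) & (size - 1), which is only equivalent to a modulo when the buffer size is a power of two; the 256-entry DEFAULT_PTP_RX_BUF_SZ/DEFAULT_PTP_TX_BUF_SZ defaults above satisfy this. A self-contained check of that invariant:

    #include <assert.h>

    int main(void)
    {
            unsigned int size = 256; /* DEFAULT_PTP_{RX,TX}_BUF_SZ */
            unsigned int idx = 255;

            /* With a power-of-two size, masking equals modulo, so the
             * index wraps from size - 1 back to 0.
             */
            assert(((idx + 1) & (size - 1)) == (idx + 1) % size);
            assert(((idx + 1) & (size - 1)) == 0);
            return 0;
    }
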
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
@@ -0,0 +1,180 @@
+/* Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
+#include <linux/debugfs.h>
+#include "dpaa_debugfs.h"
+#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
+
+#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
+#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
+
+static struct dentry *dpa_debugfs_root;
+
+static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
+static ssize_t dpa_loop_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off);
+
+static const struct file_operations dpa_debugfs_lp_fops = {
+ .open = dpa_debugfs_loop_open,
+ .write = dpa_loop_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
+{
+ struct dpa_priv_s *priv;
+
+ BUG_ON(offset == NULL);
+
+ priv = netdev_priv((struct net_device *)file->private);
+ seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
+
+ return 0;
+}
+
+static int user_input_convert(const char __user *user_buf, size_t count,
+ long *val)
+{
+ char buf[12];
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ if (kstrtol(buf, 0, val))
+ return -EINVAL;
+ return 0;
+}
+
+static ssize_t dpa_loop_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ struct dpa_priv_s *priv;
+ struct net_device *netdev;
+ struct seq_file *sf;
+ int ret;
+ long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+
+ sf = (struct seq_file *)f->private_data;
+ netdev = (struct net_device *)sf->private;
+ priv = netdev_priv(netdev);
+
+ priv->loop_to = ((val < 0) || (val > 20)) ? -1 : val;
+
+ return count;
+}
+
+static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
+{
+ int _errno;
+ const struct net_device *net_dev;
+
+ _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
+ if (unlikely(_errno < 0)) {
+ net_dev = (struct net_device *)inode->i_private;
+
+ if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
+ netdev_err(net_dev, "single_open() = %d\n",
+ _errno);
+ }
+
+ return _errno;
+}
+
+int dpa_netdev_debugfs_create(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ static int cnt;
+ char loop_file_name[100];
+
+ if (unlikely(dpa_debugfs_root == NULL)) {
+ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
+ KBUILD_BASENAME".c", __LINE__, __func__,
+ "root debugfs missing, possible module ordering issue");
+ return -ENOMEM;
+ }
+
+ sprintf(loop_file_name, "eth%d_loop", ++cnt);
+ priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
+ S_IRUGO,
+ dpa_debugfs_root,
+ net_dev,
+ &dpa_debugfs_lp_fops);
+ if (unlikely(priv->debugfs_loop_file == NULL)) {
+ netdev_err(net_dev, "debugfs_create_file(%s/%s)",
+ dpa_debugfs_root->d_iname,
+ loop_file_name);
+
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void dpa_netdev_debugfs_remove(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+
+ debugfs_remove(priv->debugfs_loop_file);
+}
+
+int __init dpa_debugfs_module_init(void)
+{
+ int _errno = 0;
+
+ pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
+
+ dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
+
+ if (unlikely(dpa_debugfs_root == NULL)) {
+ _errno = -ENOMEM;
+ pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
+ KBUILD_BASENAME".c", __LINE__, __func__);
+ pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
+ DPA_ETH_DEBUGFS_ROOT, _errno);
+ }
+
+ return _errno;
+}
+
+void __exit dpa_debugfs_module_exit(void)
+{
+ debugfs_remove(dpa_debugfs_root);
+}
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
@@ -0,0 +1,43 @@
+/* Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPAA_DEBUGFS_H_
+#define DPAA_DEBUGFS_H_
+
+#include <linux/netdevice.h>
+#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
+
+int dpa_netdev_debugfs_create(struct net_device *net_dev);
+void dpa_netdev_debugfs_remove(struct net_device *net_dev);
+int __init dpa_debugfs_module_init(void);
+void __exit dpa_debugfs_module_exit(void);
+
+#endif /* DPAA_DEBUGFS_H_ */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
@@ -0,0 +1,1183 @@
+/* Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#include <linux/if_arp.h> /* arp_hdr_len() */
+#include <linux/if_vlan.h> /* VLAN_HLEN */
+#include <linux/icmp.h> /* struct icmphdr */
+#include <linux/ip.h> /* struct iphdr */
+#include <linux/ipv6.h> /* struct ipv6hdr */
+#include <linux/udp.h> /* struct udphdr */
+#include <linux/tcp.h> /* struct tcphdr */
+#include <linux/net.h> /* net_ratelimit() */
+#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
+#include <linux/highmem.h>
+#include <linux/percpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_bman.h>
+
+#include "fsl_fman.h"
+#include "fm_ext.h"
+#include "fm_port_ext.h"
+
+#include "mac.h"
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+#include "dpaa_debugfs.h"
+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
+ * using trace events only need to #include the trace header itself
+ * (dpaa_eth_trace.h), without defining CREATE_TRACE_POINTS.
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa_eth_trace.h"
+
+#define DPA_NAPI_WEIGHT 64
+
+/* Valid checksum indication */
+#define DPA_CSUM_VALID 0xFFFF
+
+#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
+
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+
+static uint8_t debug = -1;
+module_param(debug, byte, S_IRUGO);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+
+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
+static uint16_t tx_timeout = 1000;
+module_param(tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+
+static const char rtx[][3] = {
+ [RX] = "RX",
+ [TX] = "TX"
+};
+
+/* BM */
+
+#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
+
+static uint8_t dpa_priv_common_bpid;
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+struct net_device *dpa_loop_netdevs[20];
+#endif
+
+#ifdef CONFIG_PM
+
+static int dpaa_suspend(struct device *dev)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ int err = 0;
+
+ net_dev = dev_get_drvdata(dev);
+
+ if (net_dev->flags & IFF_UP) {
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ if (priv->wol & DPAA_WOL_MAGIC) {
+ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
+ priv->mac_dev->get_mac_handle(mac_dev), true);
+ if (err) {
+ netdev_err(net_dev, "set_wol() = %d\n", err);
+ goto set_wol_failed;
+ }
+ }
+
+ err = fm_port_suspend(mac_dev->port_dev[RX]);
+ if (err) {
+ netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
+ goto rx_port_suspend_failed;
+ }
+
+ err = fm_port_suspend(mac_dev->port_dev[TX]);
+ if (err) {
+ netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
+ goto tx_port_suspend_failed;
+ }
+ }
+
+ return 0;
+
+tx_port_suspend_failed:
+ fm_port_resume(mac_dev->port_dev[RX]);
+rx_port_suspend_failed:
+ if (priv->wol & DPAA_WOL_MAGIC) {
+ priv->mac_dev->set_wol(mac_dev->port_dev[RX],
+ priv->mac_dev->get_mac_handle(mac_dev), false);
+ }
+set_wol_failed:
+ return err;
+}
+
+static int dpaa_resume(struct device *dev)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ int err = 0;
+
+ net_dev = dev_get_drvdata(dev);
+
+ if (net_dev->flags & IFF_UP) {
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ err = fm_port_resume(mac_dev->port_dev[TX]);
+ if (err) {
+ netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
+ goto resume_failed;
+ }
+
+ err = fm_port_resume(mac_dev->port_dev[RX]);
+ if (err) {
+ netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
+ goto resume_failed;
+ }
+
+ if (priv->wol & DPAA_WOL_MAGIC) {
+ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
+ priv->mac_dev->get_mac_handle(mac_dev), false);
+ if (err) {
+ netdev_err(net_dev, "set_wol() = %d\n", err);
+ goto resume_failed;
+ }
+ }
+ }
+
+ return 0;
+
+resume_failed:
+ return err;
+}
+
+static const struct dev_pm_ops dpaa_pm_ops = {
+ .suspend = dpaa_suspend,
+ .resume = dpaa_resume,
+};
+
+#define DPAA_PM_OPS (&dpaa_pm_ops)
+
+#else /* CONFIG_PM */
+
+#define DPAA_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
+/* Checks whether the checksum field in Parse Results array is valid
+ * (equals 0xFFFF) and increments the .cse counter otherwise
+ */
+static inline void
+dpa_csum_validation(const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd)
+{
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ void *frm = phys_to_virt(addr);
+ fm_prs_result_t *parse_result;
+
+ if (unlikely(!frm))
+ return;
+
+ dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
+ DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
+
+ parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
+
+ if (parse_result->cksum != DPA_CSUM_VALID)
+ percpu_priv->rx_errors.cse++;
+}
+
+static void _dpa_rx_error(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ /* limit common, possibly innocuous Rx FIFO Overflow errors'
+ * interference with zero-loss convergence benchmark results.
+ */
+ if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
+ pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
+ else if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_RX_ERRORS);
+#ifdef CONFIG_FSL_DPAA_HOOKS
+ if (dpaa_eth_hooks.rx_error &&
+ dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
+ /* it's up to the hook to perform resource cleanup */
+ return;
+#endif
+ percpu_priv->stats.rx_errors++;
+
+ if (fd->status & FM_PORT_FRM_ERR_DMA)
+ percpu_priv->rx_errors.dme++;
+ if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
+ percpu_priv->rx_errors.fpe++;
+ if (fd->status & FM_PORT_FRM_ERR_SIZE)
+ percpu_priv->rx_errors.fse++;
+ if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
+ percpu_priv->rx_errors.phe++;
+ if (fd->status & FM_FD_STAT_L4CV)
+ dpa_csum_validation(priv, percpu_priv, fd);
+
+ dpa_fd_release(net_dev, fd);
+}
+
+static void _dpa_tx_error(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ struct sk_buff *skb;
+
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_TX_ERRORS);
+#ifdef CONFIG_FSL_DPAA_HOOKS
+ if (dpaa_eth_hooks.tx_error &&
+ dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
+ /* now the hook must ensure proper cleanup */
+ return;
+#endif
+ percpu_priv->stats.tx_errors++;
+
+ /* If we intended the buffers from this frame to go into the bpools
+ * when the FMan transmit was done, we need to put it in manually.
+ */
+ if (fd->bpid != 0xff) {
+ dpa_fd_release(net_dev, fd);
+ return;
+ }
+
+ skb = _dpa_cleanup_tx_fd(priv, fd);
+ dev_kfree_skb(skb);
+}
+
+/* Helper function to factor out frame validation logic on all Rx paths. Its
+ * purpose is to extract from the Parse Results structure information about
+ * the integrity of the frame, its checksum, the length of the parsed headers
+ * and whether the frame is suitable for GRO.
+ *
+ * Assumes no parser errors, since any error frame is dropped before this
+ * function is called.
+ *
+ * @skb will have its ip_summed field overwritten;
+ * @use_gro will only be written with 0, if the frame is definitely not
+ * GRO-able; otherwise, it will be left unchanged;
+ * @hdr_size will be written with a safe value, at least the size of the
+ * headers' length.
+ */
+void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
+ const struct qm_fd *fd,
+ struct sk_buff *skb, int *use_gro)
+{
+ if (fd->status & FM_FD_STAT_L4CV) {
+ /* The parser has run and performed L4 checksum validation.
+ * We know there were no parser errors (and implicitly no
+ * L4 csum error), otherwise we wouldn't be here.
+ */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Don't go through GRO for certain types of traffic that
+ * we know are not GRO-able, such as dgram-based protocols.
+ * In the worst-case scenarios, such as small-pkt terminating
+ * UDP, the extra GRO processing would be overkill.
+ *
+ * The only protocol the Parser supports that is also GRO-able
+ * is currently TCP.
+ */
+ if (!fm_l4_frame_is_tcp(parse_results))
+ *use_gro = 0;
+
+ return;
+ }
+
+ /* We're here because either the parser didn't run or the L4 checksum
+ * was not verified. This may include the case of a UDP frame with
+ * checksum zero or an L4 proto other than TCP/UDP
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Bypass GRO for unknown traffic or if no PCDs are applied */
+ *use_gro = 0;
+}
+
+int dpaa_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpa_napi_portal *np =
+ container_of(napi, struct dpa_napi_portal, napi);
+
+ int cleaned = qman_p_poll_dqrr(np->p, budget);
+
+ if (cleaned < budget) {
+ int tmp;
+ napi_complete(napi);
+ tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ DPA_BUG_ON(tmp);
+ }
+
+ return cleaned;
+}
+EXPORT_SYMBOL(dpaa_eth_poll);
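
dpaa_eth_poll() above follows the standard NAPI contract: consume up to the budget from the QMan portal's DQRR, and only when less work was available complete NAPI and re-arm the interrupt source so the next frame schedules polling again. A self-contained model of that contract; the names are illustrative, not driver code:

    #include <stdio.h>

    static int pending = 100; /* say, frames waiting in the DQRR */
    static int irq_enabled;

    static int hw_consume(int budget)
    {
            int done = pending < budget ? pending : budget;

            pending -= done;
            return done;
    }

    static int generic_poll(int budget)
    {
            int work_done = hw_consume(budget);

            /* Fewer frames than the budget means the ring is drained:
             * leave polled mode and re-enable the interrupt source
             * (napi_complete() + qman_p_irqsource_add() in the driver).
             */
            if (work_done < budget)
                    irq_enabled = 1;
            return work_done;
    }

    int main(void)
    {
            int budget = 64; /* DPA_NAPI_WEIGHT */

            while (!irq_enabled)
                    printf("polled %d frames\n", generic_poll(budget));
            return 0;
    }
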
+
+static void __hot _dpa_tx_conf(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ struct sk_buff *skb;
+
+ /* do we need the timestamp for the error frames? */
+
+ if (unlikely((fd->status & FM_FD_STAT_TX_ERRORS) != 0)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_TX_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+ }
+
+ /* hopefully we need not get the timestamp before the hook */
+#ifdef CONFIG_FSL_DPAA_HOOKS
+ if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
+ fd, fqid) == DPAA_ETH_STOLEN)
+ /* it's the hook that must now perform cleanup */
+ return;
+#endif
+ /* This might not perfectly reflect the reality, if the core dequeuing
+ * the Tx confirmation is different from the one that did the enqueue,
+ * but at least it'll show up in the total count.
+ */
+ percpu_priv->tx_confirm++;
+
+ skb = _dpa_cleanup_tx_fd(priv, fd);
+
+ dev_kfree_skb(skb);
+}
+
+enum qman_cb_dqrr_result
+priv_rx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int *count_ptr;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpa_fd_release(net_dev, &dq->fd);
+ else
+ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+enum qman_cb_dqrr_result __hot
+priv_rx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int *count_ptr;
+ struct dpa_bp *dpa_bp;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+ dpa_bp = priv->dpa_bp;
+
+ /* Trace the Rx fd */
+ trace_dpa_rx_fd(net_dev, fq, &dq->fd);
+
+ /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ return qman_cb_dqrr_stop;
+
+ /* Vale of plenty: make sure we didn't run out of buffers */
+
+ if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpa_fd_release(net_dev, &dq->fd);
+ else
+ _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
+ count_ptr);
+
+ return qman_cb_dqrr_consume;
+}
+
+enum qman_cb_dqrr_result
+priv_tx_conf_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+enum qman_cb_dqrr_result __hot
+priv_tx_conf_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ /* Trace the fd */
+ trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
+
+ /* Non-migratable context, safe to use raw_cpu_ptr */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+void priv_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ const struct dpa_priv_s *priv;
+ struct sk_buff *skb;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct qm_fd fd = msg->ern.fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+ /* Non-migratable context, safe to use raw_cpu_ptr */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+ count_ern(percpu_priv, msg);
+
+ /* If we intended this buffer to go into the pool
+ * when the FM was done, we need to put it in
+ * manually.
+ */
+ if (msg->ern.fd.bpid != 0xff) {
+ dpa_fd_release(net_dev, &fd);
+ return;
+ }
+
+ skb = _dpa_cleanup_tx_fd(priv, &fd);
+ dev_kfree_skb_any(skb);
+}
+
+const struct dpa_fq_cbs_t private_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
+ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
+ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
+ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
+ .egress_ern = { .cb = { .ern = priv_ern } }
+};
+EXPORT_SYMBOL(private_fq_cbs);
+
+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
+{
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ for (j = 0; j < qman_portal_max; j++)
+ napi_enable(&percpu_priv->np[j].napi);
+ }
+}
+
+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
+{
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ for (j = 0; j < qman_portal_max; j++)
+ napi_disable(&percpu_priv->np[j].napi);
+ }
+}
+
+static int __cold dpa_eth_priv_start(struct net_device *net_dev)
+{
+ int err;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_napi_enable(priv);
+
+ err = dpa_start(net_dev);
+ if (err < 0)
+ dpaa_eth_napi_disable(priv);
+
+ return err;
+}
+
+
+
+static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
+
+ _errno = dpa_stop(net_dev);
+ /* Allow NAPI to consume any frame still in the Rx/TxConfirm
+ * ingress queues. This is to avoid a race between the current
+ * context and ksoftirqd which could leave NAPI disabled while
+ * in fact there's still Rx traffic to be processed.
+ */
+ usleep_range(5000, 10000);
+
+ priv = netdev_priv(net_dev);
+ dpaa_eth_napi_disable(priv);
+
+ return _errno;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dpaa_eth_poll_controller(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv =
+ raw_cpu_ptr(priv->percpu_priv);
+ struct qman_portal *p;
+ const struct qman_portal_config *pc;
+ struct dpa_napi_portal *np;
+
+ p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
+ pc = qman_p_get_portal_config(p);
+ np = &percpu_priv->np[pc->index];
+
+ qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
+ qman_p_poll_dqrr(np->p, np->napi.weight);
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+}
+#endif
+
+static const struct net_device_ops dpa_private_ops = {
+ .ndo_open = dpa_eth_priv_start,
+ .ndo_start_xmit = dpa_tx,
+ .ndo_stop = dpa_eth_priv_stop,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats64 = dpa_get_stats64,
+ .ndo_set_mac_address = dpa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ .ndo_select_queue = dpa_select_queue,
+#endif
+ .ndo_change_mtu = dpa_change_mtu,
+ .ndo_set_rx_mode = dpa_set_rx_mode,
+ .ndo_init = dpa_ndo_init,
+ .ndo_set_features = dpa_set_features,
+ .ndo_fix_features = dpa_fix_features,
+ .ndo_do_ioctl = dpa_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = dpaa_eth_poll_controller,
+#endif
+};
+
+static int dpa_private_napi_add(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
+ qman_portal_max * sizeof(struct dpa_napi_portal),
+ GFP_KERNEL);
+
+ if (unlikely(percpu_priv->np == NULL)) {
+ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < qman_portal_max; i++)
+ netif_napi_add(net_dev, &percpu_priv->np[i].napi,
+ dpaa_eth_poll, DPA_NAPI_WEIGHT);
+ }
+
+ return 0;
+}
+
+void dpa_private_napi_del(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ if (percpu_priv->np) {
+ for (i = 0; i < qman_portal_max; i++)
+ netif_napi_del(&percpu_priv->np[i].napi);
+
+ devm_kfree(net_dev->dev.parent, percpu_priv->np);
+ }
+ }
+}
+EXPORT_SYMBOL(dpa_private_napi_del);
+
+static int dpa_private_netdev_init(struct net_device *net_dev)
+{
+ int i;
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+ const uint8_t *mac_addr;
+
+ /* Although we access another CPU's private data here
+ * we do it at initialization so it is safe
+ */
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ percpu_priv->net_dev = net_dev;
+ }
+
+ net_dev->netdev_ops = &dpa_private_ops;
+ mac_addr = priv->mac_dev->addr;
+
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
+
+ /* Advertise S/G and HIGHDMA support for private interfaces */
+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
+ /* Recent kernels enable GSO automatically, if
+ * we declare NETIF_F_SG. For conformity, we'll
+ * still declare GSO explicitly.
+ */
+ net_dev->features |= NETIF_F_GSO;
+
+ /* Advertise GRO support */
+ net_dev->features |= NETIF_F_GRO;
+
+ return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
+}
+
+static struct dpa_bp * __cold
+dpa_priv_bp_probe(struct device *dev)
+{
+ struct dpa_bp *dpa_bp;
+
+ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
+ if (unlikely(dpa_bp == NULL)) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+	dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
+	if (dpa_bp->percpu_count == NULL) {
+		dev_err(dev, "devm_alloc_percpu() failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+ dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
+
+ dpa_bp->seed_cb = dpa_bp_priv_seed;
+ dpa_bp->free_buf_cb = _dpa_bp_free_pf;
+
+ return dpa_bp;
+}
+
+/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
+ * We won't be sending congestion notifications to FMan; for now, we just use
+ * this CGR to generate enqueue rejections to FMan in order to drop the frames
+ * before they reach our ingress queues and eat up memory.
+ */
+static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
+ if (err < 0) {
+ pr_err("Error %d allocating CGR ID\n", err);
+ goto out_error;
+ }
+
+	/* Enable CS TD, but disable Congestion State Change Notifications. */
+	memset(&initcgr, 0, sizeof(initcgr));
+	initcgr.we_mask = QM_CGR_WE_CS_THRES;
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+ cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ /* This is actually a hack, because this CGR will be associated with
+ * our affine SWP. However, we'll place our ingress FQs in it.
+ */
+ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ pr_err("Error %d creating ingress CGR with ID %d\n", err,
+ priv->ingress_cgr.cgrid);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ goto out_error;
+ }
+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
+
+ /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
+ * range), but we have no common initialization path between the
+	 * different variants of the DPAA Eth driver, so we do it here rather
+	 * than in every variant other than "private Eth".
+ */
+ priv->use_ingress_cgr = true;
+
+out_error:
+ return err;
+}
+
+static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ int i;
+
+ if (netif_msg_probe(priv))
+ dev_dbg(net_dev->dev.parent,
+ "Using private BM buffer pools\n");
+
+ priv->bp_count = count;
+
+ for (i = 0; i < count; i++) {
+ int err;
+ err = dpa_bp_alloc(&dpa_bp[i]);
+ if (err < 0) {
+ dpa_bp_free(priv);
+ priv->dpa_bp = NULL;
+ return err;
+ }
+
+ priv->dpa_bp = &dpa_bp[i];
+ }
+
+ dpa_priv_common_bpid = priv->dpa_bp->bpid;
+ return 0;
+}
+
+static const struct of_device_id dpa_match[];
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+static int dpa_new_loop_id(void)
+{
+ static int if_id;
+
+ return if_id++;
+}
+#endif
+
+static int
+dpaa_eth_priv_probe(struct platform_device *_of_dev)
+{
+ int err = 0, i, channel;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct dpa_fq *dpa_fq, *tmp;
+ size_t count = 1;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct fm_port_fqs port_fqs;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+ struct mac_device *mac_dev;
+ struct task_struct *kth;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+	/* Get the buffer pools assigned to this interface;
+	 * the default pool probing code must run only once
+	 */
+ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
+ dpa_priv_bp_probe(dev);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+ /* Allocate this early, so we can store relevant information in
+ * the private area (needed by 1588 code in dpa_mac_probe)
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+	if (!net_dev) {
+		dev_err(dev, "alloc_etherdev_mq() failed\n");
+		err = -ENOMEM;
+		goto alloc_etherdev_mq_failed;
+	}
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ strcpy(priv->if_type, "private");
+
+ priv->msg_enable = netif_msg_init(debug, -1);
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ priv->loop_id = dpa_new_loop_id();
+ priv->loop_to = -1; /* disabled by default */
+ dpa_loop_netdevs[priv->loop_id] = net_dev;
+#endif
+
+	mac_dev = dpa_mac_probe(_of_dev);
+	if (IS_ERR_OR_NULL(mac_dev)) {
+		/* PTR_ERR(NULL) would yield 0; make sure we return an error */
+		err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
+		goto mac_probe_failed;
+	}
+
+ /* We have physical ports, so we need to establish
+ * the buffer layout.
+ */
+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+ GFP_KERNEL);
+	if (!buf_layout) {
+		dev_err(dev, "devm_kzalloc() failed\n");
+		err = -ENOMEM;
+		goto alloc_failed;
+	}
+ dpa_set_buffers_layout(mac_dev, buf_layout);
+
+	/* For private ports, we need to compute the size of the default
+	 * buffer pool based on the FMan port buffer layout; also update
+	 * the maximum buffer size for private ports if necessary
+	 */
+ dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
+
+#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
+ /* We only want to use jumbo frame optimization if we actually have
+ * L2 MAX FRM set for jumbo frames as well.
+ */
+ if (fm_get_max_frm() < 9600)
+ dev_warn(dev,
+ "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
+#endif
+
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
+ if (!err)
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
+ &port_fqs, true, TX);
+
+ if (err < 0)
+ goto fq_probe_failed;
+
+ /* bp init */
+
+ err = dpa_priv_bp_create(net_dev, dpa_bp, count);
+
+ if (err < 0)
+ goto bp_create_failed;
+
+ priv->mac_dev = mac_dev;
+
+ channel = dpa_get_channel();
+
+ if (channel < 0) {
+ err = channel;
+ goto get_channel_failed;
+ }
+
+ priv->channel = (uint16_t)channel;
+
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", net_dev, priv->channel);
+	if (IS_ERR(kth)) {
+		/* kthread_run() returns an ERR_PTR() on failure, never NULL */
+		err = PTR_ERR(kth);
+		goto add_channel_failed;
+	}
+
+ dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+ * Must be executed after probing the MAC, but before
+ * assigning the egress FQs to the CGRs.
+ */
+ err = dpaa_eth_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing CGR\n");
+ goto tx_cgr_init_failed;
+ }
+ err = dpaa_eth_priv_ingress_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing ingress CGR\n");
+ goto rx_cgr_init_failed;
+ }
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+ err = dpa_fq_init(dpa_fq, false);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ priv->buf_layout = buf_layout;
+ priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
+ priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
+
+ /* All real interfaces need their ports initialized */
+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+ buf_layout, dev);
+
+#ifdef CONFIG_FMAN_PFC
+ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
+ err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
+ mac_dev->port_dev[TX], i, i);
+ if (unlikely(err != 0)) {
+			dev_err(dev, "Error mapping PFC %u to WQ %u\n", i, i);
+ goto pfc_mapping_failed;
+ }
+ }
+#endif
+
+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
+
+ if (priv->percpu_priv == NULL) {
+ dev_err(dev, "devm_alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ /* Initialize NAPI */
+ err = dpa_private_napi_add(net_dev);
+
+ if (err < 0)
+ goto napi_add_failed;
+
+ err = dpa_private_netdev_init(net_dev);
+
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+#ifdef CONFIG_PM
+ device_set_wakeup_capable(dev, true);
+#endif
+
+ pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
+
+ return 0;
+
+netdev_init_failed:
+napi_add_failed:
+ dpa_private_napi_del(net_dev);
+alloc_percpu_failed:
+#ifdef CONFIG_FMAN_PFC
+pfc_mapping_failed:
+#endif
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+fq_alloc_failed:
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+rx_cgr_init_failed:
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+tx_cgr_init_failed:
+add_channel_failed:
+get_channel_failed:
+ dpa_bp_free(priv);
+bp_create_failed:
+fq_probe_failed:
+alloc_failed:
+mac_probe_failed:
+ dev_set_drvdata(dev, NULL);
+ free_netdev(net_dev);
+alloc_etherdev_mq_failed:
+ if (atomic_read(&dpa_bp->refs) == 0)
+ devm_kfree(dev, dpa_bp);
+
+ return err;
+}
+
+static const struct of_device_id dpa_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_match);
+
+static struct platform_driver dpa_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = dpa_match,
+ .owner = THIS_MODULE,
+ .pm = DPAA_PM_OPS,
+ },
+ .probe = dpaa_eth_priv_probe,
+ .remove = dpa_remove
+};
+
+static int __init __cold dpa_load(void)
+{
+ int _errno;
+
+ pr_info(DPA_DESCRIPTION "\n");
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ dpa_debugfs_module_init();
+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
+
+ /* initialise dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+ dpa_num_cpus = num_possible_cpus();
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
+#endif
+
+ _errno = platform_driver_register(&dpa_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(dpa_load);
+
+static void __exit __cold dpa_unload(void)
+{
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+
+ platform_driver_unregister(&dpa_driver);
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ dpa_debugfs_module_exit();
+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
+
+	/* Only one channel is used and needs to be released after all
+	 * interfaces are removed
+	 */
+ dpa_release_channel();
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(dpa_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
@@ -0,0 +1,695 @@
+/* Copyright 2008-2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPA_H
+#define __DPA_H
+
+#include <linux/netdevice.h>
+#include <linux/fsl_qman.h> /* struct qman_fq */
+
+#include "fm_ext.h"
+#include "dpaa_eth_trace.h"
+
+extern int dpa_rx_extra_headroom;
+extern int dpa_max_frm;
+extern int dpa_num_cpus;
+
+#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
+#define dpa_get_max_frm() dpa_max_frm
+
+#define dpa_get_max_mtu() \
+ (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
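+
+/* Worked example (assuming the common FSL_FM_MAX_FRM setting of 1522):
+ * max MTU = 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN) = 1522 - (18 + 4) = 1500.
+ */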
+
+#define __hot
+
+/* Simple enum of FQ types - used for array indexing */
+enum port_type {RX, TX};
+
+/* TODO: This structure should be renamed & moved to the FMD wrapper */
+struct dpa_buffer_layout_s {
+ uint16_t priv_data_size;
+ bool parse_results;
+ bool time_stamp;
+ bool hash_results;
+ uint8_t manip_extra_space;
+ uint16_t data_align;
+};
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define DPA_BUG_ON(cond) BUG_ON(cond)
+#else
+#define DPA_BUG_ON(cond)
+#endif
+
+#define DPA_TX_PRIV_DATA_SIZE 16
+#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
+#define DPA_TIME_STAMP_SIZE 8
+#define DPA_HASH_RESULTS_SIZE 8
+#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
+ dpa_get_rx_extra_headroom())
+
+#define FM_FD_STAT_RX_ERRORS \
+ (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
+ FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
+ FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
+ FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
+
+#define FM_FD_STAT_TX_ERRORS \
+ (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
+ FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
+
+#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
+/* The raw buffer size must be cacheline aligned.
+ * Normally we use 2K buffers.
+ */
+#define DPA_BP_RAW_SIZE 2048
+#else
+/* For jumbo frame optimizations, use buffers large enough to accommodate
+ * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
+ * space to account for further alignments.
+ */
+#define DPA_MAX_FRM_SIZE 9600
+#define DPA_BP_RAW_SIZE \
+ ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
+ sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
+#endif
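+
+/* Note on the jumbo sizing above (a sketch of the arithmetic, not a
+ * requirement): the mask rounds *down* to a cacheline multiple, while the
+ * extra 128 bytes of slack guarantee the rounded result still covers a
+ * 9.6K frame plus the maximum FD offset and the skb_shared_info.
+ */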
+
+/* This is what FMan is ever allowed to use.
+ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
+ * half-page-aligned buffers (can we?), so we reserve some more space
+ * for start-of-buffer alignment.
+ */
+#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
+ SMP_CACHE_BYTES)
+/* We must ensure that skb_shinfo is always cacheline-aligned. */
+#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
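+
+/* e.g. with SMP_CACHE_BYTES = 64, DPA_SKB_SIZE(2000) = 1984: sizes are
+ * rounded down to a cacheline multiple so skb_shinfo stays aligned.
+ */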
+
+/* Maximum size of a buffer for which recycling is allowed.
+ * We need an upper limit such that forwarded skbs that get reallocated on Tx
+ * aren't allowed to grow unboundedly. On the other hand, we need to make sure
+ * that skbs allocated by us will not fail to be recycled due to their size.
+ *
+ * For a requested size, the kernel allocator provides the next power of two
+ * sized block, which the stack will use as is, regardless of the actual size
+ * it required; since we must accommodate at most 9.6K buffers (L2 maximum
+ * supported frame size), set the recycling upper limit to 16K.
+ */
+#define DPA_RECYCLE_MAX_SIZE 16384
+
+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
+/*TODO: temporary for fman pcd testing */
+#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
+#endif
+
+#define DPAA_ETH_FQ_DELTA 0x10000
+
+#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
+ (((device_addr) & 0x1fffff) >> 6)
+
+#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
+ (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
+
+/* Largest value that the FQD's OAL field can hold.
+ * This is DPAA-1.x specific.
+ * TODO: This rather belongs in fsl_qman.h
+ */
+#define FSL_QMAN_MAX_OAL 127
+
+/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
+#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
+
+/* Default alignment for start of data in an Rx FD */
+#define DPA_FD_DATA_ALIGNMENT 16
+
+/* Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+/* L3 Type field: First IP Present IPv6 */
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+
+/* Values for the L4R field of the FM Parse Results
+ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
+ */
+/* L4 Type field: UDP */
+#define FM_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define FM_L4_PARSE_RESULT_TCP 0x20
+/* FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV 0x00000004
+
+
+#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
+
+/* Check if the parsed frame was found to be a TCP segment.
+ *
+ * @parse_result_ptr must be of type (fm_prs_result_t *).
+ */
+#define fm_l4_frame_is_tcp(parse_result_ptr) \
+ ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
+
+/* number of Tx queues to FMan */
+#ifdef CONFIG_FMAN_PFC
+#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
+#else
+#define DPAA_ETH_TX_QUEUES NR_CPUS
+#endif
+
+#define DPAA_ETH_RX_QUEUES 128
+
+/* Convenience macros for storing/retrieving the skb back-pointers. They must
+ * accommodate both recycling and confirmation paths - i.e. cases when the buf
+ * was allocated by ourselves, respectively by the stack. In the former case,
+ * we could store the skb at negative offset; in the latter case, we can't,
+ * so we'll use 0 as offset.
+ *
+ * NB: @off is an offset from a (struct sk_buff **) pointer!
+ */
+#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
+{ \
+ skbh = (struct sk_buff **)addr; \
+ *(skbh + (off)) = skb; \
+}
+#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
+{ \
+ skbh = (struct sk_buff **)addr; \
+ skb = *(skbh + (off)); \
+}
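+
+/* Illustrative use (hypothetical local names): store the skb pointer at
+ * the start of the buffer on Tx, then recover it on Tx confirmation:
+ *
+ *	struct sk_buff **skbh;
+ *	DPA_WRITE_SKB_PTR(skb, skbh, buf_start, 0);
+ *	...
+ *	DPA_READ_SKB_PTR(skb, skbh, buf_start, 0);
+ */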
+
+#ifdef CONFIG_PM
+/* Magic Packet wakeup */
+#define DPAA_WOL_MAGIC 0x00000001
+#endif
+
+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
+struct pcd_range {
+ uint32_t base;
+ uint32_t count;
+};
+#endif
+
+/* More detailed FQ types - used for fine-grained WQ assignments */
+enum dpa_fq_type {
+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */
+ FQ_TYPE_RX_PCD, /* User-defined PCDs */
+ FQ_TYPE_TX, /* "Real" Tx FQs */
+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
+ FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
+};
+
+struct dpa_fq {
+ struct qman_fq fq_base;
+ struct list_head list;
+ struct net_device *net_dev;
+ bool init;
+ uint32_t fqid;
+ uint32_t flags;
+ uint16_t channel;
+ uint8_t wq;
+ enum dpa_fq_type fq_type;
+};
+
+struct dpa_fq_cbs_t {
+ struct qman_fq rx_defq;
+ struct qman_fq tx_defq;
+ struct qman_fq rx_errq;
+ struct qman_fq tx_errq;
+ struct qman_fq egress_ern;
+};
+
+struct fqid_cell {
+ uint32_t start;
+ uint32_t count;
+};
+
+struct dpa_bp {
+ struct bman_pool *pool;
+ uint8_t bpid;
+ struct device *dev;
+ union {
+ /* The buffer pools used for the private ports are initialized
+ * with target_count buffers for each CPU; at runtime the
+ * number of buffers per CPU is constantly brought back to this
+ * level
+ */
+ int target_count;
+ /* The configured value for the number of buffers in the pool,
+ * used for shared port buffer pools
+ */
+ int config_count;
+ };
+ size_t size;
+ bool seed_pool;
+ /* physical address of the contiguous memory used by the pool to store
+ * the buffers
+ */
+ dma_addr_t paddr;
+ /* virtual address of the contiguous memory used by the pool to store
+ * the buffers
+ */
+ void __iomem *vaddr;
+	/* current number of buffers in the bpool allotted to this CPU */
+ int __percpu *percpu_count;
+ atomic_t refs;
+ /* some bpools need to be seeded before use by this cb */
+ int (*seed_cb)(struct dpa_bp *);
+ /* some bpools need to be emptied before freeing; this cb is used
+ * for freeing of individual buffers taken from the pool
+ */
+ void (*free_buf_cb)(void *addr);
+};
+
+struct dpa_rx_errors {
+ u64 dme; /* DMA Error */
+ u64 fpe; /* Frame Physical Error */
+ u64 fse; /* Frame Size Error */
+ u64 phe; /* Header Error */
+ u64 cse; /* Checksum Validation Error */
+};
+
+/* Counters for QMan ERN frames - one counter per rejection code */
+struct dpa_ern_cnt {
+ u64 cg_tdrop; /* Congestion group taildrop */
+ u64 wred; /* WRED congestion */
+ u64 err_cond; /* Error condition */
+ u64 early_window; /* Order restoration, frame too early */
+ u64 late_window; /* Order restoration, frame too late */
+ u64 fq_tdrop; /* FQ taildrop */
+ u64 fq_retired; /* FQ is retired */
+ u64 orp_zero; /* ORP disabled */
+};
+
+struct dpa_napi_portal {
+ struct napi_struct napi;
+ struct qman_portal *p;
+};
+
+struct dpa_percpu_priv_s {
+ struct net_device *net_dev;
+ struct dpa_napi_portal *np;
+ u64 in_interrupt;
+ u64 tx_returned;
+ u64 tx_confirm;
+ /* fragmented (non-linear) skbuffs received from the stack */
+ u64 tx_frag_skbuffs;
+ /* number of S/G frames received */
+ u64 rx_sg;
+
+ struct rtnl_link_stats64 stats;
+ struct dpa_rx_errors rx_errors;
+ struct dpa_ern_cnt ern_cnt;
+};
+
+struct dpa_priv_s {
+ struct dpa_percpu_priv_s __percpu *percpu_priv;
+ struct dpa_bp *dpa_bp;
+ /* Store here the needed Tx headroom for convenience and speed
+ * (even though it can be computed based on the fields of buf_layout)
+ */
+ uint16_t tx_headroom;
+ struct net_device *net_dev;
+ struct mac_device *mac_dev;
+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
+ struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
+
+ size_t bp_count;
+
+ uint16_t channel; /* "fsl,qman-channel-id" */
+ struct list_head dpa_fq_list;
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ struct dentry *debugfs_loop_file;
+#endif
+
+ uint32_t msg_enable; /* net_device message level */
+#ifdef CONFIG_FSL_DPAA_1588
+ struct dpa_ptp_tsu *tsu;
+#endif
+
+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
+/* TODO: this is temporary until pcd support is implemented in dpaa */
+ int priv_pcd_num_ranges;
+ struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
+#endif
+
+ struct {
+ /**
+ * All egress queues to a given net device belong to one
+ * (and the same) congestion group.
+ */
+ struct qman_cgr cgr;
+ /* If congested, when it began. Used for performance stats. */
+ u32 congestion_start_jiffies;
+ /* Number of jiffies the Tx port was congested. */
+ u32 congested_jiffies;
+ /**
+ * Counter for the number of times the CGR
+ * entered congestion state
+ */
+ u32 cgr_congested_count;
+ } cgr_data;
+ /* Use a per-port CGR for ingress traffic. */
+ bool use_ingress_cgr;
+ struct qman_cgr ingress_cgr;
+
+#ifdef CONFIG_FSL_DPAA_TS
+ bool ts_tx_en; /* Tx timestamping enabled */
+ bool ts_rx_en; /* Rx timestamping enabled */
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ struct dpa_buffer_layout_s *buf_layout;
+ uint16_t rx_headroom;
+ char if_type[30];
+
+ void *peer;
+#ifdef CONFIG_PM
+ u32 wol;
+#endif
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ int loop_id;
+ int loop_to;
+#endif
+#ifdef CONFIG_FSL_DPAA_CEETM
+ bool ceetm_en; /* CEETM QoS enabled */
+#endif
+};
+
+struct fm_port_fqs {
+ struct dpa_fq *tx_defq;
+ struct dpa_fq *tx_errq;
+ struct dpa_fq *rx_defq;
+ struct dpa_fq *rx_errq;
+};
+
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+extern struct net_device *dpa_loop_netdevs[20];
+#endif
+
+/* functions with different implementation for SG and non-SG: */
+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
+void __hot _dpa_rx(struct net_device *net_dev,
+ struct qman_portal *portal,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid,
+ int *count_ptr);
+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
+int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
+ struct qman_fq *egress_fq, struct qman_fq *conf_fq);
+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd);
+void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
+ const struct qm_fd *fd,
+ struct sk_buff *skb,
+ int *use_gro);
+#ifndef CONFIG_FSL_DPAA_TS
+bool dpa_skb_is_recyclable(struct sk_buff *skb);
+bool dpa_buf_is_recyclable(struct sk_buff *skb,
+ uint32_t min_size,
+ uint16_t min_offset,
+ unsigned char **new_buf_start);
+#endif
+int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd,
+ int *count_ptr, int *offset);
+int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd);
+int __cold __attribute__((nonnull))
+ _dpa_fq_free(struct device *dev, struct qman_fq *fq);
+
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
+
+static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
+ struct qman_portal *portal)
+{
+	/* On RT-enabled kernels with threaded ISRs, in_irq() does not
+	 * return the expected value, so use in_serving_softirq() to
+	 * distinguish between softirq and irq contexts.
+	 */
+ if (unlikely(in_irq() || !in_serving_softirq())) {
+ /* Disable QMan IRQ and invoke NAPI */
+ int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
+ if (likely(!ret)) {
+ const struct qman_portal_config *pc =
+ qman_p_get_portal_config(portal);
+ struct dpa_napi_portal *np =
+ &percpu_priv->np[pc->index];
+
+ np->p = portal;
+ napi_schedule(&np->napi);
+ percpu_priv->in_interrupt++;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline ssize_t __const __must_check __attribute__((nonnull))
+dpa_fd_length(const struct qm_fd *fd)
+{
+ return fd->length20;
+}
+
+static inline ssize_t __const __must_check __attribute__((nonnull))
+dpa_fd_offset(const struct qm_fd *fd)
+{
+ return fd->offset;
+}
+
+/* Checks that the skb length does not exceed the interface MTU
+ * (VLAN-tagged frames may exceed it by up to 4 bytes)
+ */
+static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
+{
+ if (unlikely(skb->len > mtu))
+ if ((skb->protocol != htons(ETH_P_8021Q))
+ || (skb->len > mtu + 4))
+ return -1;
+
+ return 0;
+}
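+
+/* Example of the check above: with mtu = 1500, a 1504-byte frame passes
+ * only when VLAN-tagged (ETH_P_8021Q); untagged, it is rejected.
+ */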
+
+static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
+{
+ uint16_t headroom;
+ /* The frame headroom must accommodate:
+ * - the driver private data area
+ * - parse results, hash results, timestamp if selected
+ * - manip extra space
+ * If either hash results or time stamp are selected, both will
+	 * be copied to/from the frame headroom, as TS is located between PR and
+	 * HR in the IC and the IC copy size has a granularity of 16 bytes
+	 * (see the description of the FMBM_RICP and FMBM_TICP registers in DPAARM)
+ *
+ * Also make sure the headroom is a multiple of data_align bytes
+ */
+ headroom = (uint16_t)(bl->priv_data_size +
+ (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
+ (bl->hash_results || bl->time_stamp ?
+ DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
+ bl->manip_extra_space);
+
+ return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
+}
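+
+/* Worked example for dpa_get_headroom() (illustrative values): with
+ * priv_data_size = 16, parse results on, hash results or timestamp on and
+ * no manip space, headroom = 16 + DPA_PARSE_RESULTS_SIZE + 8 + 8; with
+ * data_align = 16, that sum is then rounded up to the next multiple of 16.
+ */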
+
+int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
+int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
+int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
+
+void dpaa_eth_sysfs_remove(struct device *dev);
+void dpaa_eth_sysfs_init(struct device *dev);
+int dpaa_eth_poll(struct napi_struct *napi, int budget);
+
+void dpa_private_napi_del(struct net_device *net_dev);
+
+/* Equivalent to a memset(0), but works faster */
+static inline void clear_fd(struct qm_fd *fd)
+{
+ fd->opaque_addr = 0;
+ fd->opaque = 0;
+ fd->cmd = 0;
+}
+
+static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
+ struct qman_fq *tx_fq)
+{
+ int i;
+
+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
+ if (priv->egress_fqs[i] == tx_fq)
+ return i;
+
+ return -EINVAL;
+}
+
+static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
+ struct rtnl_link_stats64 *percpu_stats,
+ struct qm_fd *fd, struct qman_fq *egress_fq,
+ struct qman_fq *conf_fq)
+{
+ int err, i;
+
+ if (fd->bpid == 0xff)
+ fd->cmd |= qman_fq_fqid(conf_fq);
+
+ /* Trace this Tx fd */
+ trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
+
+ for (i = 0; i < 100000; i++) {
+ err = qman_enqueue(egress_fq, fd, 0);
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (unlikely(err < 0)) {
+ /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
+ percpu_stats->tx_errors++;
+ percpu_stats->tx_fifo_errors++;
+ return err;
+ }
+
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += dpa_fd_length(fd);
+
+ return 0;
+}
+
+/* Use multiple WQs for FQ assignment:
+ * - Tx Confirmation queues go to WQ1.
+ * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between
+ * Rx and Tx traffic, or between Rx Default and Rx PCD frames).
+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ3).
+ * This ensures that Tx-confirmed buffers are timely released. In particular,
+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
+ * are greatly outnumbered by other FQs in the system (usually PCDs), while
+ * dequeue scheduling is round-robin.
+ */
+static inline void _dpa_assign_wq(struct dpa_fq *fq)
+{
+ switch (fq->fq_type) {
+ case FQ_TYPE_TX_CONFIRM:
+ case FQ_TYPE_TX_CONF_MQ:
+ fq->wq = 1;
+ break;
+ case FQ_TYPE_RX_DEFAULT:
+ case FQ_TYPE_TX:
+ fq->wq = 3;
+ break;
+ case FQ_TYPE_RX_ERROR:
+ case FQ_TYPE_TX_ERROR:
+ case FQ_TYPE_RX_PCD_HI_PRIO:
+ fq->wq = 2;
+ break;
+ case FQ_TYPE_RX_PCD:
+ fq->wq = 5;
+ break;
+ default:
+ WARN(1, "Invalid FQ type %d for FQID %d!\n",
+ fq->fq_type, fq->fqid);
+ }
+}
+
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+/* Use in lieu of skb_get_queue_mapping() */
+#ifdef CONFIG_FMAN_PFC
+#define dpa_get_queue_mapping(skb) \
+	(((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
+		((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
+		((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
+		dpa_num_cpus + smp_processor_id()))
+
+#else
+#define dpa_get_queue_mapping(skb) \
+ raw_smp_processor_id()
+#endif
+#else
+/* Use the queue selected by XPS */
+#define dpa_get_queue_mapping(skb) \
+ skb_get_queue_mapping(skb)
+#endif
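+
+/* Illustrative PFC mapping (assumed values): with CONFIG_FMAN_PFC_COS_COUNT
+ * = 3 and dpa_num_cpus = 4, a skb of priority 2 sent from CPU 1 selects Tx
+ * queue 2 * 4 + 1 = 9; priorities >= 3 all collapse onto the last CoS.
+ */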
+
+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
+struct ptp_priv_s {
+ struct device_node *node;
+ struct platform_device *of_dev;
+ struct mac_device *mac_dev;
+};
+extern struct ptp_priv_s ptp_priv;
+#endif
+
+static inline void _dpa_bp_free_pf(void *addr)
+{
+ put_page(virt_to_head_page(addr));
+}
+
+/* TODO: the LS1043A SoC has a HW issue regarding FMan DMA transactions; it
+ * manifests itself at high traffic rates when frames cross 4K memory
+ * boundaries. For the moment, we use a SW workaround to avoid frames larger
+ * than 4K or that cross a 4K boundary.
+ */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#define DPAA_LS1043A_DMA_4K_ISSUE 1
+#endif
+
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+#define HAS_DMA_ISSUE(start, size) \
+ (((unsigned long)(start) ^ ((unsigned long)(start) + \
+ (unsigned long)(size))) & ~0xFFF)
+
+#define BOUNDARY_4K(start, size) (((unsigned long)(start) + \
+ (unsigned long)(size)) & ~0xFFF)
+#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
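+
+/* Example: start = 0x0ffc, size = 0x10 ends at 0x100c and crosses the 4K
+ * boundary at 0x1000, so HAS_DMA_ISSUE() evaluates non-zero and the buffer
+ * needs the SW workaround.
+ */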
+
+#endif /* __DPA_H */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
@@ -0,0 +1,263 @@
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/highmem.h>
+#include <linux/sort.h>
+#include <linux/fsl_qman.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_eth_base.h"
+
+#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+uint8_t advanced_debug = -1;
+module_param(advanced_debug, byte, S_IRUGO);
+MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
+EXPORT_SYMBOL(advanced_debug);
+
+static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
+{
+ return ((struct dpa_bp *)dpa_bp0)->size -
+ ((struct dpa_bp *)dpa_bp1)->size;
+}
+
+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
+dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
+{
+ int i, lenp, na, ns, err;
+ struct device *dev;
+ struct device_node *dev_node;
+ const __be32 *bpool_cfg;
+ struct dpa_bp *dpa_bp;
+ u32 bpid;
+
+ dev = &_of_dev->dev;
+
+ *count = of_count_phandle_with_args(dev->of_node,
+ "fsl,bman-buffer-pools", NULL);
+ if (*count < 1) {
+ dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
+ if (dpa_bp == NULL) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev_node = of_find_node_by_path("/");
+ if (unlikely(dev_node == NULL)) {
+ dev_err(dev, "of_find_node_by_path(/) failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ na = of_n_addr_cells(dev_node);
+ ns = of_n_size_cells(dev_node);
+
+ for (i = 0; i < *count; i++) {
+ of_node_put(dev_node);
+
+ dev_node = of_parse_phandle(dev->of_node,
+ "fsl,bman-buffer-pools", i);
+ if (dev_node == NULL) {
+			dev_err(dev, "of_parse_phandle() failed\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
+ dev_err(dev,
+ "!of_device_is_compatible(%s, fsl,bpool)\n",
+ dev_node->full_name);
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ }
+
+ err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
+ if (err) {
+ dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ }
+ dpa_bp[i].bpid = (uint8_t)bpid;
+
+ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
+ &lenp);
+ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
+ const uint32_t *seed_pool;
+
+ dpa_bp[i].config_count =
+ (int)of_read_number(bpool_cfg, ns);
+ dpa_bp[i].size =
+ (size_t)of_read_number(bpool_cfg + ns, ns);
+ dpa_bp[i].paddr =
+ of_read_number(bpool_cfg + 2 * ns, na);
+
+ seed_pool = of_get_property(dev_node,
+ "fsl,bpool-ethernet-seeds", &lenp);
+ dpa_bp[i].seed_pool = !!seed_pool;
+
+ } else {
+ dev_err(dev,
+ "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
+ dev_node->full_name);
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ }
+ }
+
+ sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
+
+ return dpa_bp;
+
+_return_of_node_put:
+ if (dev_node)
+ of_node_put(dev_node);
+
+ return dpa_bp;
+}
+EXPORT_SYMBOL(dpa_bp_probe);
+
+int dpa_bp_shared_port_seed(struct dpa_bp *bp)
+{
+ void __iomem **ptr;
+
+ /* In MAC-less and Shared-MAC scenarios the physical
+ * address of the buffer pool in device tree is set
+ * to 0 to specify that another entity (USDPAA) will
+ * allocate and seed the buffers
+ */
+ if (!bp->paddr)
+ return 0;
+
+ /* allocate memory region for buffers */
+ devm_request_mem_region(bp->dev, bp->paddr,
+ bp->size * bp->config_count, KBUILD_MODNAME);
+ /* managed ioremap unmapping */
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -EIO;
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ bp->vaddr = ioremap_cache_ns(bp->paddr, bp->size * bp->config_count);
+#else
+ bp->vaddr = ioremap_prot(bp->paddr, bp->size * bp->config_count, 0);
+#endif
+ if (bp->vaddr == NULL) {
+ pr_err("Could not map memory for pool %d\n", bp->bpid);
+ devres_free(ptr);
+ return -EIO;
+ }
+ *ptr = bp->vaddr;
+ devres_add(bp->dev, ptr);
+
+ /* seed pool with buffers from that memory region */
+ if (bp->seed_pool) {
+ int count = bp->target_count;
+ dma_addr_t addr = bp->paddr;
+
+ while (count) {
+ struct bm_buffer bufs[8];
+ uint8_t num_bufs = 0;
+
+ do {
+ BUG_ON(addr > 0xffffffffffffull);
+ bufs[num_bufs].bpid = bp->bpid;
+ bm_buffer_set64(&bufs[num_bufs++], addr);
+ addr += bp->size;
+
+ } while (--count && (num_bufs < 8));
+
+ while (bman_release(bp->pool, bufs, num_bufs, 0))
+ cpu_relax();
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_bp_shared_port_seed);
+
+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ int i;
+
+ priv->dpa_bp = dpa_bp;
+ priv->bp_count = count;
+
+ for (i = 0; i < count; i++) {
+ int err;
+ err = dpa_bp_alloc(&dpa_bp[i]);
+ if (err < 0) {
+ dpa_bp_free(priv);
+ priv->dpa_bp = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_bp_create);
+
+static int __init __cold dpa_advanced_load(void)
+{
+ pr_info(DPA_DESCRIPTION "\n");
+
+ return 0;
+}
+module_init(dpa_advanced_load);
+
+static void __exit __cold dpa_advanced_unload(void)
+{
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(dpa_advanced_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
@@ -0,0 +1,50 @@
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_ETH_BASE_H
+#define __DPAA_ETH_BASE_H
+
+#include <linux/etherdevice.h> /* struct net_device */
+#include <linux/fsl_bman.h> /* struct bm_buffer */
+#include <linux/of_platform.h> /* struct platform_device */
+#include <linux/net_tstamp.h> /* struct hwtstamp_config */
+
+extern uint8_t advanced_debug;
+extern const struct dpa_fq_cbs_t shared_fq_cbs;
+extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
+
+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
+dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count);
+int dpa_bp_shared_port_seed(struct dpa_bp *bp);
+
+#endif /* __DPAA_ETH_BASE_H */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
@@ -0,0 +1,1719 @@
+/* Copyright 2008-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include "dpaa_eth_ceetm.h"
+
+#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
+
+const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
+ [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
+ [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
+};
+
+struct Qdisc_ops ceetm_qdisc_ops;
+
+/* Obtain the DCP and the SP ids from the FMan port */
+static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
+ unsigned int *sp_id)
+{
+ uint32_t channel;
+ t_LnxWrpFmPortDev *port_dev;
+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
+ struct mac_device *mac_dev = dpa_priv->mac_dev;
+
+ port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
+ channel = port_dev->txCh;
+
+ *sp_id = channel & CHANNEL_SP_MASK;
+ pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
+
+ if (channel < DCP0_MAX_CHANNEL) {
+ *dcp_id = qm_dc_portal_fman0;
+ pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
+ } else {
+ *dcp_id = qm_dc_portal_fman1;
+ pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
+ }
+}
+
+/* Enqueue Rejection Notification callback */
+static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ struct ceetm_class *cls;
+ struct ceetm_class_stats *cstats = NULL;
+ const struct dpa_priv_s *dpa_priv;
+ struct dpa_percpu_priv_s *dpa_percpu_priv;
+ struct sk_buff *skb;
+ struct qm_fd fd = msg->ern.fd;
+
+ net_dev = ((struct ceetm_fq *)fq)->net_dev;
+ dpa_priv = netdev_priv(net_dev);
+ dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
+
+ /* Increment DPA counters */
+ dpa_percpu_priv->stats.tx_dropped++;
+ dpa_percpu_priv->stats.tx_fifo_errors++;
+
+ /* Increment CEETM counters */
+ cls = ((struct ceetm_fq *)fq)->ceetm_cls;
+ switch (cls->type) {
+ case CEETM_PRIO:
+ cstats = this_cpu_ptr(cls->prio.cstats);
+ break;
+ case CEETM_WBFS:
+ cstats = this_cpu_ptr(cls->wbfs.cstats);
+ break;
+ }
+
+ if (cstats)
+ cstats->ern_drop_count++;
+
+ if (fd.bpid != 0xff) {
+ dpa_fd_release(net_dev, &fd);
+ return;
+ }
+
+ skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
+ dev_kfree_skb_any(skb);
+}
+
+/* Congestion State Change Notification callback */
+static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
+{
+ struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
+ struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
+ struct ceetm_class *cls = ceetm_fq->ceetm_cls;
+ struct ceetm_class_stats *cstats = NULL;
+
+ switch (cls->type) {
+ case CEETM_PRIO:
+ cstats = this_cpu_ptr(cls->prio.cstats);
+ break;
+ case CEETM_WBFS:
+ cstats = this_cpu_ptr(cls->wbfs.cstats);
+ break;
+ }
+
+ if (congested) {
+ dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
+ netif_tx_stop_all_queues(dpa_priv->net_dev);
+ dpa_priv->cgr_data.cgr_congested_count++;
+ if (cstats)
+ cstats->cgr_congested_count++;
+ } else {
+ dpa_priv->cgr_data.congested_jiffies +=
+ (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
+ netif_tx_wake_all_queues(dpa_priv->net_dev);
+ }
+}
+
+/* Allocate a ceetm fq */
+static int ceetm_alloc_fq(struct ceetm_fq **fq,
+ struct net_device *dev,
+ struct ceetm_class *cls)
+{
+ *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
+ if (!*fq)
+ return -ENOMEM;
+
+ (*fq)->net_dev = dev;
+ (*fq)->ceetm_cls = cls;
+ return 0;
+}
+
+/* Configure a ceetm Class Congestion Group */
+static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
+ struct qm_ceetm_channel *channel,
+ unsigned int id,
+ struct ceetm_fq *fq,
+ u32 if_support)
+{
+ int err;
+ u32 cs_th;
+ u16 ccg_mask;
+ struct qm_ceetm_ccg_params ccg_params;
+
+ err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
+ if (err)
+ return err;
+
+ /* Configure the count mode (frames/bytes), enable
+ * notifications, enable tail-drop, and configure the tail-drop
+ * mode and threshold */
+ ccg_mask = QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN |
+ QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
+ QM_CCGR_WE_TD_THRES;
+
+ ccg_params.mode = 0; /* count bytes */
+ ccg_params.cscn_en = 1; /* generate notifications */
+ ccg_params.td_en = 1; /* enable tail-drop */
+ ccg_params.td_mode = 1; /* tail-drop on threshold */
+
+ /* Configure the tail-drop threshold according to the link
+ * speed */
+ if (if_support & SUPPORTED_10000baseT_Full)
+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
+ else
+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
+ qm_cgr_cs_thres_set64(&ccg_params.td_thres, cs_th, 1);
+
+ err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Configure a ceetm Logical Frame Queue */
+static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
+ struct qm_ceetm_lfq **lfq)
+{
+ int err;
+ u64 context_a;
+ u32 context_b;
+
+ err = qman_ceetm_lfq_claim(lfq, cq);
+ if (err)
+ return err;
+
+ /* Get the former contexts in order to preserve context B */
+ err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
+ if (err)
+ return err;
+
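+	/* Override context A with the CEETM-specific settings
+	 * (A2V/A0V/OVOM/EBD; see CEETM_CONTEXT_A in dpaa_eth_ceetm.h) while
+	 * keeping the claimed LFQ's context B intact.
+	 */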
+ context_a = CEETM_CONTEXT_A;
+ err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
+ if (err)
+ return err;
+
+ (*lfq)->ern = ceetm_ern;
+
+ err = qman_ceetm_create_fq(*lfq, &fq->fq);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Configure a prio ceetm class */
+static int ceetm_config_prio_cls(struct ceetm_class *cls, struct net_device *dev,
+ struct qm_ceetm_channel *channel, unsigned int id)
+{
+ int err;
+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
+
+ err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
+ if (err)
+ return err;
+
+ /* Claim and configure the CCG */
+ err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
+ dpa_priv->mac_dev->if_support);
+ if (err)
+ return err;
+
+ /* Claim and configure the CQ */
+ err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
+ if (err)
+ return err;
+
+ if (cls->shaped) {
+ err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
+ if (err)
+ return err;
+
+ err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
+ if (err)
+ return err;
+ }
+
+ /* Claim and configure a LFQ */
+ err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Configure a wbfs ceetm class */
+static int ceetm_config_wbfs_cls(struct ceetm_class *cls, struct net_device *dev,
+ struct qm_ceetm_channel *channel, unsigned int id, int type)
+{
+ int err;
+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
+
+ err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
+ if (err)
+ return err;
+
+ /* Claim and configure the CCG */
+ err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
+ dpa_priv->mac_dev->if_support);
+ if (err)
+ return err;
+
+ /* Claim and configure the CQ */
+ if (type == WBFS_GRP_B)
+ err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
+ cls->wbfs.ccg);
+ else
+ err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
+ cls->wbfs.ccg);
+ if (err)
+ return err;
+
+	/* Configure the CQ weight: the real-number weight is multiplied by
+	 * 100 to get rid of the fraction */
+ err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
+ cls->wbfs.weight * 100);
+ if (err)
+ return err;
+
+ /* Claim and configure a LFQ */
+ err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Find class in qdisc hash table using given handle */
+static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
+{
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct Qdisc_class_common *clc;
+
+ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
+ __func__, handle, sch->handle);
+
+ clc = qdisc_class_find(&priv->clhash, handle);
+ return clc ? container_of(clc, struct ceetm_class, common) : NULL;
+}
+
+/* Insert a class in the qdisc's class hash */
+static void ceetm_link_class(struct Qdisc *sch,
+ struct Qdisc_class_hash *clhash,
+ struct Qdisc_class_common *common)
+{
+ sch_tree_lock(sch);
+ qdisc_class_hash_insert(clhash, common);
+ sch_tree_unlock(sch);
+ qdisc_class_hash_grow(sch, clhash);
+}
+
+/* Destroy a ceetm class */
+static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
+{
+ if (!cl)
+ return;
+
+ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
+ __func__, cl->common.classid, sch->handle);
+
+ switch (cl->type) {
+ case CEETM_ROOT:
+ if (cl->root.child) {
+ qdisc_destroy(cl->root.child);
+ cl->root.child = NULL;
+ }
+
+ if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the channel %d\n",
+ __func__, cl->root.ch->idx);
+
+ break;
+
+ case CEETM_PRIO:
+ if (cl->prio.child) {
+ qdisc_destroy(cl->prio.child);
+ cl->prio.child = NULL;
+ }
+
+ if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the LFQ %d\n",
+ __func__, cl->prio.lfq->idx);
+
+ if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the CQ %d\n",
+ __func__, cl->prio.cq->idx);
+
+ if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the CCG %d\n",
+ __func__, cl->prio.ccg->idx);
+
+		kfree(cl->prio.fq);
+		free_percpu(cl->prio.cstats);
+
+ break;
+
+ case CEETM_WBFS:
+ if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the LFQ %d\n",
+ __func__, cl->wbfs.lfq->idx);
+
+ if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the CQ %d\n",
+ __func__, cl->wbfs.cq->idx);
+
+ if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the CCG %d\n",
+ __func__, cl->wbfs.ccg->idx);
+
+		kfree(cl->wbfs.fq);
+		free_percpu(cl->wbfs.cstats);
+ }
+
+ tcf_destroy_chain(&cl->filter_list);
+ kfree(cl);
+}
+
+/* Destroy a ceetm qdisc */
+static void ceetm_destroy(struct Qdisc *sch)
+{
+ unsigned int ntx, i;
+ struct hlist_node *next;
+ struct ceetm_class *cl;
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+
+ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
+ __func__, sch->handle);
+
+ /* All filters need to be removed before destroying the classes */
+ tcf_destroy_chain(&priv->filter_list);
+
+ for (i = 0; i < priv->clhash.hashsize; i++) {
+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
+ tcf_destroy_chain(&cl->filter_list);
+ }
+
+ for (i = 0; i < priv->clhash.hashsize; i++) {
+ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
+ common.hnode)
+ ceetm_cls_destroy(sch, cl);
+ }
+
+ qdisc_class_hash_destroy(&priv->clhash);
+
+ switch (priv->type) {
+ case CEETM_ROOT:
+ dpa_disable_ceetm(dev);
+
+ if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the LNI %d\n",
+ __func__, priv->root.lni->idx);
+
+ if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the SP %d\n",
+ __func__, priv->root.sp->idx);
+
+		free_percpu(priv->root.qstats);
+
+ if (!priv->root.qdiscs)
+ break;
+
+ /* Remove the pfifo qdiscs */
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
+ if (priv->root.qdiscs[ntx])
+ qdisc_destroy(priv->root.qdiscs[ntx]);
+
+ kfree(priv->root.qdiscs);
+ break;
+
+ case CEETM_PRIO:
+ if (priv->prio.parent)
+ priv->prio.parent->root.child = NULL;
+ break;
+
+ case CEETM_WBFS:
+ if (priv->wbfs.parent)
+ priv->wbfs.parent->prio.child = NULL;
+ break;
+ }
+}
+
+static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct Qdisc *qdisc;
+ unsigned int ntx, i;
+ struct nlattr *nest;
+ struct tc_ceetm_qopt qopt;
+ struct ceetm_qdisc_stats *qstats;
+ struct net_device *dev = qdisc_dev(sch);
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ sch_tree_lock(sch);
+ memset(&qopt, 0, sizeof(qopt));
+ qopt.type = priv->type;
+ qopt.shaped = priv->shaped;
+
+ switch (priv->type) {
+ case CEETM_ROOT:
+ /* Gather statistics from the underlying pfifo qdiscs */
+ sch->q.qlen = 0;
+ memset(&sch->bstats, 0, sizeof(sch->bstats));
+ memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+ sch->q.qlen += qdisc->q.qlen;
+ sch->bstats.bytes += qdisc->bstats.bytes;
+ sch->bstats.packets += qdisc->bstats.packets;
+ sch->qstats.qlen += qdisc->qstats.qlen;
+ sch->qstats.backlog += qdisc->qstats.backlog;
+ sch->qstats.drops += qdisc->qstats.drops;
+ sch->qstats.requeues += qdisc->qstats.requeues;
+ sch->qstats.overlimits += qdisc->qstats.overlimits;
+ }
+
+ for_each_online_cpu(i) {
+ qstats = per_cpu_ptr(priv->root.qstats, i);
+ sch->qstats.drops += qstats->drops;
+ }
+
+ qopt.rate = priv->root.rate;
+ qopt.ceil = priv->root.ceil;
+ qopt.overhead = priv->root.overhead;
+ break;
+
+ case CEETM_PRIO:
+ qopt.qcount = priv->prio.qcount;
+ break;
+
+ case CEETM_WBFS:
+ qopt.qcount = priv->wbfs.qcount;
+ qopt.cr = priv->wbfs.cr;
+ qopt.er = priv->wbfs.er;
+ break;
+
+ default:
+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
+ sch_tree_unlock(sch);
+ return -EINVAL;
+ }
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+ if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
+ goto nla_put_failure;
+ nla_nest_end(skb, nest);
+
+ sch_tree_unlock(sch);
+ return skb->len;
+
+nla_put_failure:
+ sch_tree_unlock(sch);
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+/* Configure a root ceetm qdisc */
+static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
+ struct tc_ceetm_qopt *qopt)
+{
+ struct netdev_queue *dev_queue;
+ struct Qdisc *qdisc;
+ enum qm_dc_portal dcp_id;
+ unsigned int i, sp_id;
+ int err;
+ u64 bps;
+ struct qm_ceetm_sp *sp;
+ struct qm_ceetm_lni *lni;
+ struct net_device *dev = qdisc_dev(sch);
+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
+ struct mac_device *mac_dev = dpa_priv->mac_dev;
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ /* Validate inputs */
+ if (sch->parent != TC_H_ROOT) {
+ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
+ tcf_destroy_chain(&priv->filter_list);
+ qdisc_class_hash_destroy(&priv->clhash);
+ return -EINVAL;
+ }
+
+ if (!mac_dev) {
+ pr_err("CEETM: the interface is lacking a mac\n");
+ err = -EINVAL;
+ goto err_init_root;
+ }
+
+ /* pre-allocate underlying pfifo qdiscs */
+ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
+ sizeof(priv->root.qdiscs[0]),
+ GFP_KERNEL);
+ if (priv->root.qdiscs == NULL) {
+ err = -ENOMEM;
+ goto err_init_root;
+ }
+
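+	/* Attach one pfifo per Tx queue; their minors start at
+	 * PFIFO_MIN_OFFSET to stay clear of the minors reserved for CEETM
+	 * root classes.
+	 */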
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ dev_queue = netdev_get_tx_queue(dev, i);
+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(i + PFIFO_MIN_OFFSET)));
+ if (qdisc == NULL) {
+ err = -ENOMEM;
+ goto err_init_root;
+ }
+
+ priv->root.qdiscs[i] = qdisc;
+ qdisc->flags |= TCQ_F_ONETXQUEUE;
+ }
+
+ sch->flags |= TCQ_F_MQROOT;
+
+ priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
+ if (!priv->root.qstats) {
+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto err_init_root;
+ }
+
+ priv->shaped = qopt->shaped;
+ priv->root.rate = qopt->rate;
+ priv->root.ceil = qopt->ceil;
+ priv->root.overhead = qopt->overhead;
+
+ /* Claim the SP */
+ get_dcp_and_sp(dev, &dcp_id, &sp_id);
+ err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
+ __func__);
+ goto err_init_root;
+ }
+
+ priv->root.sp = sp;
+
+ /* Claim the LNI - will use the same id as the SP id since SPs 0-7
+ * are connected to the TX FMan ports */
+ err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
+ __func__);
+ goto err_init_root;
+ }
+
+ priv->root.lni = lni;
+
+ err = qman_ceetm_sp_set_lni(sp, lni);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to link the SP and "
+ "LNI\n", __func__);
+ goto err_init_root;
+ }
+
+ lni->sp = sp;
+
+ /* Configure the LNI shaper */
+ if (priv->shaped) {
+ err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure "
+ "the LNI shaper\n", __func__);
+ goto err_init_root;
+ }
+
+ bps = priv->root.rate << 3; /* Bps -> bps */
+ err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure "
+ "the LNI shaper\n", __func__);
+ goto err_init_root;
+ }
+
+ bps = priv->root.ceil << 3; /* Bps -> bps */
+ err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure "
+ "the LNI shaper\n", __func__);
+ goto err_init_root;
+ }
+ }
+
+ /* TODO default configuration */
+
+ dpa_enable_ceetm(dev);
+ return 0;
+
+err_init_root:
+ ceetm_destroy(sch);
+ return err;
+}
+
+/* Configure a prio ceetm qdisc */
+static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
+ struct tc_ceetm_qopt *qopt)
+{
+ int err;
+ unsigned int i;
+ struct ceetm_class *parent_cl, *child_cl;
+ struct Qdisc *parent_qdisc;
+ struct net_device *dev = qdisc_dev(sch);
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ if (sch->parent == TC_H_ROOT) {
+ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
+ err = -EINVAL;
+ goto err_init_prio;
+ }
+
+	parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
+	if (!parent_qdisc ||
+	    strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
+ pr_err("CEETM: a ceetm qdisc can not be attached to other "
+ "qdisc/class types\n");
+ err = -EINVAL;
+ goto err_init_prio;
+ }
+
+ /* Obtain the parent root ceetm_class */
+ parent_cl = ceetm_find(sch->parent, parent_qdisc);
+
+ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
+		pr_err("CEETM: a prio ceetm qdisc can be added only under a "
+			"root ceetm class\n");
+ err = -EINVAL;
+ goto err_init_prio;
+ }
+
+ priv->prio.parent = parent_cl;
+ parent_cl->root.child = sch;
+
+ priv->shaped = parent_cl->shaped;
+ priv->prio.qcount = qopt->qcount;
+
+ /* Create and configure qcount child classes */
+ for (i = 0; i < priv->prio.qcount; i++) {
+ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
+ if (!child_cl) {
+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto err_init_prio;
+ }
+
+ child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
+ if (!child_cl->prio.cstats) {
+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto err_init_prio_cls;
+ }
+
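+		/* Class minors are 1-based, while the hardware CQ ids passed
+		 * to ceetm_config_prio_cls() below are 0-based.
+		 */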
+ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
+ child_cl->refcnt = 1;
+ child_cl->parent = sch;
+ child_cl->type = CEETM_PRIO;
+ child_cl->shaped = priv->shaped;
+ child_cl->prio.child = NULL;
+
+ /* All shaped CQs have CR and ER enabled by default */
+ child_cl->prio.cr = child_cl->shaped;
+ child_cl->prio.er = child_cl->shaped;
+ child_cl->prio.fq = NULL;
+ child_cl->prio.cq = NULL;
+
+ /* Configure the corresponding hardware CQ */
+ err = ceetm_config_prio_cls(child_cl, dev,
+ parent_cl->root.ch, i);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure "
+ "the ceetm prio class %X\n",
+ __func__,
+ child_cl->common.classid);
+ goto err_init_prio_cls;
+ }
+
+ /* Add class handle in Qdisc */
+ ceetm_link_class(sch, &priv->clhash, &child_cl->common);
+ pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X "
+ "associated with CQ %d and CCG %d\n",
+ __func__,
+ child_cl->common.classid,
+ child_cl->prio.cq->idx,
+ child_cl->prio.ccg->idx);
+ }
+
+ return 0;
+
+err_init_prio_cls:
+ ceetm_cls_destroy(sch, child_cl);
+err_init_prio:
+ ceetm_destroy(sch);
+ return err;
+}
+
+/* Configure a wbfs ceetm qdisc */
+static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
+ struct tc_ceetm_qopt *qopt)
+{
+ int err, group_b, small_group;
+ unsigned int i, id, prio_a, prio_b;
+ struct ceetm_class *parent_cl, *child_cl, *root_cl;
+ struct Qdisc *parent_qdisc;
+ struct ceetm_qdisc *parent_priv;
+ struct qm_ceetm_channel *channel;
+ struct net_device *dev = qdisc_dev(sch);
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ /* Validate inputs */
+ if (sch->parent == TC_H_ROOT) {
+		pr_err("CEETM: a wbfs ceetm qdisc can not be root\n");
+ err = -EINVAL;
+ goto err_init_wbfs;
+ }
+
+ /* Obtain the parent prio ceetm qdisc */
+	parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
+	if (!parent_qdisc ||
+	    strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
+ pr_err("CEETM: a ceetm qdisc can not be attached to other "
+ "qdisc/class types\n");
+ err = -EINVAL;
+ goto err_init_wbfs;
+ }
+
+ /* Obtain the parent prio ceetm class */
+ parent_cl = ceetm_find(sch->parent, parent_qdisc);
+ parent_priv = qdisc_priv(parent_qdisc);
+
+ if (!parent_cl || parent_cl->type != CEETM_PRIO) {
+		pr_err("CEETM: a wbfs ceetm qdisc can be added only under a "
+			"prio ceetm class\n");
+ err = -EINVAL;
+ goto err_init_wbfs;
+ }
+
+ priv->shaped = parent_cl->shaped;
+
+ if (!priv->shaped && (qopt->cr || qopt->er)) {
+ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs "
+ "ceetm qdiscs\n");
+ err = -EINVAL;
+ goto err_init_wbfs;
+ }
+
+ if (priv->shaped && !(qopt->cr || qopt->er)) {
+ pr_err("CEETM: either CR or ER must be enabled for shaped "
+ "wbfs ceetm qdiscs\n");
+ err = -EINVAL;
+ goto err_init_wbfs;
+ }
+
+ /* Obtain the parent root ceetm class */
+ root_cl = parent_priv->prio.parent;
+ if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b)
+ || root_cl->root.wbfs_grp_large) {
+ pr_err("CEETM: no more wbfs classes are available\n");
+ err = -EINVAL;
+ goto err_init_wbfs;
+ }
+
+ if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b)
+ && qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
+ pr_err("CEETM: only %d wbfs classes are available\n",
+ CEETM_MIN_WBFS_QCOUNT);
+ err = -EINVAL;
+ goto err_init_wbfs;
+ }
+
+ priv->wbfs.parent = parent_cl;
+ parent_cl->prio.child = sch;
+
+ priv->wbfs.qcount = qopt->qcount;
+ priv->wbfs.cr = qopt->cr;
+ priv->wbfs.er = qopt->er;
+
+ channel = root_cl->root.ch;
+
+ /* Configure the hardware wbfs channel groups */
+ if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
+ /* Configure the large group A */
+ priv->wbfs.group_type = WBFS_GRP_LARGE;
+ small_group = false;
+ group_b = false;
+ prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
+ prio_b = prio_a;
+
+ } else if (root_cl->root.wbfs_grp_a) {
+ /* Configure the group B */
+ priv->wbfs.group_type = WBFS_GRP_B;
+
+ err = qman_ceetm_channel_get_group(channel, &small_group,
+ &prio_a, &prio_b);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to get group "
+ "details\n", __func__);
+ goto err_init_wbfs;
+ }
+
+ small_group = true;
+ group_b = true;
+ prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
+		/* If group A's priority isn't set yet, mirror group B's */
+ prio_a = prio_a ? : prio_b;
+
+ } else {
+ /* Configure the small group A */
+ priv->wbfs.group_type = WBFS_GRP_A;
+
+ err = qman_ceetm_channel_get_group(channel, &small_group,
+ &prio_a, &prio_b);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to get group "
+ "details\n", __func__);
+ goto err_init_wbfs;
+ }
+
+ small_group = true;
+ group_b = false;
+ prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
+		/* If group B's priority isn't set yet, mirror group A's */
+ prio_b = prio_b ? : prio_a;
+ }
+
+ err = qman_ceetm_channel_set_group(channel, small_group, prio_a, prio_b);
+ if (err)
+ goto err_init_wbfs;
+
+ if (priv->shaped) {
+ err = qman_ceetm_channel_set_group_cr_eligibility(channel,
+ group_b,
+ priv->wbfs.cr);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to set group "
+ "CR eligibility\n", __func__);
+ goto err_init_wbfs;
+ }
+
+ err = qman_ceetm_channel_set_group_er_eligibility(channel,
+ group_b,
+ priv->wbfs.er);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to set group "
+ "ER eligibility\n", __func__);
+ goto err_init_wbfs;
+ }
+ }
+
+ /* Create qcount child classes */
+ for (i = 0; i < priv->wbfs.qcount; i++) {
+ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
+ if (!child_cl) {
+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto err_init_wbfs;
+ }
+
+ child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
+ if (!child_cl->wbfs.cstats) {
+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto err_init_wbfs_cls;
+ }
+
+ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
+ child_cl->refcnt = 1;
+ child_cl->parent = sch;
+ child_cl->type = CEETM_WBFS;
+ child_cl->shaped = priv->shaped;
+ child_cl->wbfs.fq = NULL;
+ child_cl->wbfs.cq = NULL;
+ child_cl->wbfs.weight = qopt->qweight[i];
+
+ if (priv->wbfs.group_type == WBFS_GRP_B)
+ id = WBFS_GRP_B_OFFSET + i;
+ else
+ id = WBFS_GRP_A_OFFSET + i;
+
+ err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
+ priv->wbfs.group_type);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure "
+ "the ceetm wbfs class %X\n",
+ __func__,
+ child_cl->common.classid);
+ goto err_init_wbfs_cls;
+ }
+
+ /* Add class handle in Qdisc */
+ ceetm_link_class(sch, &priv->clhash, &child_cl->common);
+ pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X "
+ "associated with CQ %d and CCG %d\n",
+ __func__,
+ child_cl->common.classid,
+ child_cl->wbfs.cq->idx,
+ child_cl->wbfs.ccg->idx);
+ }
+
+ /* Signal the root class that a group has been configured */
+ switch (priv->wbfs.group_type) {
+ case WBFS_GRP_LARGE:
+ root_cl->root.wbfs_grp_large = true;
+ break;
+ case WBFS_GRP_A:
+ root_cl->root.wbfs_grp_a = true;
+ break;
+ case WBFS_GRP_B:
+ root_cl->root.wbfs_grp_b = true;
+ break;
+ }
+
+ return 0;
+
+err_init_wbfs_cls:
+ ceetm_cls_destroy(sch, child_cl);
+err_init_wbfs:
+ ceetm_destroy(sch);
+ return err;
+}
+
+/* Configure a generic ceetm qdisc */
+static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct tc_ceetm_qopt *qopt;
+ struct nlattr *tb[TCA_CEETM_QOPS + 1];
+ int ret;
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ if (!netif_is_multiqueue(dev))
+ return -EOPNOTSUPP;
+
+ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
+ if (ret < 0) {
+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
+ return ret;
+ }
+
+ if (tb[TCA_CEETM_QOPS] == NULL) {
+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
+ return -EINVAL;
+ }
+
+ if (TC_H_MIN(sch->handle)) {
+ pr_err("CEETM: a qdisc should not have a minor\n");
+ return -EINVAL;
+ }
+
+ qopt = nla_data(tb[TCA_CEETM_QOPS]);
+
+ /* Initialize the class hash list. Each qdisc has its own class hash */
+ ret = qdisc_class_hash_init(&priv->clhash);
+ if (ret < 0) {
+ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init "
+ "failed\n", __func__);
+ return ret;
+ }
+
+ priv->type = qopt->type;
+
+ switch (priv->type) {
+ case CEETM_ROOT:
+ ret = ceetm_init_root(sch, priv, qopt);
+ break;
+ case CEETM_PRIO:
+ ret = ceetm_init_prio(sch, priv, qopt);
+ break;
+ case CEETM_WBFS:
+ ret = ceetm_init_wbfs(sch, priv, qopt);
+ break;
+ default:
+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
+ ceetm_destroy(sch);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Attach the underlying pfifo qdiscs */
+static void ceetm_attach(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct Qdisc *qdisc, *old_qdisc;
+ unsigned int i;
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ qdisc = priv->root.qdiscs[i];
+ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (old_qdisc)
+ qdisc_destroy(old_qdisc);
+ }
+}
+
+static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
+{
+ struct ceetm_class *cl;
+ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
+ __func__, classid, sch->handle);
+ cl = ceetm_find(classid, sch);
+
+ if (cl)
+ cl->refcnt++; /* Will decrement in put() */
+ return (unsigned long)cl;
+}
+
+static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
+{
+ struct ceetm_class *cl = (struct ceetm_class *)arg;
+ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
+ __func__, cl->common.classid, sch->handle);
+ cl->refcnt--;
+
+ if (cl->refcnt == 0)
+ ceetm_cls_destroy(sch, cl);
+}
+
+/* Add a ceetm root class or configure a ceetm prio class */
+static int ceetm_cls_change(struct Qdisc *sch, u32 classid,
+ u32 parentid, struct nlattr **tca,
+ unsigned long *arg)
+{
+ int err;
+ u64 bps;
+ struct ceetm_qdisc *priv;
+ struct ceetm_class *cl = (struct ceetm_class *)*arg;
+ struct nlattr *opt = tca[TCA_OPTIONS];
+ struct nlattr *tb[__TCA_CEETM_MAX];
+ struct tc_ceetm_copt *copt;
+ struct qm_ceetm_channel *channel;
+ struct net_device *dev = qdisc_dev(sch);
+
+ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
+ __func__, classid, sch->handle);
+
+ if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
+ pr_err("CEETM: a ceetm class can not be attached to other "
+ "qdisc/class types\n");
+ return -EINVAL;
+ }
+
+ priv = qdisc_priv(sch);
+
+ if (!opt) {
+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!cl && sch->handle != parentid) {
+ pr_err("CEETM: classes can be attached to the root ceetm "
+ "qdisc only\n");
+ return -EINVAL;
+ }
+
+ if (!cl && priv->type != CEETM_ROOT) {
+ pr_err("CEETM: only root ceetm classes can be attached to the "
+ "root ceetm qdisc\n");
+ return -EINVAL;
+ }
+
+ err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy);
+ if (err < 0) {
+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
+ return -EINVAL;
+ }
+
+ if (tb[TCA_CEETM_COPT] == NULL) {
+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
+ return -EINVAL;
+ }
+
+ if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
+ pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm "
+ "root classes\n");
+ return -EINVAL;
+ }
+
+ copt = nla_data(tb[TCA_CEETM_COPT]);
+
+ /* Configure an existing ceetm prio class */
+ if (cl) {
+ if (copt->type != CEETM_PRIO) {
+ pr_err("CEETM: only prio ceetm classes can be changed\n");
+ return -EINVAL;
+ }
+
+ if (!cl->shaped && (copt->cr || copt->er)) {
+ pr_err("CEETM: only shaped classes can have CR and "
+ "ER enabled\n");
+ return -EINVAL;
+ }
+
+ if (cl->prio.cr != (bool)copt->cr)
+ err = qman_ceetm_channel_set_cq_cr_eligibility(
+ cl->prio.cq->parent,
+ cl->prio.cq->idx,
+ copt->cr);
+
+ if (!err && cl->prio.er != (bool)copt->er)
+ err = qman_ceetm_channel_set_cq_er_eligibility(
+ cl->prio.cq->parent,
+ cl->prio.cq->idx,
+ copt->er);
+
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure "
+ "the ceetm prio class %X\n",
+ __func__,
+ cl->common.classid);
+ return err;
+ }
+
+ cl->prio.cr = copt->cr;
+ cl->prio.er = copt->er;
+ return 0;
+ }
+
+ /* Add a new root ceetm class */
+ if (copt->type != CEETM_ROOT) {
+ pr_err("CEETM: only root ceetm classes can be attached to the "
+ "root ceetm qdisc\n");
+ return -EINVAL;
+ }
+
+ if (copt->shaped && !priv->shaped) {
+ pr_err("CEETM: can not add a shaped ceetm root class under an "
+ "unshaped ceetm root qdisc\n");
+ return -EINVAL;
+ }
+
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl) {
+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ cl->type = copt->type;
+ cl->shaped = copt->shaped;
+ cl->root.rate = copt->rate;
+ cl->root.ceil = copt->ceil;
+ cl->root.tbl = copt->tbl;
+
+ cl->common.classid = classid;
+ cl->refcnt = 1;
+ cl->parent = sch;
+ cl->root.child = NULL;
+ cl->root.wbfs_grp_a = false;
+ cl->root.wbfs_grp_b = false;
+ cl->root.wbfs_grp_large = false;
+
+ /* Claim a CEETM channel */
+ err = qman_ceetm_channel_claim(&channel, priv->root.lni);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
+ __func__);
+ goto claim_err;
+ }
+
+ cl->root.ch = channel;
+
+ if (cl->shaped) {
+ /* Configure the channel shaper */
+ err = qman_ceetm_channel_enable_shaper(channel, 1);
+ if (err)
+ goto channel_err;
+
+ bps = cl->root.rate << 3; /* Bps -> bps */
+ err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
+ dev->mtu);
+ if (err)
+ goto channel_err;
+
+ bps = cl->root.ceil << 3; /* Bps -> bps */
+ err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
+ dev->mtu);
+ if (err)
+ goto channel_err;
+
+ } else {
+ /* Configure the uFQ algorithm */
+ err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
+ if (err)
+ goto channel_err;
+ }
+
+ /* Add class handle in Qdisc */
+ ceetm_link_class(sch, &priv->clhash, &cl->common);
+
+ pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with "
+ "channel %d\n", __func__, classid, channel->idx);
+ *arg = (unsigned long)cl;
+ return 0;
+
+channel_err:
+ pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
+ __func__, channel->idx);
+ if (qman_ceetm_channel_release(channel))
+ pr_err(KBUILD_BASENAME " : %s : failed to release the channel "
+ "%d\n", __func__, channel->idx);
+claim_err:
+	kfree(cl);
+ return err;
+}
+
+static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct ceetm_class *cl;
+ unsigned int i;
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ if (arg->stop)
+ return;
+
+ for (i = 0; i < priv->clhash.hashsize; i++) {
+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
+ if (arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
+ arg->stop = 1;
+ return;
+ }
+ arg->count++;
+ }
+ }
+}
+
+static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct ceetm_class *cl = (struct ceetm_class *)arg;
+ struct nlattr *nest;
+ struct tc_ceetm_copt copt;
+
+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+ __func__, cl->common.classid, sch->handle);
+
+ sch_tree_lock(sch);
+
+ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
+ tcm->tcm_handle = cl->common.classid;
+
+ memset(&copt, 0, sizeof(copt));
+
+ copt.shaped = cl->shaped;
+ copt.type = cl->type;
+
+ switch (cl->type) {
+ case CEETM_ROOT:
+ if (cl->root.child)
+ tcm->tcm_info = cl->root.child->handle;
+
+ copt.rate = cl->root.rate;
+ copt.ceil = cl->root.ceil;
+ copt.tbl = cl->root.tbl;
+ break;
+
+ case CEETM_PRIO:
+ if (cl->prio.child)
+ tcm->tcm_info = cl->prio.child->handle;
+
+ copt.cr = cl->prio.cr;
+ copt.er = cl->prio.er;
+ break;
+
+ case CEETM_WBFS:
+ copt.weight = cl->wbfs.weight;
+ break;
+ }
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+ if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
+ goto nla_put_failure;
+ nla_nest_end(skb, nest);
+ sch_tree_unlock(sch);
+ return skb->len;
+
+nla_put_failure:
+ sch_tree_unlock(sch);
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
+{
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct ceetm_class *cl = (struct ceetm_class *)arg;
+
+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+ __func__, cl->common.classid, sch->handle);
+
+ sch_tree_lock(sch);
+ qdisc_class_hash_remove(&priv->clhash, &cl->common);
+ cl->refcnt--;
+
+	/* The refcnt should be at least 1 since we have incremented it in
+	 * get(). Will decrement again in put(), where we will call destroy()
+	 * to actually free the memory if it reaches 0.
+	 */
+ BUG_ON(cl->refcnt == 0);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+/* Get the class' child qdisc, if any */
+static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ struct ceetm_class *cl = (struct ceetm_class *)arg;
+
+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+ __func__, cl->common.classid, sch->handle);
+
+ switch (cl->type) {
+	case CEETM_ROOT:
+		return cl->root.child;
+
+	case CEETM_PRIO:
+		return cl->prio.child;
+	}
+
+ return NULL;
+}
+
+static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
+ struct Qdisc *new, struct Qdisc **old)
+{
+ if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
+ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm "
+ "classes\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
+ struct gnet_dump *d)
+{
+ unsigned int i;
+ struct ceetm_class *cl = (struct ceetm_class *)arg;
+ struct gnet_stats_basic_packed tmp_bstats;
+ struct ceetm_class_stats *cstats = NULL;
+ struct qm_ceetm_cq *cq = NULL;
+ struct tc_ceetm_xstats xstats;
+
+ memset(&xstats, 0, sizeof(xstats));
+ memset(&tmp_bstats, 0, sizeof(tmp_bstats));
+
+ switch (cl->type) {
+ case CEETM_ROOT:
+ return 0;
+ case CEETM_PRIO:
+ cq = cl->prio.cq;
+ break;
+ case CEETM_WBFS:
+ cq = cl->wbfs.cq;
+ break;
+ }
+
+ for_each_online_cpu(i) {
+ switch (cl->type) {
+ case CEETM_PRIO:
+ cstats = per_cpu_ptr(cl->prio.cstats, i);
+ break;
+ case CEETM_WBFS:
+ cstats = per_cpu_ptr(cl->wbfs.cstats, i);
+ break;
+ }
+
+ if (cstats) {
+ xstats.ern_drop_count += cstats->ern_drop_count;
+ xstats.cgr_congested_count += cstats->cgr_congested_count;
+ tmp_bstats.bytes += cstats->bstats.bytes;
+ tmp_bstats.packets += cstats->bstats.packets;
+ }
+ }
+
+ if (gnet_stats_copy_basic(d, NULL, &tmp_bstats) < 0)
+ return -1;
+
+ if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
+ &xstats.frame_count, &xstats.byte_count))
+ return -1;
+
+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
+static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg)
+{
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct ceetm_class *cl = (struct ceetm_class *)arg;
+ struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list;
+
+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+ cl ? cl->common.classid : 0, sch->handle);
+ return fl;
+}
+
+static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ struct ceetm_class *cl = ceetm_find(classid, sch);
+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+ cl ? cl->common.classid : 0, sch->handle);
+ return (unsigned long)cl;
+}
+
+static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
+{
+ struct ceetm_class *cl = (struct ceetm_class *)arg;
+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+ cl ? cl->common.classid : 0, sch->handle);
+}
+
+const struct Qdisc_class_ops ceetm_cls_ops = {
+ .graft = ceetm_cls_graft,
+ .leaf = ceetm_cls_leaf,
+ .get = ceetm_cls_get,
+ .put = ceetm_cls_put,
+ .change = ceetm_cls_change,
+ .delete = ceetm_cls_delete,
+ .walk = ceetm_cls_walk,
+ .tcf_chain = ceetm_tcf_chain,
+ .bind_tcf = ceetm_tcf_bind,
+ .unbind_tcf = ceetm_tcf_unbind,
+ .dump = ceetm_cls_dump,
+ .dump_stats = ceetm_cls_dump_stats,
+};
+
+struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
+ .id = "ceetm",
+ .priv_size = sizeof(struct ceetm_qdisc),
+ .cl_ops = &ceetm_cls_ops,
+ .init = ceetm_init,
+ .destroy = ceetm_destroy,
+ .dump = ceetm_dump,
+ .attach = ceetm_attach,
+ .owner = THIS_MODULE,
+};
+
+/* Run the filters and classifiers attached to the qdisc on the provided skb */
+static struct ceetm_class *ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
+ int *qerr, bool *act_drop)
+{
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct ceetm_class *cl = NULL, *wbfs_cl;
+ struct tcf_result res;
+ struct tcf_proto *tcf;
+ int result;
+
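+	/* __NET_XMIT_BYPASS tells the caller to account the drop against
+	 * the qdisc; it is replaced by __NET_XMIT_STOLEN for stolen/queued
+	 * verdicts below.
+	 */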
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ tcf = priv->filter_list;
+ while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_QUEUED:
+ case TC_ACT_STOLEN:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
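+			/* fall through */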
+ case TC_ACT_SHOT:
+ /* No valid class found due to action */
+ *act_drop = true;
+ return NULL;
+ }
+#endif
+ cl = (void *)res.class;
+ if (!cl) {
+ if (res.classid == sch->handle) {
+ /* The filter leads to the qdisc */
+ /* TODO default qdisc */
+ return NULL;
+ }
+
+ cl = ceetm_find(res.classid, sch);
+ if (!cl)
+ /* The filter leads to an invalid class */
+ break;
+ }
+
+ /* The class might have its own filters attached */
+ tcf = cl->filter_list;
+ }
+
+ if (!cl) {
+ /* No valid class found */
+ /* TODO default qdisc */
+ return NULL;
+ }
+
+ switch (cl->type) {
+ case CEETM_ROOT:
+ if (cl->root.child) {
+ /* Run the prio qdisc classifiers */
+ return ceetm_classify(skb, cl->root.child, qerr,
+ act_drop);
+ } else {
+ /* The root class does not have a child prio qdisc */
+ /* TODO default qdisc */
+ return NULL;
+ }
+ case CEETM_PRIO:
+ if (cl->prio.child) {
+ /* If filters lead to a wbfs class, return it.
+ * Otherwise, return the prio class */
+ wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
+ act_drop);
+ /* A NULL result might indicate either an erroneous
+ * filter, or no filters at all. We will assume the
+ * latter */
+ return wbfs_cl ? : cl;
+ }
+ }
+
+ /* For wbfs and childless prio classes, return the class directly */
+ return cl;
+}
+
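+/* A sketch of the intended tc setup; the exact syntax depends on the
+ * matching iproute2 patches, so treat these commands as illustrative only:
+ *   tc qdisc add dev fm1-mac1 root handle 1: ceetm type root
+ *   tc class add dev fm1-mac1 parent 1: classid 1:1 ceetm type root
+ *   tc qdisc add dev fm1-mac1 parent 1:1 handle 2: ceetm type prio qcount 4
+ * Filters attached at each level then steer skbs to a prio or wbfs class.
+ */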
+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ int ret;
+ bool act_drop = false;
+ struct Qdisc *sch = net_dev->qdisc;
+ struct ceetm_class *cl;
+ struct dpa_priv_s *priv_dpa;
+ struct qman_fq *egress_fq, *conf_fq;
+ struct ceetm_qdisc *priv = qdisc_priv(sch);
+ struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
+ struct ceetm_class_stats *cstats;
+ const int queue_mapping = dpa_get_queue_mapping(skb);
+ spinlock_t *root_lock = qdisc_lock(sch);
+
+ spin_lock(root_lock);
+ cl = ceetm_classify(skb, sch, &ret, &act_drop);
+ spin_unlock(root_lock);
+
+#ifdef CONFIG_NET_CLS_ACT
+ if (act_drop) {
+ if (ret & __NET_XMIT_BYPASS)
+ qstats->drops++;
+ goto drop;
+ }
+#endif
+ /* TODO default class */
+ if (unlikely(!cl)) {
+ qstats->drops++;
+ goto drop;
+ }
+
+ priv_dpa = netdev_priv(net_dev);
+ conf_fq = priv_dpa->conf_fqs[queue_mapping];
+
+ /* Choose the proper tx fq and update the basic stats (bytes and
+ * packets sent by the class) */
+ switch (cl->type) {
+ case CEETM_PRIO:
+ egress_fq = &(cl->prio.fq->fq);
+ cstats = this_cpu_ptr(cl->prio.cstats);
+ break;
+ case CEETM_WBFS:
+ egress_fq = &(cl->wbfs.fq->fq);
+ cstats = this_cpu_ptr(cl->wbfs.cstats);
+ break;
+ default:
+ qstats->drops++;
+ goto drop;
+ }
+
+ bstats_update(&cstats->bstats, skb);
+ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
+
+drop:
+ dev_kfree_skb_any(skb);
+ return NET_XMIT_SUCCESS;
+}
+
+static int __init ceetm_register(void)
+{
+ int _errno = 0;
+
+ pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
+
+ _errno = register_qdisc(&ceetm_qdisc_ops);
+ if (unlikely(_errno))
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): register_qdisc() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+
+ return _errno;
+}
+
+static void __exit ceetm_unregister(void)
+{
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ unregister_qdisc(&ceetm_qdisc_ops);
+}
+
+module_init(ceetm_register);
+module_exit(ceetm_unregister);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
@@ -0,0 +1,230 @@
+/* Copyright 2008-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_ETH_CEETM_H
+#define __DPAA_ETH_CEETM_H
+
+#include <net/pkt_sched.h>
+#include <net/netlink.h>
+#include <lnxwrp_fm.h>
+
+#include "mac.h"
+#include "dpaa_eth_common.h"
+
+/* Mask to determine the sub-portal id from a channel number */
+#define CHANNEL_SP_MASK 0x1f
+/* The number of the last channel that services DCP0, connected to FMan 0.
+ * Value validated for B4 and T series platforms.
+ */
+#define DCP0_MAX_CHANNEL 0x80f
+/* A2V=1 - field A2 is valid
+ * A0V=1 - field A0 is valid - enables frame confirmation
+ * OVOM=1 - override operation mode bits with values from A2
+ * EBD=1 - external buffers are deallocated at the end of the FMan flow
+ * NL=0 - the BMI releases all the internal buffers
+ */
+#define CEETM_CONTEXT_A 0x1a00000080000000
+
+/* Frames reach the driver through num_tx_queues pfifo qdiscs, one per Tx
+ * queue. Their handles start from 1:21; handles 1:1 to 1:20 are reserved for
+ * the maximum 32 CEETM channels (majors and minors are in hex).
+ */
+#define PFIFO_MIN_OFFSET 0x21
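+/* e.g. a device with 8 Tx queues attaches pfifos with minors 0x21-0x28 */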
+
+/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
+#define CEETM_MAX_PRIO_QCOUNT 8
+#define CEETM_MAX_WBFS_QCOUNT 8
+#define CEETM_MIN_WBFS_QCOUNT 4
+
+/* The id offsets of the CQs belonging to WBFS groups (ids 8-11 for a small
+ * group A, 8-15 for a large group A, and 12-15 for group B).
+ */
+#define WBFS_GRP_A_OFFSET 8
+#define WBFS_GRP_B_OFFSET 12
+
+#define WBFS_GRP_A 1
+#define WBFS_GRP_B 2
+#define WBFS_GRP_LARGE 3
+
+enum {
+ TCA_CEETM_UNSPEC,
+ TCA_CEETM_COPT,
+ TCA_CEETM_QOPS,
+ __TCA_CEETM_MAX,
+};
+
+/* CEETM configuration types */
+enum {
+ CEETM_ROOT = 1,
+ CEETM_PRIO,
+ CEETM_WBFS
+};
+
+#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
+extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
+
+struct ceetm_class;
+struct ceetm_qdisc_stats;
+struct ceetm_class_stats;
+
+struct ceetm_fq {
+ struct qman_fq fq;
+ struct net_device *net_dev;
+ struct ceetm_class *ceetm_cls;
+};
+
+struct root_q {
+ struct Qdisc **qdiscs;
+ __u16 overhead;
+ __u32 rate;
+ __u32 ceil;
+ struct qm_ceetm_sp *sp;
+ struct qm_ceetm_lni *lni;
+ struct ceetm_qdisc_stats __percpu *qstats;
+};
+
+struct prio_q {
+ __u16 qcount;
+ struct ceetm_class *parent;
+};
+
+struct wbfs_q {
+ __u16 qcount;
+ int group_type;
+ struct ceetm_class *parent;
+ __u16 cr;
+ __u16 er;
+};
+
+struct ceetm_qdisc {
+	int type; /* ROOT (LNI) / PRIO (channel) / WBFS */
+ bool shaped;
+ union {
+ struct root_q root;
+ struct prio_q prio;
+ struct wbfs_q wbfs;
+ };
+ struct Qdisc_class_hash clhash;
+ struct tcf_proto *filter_list; /* qdisc attached filters */
+};
+
+/* CEETM Qdisc configuration parameters */
+struct tc_ceetm_qopt {
+ __u32 type;
+ __u16 shaped;
+ __u16 qcount;
+ __u16 overhead;
+ __u32 rate;
+ __u32 ceil;
+ __u16 cr;
+ __u16 er;
+ __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
+};
+
+struct root_c {
+ unsigned int rate;
+ unsigned int ceil;
+ unsigned int tbl;
+ bool wbfs_grp_a;
+ bool wbfs_grp_b;
+ bool wbfs_grp_large;
+ struct Qdisc *child;
+ struct qm_ceetm_channel *ch;
+};
+
+struct prio_c {
+ bool cr;
+ bool er;
+ struct ceetm_fq *fq; /* Hardware FQ instance Handle */
+ struct qm_ceetm_lfq *lfq;
+ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
+ struct qm_ceetm_ccg *ccg;
+ /* only one wbfs can be linked to one priority CQ */
+ struct Qdisc *child;
+ struct ceetm_class_stats __percpu *cstats;
+};
+
+struct wbfs_c {
+ __u8 weight; /* The weight of the class between 1 and 248 */
+ struct ceetm_fq *fq; /* Hardware FQ instance Handle */
+ struct qm_ceetm_lfq *lfq;
+ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
+ struct qm_ceetm_ccg *ccg;
+ struct ceetm_class_stats __percpu *cstats;
+};
+
+struct ceetm_class {
+ struct Qdisc_class_common common;
+ int refcnt; /* usage count of this class */
+ struct tcf_proto *filter_list; /* class attached filters */
+ struct Qdisc *parent;
+ bool shaped;
+ int type; /* ROOT/PRIO/WBFS */
+ union {
+ struct root_c root;
+ struct prio_c prio;
+ struct wbfs_c wbfs;
+ };
+};
+
+/* CEETM Class configuration parameters */
+struct tc_ceetm_copt {
+ __u32 type;
+ __u16 shaped;
+ __u32 rate;
+ __u32 ceil;
+ __u16 tbl;
+ __u16 cr;
+ __u16 er;
+ __u8 weight;
+};
+
+/* CEETM stats */
+struct ceetm_qdisc_stats {
+ __u32 drops;
+};
+
+struct ceetm_class_stats {
+ struct gnet_stats_basic_packed bstats;
+ __u32 ern_drop_count;
+ __u32 cgr_congested_count;
+};
+
+struct tc_ceetm_xstats {
+ __u32 ern_drop_count;
+ __u32 cgr_congested_count;
+ __u64 frame_count;
+ __u64 byte_count;
+};
+
+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
+#endif
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
@@ -0,0 +1,1787 @@
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/highmem.h>
+#include <linux/sort.h>
+#include <linux/fsl_qman.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h> /* vlan_eth_hdr */
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#ifdef CONFIG_FSL_DPAA_1588
+#include "dpaa_1588.h"
+#endif
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+#include "dpaa_debugfs.h"
+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
+#include "mac.h"
+
+/* DPAA platforms benefit from hardware-assisted queue management */
+#define DPA_NETIF_FEATURES NETIF_F_HW_ACCEL_MQ
+
+/* Size in bytes of the FQ taildrop threshold */
+#define DPA_FQ_TD 0x200000
+
+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
+struct ptp_priv_s ptp_priv;
+#endif
+
+static struct dpa_bp *dpa_bp_array[64];
+
+int dpa_max_frm;
+EXPORT_SYMBOL(dpa_max_frm);
+
+int dpa_rx_extra_headroom;
+EXPORT_SYMBOL(dpa_rx_extra_headroom);
+
+int dpa_num_cpus = NR_CPUS;
+
+static const struct fqid_cell tx_confirm_fqids[] = {
+ {0, DPAA_ETH_TX_QUEUES}
+};
+
+static struct fqid_cell default_fqids[][3] = {
+ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
+ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
+};
+
+static const char fsl_qman_frame_queues[][25] = {
+ [RX] = "fsl,qman-frame-queues-rx",
+ [TX] = "fsl,qman-frame-queues-tx"
+};
+#ifdef CONFIG_FSL_DPAA_HOOKS
+/* A set of callbacks for hooking into the fastpath at different points. */
+struct dpaa_eth_hooks_s dpaa_eth_hooks;
+EXPORT_SYMBOL(dpaa_eth_hooks);
+/* This function should only be called on the probe paths, since it makes no
+ * effort to guarantee consistency of the destination hooks structure.
+ */
+void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
+{
+ if (hooks)
+ dpaa_eth_hooks = *hooks;
+ else
+ pr_err("NULL pointer to hooks!\n");
+}
+EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
+#endif
+
+int dpa_netdev_init(struct net_device *net_dev,
+ const uint8_t *mac_addr,
+ uint16_t tx_timeout)
+{
+ int err;
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+
+ net_dev->hw_features |= DPA_NETIF_FEATURES;
+
+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ net_dev->features |= net_dev->hw_features;
+ net_dev->vlan_features = net_dev->features;
+
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+
+ net_dev->ethtool_ops = &dpa_ethtool_ops;
+
+ net_dev->needed_headroom = priv->tx_headroom;
+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ /* create debugfs entry for this net_device */
+ err = dpa_netdev_debugfs_create(net_dev);
+ if (err) {
+ unregister_netdev(net_dev);
+ return err;
+ }
+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_netdev_init);
+
+int __cold dpa_start(struct net_device *net_dev)
+{
+ int err, i;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ err = mac_dev->init_phy(net_dev, priv->mac_dev);
+ if (err < 0) {
+ if (netif_msg_ifup(priv))
+ netdev_err(net_dev, "init_phy() = %d\n", err);
+ return err;
+ }
+
+ for_each_port_device(i, mac_dev->port_dev) {
+ err = fm_port_enable(mac_dev->port_dev[i]);
+ if (err)
+ goto mac_start_failed;
+ }
+
+ err = priv->mac_dev->start(mac_dev);
+ if (err < 0) {
+ if (netif_msg_ifup(priv))
+ netdev_err(net_dev, "mac_dev->start() = %d\n", err);
+ goto mac_start_failed;
+ }
+
+ netif_tx_start_all_queues(net_dev);
+
+ return 0;
+
+mac_start_failed:
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_disable(mac_dev->port_dev[i]);
+
+ return err;
+}
+EXPORT_SYMBOL(dpa_start);
+
+int __cold dpa_stop(struct net_device *net_dev)
+{
+ int _errno, i, err;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ netif_tx_stop_all_queues(net_dev);
+ /* Allow the Fman (Tx) port to process in-flight frames before we
+ * try switching it off.
+ */
+ usleep_range(5000, 10000);
+
+ _errno = mac_dev->stop(mac_dev);
+ if (unlikely(_errno < 0))
+ if (netif_msg_ifdown(priv))
+ netdev_err(net_dev, "mac_dev->stop() = %d\n",
+ _errno);
+
+ for_each_port_device(i, mac_dev->port_dev) {
+ err = fm_port_disable(mac_dev->port_dev[i]);
+ _errno = err ? err : _errno;
+ }
+
+ if (mac_dev->phy_dev)
+ phy_disconnect(mac_dev->phy_dev);
+ mac_dev->phy_dev = NULL;
+
+ return _errno;
+}
+EXPORT_SYMBOL(dpa_stop);
+
+void __cold dpa_timeout(struct net_device *net_dev)
+{
+ const struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ if (netif_msg_timer(priv))
+ netdev_crit(net_dev, "Transmit timeout latency: %u ms\n",
+ jiffies_to_msecs(jiffies - net_dev->trans_start));
+
+ percpu_priv->stats.tx_errors++;
+}
+EXPORT_SYMBOL(dpa_timeout);
+
+/* net_device */
+
+/**
+ * @param net_dev the device for which statistics are calculated
+ * @param stats the function fills this structure with the device's statistics
+ * @return the address of the structure containing the statistics
+ *
+ * Calculates the statistics for the given device by adding the statistics
+ * collected by each CPU.
+ */
+struct rtnl_link_stats64 * __cold
+dpa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ u64 *cpustats;
+ u64 *netstats = (u64 *)stats;
+ int i, j;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
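+	/* Both rtnl_link_stats64 and the per-CPU stats are flat arrays of
+	 * u64 counters, so they can be summed field by field through plain
+	 * u64 pointers.
+	 */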
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (u64 *)&percpu_priv->stats;
+
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+
+ return stats;
+}
+EXPORT_SYMBOL(dpa_get_stats64);
+
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ const int max_mtu = dpa_get_max_mtu();
+
+ /* Make sure we don't exceed the Ethernet controller's MAXFRM */
+ if (new_mtu < 68 || new_mtu > max_mtu) {
+ netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
+ new_mtu, 68, max_mtu);
+ return -EINVAL;
+ }
+ net_dev->mtu = new_mtu;
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_change_mtu);
+
+/* .ndo_init callback */
+int dpa_ndo_init(struct net_device *net_dev)
+{
+	/* If fsl_fm_max_frm is set to a value higher than the common 1500,
+ * we choose conservatively and let the user explicitly set a higher
+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+ * in the same LAN.
+ * If on the other hand fsl_fm_max_frm has been chosen below 1500,
+ * start with the maximum allowed.
+ */
+ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
+
+ pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
+ net_dev->mtu = init_mtu;
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_ndo_init);
+
+int dpa_set_features(struct net_device *dev, netdev_features_t features)
+{
+ /* Not much to do here for now */
+ dev->features = features;
+ return 0;
+}
+EXPORT_SYMBOL(dpa_set_features);
+
+netdev_features_t dpa_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t unsupported_features = 0;
+
+ /* In theory we should never be requested to enable features that
+ * we didn't set in netdev->features and netdev->hw_features at probe
+ * time, but double check just to be on the safe side.
+ * We don't support enabling Rx csum through ethtool yet
+ */
+ unsupported_features |= NETIF_F_RXCSUM;
+
+ features &= ~unsupported_features;
+
+ return features;
+}
+EXPORT_SYMBOL(dpa_fix_features);
+
+#ifdef CONFIG_FSL_DPAA_TS
+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
+ const void *data)
+{
+ u64 *ts, ns;
+
+ ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
+ data);
+
+ if (!ts || *ts == 0)
+ return 0;
+
+ be64_to_cpus(ts);
+
+	/* convert to ns: the nominal frequency period is a power of 2, so
+	 * multiplying by DPA_PTP_NOMINAL_FREQ_PERIOD_NS reduces to a shift */
+ ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
+
+ return ns;
+}
+
+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
+ struct skb_shared_hwtstamps *shhwtstamps, const void *data)
+{
+ u64 ns;
+
+ ns = dpa_get_timestamp_ns(priv, rx_tx, data);
+
+ if (ns == 0)
+ return -EINVAL;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+ return 0;
+}
+
+static void dpa_ts_tx_enable(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
+ if (mac_dev->ptp_enable)
+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
+
+ priv->ts_tx_en = true;
+}
+
+static void dpa_ts_tx_disable(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+
+#if 0
+/* the RTC might be needed by the Rx Ts, cannot disable here
+ * no separate ptp_disable API for Rx/Tx, cannot disable here
+ */
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
+
+ if (mac_dev->ptp_disable)
+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
+#endif
+
+ priv->ts_tx_en = false;
+}
+
+static void dpa_ts_rx_enable(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
+ if (mac_dev->ptp_enable)
+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
+
+ priv->ts_rx_en = true;
+}
+
+static void dpa_ts_rx_disable(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+
+#if 0
+/* the RTC might be needed by the Tx Ts, cannot disable here
+ * no separate ptp_disable API for Rx/Tx, cannot disable here
+ */
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
+
+ if (mac_dev->ptp_disable)
+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
+#endif
+
+ priv->ts_rx_en = false;
+}
+
+static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ dpa_ts_tx_disable(dev);
+ break;
+ case HWTSTAMP_TX_ON:
+ dpa_ts_tx_enable(dev);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE)
+ dpa_ts_rx_disable(dev);
+ else {
+ dpa_ts_rx_enable(dev);
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+#endif /* CONFIG_FSL_DPAA_TS */
+
+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_FSL_DPAA_1588
+ struct dpa_priv_s *priv = netdev_priv(dev);
+#endif
+ int ret = 0;
+
+ /* at least one timestamping feature must be enabled */
+#ifdef CONFIG_FSL_DPAA_TS
+ if (!netif_running(dev))
+#endif
+ return -EINVAL;
+
+#ifdef CONFIG_FSL_DPAA_TS
+ if (cmd == SIOCSHWTSTAMP)
+ return dpa_ts_ioctl(dev, rq, cmd);
+#endif /* CONFIG_FSL_DPAA_TS */
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
+ if (priv->tsu && priv->tsu->valid)
+ ret = dpa_ioctl_1588(dev, rq, cmd);
+ else
+ ret = -ENODEV;
+ }
+#endif
+
+ return ret;
+}
+EXPORT_SYMBOL(dpa_ioctl);
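+
+/* Illustrative user-space counterpart (a sketch, not part of the driver):
+ * hardware timestamping is switched on through the standard SIOCSHWTSTAMP
+ * request, e.g.:
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_ALL,
+ *	};
+ *	struct ifreq ifr;
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	// fd: any open AF_INET socket
+ */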
+
+int __cold dpa_remove(struct platform_device *of_dev)
+{
+ int err;
+ struct device *dev;
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+
+ dev = &of_dev->dev;
+ net_dev = dev_get_drvdata(dev);
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_sysfs_remove(dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
+ err = dpa_fq_free(dev, &priv->dpa_fq_list);
+
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+
+ dpa_private_napi_del(net_dev);
+
+ dpa_bp_free(priv);
+
+ if (priv->buf_layout)
+ devm_kfree(dev, priv->buf_layout);
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ /* remove debugfs entry for this net_device */
+ dpa_netdev_debugfs_remove(net_dev);
+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid)
+ dpa_ptp_cleanup(priv);
+#endif
+
+ free_netdev(net_dev);
+
+ return err;
+}
+EXPORT_SYMBOL(dpa_remove);
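+
+/* Informative note: the teardown above mirrors probe in reverse order:
+ * sysfs entries, netdev unregistration, frame queues, CGRs, NAPI contexts,
+ * buffer pools and, last, the net_device itself.
+ */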
+
+struct mac_device * __cold __must_check
+__attribute__((nonnull))
+dpa_mac_probe(struct platform_device *_of_dev)
+{
+ struct device *dpa_dev, *dev;
+ struct device_node *mac_node;
+ struct platform_device *of_dev;
+ struct mac_device *mac_dev;
+#ifdef CONFIG_FSL_DPAA_1588
+ int lenp;
+ const phandle *phandle_prop;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct device_node *timer_node;
+#endif
+ dpa_dev = &_of_dev->dev;
+
+ mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
+ if (unlikely(mac_node == NULL)) {
+		dev_err(dpa_dev, "Cannot find the MAC device's device tree node\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ of_dev = of_find_device_by_node(mac_node);
+ if (unlikely(of_dev == NULL)) {
+ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
+ mac_node->full_name);
+ of_node_put(mac_node);
+ return ERR_PTR(-EINVAL);
+ }
+ of_node_put(mac_node);
+
+ dev = &of_dev->dev;
+
+ mac_dev = dev_get_drvdata(dev);
+ if (unlikely(mac_dev == NULL)) {
+ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
+ dev_name(dev));
+ return ERR_PTR(-EINVAL);
+ }
+
+#ifdef CONFIG_FSL_DPAA_1588
+ phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
+	if (phandle_prop && (mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII ||
+			     mac_dev->speed == SPEED_1000)) {
+ timer_node = of_find_node_by_phandle(*phandle_prop);
+ if (timer_node)
+ net_dev = dev_get_drvdata(dpa_dev);
+ if (timer_node && net_dev) {
+ priv = netdev_priv(net_dev);
+ if (!dpa_ptp_init(priv))
+ dev_info(dev, "%s: ptp 1588 is initialized.\n",
+ mac_node->full_name);
+ }
+ }
+#endif
+
+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
+	if (mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII ||
+	    mac_dev->speed == SPEED_1000) {
+ ptp_priv.node = of_parse_phandle(mac_node, "ptimer-handle", 0);
+ if (ptp_priv.node) {
+ ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
+ if (unlikely(ptp_priv.of_dev == NULL)) {
+ dev_err(dpa_dev,
+ "Cannot find device represented by timer_node\n");
+ of_node_put(ptp_priv.node);
+ return ERR_PTR(-EINVAL);
+ }
+ ptp_priv.mac_dev = mac_dev;
+ }
+ }
+#endif
+ return mac_dev;
+}
+EXPORT_SYMBOL(dpa_mac_probe);
+
+int dpa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpa_priv_s *priv;
+ int _errno;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+
+ _errno = eth_mac_addr(net_dev, addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev,
+ "eth_mac_addr() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ mac_dev = priv->mac_dev;
+
+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
+ net_dev->dev_addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev,
+ "mac_dev->change_addr() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_set_mac_address);
+
+void dpa_set_rx_mode(struct net_device *net_dev)
+{
+ int _errno;
+ const struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
+ _errno = priv->mac_dev->set_promisc(
+ priv->mac_dev->get_mac_handle(priv->mac_dev),
+ priv->mac_dev->promisc);
+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
+ netdev_err(net_dev,
+ "mac_dev->set_promisc() = %d\n",
+ _errno);
+ }
+
+ _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
+ netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
+}
+EXPORT_SYMBOL(dpa_set_rx_mode);
+
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+ struct dpa_buffer_layout_s *layout)
+{
+ struct fm_port_params params;
+
+ /* Rx */
+ layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
+ layout[RX].parse_results = true;
+ layout[RX].hash_results = true;
+#ifdef CONFIG_FSL_DPAA_TS
+ layout[RX].time_stamp = true;
+#endif
+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);
+ layout[RX].manip_extra_space = params.manip_extra_space;
+ /* a value of zero for data alignment means "don't care", so align to
+ * a non-zero value to prevent FMD from using its own default
+ */
+ layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
+
+ /* Tx */
+ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
+ layout[TX].parse_results = true;
+ layout[TX].hash_results = true;
+#ifdef CONFIG_FSL_DPAA_TS
+ layout[TX].time_stamp = true;
+#endif
+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);
+ layout[TX].manip_extra_space = params.manip_extra_space;
+ layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
+}
+EXPORT_SYMBOL(dpa_set_buffers_layout);
+
+int __attribute__((nonnull))
+dpa_bp_alloc(struct dpa_bp *dpa_bp)
+{
+ int err;
+ struct bman_pool_params bp_params;
+ struct platform_device *pdev;
+
+ if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
+		pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers\n");
+ return -EINVAL;
+ }
+
+ memset(&bp_params, 0, sizeof(struct bman_pool_params));
+#ifdef CONFIG_FMAN_PFC
+ bp_params.flags = BMAN_POOL_FLAG_THRESH;
+ bp_params.thresholds[0] = bp_params.thresholds[2] =
+ CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
+ bp_params.thresholds[1] = bp_params.thresholds[3] =
+ CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
+#endif
+
+	/* Only one pool is created per bpid; if one already exists,
+	 * just take another reference to it
+	 */
+ if (dpa_bpid2pool_use(dpa_bp->bpid))
+ return 0;
+
+ if (dpa_bp->bpid == 0)
+ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
+ else
+ bp_params.bpid = dpa_bp->bpid;
+
+ dpa_bp->pool = bman_new_pool(&bp_params);
+ if (unlikely(dpa_bp->pool == NULL)) {
+ pr_err("bman_new_pool() failed\n");
+ return -ENODEV;
+ }
+
+ dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
+
+ pdev = platform_device_register_simple("dpaa_eth_bpool",
+ dpa_bp->bpid, NULL, 0);
+ if (IS_ERR(pdev)) {
+ err = PTR_ERR(pdev);
+ goto pdev_register_failed;
+ }
+
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto pdev_mask_failed;
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto pdev_mask_failed;
+ }
+
+#ifdef CONFIG_FMAN_ARM
+ /* force coherency */
+ pdev->dev.archdata.dma_coherent = true;
+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
+#endif
+
+ dpa_bp->dev = &pdev->dev;
+
+ if (dpa_bp->seed_cb) {
+ err = dpa_bp->seed_cb(dpa_bp);
+ if (err)
+ goto pool_seed_failed;
+ }
+
+ dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
+
+ return 0;
+
+pool_seed_failed:
+pdev_mask_failed:
+ platform_device_unregister(pdev);
+pdev_register_failed:
+ bman_free_pool(dpa_bp->pool);
+
+ return err;
+}
+EXPORT_SYMBOL(dpa_bp_alloc);
+
+void dpa_bp_drain(struct dpa_bp *bp)
+{
+ int ret, num = 8;
+
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ ret = bman_acquire(bp->pool, bmb, num, 0);
+ if (ret < 0) {
+ if (num == 8) {
+ /* we have less than 8 buffers left;
+ * drain them one by one
+ */
+ num = 1;
+ ret = 1;
+ continue;
+ } else {
+ /* Pool is fully drained */
+ break;
+ }
+ }
+
+ for (i = 0; i < num; i++) {
+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
+
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_BIDIRECTIONAL);
+
+ bp->free_buf_cb(phys_to_virt(addr));
+ }
+ } while (ret > 0);
+}
+EXPORT_SYMBOL(dpa_bp_drain);
+
+static void __cold __attribute__((nonnull))
+_dpa_bp_free(struct dpa_bp *dpa_bp)
+{
+ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
+
+ /* the mapping between bpid and dpa_bp is done very late in the
+ * allocation procedure; if something failed before the mapping, the bp
+ * was not configured, therefore we don't need the below instructions
+ */
+ if (!bp)
+ return;
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->free_buf_cb)
+ dpa_bp_drain(bp);
+
+ dpa_bp_array[bp->bpid] = NULL;
+ bman_free_pool(bp->pool);
+
+ if (bp->dev)
+ platform_device_unregister(to_platform_device(bp->dev));
+}
+
+void __cold __attribute__((nonnull))
+dpa_bp_free(struct dpa_priv_s *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ _dpa_bp_free(&priv->dpa_bp[i]);
+}
+EXPORT_SYMBOL(dpa_bp_free);
+
+struct dpa_bp *dpa_bpid2pool(int bpid)
+{
+ return dpa_bp_array[bpid];
+}
+EXPORT_SYMBOL(dpa_bpid2pool);
+
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
+{
+ dpa_bp_array[bpid] = dpa_bp;
+ atomic_set(&dpa_bp->refs, 1);
+}
+
+bool dpa_bpid2pool_use(int bpid)
+{
+ if (dpa_bpid2pool(bpid)) {
+ atomic_inc(&dpa_bp_array[bpid]->refs);
+ return true;
+ }
+
+ return false;
+}
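+
+/* Pool sharing in a nutshell (informative): dpa_bp_alloc() first calls
+ * dpa_bpid2pool_use() to take a reference on an existing pool with the same
+ * bpid; only when none exists does it create one and publish it through
+ * dpa_bpid2pool_map(), which starts the refcount at 1. _dpa_bp_free()
+ * drops one reference and tears the pool down on the final put.
+ */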
+
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ return dpa_get_queue_mapping(skb);
+}
+EXPORT_SYMBOL(dpa_select_queue);
+#endif
+
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+ u32 fq_start,
+ u32 fq_count,
+ struct list_head *list,
+ enum dpa_fq_type fq_type)
+{
+ int i;
+ struct dpa_fq *dpa_fq;
+
+ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
+ if (dpa_fq == NULL)
+ return NULL;
+
+ for (i = 0; i < fq_count; i++) {
+ dpa_fq[i].fq_type = fq_type;
+ if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
+ dpa_fq[i].fqid = fq_start ?
+ DPAA_ETH_FQ_DELTA + fq_start + i : 0;
+ else
+ dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
+
+ list_add_tail(&dpa_fq[i].list, list);
+ }
+
+#ifdef CONFIG_FMAN_PFC
+ if (fq_type == FQ_TYPE_TX)
+ for (i = 0; i < fq_count; i++)
+ dpa_fq[i].wq = i / dpa_num_cpus;
+ else
+#endif
+ for (i = 0; i < fq_count; i++)
+ _dpa_assign_wq(dpa_fq + i);
+
+ return dpa_fq;
+}
+EXPORT_SYMBOL(dpa_fq_alloc);
+
+/* Probing of FQs for MACful ports */
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs,
+ bool alloc_tx_conf_fqs,
+ enum port_type ptype)
+{
+ struct fqid_cell *fqids = NULL;
+ const void *fqids_off = NULL;
+ struct dpa_fq *dpa_fq = NULL;
+ struct device_node *np = dev->of_node;
+ int num_ranges;
+ int i, lenp;
+
+ if (ptype == TX && alloc_tx_conf_fqs) {
+ if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
+ tx_confirm_fqids->count, list,
+ FQ_TYPE_TX_CONF_MQ))
+ goto fq_alloc_failed;
+ }
+
+ fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
+ if (fqids_off == NULL) {
+ /* No dts definition, so use the defaults. */
+ fqids = default_fqids[ptype];
+ num_ranges = 3;
+ } else {
+ num_ranges = lenp / sizeof(*fqids);
+
+ fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
+ GFP_KERNEL);
+ if (fqids == NULL)
+ goto fqids_alloc_failed;
+
+		/* convert to CPU endianness */
+ for (i = 0; i < num_ranges; i++) {
+ fqids[i].start = be32_to_cpup(fqids_off +
+ i * sizeof(*fqids));
+ fqids[i].count = be32_to_cpup(fqids_off +
+ i * sizeof(*fqids) + sizeof(__be32));
+ }
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ switch (i) {
+ case 0:
+ /* The first queue is the error queue */
+ if (fqids[i].count != 1)
+ goto invalid_error_queue;
+
+ dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
+ fqids[i].count, list,
+ ptype == RX ?
+ FQ_TYPE_RX_ERROR :
+ FQ_TYPE_TX_ERROR);
+ if (dpa_fq == NULL)
+ goto fq_alloc_failed;
+
+ if (ptype == RX)
+ port_fqs->rx_errq = &dpa_fq[0];
+ else
+ port_fqs->tx_errq = &dpa_fq[0];
+ break;
+ case 1:
+ /* the second queue is the default queue */
+ if (fqids[i].count != 1)
+ goto invalid_default_queue;
+
+ dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
+ fqids[i].count, list,
+ ptype == RX ?
+ FQ_TYPE_RX_DEFAULT :
+ FQ_TYPE_TX_CONFIRM);
+ if (dpa_fq == NULL)
+ goto fq_alloc_failed;
+
+ if (ptype == RX)
+ port_fqs->rx_defq = &dpa_fq[0];
+ else
+ port_fqs->tx_defq = &dpa_fq[0];
+ break;
+ default:
+			/* all subsequent ranges are Rx PCD (normal and high
+			 * priority) or Tx queues
+			 */
+ if (ptype == RX) {
+ if (!dpa_fq_alloc(dev, fqids[i].start,
+ fqids[i].count, list,
+ FQ_TYPE_RX_PCD) ||
+ !dpa_fq_alloc(dev, fqids[i].start,
+ fqids[i].count, list,
+ FQ_TYPE_RX_PCD_HI_PRIO))
+ goto fq_alloc_failed;
+ } else {
+ if (!dpa_fq_alloc(dev, fqids[i].start,
+ fqids[i].count, list,
+ FQ_TYPE_TX))
+ goto fq_alloc_failed;
+ }
+ break;
+ }
+ }
+
+ return 0;
+
+fq_alloc_failed:
+fqids_alloc_failed:
+ dev_err(dev, "Cannot allocate memory for frame queues\n");
+ return -ENOMEM;
+
+invalid_default_queue:
+invalid_error_queue:
+	dev_err(dev, "Default and error queue ranges must each hold exactly one queue\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL(dpa_fq_probe_mac);
+
+static u32 rx_pool_channel;
+static DEFINE_SPINLOCK(rx_pool_channel_init);
+
+int dpa_get_channel(void)
+{
+ spin_lock(&rx_pool_channel_init);
+ if (!rx_pool_channel) {
+ u32 pool;
+ int ret = qman_alloc_pool(&pool);
+ if (!ret)
+ rx_pool_channel = pool;
+ }
+ spin_unlock(&rx_pool_channel_init);
+ if (!rx_pool_channel)
+ return -ENOMEM;
+ return rx_pool_channel;
+}
+EXPORT_SYMBOL(dpa_get_channel);
+
+void dpa_release_channel(void)
+{
+ qman_release_pool(rx_pool_channel);
+}
+EXPORT_SYMBOL(dpa_release_channel);
+
+int dpaa_eth_add_channel(void *__arg)
+{
+ const cpumask_t *cpus = qman_affine_cpus();
+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
+ int cpu;
+ struct qman_portal *portal;
+
+ for_each_cpu(cpu, cpus) {
+ portal = (struct qman_portal *)qman_get_affine_portal(cpu);
+ qman_p_static_dequeue_add(portal, pool);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dpaa_eth_add_channel);
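+
+/* Illustrative pairing (a sketch, not part of the driver): a probe path is
+ * expected to reserve the shared Rx pool channel once and then let every
+ * affine portal dequeue from it:
+ *
+ *	int channel = dpa_get_channel();
+ *
+ *	if (channel < 0)
+ *		return channel;
+ *	dpaa_eth_add_channel((void *)(unsigned long)channel);
+ */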
+
+/**
+ * Congestion group state change notification callback.
+ * Stops the device's egress queues while they are congested and
+ * wakes them upon exiting congested state.
+ * Also updates some CGR-related stats.
+ */
+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
+			   int congested)
+{
+ struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
+ struct dpa_priv_s, cgr_data.cgr);
+
+ if (congested) {
+ priv->cgr_data.congestion_start_jiffies = jiffies;
+ netif_tx_stop_all_queues(priv->net_dev);
+ priv->cgr_data.cgr_congested_count++;
+ } else {
+ priv->cgr_data.congested_jiffies +=
+ (jiffies - priv->cgr_data.congestion_start_jiffies);
+ netif_tx_wake_all_queues(priv->net_dev);
+ }
+}
+
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+ if (err < 0) {
+ pr_err("Error %d allocating CGR ID\n", err);
+ goto out_error;
+ }
+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+
+	/* zero out the fields not selected through we_mask, so the MCC
+	 * command is deterministic
+	 */
+	memset(&initcgr, 0, sizeof(initcgr));
+
+	/* Enable Congestion State Change Notifications and CS taildrop */
+ initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+
+ /* Set different thresholds based on the MAC speed.
+ * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
+ * In such cases, we ought to reconfigure the threshold, too.
+ */
+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
+ else
+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ pr_err("Error %d creating CGR with ID %d\n", err,
+ priv->cgr_data.cgr.cgrid);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ goto out_error;
+ }
+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
+ priv->cgr_data.cgr.chan);
+
+out_error:
+ return err;
+}
+EXPORT_SYMBOL(dpaa_eth_cgr_init);
+
+static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
+ struct dpa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
+ struct dpa_fq *fq,
+ struct fm_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ if (port) {
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = (uint16_t)fm_get_tx_port_channel(port);
+ } else {
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ }
+}
+
+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
+ struct fm_port *tx_port)
+{
+ struct dpa_fq *fq;
+ uint16_t portals[NR_CPUS];
+ int cpu, portal_cnt = 0, num_portals = 0;
+ uint32_t pcd_fqid, pcd_fqid_hi_prio;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+ int egress_cnt = 0, conf_cnt = 0;
+
+ /* Prepare for PCD FQs init */
+ for_each_cpu(cpu, affine_cpus)
+ portals[num_portals++] = qman_affine_channel(cpu);
+ if (num_portals == 0)
+ dev_err(priv->net_dev->dev.parent,
+			"No Qman software (affine) channels found\n");
+
+ pcd_fqid = (priv->mac_dev) ?
+ DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
+ pcd_fqid_hi_prio = (priv->mac_dev) ?
+ DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_RX_ERROR:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+ break;
+ case FQ_TYPE_RX_PCD:
+ /* For MACless we can't have dynamic Rx queues */
+ BUG_ON(!priv->mac_dev && !fq->fqid);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ if (!fq->fqid)
+ fq->fqid = pcd_fqid++;
+ fq->channel = portals[portal_cnt];
+ portal_cnt = (portal_cnt + 1) % num_portals;
+ break;
+ case FQ_TYPE_RX_PCD_HI_PRIO:
+ /* For MACless we can't have dynamic Hi Pri Rx queues */
+ BUG_ON(!priv->mac_dev && !fq->fqid);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ if (!fq->fqid)
+ fq->fqid = pcd_fqid_hi_prio++;
+ fq->channel = portals[portal_cnt];
+ portal_cnt = (portal_cnt + 1) % num_portals;
+ break;
+ case FQ_TYPE_TX:
+ dpa_setup_egress(priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < DPAA_ETH_TX_QUEUES)
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_ERROR:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
+ break;
+ default:
+ dev_warn(priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+ break;
+ }
+ }
+
+ /* The number of Tx queues may be smaller than the number of cores, if
+ * the Tx queue range is specified in the device tree instead of being
+ * dynamically allocated.
+ * Make sure all CPUs receive a corresponding Tx queue.
+ */
+ while (egress_cnt < DPAA_ETH_TX_QUEUES) {
+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == DPAA_ETH_TX_QUEUES)
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(dpa_fq_setup);
+
+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
+{
+ int _errno;
+ const struct dpa_priv_s *priv;
+ struct device *dev;
+ struct qman_fq *fq;
+ struct qm_mcc_initfq initfq;
+ struct qman_fq *confq;
+ int queue_id;
+
+ priv = netdev_priv(dpa_fq->net_dev);
+ dev = dpa_fq->net_dev->dev.parent;
+
+ if (dpa_fq->fqid == 0)
+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
+ if (_errno) {
+ dev_err(dev, "qman_create_fq() failed\n");
+ return _errno;
+ }
+ fq = &dpa_fq->fq_base;
+
+ if (dpa_fq->init) {
+ memset(&initfq, 0, sizeof(initfq));
+
+ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+ /* FIXME: why would we want to keep an empty FQ in cache? */
+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+
+ /* Try to reduce the number of portal interrupts for
+ * Tx Confirmation FQs.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+
+ /* FQ placement */
+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+
+ initfq.fqd.dest.channel = dpa_fq->channel;
+ initfq.fqd.dest.wq = dpa_fq->wq;
+
+ /* Put all egress queues in a congestion group of their own.
+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
+ * rather than Tx - but they nonetheless account for the
+ * memory footprint on behalf of egress traffic. We therefore
+ * place them in the netdev's CGR, along with the Tx FQs.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX ||
+ dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+ dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
+ initfq.we_mask |= QM_INITFQ_WE_CGID;
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
+ /* Set a fixed overhead accounting, in an attempt to
+ * reduce the impact of fixed-size skb shells and the
+ * driver's needed headroom on system memory. This is
+ * especially the case when the egress traffic is
+ * composed of small datagrams.
+ * Unfortunately, QMan's OAL value is capped to an
+ * insufficient value, but even that is better than
+ * no overhead accounting at all.
+ */
+ initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.fqd.oac_init.oac = QM_OAC_CG;
+ initfq.fqd.oac_init.oal =
+ (signed char)(min(sizeof(struct sk_buff) +
+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ if (td_enable) {
+ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
+ qm_fqd_taildrop_set(&initfq.fqd.td,
+ DPA_FQ_TD, 1);
+ initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
+ }
+
+ /* Configure the Tx confirmation queue, now that we know
+ * which Tx queue it pairs with.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX) {
+ queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
+ if (queue_id >= 0) {
+ confq = priv->conf_fqs[queue_id];
+ if (confq) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
+ * A2V=1 (contextA A2 field is valid)
+ * A0V=1 (contextA A0 field is valid)
+ * B0V=1 (contextB field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
+ */
+ initfq.fqd.context_a.hi = 0x1e000000;
+ initfq.fqd.context_a.lo = 0x80000000;
+ }
+ }
+ }
+
+ /* Put all *private* ingress queues in our "ingress CGR". */
+ if (priv->use_ingress_cgr &&
+ (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
+ dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
+ dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
+ initfq.we_mask |= QM_INITFQ_WE_CGID;
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
+ /* Set a fixed overhead accounting, just like for the
+ * egress CGR.
+ */
+ initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.fqd.oac_init.oac = QM_OAC_CG;
+ initfq.fqd.oac_init.oal =
+ (signed char)(min(sizeof(struct sk_buff) +
+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ /* Initialization common to all ingress queues */
+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.fqd.fq_ctrl |=
+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ initfq.fqd.context_a.stashing.data_cl = 2;
+ initfq.fqd.context_a.stashing.annotation_cl = 1;
+ initfq.fqd.context_a.stashing.context_cl =
+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+ }
+
+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (_errno < 0) {
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), _errno);
+ qman_destroy_fq(fq, 0);
+ return _errno;
+ }
+ }
+
+ dpa_fq->fqid = qman_fq_fqid(fq);
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_fq_init);
+
+int __cold __attribute__((nonnull))
+_dpa_fq_free(struct device *dev, struct qman_fq *fq)
+{
+ int _errno, __errno;
+ struct dpa_fq *dpa_fq;
+ const struct dpa_priv_s *priv;
+
+ _errno = 0;
+
+ dpa_fq = container_of(fq, struct dpa_fq, fq_base);
+ priv = netdev_priv(dpa_fq->net_dev);
+
+ if (dpa_fq->init) {
+ _errno = qman_retire_fq(fq, NULL);
+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
+ qman_fq_fqid(fq), _errno);
+
+ __errno = qman_oos_fq(fq);
+ if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
+ qman_fq_fqid(fq), __errno);
+ if (_errno >= 0)
+ _errno = __errno;
+ }
+ }
+
+ qman_destroy_fq(fq, 0);
+ list_del(&dpa_fq->list);
+
+ return _errno;
+}
+EXPORT_SYMBOL(_dpa_fq_free);
+
+int __cold __attribute__((nonnull))
+dpa_fq_free(struct device *dev, struct list_head *list)
+{
+ int _errno, __errno;
+ struct dpa_fq *dpa_fq, *tmp;
+
+ _errno = 0;
+ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
+ __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
+ if (unlikely(__errno < 0) && _errno >= 0)
+ _errno = __errno;
+ }
+
+ return _errno;
+}
+EXPORT_SYMBOL(dpa_fq_free);
+
+static void
+dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
+ struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
+{
+ struct fm_port_params tx_port_param;
+ bool frag_enabled = false;
+
+ memset(&tx_port_param, 0, sizeof(tx_port_param));
+ dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
+ buf_layout, frag_enabled);
+}
+
+static void
+dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
+ struct dpa_fq *errq, struct dpa_fq *defq,
+ struct dpa_buffer_layout_s *buf_layout)
+{
+ struct fm_port_params rx_port_param;
+ int i;
+ bool frag_enabled = false;
+
+ memset(&rx_port_param, 0, sizeof(rx_port_param));
+ count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
+ rx_port_param.num_pools = (uint8_t)count;
+	for (i = 0; i < count; i++) {
+ rx_port_param.pool_param[i].id = bp[i].bpid;
+ rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
+ }
+
+ dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
+ buf_layout, frag_enabled);
+}
+
+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
+/* Defined as weak, to be implemented by fman pcd tester. */
+int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
+__attribute__((weak));
+
+int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
+#else
+int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
+
+int dpa_free_pcd_fqids(struct device *, uint32_t);
+
+#endif /* CONFIG_FSL_SDK_FMAN_TEST */
+
+int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
+ uint8_t alignment, uint32_t *base_fqid)
+{
+ dev_crit(dev, "callback not implemented!\n");
+
+ return 0;
+}
+
+int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
+{
+ dev_crit(dev, "callback not implemented!\n");
+
+ return 0;
+}
+
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpa_bp *bp, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpa_buffer_layout_s *buf_layout,
+ struct device *dev)
+{
+ struct fm_port_pcd_param rx_port_pcd_param;
+ struct fm_port *rxport = mac_dev->port_dev[RX];
+ struct fm_port *txport = mac_dev->port_dev[TX];
+
+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
+
+ rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
+ rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
+ rx_port_pcd_param.dev = dev;
+ fm_port_pcd_bind(rxport, &rx_port_pcd_param);
+}
+EXPORT_SYMBOL(dpaa_eth_init_ports);
+
+void dpa_release_sgt(struct qm_sg_entry *sgt)
+{
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
+ uint8_t i = 0, j;
+
+ memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
+
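+	/* Walk the SG table and batch consecutive entries that belong to the
+	 * same buffer pool into a single bman_release() of at most
+	 * DPA_BUFF_RELEASE_MAX buffers.
+	 */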
+ do {
+ dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
+ DPA_BUG_ON(!dpa_bp);
+
+ j = 0;
+ do {
+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
+ bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
+
+ j++; i++;
+ } while (j < ARRAY_SIZE(bmb) &&
+ !qm_sg_entry_get_final(&sgt[i-1]) &&
+ qm_sg_entry_get_bpid(&sgt[i-1]) ==
+ qm_sg_entry_get_bpid(&sgt[i]));
+
+ while (bman_release(dpa_bp->pool, bmb, j, 0))
+ cpu_relax();
+ } while (!qm_sg_entry_get_final(&sgt[i-1]));
+}
+EXPORT_SYMBOL(dpa_release_sgt);
+
+void __attribute__((nonnull))
+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
+{
+ struct qm_sg_entry *sgt;
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+ void *vaddr;
+
+ memset(&bmb, 0, sizeof(struct bm_buffer));
+ bm_buffer_set64(&bmb, fd->addr);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ DPA_BUG_ON(!dpa_bp);
+
+ if (fd->format == qm_fd_sg) {
+ vaddr = phys_to_virt(fd->addr);
+ sgt = vaddr + dpa_fd_offset(fd);
+
+ dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+
+ dpa_release_sgt(sgt);
+ addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+			dev_err(dpa_bp->dev, "DMA mapping failed\n");
+ return;
+ }
+ bm_buffer_set64(&bmb, addr);
+ }
+
+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+ cpu_relax();
+}
+EXPORT_SYMBOL(dpa_fd_release);
+
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_mr_entry *msg)
+{
+ switch (msg->ern.rc & QM_MR_RC_MASK) {
+ case QM_MR_RC_CGR_TAILDROP:
+ percpu_priv->ern_cnt.cg_tdrop++;
+ break;
+ case QM_MR_RC_WRED:
+ percpu_priv->ern_cnt.wred++;
+ break;
+ case QM_MR_RC_ERROR:
+ percpu_priv->ern_cnt.err_cond++;
+ break;
+ case QM_MR_RC_ORPWINDOW_EARLY:
+ percpu_priv->ern_cnt.early_window++;
+ break;
+ case QM_MR_RC_ORPWINDOW_LATE:
+ percpu_priv->ern_cnt.late_window++;
+ break;
+ case QM_MR_RC_FQ_TAILDROP:
+ percpu_priv->ern_cnt.fq_tdrop++;
+ break;
+ case QM_MR_RC_ORPWINDOW_RETIRED:
+ percpu_priv->ern_cnt.fq_retired++;
+ break;
+ case QM_MR_RC_ORP_ZERO:
+ percpu_priv->ern_cnt.orp_zero++;
+ break;
+ }
+}
+EXPORT_SYMBOL(count_ern);
+
+/**
+ * Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
+{
+ fm_prs_result_t *parse_result;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h = NULL;
+ u8 l4_proto;
+ u16 ethertype = ntohs(skb->protocol);
+ int retval = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
+ * L4 alone from the FM configuration anyway.
+ */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser.
+ */
+ parse_result = (fm_prs_result_t *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data
+ */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type
+ */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
+ iph = ip_hdr(skb);
+ DPA_BUG_ON(iph == NULL);
+ l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
+ ipv6h = ipv6_hdr(skb);
+ DPA_BUG_ON(ipv6h == NULL);
+ l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_alert(priv->net_dev,
+ "Can't compute HW csum for L3 proto 0x%x\n",
+ ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+ /* This can as well be a BUG() */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_alert(priv->net_dev,
+ "Can't compute HW csum for L4 proto 0x%x\n",
+ l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
+ parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+
+ /* On P1023 and similar platforms fd->cmd interpretation could
+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+ * is not set so we do not need to check; in the future, if/when
+ * using context_a we need to check this bit
+ */
+
+return_error:
+ return retval;
+}
+EXPORT_SYMBOL(dpa_enable_tx_csum);
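+
+/* Illustrative call site (a sketch, not part of this file): the Tx path is
+ * expected to pass a pointer to where FMan looks for its Parse Results,
+ * i.e. just past the private data area in the frame's headroom, roughly:
+ *
+ *	err = dpa_enable_tx_csum(priv, skb, &fd,
+ *				 buf_start + DPA_TX_PRIV_DATA_SIZE);
+ *	// buf_start: hypothetical pointer to the start of the buffer
+ */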
+
+#ifdef CONFIG_FSL_DPAA_CEETM
+void dpa_enable_ceetm(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ priv->ceetm_en = true;
+}
+EXPORT_SYMBOL(dpa_enable_ceetm);
+
+void dpa_disable_ceetm(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ priv->ceetm_en = false;
+}
+EXPORT_SYMBOL(dpa_disable_ceetm);
+#endif
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
@@ -0,0 +1,227 @@
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_ETH_COMMON_H
+#define __DPAA_ETH_COMMON_H
+
+#include <linux/etherdevice.h> /* struct net_device */
+#include <linux/fsl_bman.h> /* struct bm_buffer */
+#include <linux/of_platform.h> /* struct platform_device */
+#include <linux/net_tstamp.h> /* struct hwtstamp_config */
+
+#include "dpaa_eth.h"
+#include "lnxwrp_fsl_fman.h"
+
+#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
+ frag_enabled) \
+{ \
+ param.errq = errq_id; \
+ param.defq = defq_id; \
+ param.priv_data_size = buf_layout->priv_data_size; \
+ param.parse_results = buf_layout->parse_results; \
+ param.hash_results = buf_layout->hash_results; \
+ param.frag_enable = frag_enabled; \
+ param.time_stamp = buf_layout->time_stamp; \
+ param.manip_extra_space = buf_layout->manip_extra_space; \
+ param.data_align = buf_layout->data_align; \
+ fm_set_##type##_port_params(port, &param); \
+}
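+
+/* Informative note: the macro above expands in place; the caller supplies a
+ * stack-allocated struct fm_port_params which the macro fills in and then
+ * binds to the port through fm_set_rx_port_params()/fm_set_tx_port_params()
+ * (see dpaa_eth_init_tx_port()/dpaa_eth_init_rx_port() in
+ * dpaa_eth_common.c).
+ */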
+
+#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+/* each S/G entry can be divided into two S/G entries */
+#define DPA_SGT_ENTRIES_THRESHOLD 7
+#else
+#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
+#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
+
+
+#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
+
+/* return codes for the dpaa-eth hooks */
+enum dpaa_eth_hook_result {
+ /* fd/skb was retained by the hook.
+ *
+ * On the Rx path, this means the Ethernet driver will _not_
+ * deliver the skb to the stack. Instead, the hook implementation
+ * is expected to properly dispose of the skb.
+ *
+ * On the Tx path, the Ethernet driver's dpa_tx() function will
+ * immediately return NETDEV_TX_OK. The hook implementation is expected
+	 * to free the skb. *DO NOT* release it to BMan, or enqueue it to FMan,
+ * unless you know exactly what you're doing!
+ *
+ * On the confirmation/error paths, the Ethernet driver will _not_
+ * perform any fd cleanup, nor update the interface statistics.
+ */
+ DPAA_ETH_STOLEN,
+ /* fd/skb was returned to the Ethernet driver for regular processing.
+ * The hook is not allowed to, for instance, reallocate the skb (as if
+ * by linearizing, copying, cloning or reallocating the headroom).
+ */
+ DPAA_ETH_CONTINUE
+};
+
+typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
+ struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
+typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
+ struct sk_buff *skb, struct net_device *net_dev);
+typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
+ struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
+
+/* used in napi related functions */
+extern u16 qman_portal_max;
+
+/* from dpa_ethtool.c */
+extern const struct ethtool_ops dpa_ethtool_ops;
+
+#ifdef CONFIG_FSL_DPAA_HOOKS
+/* Various hooks used for unit-testing and/or fastpath optimizations.
+ * Currently only one set of such hooks is supported.
+ */
+struct dpaa_eth_hooks_s {
+ /* Invoked on the Tx private path, immediately after receiving the skb
+ * from the stack.
+ */
+ dpaa_eth_egress_hook_t tx;
+
+ /* Invoked on the Rx private path, right before passing the skb
+ * up the stack. At that point, the packet's protocol id has already
+ * been set. The skb's data pointer is now at the L3 header, and
+ * skb->mac_header points to the L2 header. skb->len has been adjusted
+ * to be the length of L3+payload (i.e., the length of the
+ * original frame minus the L2 header len).
+ * For more details on what the skb looks like, see eth_type_trans().
+ */
+ dpaa_eth_ingress_hook_t rx_default;
+
+ /* Driver hook for the Rx error private path. */
+ dpaa_eth_confirm_hook_t rx_error;
+ /* Driver hook for the Tx confirmation private path. */
+ dpaa_eth_confirm_hook_t tx_confirm;
+ /* Driver hook for the Tx error private path. */
+ dpaa_eth_confirm_hook_t tx_error;
+};
+
+void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
+
+extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
+#endif
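+
+/* Illustrative hook (a sketch, not part of the driver): a unit test could
+ * observe the private Rx path without stealing frames by installing a hook
+ * that returns DPAA_ETH_CONTINUE; rx_frames below is a hypothetical counter:
+ *
+ *	static enum dpaa_eth_hook_result count_rx(struct sk_buff *skb,
+ *						  struct net_device *net_dev,
+ *						  u32 fqid)
+ *	{
+ *		atomic_inc(&rx_frames);
+ *		return DPAA_ETH_CONTINUE; // let the driver keep the skb
+ *	}
+ *
+ *	static struct dpaa_eth_hooks_s hooks = { .rx_default = count_rx };
+ *	fsl_dpaa_eth_set_hooks(&hooks);
+ */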
+
+int dpa_netdev_init(struct net_device *net_dev,
+ const uint8_t *mac_addr,
+ uint16_t tx_timeout);
+int __cold dpa_start(struct net_device *net_dev);
+int __cold dpa_stop(struct net_device *net_dev);
+void __cold dpa_timeout(struct net_device *net_dev);
+struct rtnl_link_stats64 * __cold
+dpa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats);
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
+int dpa_ndo_init(struct net_device *net_dev);
+int dpa_set_features(struct net_device *dev, netdev_features_t features);
+netdev_features_t dpa_fix_features(struct net_device *dev,
+ netdev_features_t features);
+#ifdef CONFIG_FSL_DPAA_TS
+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
+ enum port_type rx_tx, const void *data);
+/* Updates the skb shared hw timestamp from the hardware timestamp */
+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
+ struct skb_shared_hwtstamps *shhwtstamps, const void *data);
+#endif /* CONFIG_FSL_DPAA_TS */
+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int __cold dpa_remove(struct platform_device *of_dev);
+struct mac_device * __cold __must_check
+__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
+int dpa_set_mac_address(struct net_device *net_dev, void *addr);
+void dpa_set_rx_mode(struct net_device *net_dev);
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+ struct dpa_buffer_layout_s *layout);
+int __attribute__((nonnull))
+dpa_bp_alloc(struct dpa_bp *dpa_bp);
+void __cold __attribute__((nonnull))
+dpa_bp_free(struct dpa_priv_s *priv);
+struct dpa_bp *dpa_bpid2pool(int bpid);
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
+bool dpa_bpid2pool_use(int bpid);
+void dpa_bp_drain(struct dpa_bp *bp);
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+#endif
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+ u32 fq_start,
+ u32 fq_count,
+ struct list_head *list,
+ enum dpa_fq_type fq_type);
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs,
+		     bool alloc_tx_conf_fqs,
+ enum port_type ptype);
+int dpa_get_channel(void);
+void dpa_release_channel(void);
+int dpaa_eth_add_channel(void *__arg);
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
+ struct fm_port *tx_port);
+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
+int __cold __attribute__((nonnull))
+dpa_fq_free(struct device *dev, struct list_head *list);
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpa_bp *bp, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpa_buffer_layout_s *buf_layout,
+ struct device *dev);
+void dpa_release_sgt(struct qm_sg_entry *sgt);
+void __attribute__((nonnull))
+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_mr_entry *msg);
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
+#ifdef CONFIG_FSL_DPAA_CEETM
+void dpa_enable_ceetm(struct net_device *dev);
+void dpa_disable_ceetm(struct net_device *dev);
+#endif
+struct proxy_device {
+ struct mac_device *mac_dev;
+};
+
+/* MAC device control functions exposed by the proxy interface */
+int dpa_proxy_start(struct net_device *net_dev);
+int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
+int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
+ struct net_device *net_dev);
+int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
+ struct net_device *net_dev);
+
+#endif /* __DPAA_ETH_COMMON_H */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
@@ -0,0 +1,1735 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/of_net.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/percpu.h>
+
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_eth_base.h"
+#include "dpaa_eth_generic.h"
+
+#define DPA_DEFAULT_TX_HEADROOM 64
+#define DPA_GENERIC_SKB_COPY_MAX_SIZE 256
+#define DPA_GENERIC_NAPI_WEIGHT 64
+#define DPA_GENERIC_DESCRIPTION "FSL DPAA Generic Ethernet driver"
+#define DPA_GENERIC_BUFFER_QUOTA 4
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION(DPA_GENERIC_DESCRIPTION);
+
+static uint8_t generic_debug = -1;
+module_param(generic_debug, byte, S_IRUGO);
+MODULE_PARM_DESC(generic_debug, "Module/Driver verbosity level");
+
+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
+static uint16_t tx_timeout = 1000;
+module_param(tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+
+struct rtnl_link_stats64 *__cold
+dpa_generic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+static int dpa_generic_set_mac_address(struct net_device *net_dev,
+ void *addr);
+static int __cold dpa_generic_start(struct net_device *netdev);
+static int __cold dpa_generic_stop(struct net_device *netdev);
+static int dpa_generic_eth_probe(struct platform_device *_of_dev);
+static int dpa_generic_remove(struct platform_device *of_dev);
+static void dpa_generic_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+static int __hot dpa_generic_tx(struct sk_buff *skb,
+ struct net_device *netdev);
+static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf);
+static void dpa_generic_drain_sg_bp(struct dpa_bp *sg_bp, u8 nbuf);
+
+static const struct net_device_ops dpa_generic_ops = {
+ .ndo_open = dpa_generic_start,
+ .ndo_start_xmit = dpa_generic_tx,
+ .ndo_stop = dpa_generic_stop,
+ .ndo_set_mac_address = dpa_generic_set_mac_address,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats64 = dpa_generic_get_stats64,
+ .ndo_init = dpa_ndo_init,
+ .ndo_set_features = dpa_set_features,
+ .ndo_fix_features = dpa_fix_features,
+ .ndo_change_mtu = dpa_change_mtu,
+};
+
+static void dpa_generic_draining_timer(unsigned long arg)
+{
+ struct dpa_generic_priv_s *priv = (struct dpa_generic_priv_s *)arg;
+
+ dpa_generic_drain_bp(priv->draining_tx_bp, DPA_GENERIC_BUFFER_QUOTA);
+ dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp,
+ DPA_GENERIC_BUFFER_QUOTA);
+
+ if (priv->net_dev->flags & IFF_UP)
+ mod_timer(&(priv->timer), jiffies + 1);
+}
+
+struct rtnl_link_stats64 *__cold
+dpa_generic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(netdev);
+ u64 *cpustats;
+ u64 *netstats = (u64 *)stats;
+ int i, j;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (u64 *)&percpu_priv->stats;
+
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+
+ return stats;
+}
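+
+/* Informative note: the summation above treats struct rtnl_link_stats64 as
+ * a flat array of u64 counters, which is valid because the structure is
+ * defined as a sequence of __u64 fields; each CPU's private counters are
+ * accumulated field by field into the caller's stats.
+ */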
+
+static int dpa_generic_set_mac_address(struct net_device *net_dev,
+ void *addr)
+{
+ const struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
+ int _errno;
+
+ _errno = eth_mac_addr(net_dev, addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno);
+ return _errno;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id dpa_generic_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-generic"
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, dpa_generic_match);
+
+static struct platform_driver dpa_generic_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = dpa_generic_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpa_generic_eth_probe,
+ .remove = dpa_generic_remove
+};
+
+static int get_port_ref(struct device_node *dev_node,
+ struct fm_port **port)
+{
+ struct platform_device *port_of_dev = NULL;
+ struct device *op_dev = NULL;
+ struct device_node *port_node = NULL;
+
+ port_node = of_parse_phandle(dev_node, "fsl,fman-oh-port", 0);
+ if (port_node == NULL)
+ return -EINVAL;
+
+ port_of_dev = of_find_device_by_node(port_node);
+ of_node_put(port_node);
+
+ if (port_of_dev == NULL)
+ return -EINVAL;
+
+ /* get the reference to oh port from FMD */
+ op_dev = &port_of_dev->dev;
+ *port = fm_port_bind(op_dev);
+
+ if (*port == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void dpaa_generic_napi_enable(struct dpa_generic_priv_s *priv)
+{
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ for (j = 0; j < qman_portal_max; j++)
+ napi_enable(&percpu_priv->np[j].napi);
+ }
+}
+
+static void dpaa_generic_napi_disable(struct dpa_generic_priv_s *priv)
+{
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ for (j = 0; j < qman_portal_max; j++)
+ napi_disable(&percpu_priv->np[j].napi);
+ }
+}
+
+static struct device_node *get_rx_op_port_node(struct platform_device *_of_dev)
+{
+ struct device *dev = &_of_dev->dev;
+ struct device_node *port_node = NULL;
+ struct device_node *onic_node = NULL;
+ int num_ports = 0;
+
+ onic_node = dev->of_node;
+
+ num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
+ if (num_ports != 2) {
+ dev_err(dev, "There should be two O/H port handles in the device tree\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ port_node = of_parse_phandle(onic_node, "fsl,oh-ports", 0);
+ if (port_node == NULL) {
+ dev_err(dev, "Cannot find O/H port node in the device tree\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ return port_node;
+}
+
+static int __cold dpa_generic_start(struct net_device *netdev)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(netdev);
+
+ /* seed default buffer pool */
+ dpa_bp_priv_seed(priv->rx_bp);
+
+ dpaa_generic_napi_enable(priv);
+ netif_tx_start_all_queues(netdev);
+
+ mod_timer(&priv->timer, jiffies + 100);
+
+ return 0;
+}
+
+static int __cold dpa_generic_stop(struct net_device *netdev)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+ dpaa_generic_napi_disable(priv);
+
+ return 0;
+}
+
+static enum qman_cb_dqrr_result __hot
+dpa_generic_rx_err_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *netdev;
+ struct dpa_generic_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd;
+ int *countptr;
+
+ netdev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(netdev);
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ countptr = raw_cpu_ptr(priv->rx_bp->percpu_count);
+ fd = &dq->fd;
+
+ /* TODO: extract bpid from the fd; when multiple bps are supported
+ * there won't be a default bp
+ */
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) {
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpa_fd_release(netdev, fd);
+ goto qman_consume;
+ }
+
+	/* Limit common, possibly innocuous Rx FIFO Overflow errors'
+	 * interference with zero-loss convergence benchmark results.
+	 */
+	if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
+		pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
+	else if (netif_msg_hw(priv) && net_ratelimit())
+		netdev_err(netdev, "Err FD status 2 = 0x%08x\n",
+			   fd->status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ if (fd->status & FM_PORT_FRM_ERR_DMA)
+ percpu_priv->rx_errors.dme++;
+ if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
+ percpu_priv->rx_errors.fpe++;
+ if (fd->status & FM_PORT_FRM_ERR_SIZE)
+ percpu_priv->rx_errors.fse++;
+ if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
+ percpu_priv->rx_errors.phe++;
+
+ /* TODO dpa_csum_validation */
+
+ dpa_fd_release(netdev, fd);
+
+qman_consume:
+ return qman_cb_dqrr_consume;
+}
+
+
+static enum qman_cb_dqrr_result __hot
+dpa_generic_rx_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *netdev;
+ struct dpa_generic_priv_s *priv;
+ struct dpa_bp *bp;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct sk_buff **skbh;
+ struct sk_buff *skb;
+ const struct qm_fd *fd = &dq->fd;
+ unsigned int skb_len;
+ u32 fd_status = fd->status;
+ u64 pad;
+ dma_addr_t addr = qm_fd_addr(fd);
+ unsigned int data_start;
+ unsigned long skb_addr;
+ int *countptr;
+
+ netdev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(netdev);
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ countptr = raw_cpu_ptr(priv->rx_bp->percpu_count);
+
+ /* This is needed for TCP traffic as draining only on TX is not
+ * enough
+ */
+ dpa_generic_drain_bp(priv->draining_tx_bp, 1);
+ dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp, 1);
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ return qman_cb_dqrr_stop;
+
+ if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) {
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpa_fd_release(netdev, fd);
+ goto qman_consume;
+ }
+
+ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), -1);
+
+	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(netdev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+ dpa_fd_release(netdev, fd);
+ goto qman_consume;
+ }
+ if (unlikely(fd->format != qm_fd_contig)) {
+ percpu_priv->stats.rx_dropped++;
+ if (netif_msg_rx_status(priv) && net_ratelimit())
+ netdev_warn(netdev, "Dropping a SG frame\n");
+ dpa_fd_release(netdev, fd);
+ goto qman_consume;
+ }
+
+ bp = dpa_bpid2pool(fd->bpid);
+
+ /* find out the pad */
+ skb_addr = virt_to_phys(skb->head);
+ pad = addr - skb_addr;
+
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL);
+
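+	/* The buffer is handed off to the stack as an skb below, so it no
+	 * longer counts towards this pool's per-cpu buffer count.
+	 */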
+ countptr = raw_cpu_ptr(bp->percpu_count);
+ (*countptr)--;
+
+ /* The skb is currently pointed at head + headroom. The packet
+ * starts at skb->head + pad + fd offset.
+ */
+ data_start = (unsigned int)(pad + dpa_fd_offset(fd) -
+ skb_headroom(skb));
+ skb_put(skb, dpa_fd_length(fd) + data_start);
+ skb_pull(skb, data_start);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (unlikely(dpa_check_rx_mtu(skb, netdev->mtu))) {
+ percpu_priv->stats.rx_dropped++;
+ dev_kfree_skb(skb);
+ goto qman_consume;
+ }
+
+ skb_len = skb->len;
+
+ if (fd->status & FM_FD_STAT_L4CV)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ goto qman_consume;
+
+ percpu_priv->stats.rx_packets++;
+ percpu_priv->stats.rx_bytes += skb_len;
+
+qman_consume:
+ return qman_cb_dqrr_consume;
+}
+
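+/* Acquire (and discard) buffers that FMan released back into the
+ * draining S/G pool after transmission. The frag data pages behind
+ * these buffers are freed together with their skb when the SGT buffer
+ * is drained from the main Tx pool in dpa_generic_drain_bp(), so
+ * nothing else needs to be done with them here.
+ */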
+static void dpa_generic_drain_sg_bp(struct dpa_bp *sgbp, u8 nbuf)
+{
+ int ret;
+ struct bm_buffer bmb[8];
+
+ do {
+ ret = bman_acquire(sgbp->pool, bmb, nbuf, 0);
+ } while (ret >= 0);
+}
+
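+/* Unmap and free an S/G frame drained from the Tx pool. The SGT is
+ * located at a fixed DPA_DEFAULT_TX_HEADROOM offset into the buffer;
+ * note this assumes the Tx headroom was left at its default value.
+ */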
+inline void dpa_release_sg(struct sk_buff *skb, dma_addr_t addr,
+ struct dpa_bp *bp)
+{
+ struct qm_sg_entry *sgt = phys_to_virt(addr + DPA_DEFAULT_TX_HEADROOM);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ dma_addr_t sg_addr;
+ int j;
+
+ dma_unmap_single(bp->dev, addr, DPA_DEFAULT_TX_HEADROOM +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ DMA_BIDIRECTIONAL);
+
+ for (j = 0; j <= nr_frags; j++) {
+ DPA_BUG_ON(sgt[j].extension);
+ sg_addr = qm_sg_addr(&sgt[j]);
+ dma_unmap_page(bp->dev, sg_addr,
+ sgt[j].length, DMA_BIDIRECTIONAL);
+ }
+
+ dev_kfree_skb_any(skb);
+}
+
+inline void dpa_release_contig(struct sk_buff *skb, dma_addr_t addr,
+ struct dpa_bp *bp)
+{
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL);
+ dev_kfree_skb_any(skb);
+}
+
+static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf)
+{
+ int ret, i;
+ struct bm_buffer bmb[8];
+ dma_addr_t addr;
+ int *countptr = raw_cpu_ptr(bp->percpu_count);
+ int count = *countptr;
+ struct sk_buff **skbh;
+
+ do {
+ /* bman_acquire will fail if nbuf > 8 */
+ ret = bman_acquire(bp->pool, bmb, nbuf, 0);
+ if (ret > 0) {
+ for (i = 0; i < nbuf; i++) {
+ addr = bm_buf_addr(&bmb[i]);
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_TO_DEVICE);
+
+ if (skb_is_nonlinear(*skbh))
+ dpa_release_sg(*skbh, addr, bp);
+ else
+ dpa_release_contig(*skbh, addr, bp);
+ }
+ count -= i;
+ }
+ } while (ret > 0);
+
+ *countptr = count;
+}
+
+/**
+ * Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+static int dpa_generic_tx_csum(struct dpa_generic_priv_s *priv,
+ struct sk_buff *skb,
+ struct qm_fd *fd,
+ char *parse_results)
+{
+ fm_prs_result_t *parse_result;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h = NULL;
+ int l4_proto;
+ int ethertype = ntohs(skb->protocol);
+ int retval = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
+ * L4 alone from the FM configuration anyway.
+ */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser.
+ */
+ parse_result = (fm_prs_result_t *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data
+ */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type
+ */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = FM_L3_PARSE_RESULT_IPV4;
+ iph = ip_hdr(skb);
+ BUG_ON(iph == NULL);
+ l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = FM_L3_PARSE_RESULT_IPV6;
+ ipv6h = ipv6_hdr(skb);
+ BUG_ON(ipv6h == NULL);
+ l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_alert(priv->net_dev,
+ "Can't compute HW csum for L3 proto 0x%x\n",
+ ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+ /* This can as well be a BUG() */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_alert(priv->net_dev,
+ "Can't compute HW csum for L4 proto 0x%x\n",
+ l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
+ parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+
+ /* On P1023 and similar platforms fd->cmd interpretation could
+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+ * is not set so we do not need to check; in the future, if/when
+ * using context_a we need to check this bit
+ */
+
+return_error:
+ return retval;
+}
+
+static inline int generic_skb_to_sg_fd(struct dpa_generic_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ struct dpa_bp *dpa_bp = priv->draining_tx_bp;
+ struct dpa_bp *dpa_sg_bp = priv->draining_tx_sg_bp;
+ dma_addr_t addr;
+ struct sk_buff **skbh;
+ struct net_device *net_dev = priv->net_dev;
+ int err;
+
+ struct qm_sg_entry *sgt;
+ void *sgt_buf;
+ void *buffer_start;
+ skb_frag_t *frag;
+ int i, j;
+ const enum dma_data_direction dma_dir = DMA_BIDIRECTIONAL;
+ const int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ memset(fd, 0, sizeof(*fd));
+ fd->format = qm_fd_sg;
+
+ /* get a page frag to store the SGTable */
+ sgt_buf = netdev_alloc_frag(priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags));
+ if (unlikely(!sgt_buf)) {
+ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
+ return -ENOMEM;
+ }
+
+ memset(sgt_buf, 0, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags));
+
+ /* do this before dma_map_single(DMA_TO_DEVICE), because we may need to
+ * write into the skb.
+ */
+ err = dpa_generic_tx_csum(priv, skb, fd,
+ sgt_buf + DPA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "HW csum error: %d\n", err);
+ goto csum_failed;
+ }
+
+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ sgt[0].bpid = dpa_sg_bp->bpid;
+ sgt[0].offset = 0;
+ sgt[0].length = skb_headlen(skb);
+ sgt[0].extension = 0;
+ sgt[0].final = 0;
+
+ addr = dma_map_single(dpa_sg_bp->dev, skb->data, sgt[0].length,
+ dma_dir);
+ if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) {
+		dev_err(dpa_sg_bp->dev, "DMA mapping failed\n");
+ err = -EINVAL;
+ goto sg0_map_failed;
+ }
+
+ sgt[0].addr_hi = (uint8_t)upper_32_bits(addr);
+ sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
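+	/* The SGT lives in memory read directly by FMan, so its 32-bit
+	 * fields must be big-endian; addr_hi is a single byte and needs
+	 * no swap.
+	 */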
+
+ /* populate the rest of SGT entries */
+ for (i = 1; i <= nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ sgt[i].bpid = dpa_sg_bp->bpid;
+ sgt[i].offset = 0;
+ sgt[i].length = frag->size;
+ sgt[i].extension = 0;
+ sgt[i].final = 0;
+
+ DPA_BUG_ON(!skb_frag_page(frag));
+		addr = skb_frag_dma_map(dpa_sg_bp->dev, frag, 0, sgt[i].length,
+ dma_dir);
+ if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) {
+			dev_err(dpa_sg_bp->dev, "DMA mapping failed\n");
+ err = -EINVAL;
+ goto sg_map_failed;
+ }
+
+ /* keep the offset in the address */
+ sgt[i].addr_hi = (uint8_t)upper_32_bits(addr);
+ sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
+ }
+ sgt[i - 1].final = 1;
+
+ fd->length20 = skb->len;
+ fd->offset = priv->tx_headroom;
+
+ /* DMA map the SGT page */
+ buffer_start = (void *)sgt - dpa_fd_offset(fd);
+ /* Can't write at "negative" offset in buffer_start, because this skb
+ * may not have been allocated by us.
+ */
+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
+
+ addr = dma_map_single(dpa_bp->dev, buffer_start,
+ priv->tx_headroom + sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+		dev_err(dpa_bp->dev, "DMA mapping failed\n");
+ err = -EINVAL;
+ goto sgt_map_failed;
+ }
+
+ fd->bpid = dpa_bp->bpid;
+ fd->addr_hi = (uint8_t)upper_32_bits(addr);
+ fd->addr_lo = lower_32_bits(addr);
+
+ return 0;
+
+sgt_map_failed:
+sg_map_failed:
+ for (j = 0; j < i; j++)
+ dma_unmap_page(dpa_sg_bp->dev, qm_sg_addr(&sgt[j]),
+ be32_to_cpu(sgt[j].length), dma_dir);
+sg0_map_failed:
+csum_failed:
+ put_page(virt_to_head_page(sgt_buf));
+
+ return err;
+}
+
+static int __hot dpa_generic_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(netdev);
+ struct dpa_percpu_priv_s *percpu_priv =
+ raw_cpu_ptr(priv->percpu_priv);
+ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
+ struct dpa_bp *bp = priv->draining_tx_bp;
+ struct dpa_bp *sg_bp = priv->draining_tx_sg_bp;
+ struct sk_buff **skbh = NULL;
+ dma_addr_t addr;
+ struct qm_fd fd;
+ int queue_mapping;
+ struct qman_fq *egress_fq;
+ const bool nonlinear = skb_is_nonlinear(skb);
+ int i = 0, err = 0;
+ int *countptr;
+
+ if (nonlinear && skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES) {
+ err = generic_skb_to_sg_fd(priv, skb, &fd);
+ if (unlikely(err < 0))
+ goto sg_failed;
+ percpu_priv->tx_frag_skbuffs++;
+ addr = qm_fd_addr(&fd);
+ } else {
+ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
+ if (unlikely(!skb_new)) {
+ percpu_stats->tx_errors++;
+ kfree_skb(skb);
+ goto done;
+ }
+
+ kfree_skb(skb);
+ skb = skb_new;
+ }
+
+ clear_fd(&fd);
+
+ /* store skb backpointer to release the skb later */
+ skbh = (struct sk_buff **)(skb->data - priv->tx_headroom);
+ *skbh = skb;
+
+ /* do this before dma_map_single(), because we may need to write
+ * into the skb.
+ */
+ err = dpa_generic_tx_csum(priv, skb, &fd,
+ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(netdev, "HW csum error: %d\n", err);
+			goto dma_mapping_failed;
+ }
+
+ addr = dma_map_single(bp->dev, skbh,
+ skb->len + priv->tx_headroom, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(bp->dev, addr))) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(netdev, "dma_map_single() failed\n");
+ goto dma_mapping_failed;
+ }
+
+ fd.format = qm_fd_contig;
+ fd.length20 = skb->len;
+ fd.offset = priv->tx_headroom;
+ fd.addr_hi = (uint8_t)upper_32_bits(addr);
+ fd.addr_lo = lower_32_bits(addr);
+ /* fd.cmd |= FM_FD_CMD_FCO; */
+ fd.bpid = bp->bpid;
+ }
+
+ dpa_generic_drain_bp(bp, 1);
+ dpa_generic_drain_sg_bp(sg_bp, 1);
+
+ queue_mapping = dpa_get_queue_mapping(skb);
+ egress_fq = priv->egress_fqs[queue_mapping];
+
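+	/* qman_enqueue() returns -EBUSY while the enqueue ring is full;
+	 * spin on it with a large retry bound rather than blocking, since
+	 * ndo_start_xmit runs in atomic context.
+	 */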
+ for (i = 0; i < 100000; i++) {
+ err = qman_enqueue(egress_fq, &fd, 0);
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_fifo_errors++;
+ goto xmit_failed;
+ }
+
+ countptr = raw_cpu_ptr(bp->percpu_count);
+ (*countptr)++;
+
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += fd.length20;
+ netdev->trans_start = jiffies;
+
+ goto done;
+
+xmit_failed:
+ dma_unmap_single(bp->dev, addr, fd.offset + fd.length20, DMA_TO_DEVICE);
+sg_failed:
+dma_mapping_failed:
+ percpu_stats->tx_errors++;
+ dev_kfree_skb(skb);
+done:
+ return NETDEV_TX_OK;
+}
+
+static int dpa_generic_napi_add(struct net_device *net_dev)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
+ qman_portal_max * sizeof(struct dpa_napi_portal),
+ GFP_KERNEL);
+
+ if (unlikely(percpu_priv->np == NULL)) {
+ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < qman_portal_max; i++)
+ netif_napi_add(net_dev, &percpu_priv->np[i].napi,
+ dpaa_eth_poll, DPA_GENERIC_NAPI_WEIGHT);
+ }
+
+ return 0;
+}
+
+static void dpa_generic_napi_del(struct net_device *net_dev)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ if (percpu_priv->np) {
+ for (i = 0; i < qman_portal_max; i++)
+ netif_napi_del(&percpu_priv->np[i].napi);
+
+ devm_kfree(net_dev->dev.parent, percpu_priv->np);
+ }
+ }
+}
+
+static int dpa_generic_netdev_init(struct device_node *dpa_node,
+ struct net_device *netdev)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(netdev);
+ struct device *dev = netdev->dev.parent;
+ const uint8_t *mac_addr;
+ int err;
+
+ netdev->netdev_ops = &dpa_generic_ops;
+
+ mac_addr = of_get_mac_address(dpa_node);
+ if (mac_addr == NULL) {
+ if (netif_msg_probe(priv))
+ dev_err(dev, "No virtual MAC address found!\n");
+ return -EINVAL;
+ }
+
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netdev->features |= netdev->hw_features;
+ netdev->vlan_features = netdev->features;
+
+ memcpy(netdev->perm_addr, mac_addr, netdev->addr_len);
+ memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
+
+ netdev->ethtool_ops = &dpa_generic_ethtool_ops;
+
+ netdev->needed_headroom = priv->tx_headroom;
+ netdev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+ err = register_netdev(netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static struct dpa_fq_cbs_t generic_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = dpa_generic_rx_dqrr } },
+ .rx_errq = { .cb = { .dqrr = dpa_generic_rx_err_dqrr } },
+ .egress_ern = { .cb = { .ern = dpa_generic_ern } }
+};
+
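+/* Parse an array of (start, count) cell pairs from a device tree
+ * property into fqid_cell entries, converting to CPU endianness.
+ */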
+static struct fqid_cell *__fq_alloc(struct device *dev,
+ int num_ranges,
+ const void *fqids_off)
+{
+ struct fqid_cell *fqids;
+ int i;
+
+ fqids = kzalloc(sizeof(*fqids) * num_ranges, GFP_KERNEL);
+ if (fqids == NULL)
+ return NULL;
+
+	/* convert to CPU endianness */
+ for (i = 0; i < num_ranges; i++) {
+ fqids[i].start = be32_to_cpup(fqids_off +
+ i * sizeof(*fqids));
+ fqids[i].count = be32_to_cpup(fqids_off +
+ i * sizeof(*fqids) + sizeof(__be32));
+ }
+
+ return fqids;
+}
+
+static struct list_head *dpa_generic_fq_probe(struct platform_device *_of_dev,
+ struct fm_port *tx_port)
+{
+ struct device *dev = &_of_dev->dev;
+ struct device_node *oh_node = NULL;
+ struct device_node *onic_node = NULL;
+ struct fqid_cell *fqids;
+ const void *fqids_off;
+ struct dpa_fq *fq, *tmp;
+ struct list_head *list;
+ int num_ranges;
+ int i, lenp;
+
+ onic_node = dev->of_node;
+
+ list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
+ if (!list) {
+ dev_err(dev, "Cannot allocate space for frame queues list\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(list);
+
+ /* RX queues (RX error, RX default) are specified in Rx O/H port node */
+ oh_node = get_rx_op_port_node(_of_dev);
+ fqids_off = of_get_property(oh_node, "fsl,qman-frame-queues-oh", &lenp);
+	if (fqids_off == NULL) {
+		dev_err(dev, "Need Rx FQ definition in dts for generic devices\n");
+		of_node_put(oh_node);
+		return ERR_PTR(-EINVAL);
+	}
+ of_node_put(oh_node);
+
+ num_ranges = lenp / sizeof(*fqids);
+ if (num_ranges != 2) {
+ dev_err(dev, "Need 2 Rx FQ definitions in dts for generic devices\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+	fqids = __fq_alloc(dev, num_ranges, fqids_off);
+	if (!fqids)
+		return ERR_PTR(-ENOMEM);
+ if (!dpa_fq_alloc(dev, fqids[0].start, fqids[0].count, list,
+ FQ_TYPE_RX_ERROR) ||
+ !dpa_fq_alloc(dev, fqids[1].start, fqids[1].count,
+ list, FQ_TYPE_RX_DEFAULT)) {
+ dev_err(dev, "Cannot allocate space for default frame queues\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ kfree(fqids);
+
+ /* TX queues */
+ fqids_off = of_get_property(onic_node, "fsl,qman-frame-queues-tx",
+ &lenp);
+ if (fqids_off == NULL) {
+ dev_err(dev, "Need Tx FQ definition in dts for generic devices\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ num_ranges = lenp / sizeof(*fqids);
+	fqids = __fq_alloc(dev, num_ranges, fqids_off);
+	if (!fqids)
+		return ERR_PTR(-ENOMEM);
+ for (i = 0; i < num_ranges; i++) {
+ if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
+ FQ_TYPE_TX)) {
+ dev_err(dev, "_dpa_fq_alloc() failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+ kfree(fqids);
+
+ /* optional RX PCD queues */
+ lenp = 0;
+ fqids_off = of_get_property(onic_node,
+ "fsl,qman-frame-queues-rx", &lenp);
+ num_ranges = lenp / sizeof(*fqids);
+	fqids = __fq_alloc(dev, num_ranges, fqids_off);
+	if (!fqids)
+		return ERR_PTR(-ENOMEM);
+ for (i = 0; i < num_ranges; i++) {
+ if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
+ FQ_TYPE_RX_PCD)) {
+ dev_err(dev, "_dpa_fq_alloc() failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+ kfree(fqids);
+
+ list_for_each_entry_safe(fq, tmp, list, list) {
+ if (fq->fq_type == FQ_TYPE_TX)
+ fq->channel = fm_get_tx_port_channel(tx_port);
+ }
+
+ return list;
+}
+
+static void dpa_generic_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *netdev;
+ const struct dpa_generic_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct qm_fd fd = msg->ern.fd;
+
+ netdev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(netdev);
+ /* Non-migratable context, safe to use raw_cpu_ptr */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+ count_ern(percpu_priv, msg);
+
+ /* release this buffer into the draining buffer pool */
+ dpa_fd_release(netdev, &fd);
+}
+
+static int dpa_generic_rx_bp_probe(struct platform_device *_of_dev,
+ struct fm_port *rx_port,
+ int *rx_bp_count,
+ struct dpa_bp **rx_bp,
+ struct dpa_buffer_layout_s **rx_buf_layout)
+{
+ struct device *dev = &_of_dev->dev;
+ struct fm_port_params params;
+ struct dpa_bp *bp = NULL;
+ int bp_count = 0;
+ int bpid;
+ const __be32 *bpool_cfg = NULL;
+ struct device_node *dev_node = NULL;
+ struct device_node *oh_node = NULL;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+ int lenp = 0;
+ int na = 0, ns = 0;
+ int err = 0, i = 0;
+
+ oh_node = get_rx_op_port_node(_of_dev);
+
+ bp_count = of_count_phandle_with_args(oh_node,
+ "fsl,bman-buffer-pools", NULL);
+ if (bp_count <= 0) {
+		dev_err(dev, "Missing buffer pool handles in the oNIC node of the device tree\n");
+ return -EINVAL;
+ }
+
+ bp = devm_kzalloc(dev, bp_count * sizeof(*bp), GFP_KERNEL);
+ if (unlikely(bp == NULL)) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ err = -ENOMEM;
+ goto _return_of_node_put;
+ }
+
+ dev_node = of_find_node_by_path("/");
+ if (unlikely(dev_node == NULL)) {
+ dev_err(dev, "of_find_node_by_path(/) failed\n");
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ na = of_n_addr_cells(dev_node);
+ ns = of_n_size_cells(dev_node);
+
+ of_node_put(dev_node);
+
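+	/* Each phandle in "fsl,bman-buffer-pools" points to a buffer pool
+	 * node carrying an "fsl,bpid" and an "fsl,bpool-ethernet-cfg"
+	 * tuple of <count size base_address> cells.
+	 */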
+ for (i = 0; i < bp_count; i++) {
+ dev_node = of_parse_phandle(oh_node,
+ "fsl,bman-buffer-pools", i);
+ if (dev_node == NULL) {
+ dev_err(dev, "Cannot find buffer pool node in the device tree\n");
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
+ if (err) {
+ dev_err(dev, "Cannot find buffer pool ID in the buffer pool node in the device tree\n");
+ goto _return_of_node_put;
+ }
+
+ bp[i].bpid = (uint8_t)bpid;
+
+ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
+ &lenp);
+ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
+ bp[i].config_count = (int)of_read_number(bpool_cfg, ns);
+ bp[i].size = of_read_number(bpool_cfg + ns, ns);
+ bp[i].paddr = 0;
+ bp[i].seed_pool = false;
+ } else {
+ dev_err(dev, "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ bp[i].percpu_count = devm_alloc_percpu(dev,
+ *bp[i].percpu_count);
+ }
+
+ of_node_put(oh_node);
+
+ buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
+ if (!buf_layout) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ err = -ENOMEM;
+ goto _return_of_node_put;
+ }
+
+ buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
+ buf_layout->parse_results = false;
+ buf_layout->hash_results = false;
+ buf_layout->time_stamp = false;
+ fm_port_get_buff_layout_ext_params(rx_port, &params);
+ buf_layout->manip_extra_space = params.manip_extra_space;
+ /* a value of zero for data alignment means "don't care", so align to
+ * a non-zero value to prevent FMD from using its own default
+ */
+ buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
+
+ *rx_buf_layout = buf_layout;
+ *rx_bp = bp;
+ *rx_bp_count = bp_count;
+
+ return 0;
+
+_return_of_node_put:
+ if (dev_node)
+ of_node_put(dev_node);
+
+ return err;
+}
+
+static int dpa_generic_tx_bp_probe(struct platform_device *_of_dev,
+ struct fm_port *tx_port,
+ struct dpa_bp **draining_tx_bp,
+ struct dpa_bp **draining_tx_sg_bp,
+ struct dpa_buffer_layout_s **tx_buf_layout)
+{
+ struct device *dev = &_of_dev->dev;
+ struct fm_port_params params;
+ struct dpa_bp *bp = NULL;
+ struct dpa_bp *bp_sg = NULL;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+
+ buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
+ if (!buf_layout) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
+ buf_layout->parse_results = true;
+ buf_layout->hash_results = true;
+ buf_layout->time_stamp = false;
+
+ fm_port_get_buff_layout_ext_params(tx_port, &params);
+ buf_layout->manip_extra_space = params.manip_extra_space;
+ buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
+
+ bp = devm_kzalloc(dev, sizeof(*bp), GFP_KERNEL);
+ if (unlikely(bp == NULL)) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ bp->size = dpa_bp_size(buf_layout);
+ bp->percpu_count = devm_alloc_percpu(dev, *bp->percpu_count);
+ bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
+
+ *draining_tx_bp = bp;
+
+ bp_sg = devm_kzalloc(dev, sizeof(*bp_sg), GFP_KERNEL);
+ if (unlikely(bp_sg == NULL)) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ bp_sg->size = dpa_bp_size(buf_layout);
+ bp_sg->percpu_count = alloc_percpu(*bp_sg->percpu_count);
+ bp_sg->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
+
+ *draining_tx_sg_bp = bp_sg;
+
+ *tx_buf_layout = buf_layout;
+
+ return 0;
+}
+
+static int dpa_generic_buff_dealloc_probe(struct platform_device *_of_dev,
+ int *disable_buff_dealloc)
+{
+ struct device *dev = &_of_dev->dev;
+ const phandle *disable_handle = NULL;
+ int lenp = 0;
+ int err = 0;
+
+ disable_handle = of_get_property(dev->of_node,
+ "fsl,disable_buff_dealloc", &lenp);
+ if (disable_handle != NULL)
+ *disable_buff_dealloc = 1;
+
+ return err;
+}
+
+static int dpa_generic_port_probe(struct platform_device *_of_dev,
+ struct fm_port **rx_port,
+ struct fm_port **tx_port)
+{
+ struct device *dev = &_of_dev->dev;
+ struct device_node *dev_node = NULL;
+ struct device_node *onic_node = NULL;
+ int num_ports = 0;
+ int err = 0;
+
+ onic_node = dev->of_node;
+
+ num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
+ if (num_ports != 2) {
+		dev_err(dev, "There should be two OH ports in device tree (one for RX, one for TX)\n");
+ return -EINVAL;
+ }
+
+ dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", RX);
+ if (dev_node == NULL) {
+ dev_err(dev, "Cannot find Rx OH port node in device tree\n");
+		return -EFAULT;
+ }
+
+ err = get_port_ref(dev_node, rx_port);
+ if (err) {
+ dev_err(dev, "Cannot read Rx OH port node in device tree\n");
+ return err;
+ }
+
+ dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", TX);
+ if (dev_node == NULL) {
+ dev_err(dev, "Cannot find Tx OH port node in device tree\n");
+ return -EFAULT;
+ }
+
+ err = get_port_ref(dev_node, tx_port);
+ if (err) {
+ dev_err(dev, "Cannot read Tx OH port node in device tree\n");
+ return err;
+ }
+
+ return 0;
+}
+
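+/* Ingress FQs are never enqueued to by software and dequeue into this
+ * interface's pool channel; egress FQs are enqueued to a direct-connect
+ * portal channel belonging to the Tx O/H port.
+ */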
+static inline void dpa_generic_setup_ingress(
+ const struct dpa_generic_priv_s *priv,
+ struct dpa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static inline void dpa_generic_setup_egress(
+ const struct dpa_generic_priv_s *priv,
+ struct dpa_fq *fq,
+ struct fm_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = fm_get_tx_port_channel(port);
+}
+
+static void dpa_generic_fq_setup(struct dpa_generic_priv_s *priv,
+ const struct dpa_fq_cbs_t *fq_cbs,
+ struct fm_port *tx_port)
+{
+ struct dpa_fq *fq;
+ int egress_cnt = 0;
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_RX_ERROR:
+ dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+ break;
+ case FQ_TYPE_RX_PCD:
+ dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_TX:
+ dpa_generic_setup_egress(priv, fq,
+ tx_port, &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < DPAA_ETH_TX_QUEUES)
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ break;
+ default:
+ dev_warn(priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+ break;
+ }
+ }
+
+ /* The number of Tx queues may be smaller than the number of cores, if
+ * the Tx queue range is specified in the device tree instead of being
+ * dynamically allocated.
+ * Make sure all CPUs receive a corresponding Tx queue.
+ */
+ while (egress_cnt < DPAA_ETH_TX_QUEUES) {
+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == DPAA_ETH_TX_QUEUES)
+ break;
+ }
+ }
+}
+
+static int dpa_generic_fq_init(struct dpa_fq *dpa_fq, int disable_buff_dealloc)
+{
+ int _errno;
+ struct device *dev;
+ struct qman_fq *fq;
+ struct qm_mcc_initfq initfq;
+
+ dev = dpa_fq->net_dev->dev.parent;
+
+ if (dpa_fq->fqid == 0)
+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
+ if (_errno) {
+ dev_err(dev, "qman_create_fq() failed\n");
+ return _errno;
+ }
+ fq = &dpa_fq->fq_base;
+
+ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+ /* FIXME: why would we want to keep an empty FQ in cache? */
+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+
+ /* FQ placement */
+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+
+ initfq.fqd.dest.channel = dpa_fq->channel;
+ initfq.fqd.dest.wq = dpa_fq->wq;
+
+ if (dpa_fq->fq_type == FQ_TYPE_TX && !disable_buff_dealloc) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ /* ContextA: A2V=1 (contextA A2 field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ */
+ initfq.fqd.context_a.hi = 0x10000000;
+ initfq.fqd.context_a.lo = 0x80000000;
+ }
+
+ /* Initialization common to all ingress queues */
+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.fqd.fq_ctrl |=
+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ initfq.fqd.context_a.stashing.data_cl = 2;
+ initfq.fqd.context_a.stashing.annotation_cl = 1;
+ initfq.fqd.context_a.stashing.context_cl =
+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+ }
+
+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (_errno < 0) {
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), _errno);
+ qman_destroy_fq(fq, 0);
+ return _errno;
+ }
+
+ dpa_fq->fqid = qman_fq_fqid(fq);
+
+ return 0;
+}
+
+static int dpa_generic_fq_create(struct net_device *netdev,
+ struct list_head *dpa_fq_list,
+ struct fm_port *tx_port)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(netdev);
+ struct dpa_fq *fqs = NULL, *tmp = NULL;
+ struct task_struct *kth;
+ int err = 0;
+ int channel;
+
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ list_replace_init(dpa_fq_list, &priv->dpa_fq_list);
+
+ channel = dpa_get_channel();
+ if (channel < 0)
+ return channel;
+ priv->channel = (uint16_t)channel;
+
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", netdev, priv->channel);
+	if (IS_ERR(kth))
+		return PTR_ERR(kth);
+
+ dpa_generic_fq_setup(priv, &generic_fq_cbs, tx_port);
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(fqs, tmp, &priv->dpa_fq_list, list) {
+ err = dpa_generic_fq_init(fqs, priv->disable_buff_dealloc);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpa_generic_bp_create(struct net_device *net_dev,
+ int rx_bp_count,
+ struct dpa_bp *rx_bp,
+ struct dpa_buffer_layout_s *rx_buf_layout,
+ struct dpa_bp *draining_tx_bp,
+ struct dpa_bp *draining_tx_sg_bp,
+ struct dpa_buffer_layout_s *tx_buf_layout)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ /* TODO: multiple Rx bps */
+ priv->rx_bp_count = rx_bp_count;
+ priv->rx_bp = rx_bp;
+ priv->rx_buf_layout = rx_buf_layout;
+ priv->draining_tx_bp = draining_tx_bp;
+ priv->draining_tx_sg_bp = draining_tx_sg_bp;
+ priv->tx_buf_layout = tx_buf_layout;
+
+ err = dpa_bp_alloc(priv->rx_bp);
+ if (err < 0) {
+ priv->rx_bp = NULL;
+ return err;
+ }
+
+ err = dpa_bp_alloc(priv->draining_tx_bp);
+ if (err < 0) {
+ priv->draining_tx_bp = NULL;
+ return err;
+ }
+
+ err = dpa_bp_alloc(priv->draining_tx_sg_bp);
+ if (err < 0) {
+ priv->draining_tx_sg_bp = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+static void dpa_generic_release_bp(struct dpa_bp *bp)
+{
+ if (!bp)
+ return;
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->free_buf_cb)
+ dpa_bp_drain(bp);
+
+ bman_free_pool(bp->pool);
+
+ if (bp->dev)
+ platform_device_unregister(to_platform_device(bp->dev));
+}
+
+static void dpa_generic_bp_free(struct dpa_generic_priv_s *priv)
+{
+ int i = 0;
+
+ /* release the rx bpools */
+ for (i = 0; i < priv->rx_bp_count; i++)
+		dpa_generic_release_bp(&priv->rx_bp[i]);
+
+	/* release the tx draining bpools */
+	dpa_generic_release_bp(priv->draining_tx_bp);
+	dpa_generic_release_bp(priv->draining_tx_sg_bp);
+}
+
+static int dpa_generic_remove(struct platform_device *of_dev)
+{
+ int err;
+ struct device *dev;
+ struct net_device *net_dev;
+ struct dpa_generic_priv_s *priv;
+
+ dev = &of_dev->dev;
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_generic_sysfs_remove(dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
+ err = dpa_fq_free(dev, &priv->dpa_fq_list);
+
+ dpa_generic_napi_del(net_dev);
+
+ dpa_generic_bp_free(priv);
+
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static int dpa_generic_eth_probe(struct platform_device *_of_dev)
+{
+ struct device *dev = &_of_dev->dev;
+ struct device_node *dpa_node = dev->of_node;
+ struct net_device *netdev = NULL;
+ struct dpa_generic_priv_s *priv;
+ struct fm_port *rx_port = NULL;
+ struct fm_port *tx_port = NULL;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int rx_bp_count = 0;
+ int disable_buff_dealloc = 0;
+ struct dpa_bp *rx_bp = NULL, *draining_tx_bp = NULL;
+ struct dpa_bp *draining_tx_sg_bp = NULL;
+ struct dpa_buffer_layout_s *rx_buf_layout = NULL, *tx_buf_layout = NULL;
+ struct list_head *dpa_fq_list;
+ static u8 generic_idx;
+ int err = 0;
+ int i = 0;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ err = dpa_generic_port_probe(_of_dev, &tx_port, &rx_port);
+ if (err < 0)
+ return err;
+
+ err = dpa_generic_rx_bp_probe(_of_dev, rx_port, &rx_bp_count,
+ &rx_bp, &rx_buf_layout);
+ if (err < 0)
+ return err;
+
+ err = dpa_generic_tx_bp_probe(_of_dev, tx_port, &draining_tx_bp,
+ &draining_tx_sg_bp, &tx_buf_layout);
+ if (err < 0)
+ return err;
+
+ dpa_fq_list = dpa_generic_fq_probe(_of_dev, tx_port);
+ if (IS_ERR(dpa_fq_list))
+ return PTR_ERR(dpa_fq_list);
+
+ err = dpa_generic_buff_dealloc_probe(_of_dev, &disable_buff_dealloc);
+ if (err < 0)
+ return err;
+
+ /* just one queue for now */
+ netdev = alloc_etherdev_mq(sizeof(*priv), 1);
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(netdev, dev);
+ dev_set_drvdata(dev, netdev);
+ priv = netdev_priv(netdev);
+ priv->net_dev = netdev;
+ sprintf(priv->if_type, "generic%d", generic_idx++);
+ priv->msg_enable = netif_msg_init(generic_debug, -1);
+ priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
+
+ init_timer(&priv->timer);
+ priv->timer.data = (unsigned long)priv;
+ priv->timer.function = dpa_generic_draining_timer;
+
+ err = dpa_generic_bp_create(netdev, rx_bp_count, rx_bp, rx_buf_layout,
+ draining_tx_bp, draining_tx_sg_bp, tx_buf_layout);
+ if (err < 0)
+ goto bp_create_failed;
+
+ priv->disable_buff_dealloc = disable_buff_dealloc;
+
+ err = dpa_generic_fq_create(netdev, dpa_fq_list, rx_port);
+ if (err < 0)
+ goto fq_create_failed;
+
+ priv->tx_headroom = dpa_get_headroom(tx_buf_layout);
+ priv->rx_headroom = dpa_get_headroom(rx_buf_layout);
+ priv->rx_port = rx_port;
+ priv->tx_port = tx_port;
+ priv->mac_dev = NULL;
+
+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
+ if (priv->percpu_priv == NULL) {
+ dev_err(dev, "devm_alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ /* Initialize NAPI */
+ err = dpa_generic_napi_add(netdev);
+ if (err < 0)
+ goto napi_add_failed;
+
+ err = dpa_generic_netdev_init(dpa_node, netdev);
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_generic_sysfs_init(&netdev->dev);
+
+ pr_info("fsl_dpa_generic: Probed %s interface as %s\n",
+ priv->if_type, netdev->name);
+
+ return 0;
+
+netdev_init_failed:
+napi_add_failed:
+ dpa_generic_napi_del(netdev);
+alloc_percpu_failed:
+ if (netdev)
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+fq_create_failed:
+bp_create_failed:
+ if (netdev)
+ dpa_generic_bp_free(priv);
+ dev_set_drvdata(dev, NULL);
+ if (netdev)
+ free_netdev(netdev);
+
+ return err;
+}
+
+static int __init __cold dpa_generic_load(void)
+{
+ int _errno;
+
+ pr_info(KBUILD_MODNAME ": " DPA_GENERIC_DESCRIPTION "\n");
+
+ /* initialise dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+
+ _errno = platform_driver_register(&dpa_generic_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+
+/* waiting for all referenced ports to be initialized
+ * by other kernel modules (proxy ethernet, offline_port)
+ */
+late_initcall(dpa_generic_load);
+
+static void __exit __cold dpa_generic_unload(void)
+{
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+
+ platform_driver_unregister(&dpa_generic_driver);
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(dpa_generic_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
@@ -0,0 +1,90 @@
+/* Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPA_ETH_GENERIC_H
+#define __DPA_ETH_GENERIC_H
+
+#include "lnxwrp_fsl_fman.h"
+#include "dpaa_eth.h"
+
+struct dpa_generic_priv_s {
+ struct net_device *net_dev;
+ /* use the same percpu_priv as other DPAA Ethernet drivers */
+ struct dpa_percpu_priv_s __percpu *percpu_priv;
+
+ /* up to 4 bps supported for RX */
+ int rx_bp_count;
+ struct dpa_bp *rx_bp;
+ struct dpa_buffer_layout_s *rx_buf_layout;
+
+ struct dpa_bp *draining_tx_bp;
+ struct dpa_bp *draining_tx_sg_bp;
+ struct dpa_buffer_layout_s *tx_buf_layout;
+
+ /* Store here the needed Tx headroom for convenience and speed
+ * (even though it can be computed based on the fields of buf_layout)
+ */
+ uint16_t tx_headroom;
+ uint16_t rx_headroom;
+
+	/* In some scenarios, when VSPs are not enabled on the Tx O/H port,
+ * the buffers will be released by other hardware modules
+ */
+ int disable_buff_dealloc;
+
+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
+
+ struct fm_port *rx_port;
+ struct fm_port *tx_port;
+
+ /* oNIC can have limited control capabilities over a MAC device */
+ struct mac_device *mac_dev;
+
+ uint16_t channel; /* "fsl,qman-channel-id" */
+ struct list_head dpa_fq_list;
+
+ uint32_t msg_enable; /* net_device message level */
+
+ struct dpa_buffer_layout_s *buf_layout;
+ char if_type[30];
+
+ /* periodic drain */
+ struct timer_list timer;
+};
+
+extern const struct ethtool_ops dpa_generic_ethtool_ops;
+
+void dpaa_eth_generic_sysfs_init(struct device *dev);
+void dpaa_eth_generic_sysfs_remove(struct device *dev);
+int __init dpa_generic_debugfs_module_init(void);
+void __exit dpa_generic_debugfs_module_exit(void);
+
+#endif /* __DPA_ETH_GENERIC_H */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
@@ -0,0 +1,201 @@
+/* Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#include <linux/of_net.h>
+
+#include "dpaa_eth_generic.h"
+#include "mac.h" /* struct mac_device */
+
+static ssize_t dpaa_eth_generic_show_addr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev)
+ return sprintf(buf, "%llx\n",
+ (unsigned long long)mac_dev->res->start);
+ else
+ return sprintf(buf, "none\n");
+}
+
+static ssize_t dpaa_eth_generic_show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t res = 0;
+ res = sprintf(buf, "generic\n");
+
+ return res;
+}
+
+static ssize_t dpaa_eth_generic_show_fqids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t bytes = 0;
+ int i = 0;
+ char *str;
+ struct dpa_fq *fq;
+ struct dpa_fq *tmp;
+ struct dpa_fq *prev = NULL;
+ u32 first_fqid = 0;
+ u32 last_fqid = 0;
+ char *prevstr = NULL;
+
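+	/* Walk the FQ list and coalesce runs of consecutive FQIDs of the
+	 * same type into "first - last" ranges for compact output.
+	 */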
+ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ str = "Rx default";
+ break;
+ case FQ_TYPE_RX_ERROR:
+ str = "Rx error";
+ break;
+ case FQ_TYPE_RX_PCD:
+ str = "Rx PCD";
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ str = "Tx default confirmation";
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ str = "Tx confirmation (mq)";
+ break;
+ case FQ_TYPE_TX_ERROR:
+ str = "Tx error";
+ break;
+ case FQ_TYPE_TX:
+ str = "Tx";
+ break;
+ default:
+ str = "Unknown";
+ }
+
+ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
+ str != prevstr)) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes,
+ "%s: %d\n", prevstr, prev->fqid);
+ else
+ bytes += sprintf(buf + bytes,
+ "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
+ last_fqid = fq->fqid;
+ else
+ first_fqid = last_fqid = fq->fqid;
+
+ prev = fq;
+ prevstr = str;
+ i++;
+ }
+
+ if (prev) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
+ prev->fqid);
+ else
+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ return bytes;
+}
+
+static ssize_t dpaa_eth_generic_show_bpids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t bytes = 0;
+ struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct dpa_bp *rx_bp = priv->rx_bp;
+ struct dpa_bp *draining_tx_bp = priv->draining_tx_bp;
+ int i = 0;
+
+	bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "Rx buffer pools:\n");
+	for (i = 0; i < priv->rx_bp_count; i++)
+		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u ",
+				rx_bp[i].bpid);
+
+	bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "\n");
+	bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "Draining buffer pool:\n");
+	bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+			  draining_tx_bp->bpid);
+
+ return bytes;
+}
+
+static ssize_t dpaa_eth_generic_show_mac_regs(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+ int n = 0;
+
+ if (mac_dev)
+ n = fm_mac_dump_regs(mac_dev, buf, n);
+ else
+ return sprintf(buf, "no mac control\n");
+
+ return n;
+}
+
+static struct device_attribute dpaa_eth_generic_attrs[] = {
+ __ATTR(device_addr, S_IRUGO, dpaa_eth_generic_show_addr, NULL),
+ __ATTR(device_type, S_IRUGO, dpaa_eth_generic_show_type, NULL),
+ __ATTR(fqids, S_IRUGO, dpaa_eth_generic_show_fqids, NULL),
+ __ATTR(bpids, S_IRUGO, dpaa_eth_generic_show_bpids, NULL),
+ __ATTR(mac_regs, S_IRUGO, dpaa_eth_generic_show_mac_regs, NULL),
+};
+
+void dpaa_eth_generic_sysfs_init(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++)
+ if (device_create_file(dev, &dpaa_eth_generic_attrs[i])) {
+ dev_err(dev, "Error creating sysfs file\n");
+ while (i > 0)
+ device_remove_file(dev,
+ &dpaa_eth_generic_attrs[--i]);
+ return;
+ }
+}
+
+void dpaa_eth_generic_sysfs_remove(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++)
+ device_remove_file(dev, &dpaa_eth_generic_attrs[i]);
+}
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
@@ -0,0 +1,499 @@
+/* Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/highmem.h>
+#include <linux/fsl_qman.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_eth_base.h"
+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
+#include "mac.h"
+
+/* For MAC-based interfaces, we compute the tx needed headroom from the
+ * associated Tx port's buffer layout settings.
+ * For MACless interfaces just use a default value.
+ */
+#define DPA_DEFAULT_TX_HEADROOM 64
+
+#define DPA_DESCRIPTION "FSL DPAA MACless Ethernet driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+
+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
+static uint16_t macless_tx_timeout = 1000;
+module_param(macless_tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(macless_tx_timeout, "The MACless Tx timeout in ms");
+
+/* forward declarations */
+static int __cold dpa_macless_start(struct net_device *net_dev);
+static int __cold dpa_macless_stop(struct net_device *net_dev);
+static int __cold dpa_macless_set_address(struct net_device *net_dev,
+ void *addr);
+static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev);
+
+static int dpaa_eth_macless_probe(struct platform_device *_of_dev);
+static netdev_features_t
+dpa_macless_fix_features(struct net_device *dev, netdev_features_t features);
+
+static const struct net_device_ops dpa_macless_ops = {
+ .ndo_open = dpa_macless_start,
+ .ndo_start_xmit = dpa_shared_tx,
+ .ndo_stop = dpa_macless_stop,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats64 = dpa_get_stats64,
+ .ndo_set_mac_address = dpa_macless_set_address,
+ .ndo_set_rx_mode = dpa_macless_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ .ndo_select_queue = dpa_select_queue,
+#endif
+ .ndo_change_mtu = dpa_change_mtu,
+ .ndo_init = dpa_ndo_init,
+ .ndo_set_features = dpa_set_features,
+ .ndo_fix_features = dpa_macless_fix_features,
+};
+
+static const struct of_device_id dpa_macless_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-macless"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_macless_match);
+
+static struct platform_driver dpa_macless_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME "-macless",
+ .of_match_table = dpa_macless_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa_eth_macless_probe,
+ .remove = dpa_remove
+};
+
+static const char macless_frame_queues[][25] = {
+ [RX] = "fsl,qman-frame-queues-rx",
+ [TX] = "fsl,qman-frame-queues-tx"
+};
+
+static int __cold dpa_macless_start(struct net_device *net_dev)
+{
+ const struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
+
+ netif_tx_start_all_queues(net_dev);
+
+ if (proxy_dev)
+ dpa_proxy_start(net_dev);
+
+ return 0;
+}
+
+static int __cold dpa_macless_stop(struct net_device *net_dev)
+{
+ const struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
+
+ netif_tx_stop_all_queues(net_dev);
+
+ if (proxy_dev)
+ dpa_proxy_stop(proxy_dev, net_dev);
+
+ return 0;
+}
+
+static int dpa_macless_set_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
+ int _errno;
+
+ _errno = eth_mac_addr(net_dev, addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno);
+ return _errno;
+ }
+
+ if (proxy_dev) {
+ _errno = dpa_proxy_set_mac_address(proxy_dev, net_dev);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev, "proxy_set_mac_address() = %d\n",
+ _errno);
+ return _errno;
+ }
+ }
+
+ return 0;
+}
+
+static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev)
+{
+ const struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
+
+ if (proxy_dev)
+ dpa_proxy_set_rx_mode(proxy_dev, net_dev);
+}
+
+static netdev_features_t
+dpa_macless_fix_features(struct net_device *dev, netdev_features_t features)
+{
+ netdev_features_t unsupported_features = 0;
+
+ /* In theory we should never be requested to enable features that
+ * we didn't set in netdev->features and netdev->hw_features at probe
+ * time, but double check just to be on the safe side.
+ */
+ unsupported_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* We don't support enabling Rx csum through ethtool yet */
+ unsupported_features |= NETIF_F_RXCSUM;
+
+ features &= ~unsupported_features;
+
+ return features;
+}
+
+static int dpa_macless_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
+ struct device *dev = net_dev->dev.parent;
+ const uint8_t *mac_addr;
+
+ net_dev->netdev_ops = &dpa_macless_ops;
+
+ if (proxy_dev) {
+ struct mac_device *mac_dev = proxy_dev->mac_dev;
+ net_dev->mem_start = mac_dev->res->start;
+ net_dev->mem_end = mac_dev->res->end;
+
+ return dpa_netdev_init(net_dev, mac_dev->addr,
+ macless_tx_timeout);
+ } else {
+ /* Get the MAC address from device tree */
+ mac_addr = of_get_mac_address(dpa_node);
+
+ if (mac_addr == NULL) {
+ if (netif_msg_probe(priv))
+ dev_err(dev, "No MAC address found!\n");
+ return -EINVAL;
+ }
+
+ return dpa_netdev_init(net_dev, mac_addr,
+ macless_tx_timeout);
+ }
+}
+
+/* Probing of FQs for MACless ports */
+static int dpa_fq_probe_macless(struct device *dev, struct list_head *list,
+ enum port_type ptype)
+{
+ struct device_node *np = dev->of_node;
+ const struct fqid_cell *fqids;
+ int num_ranges;
+ int i, lenp;
+
+ fqids = of_get_property(np, macless_frame_queues[ptype], &lenp);
+ if (fqids == NULL) {
+ dev_err(dev, "Need FQ definition in dts for MACless devices\n");
+ return -EINVAL;
+ }
+
+ num_ranges = lenp / sizeof(*fqids);
+
+ /* All ranges defined in the device tree are used as Rx/Tx queues */
+ for (i = 0; i < num_ranges; i++) {
+ if (!dpa_fq_alloc(dev, be32_to_cpu(fqids[i].start),
+ be32_to_cpu(fqids[i].count), list,
+ ptype == RX ? FQ_TYPE_RX_PCD : FQ_TYPE_TX)) {
+ dev_err(dev, "_dpa_fq_alloc() failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static struct proxy_device *
+dpa_macless_proxy_probe(struct platform_device *_of_dev)
+{
+ struct device *dev;
+ const phandle *proxy_prop;
+ struct proxy_device *proxy_dev;
+ struct device_node *proxy_node;
+ struct platform_device *proxy_pdev;
+ int lenp;
+
+ dev = &_of_dev->dev;
+
+ proxy_prop = of_get_property(dev->of_node, "proxy", &lenp);
+ if (!proxy_prop)
+ return NULL;
+
+ proxy_node = of_find_node_by_phandle(*proxy_prop);
+ if (!proxy_node) {
+ dev_err(dev, "Cannot find proxy node\n");
+ return NULL;
+ }
+
+ proxy_pdev = of_find_device_by_node(proxy_node);
+ if (!proxy_pdev) {
+ of_node_put(proxy_node);
+ dev_err(dev, "Cannot find device represented by proxy node\n");
+ return NULL;
+ }
+
+ proxy_dev = dev_get_drvdata(&proxy_pdev->dev);
+
+ of_node_put(proxy_node);
+
+ return proxy_dev;
+}
+
+static int dpaa_eth_macless_probe(struct platform_device *_of_dev)
+{
+ int err = 0, i, channel;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct dpa_fq *dpa_fq, *tmp;
+ size_t count;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct dpa_percpu_priv_s *percpu_priv;
+	struct proxy_device *proxy_dev;
+ struct task_struct *kth;
+ static u8 macless_idx;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ /* Get the buffer pools assigned to this interface */
+ dpa_bp = dpa_bp_probe(_of_dev, &count);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+ for (i = 0; i < count; i++)
+ dpa_bp[i].seed_cb = dpa_bp_shared_port_seed;
+
+ proxy_dev = dpa_macless_proxy_probe(_of_dev);
+
+ /* Allocate this early, so we can store relevant information in
+ * the private area (needed by 1588 code in dpa_mac_probe)
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ sprintf(priv->if_type, "macless%d", macless_idx++);
+
+ priv->msg_enable = netif_msg_init(advanced_debug, -1);
+
+ priv->peer = NULL;
+ priv->mac_dev = NULL;
+ if (proxy_dev) {
+		/* Temporary solution needed to keep the main driver
+		 * upstreamable: adjust_link is a generic function that
+		 * should work for both the private driver and the
+		 * macless driver with MAC device control capabilities,
+		 * even though the latter will not be upstreamed.
+		 * TODO: find a cleaner solution (a wrapper over the
+		 * main priv structure, etc.)
+		 */
+ priv->mac_dev = proxy_dev->mac_dev;
+
+ /* control over proxy's mac device */
+ priv->peer = (void *)proxy_dev;
+ }
+
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list, RX);
+ if (!err)
+ err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list,
+ TX);
+ if (err < 0)
+ goto fq_probe_failed;
+
+ /* bp init */
+ priv->bp_count = count;
+ err = dpa_bp_create(net_dev, dpa_bp, count);
+ if (err < 0)
+ goto bp_create_failed;
+
+ channel = dpa_get_channel();
+
+ if (channel < 0) {
+ err = channel;
+ goto get_channel_failed;
+ }
+
+ priv->channel = (uint16_t)channel;
+
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", net_dev, priv->channel);
+	if (IS_ERR(kth)) {
+		/* kthread_run() returns an ERR_PTR() on failure, not NULL */
+		err = PTR_ERR(kth);
+		goto add_channel_failed;
+	}
+
+ dpa_fq_setup(priv, &shared_fq_cbs, NULL);
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+ /* For MAC-less devices we only get here for RX frame queues
+ * initialization, which are the TX queues of the other
+ * partition.
+ * It is safe to rely on one partition to set the FQ taildrop
+ * threshold for the TX queues of the other partition
+ * because the ERN notifications will be received by the
+ * partition doing qman_enqueue.
+ */
+ err = dpa_fq_init(dpa_fq, true);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
+
+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
+
+ if (priv->percpu_priv == NULL) {
+ dev_err(dev, "devm_alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ err = dpa_macless_netdev_init(dpa_node, net_dev);
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+ pr_info("fsl_dpa_macless: Probed %s interface as %s\n",
+ priv->if_type, net_dev->name);
+
+ return 0;
+
+netdev_init_failed:
+alloc_percpu_failed:
+fq_alloc_failed:
+ if (net_dev)
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+add_channel_failed:
+get_channel_failed:
+ if (net_dev)
+ dpa_bp_free(priv);
+bp_create_failed:
+fq_probe_failed:
+ dev_set_drvdata(dev, NULL);
+ if (net_dev)
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static int __init __cold dpa_macless_load(void)
+{
+ int _errno;
+
+ pr_info(DPA_DESCRIPTION "\n");
+
+ /* Initialize dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+
+ _errno = platform_driver_register(&dpa_macless_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(dpa_macless_load);
+
+static void __exit __cold dpa_macless_unload(void)
+{
+ platform_driver_unregister(&dpa_macless_driver);
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(dpa_macless_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
@@ -0,0 +1,2156 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+
+#include <net/sock.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+
+#include "dpaa_eth_macsec.h"
+#include "dpaa_eth_common.h"
+
+#ifdef CONFIG_FSL_DPAA_1588
+#include "dpaa_1588.h"
+#endif
+
+static struct sock *nl_sk;
+static struct macsec_priv_s *macsec_priv[FM_MAX_NUM_OF_MACS];
+static char *macsec_ifs[FM_MAX_NUM_OF_MACS];
+static int macsec_ifs_cnt;
+
+static char ifs[MAX_LEN];
+const struct ethtool_ops *dpa_ethtool_ops_prev;
+static struct ethtool_ops dpa_macsec_ethtool_ops;
+
+module_param_string(ifs, ifs, MAX_LEN, 0000);
+MODULE_PARM_DESC(ifs, "Comma separated interface list");
+
+struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev)
+{
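+	/* macsec_priv[] is indexed by ifindex - 1; this assumes the DPAA
+	 * net devices are registered first and hold the lowest ifindexes.
+	 */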
+ return macsec_priv[net_dev->ifindex - 1];
+}
+
+static void macsec_setup_ethtool_ops(struct net_device *net_dev)
+{
+ /* remember private driver's ethtool ops just once */
+ if (!dpa_ethtool_ops_prev) {
+ dpa_ethtool_ops_prev = net_dev->ethtool_ops;
+
+ memcpy(&dpa_macsec_ethtool_ops, net_dev->ethtool_ops,
+ sizeof(struct ethtool_ops));
+ dpa_macsec_ethtool_ops.get_sset_count =
+ dpa_macsec_get_sset_count;
+ dpa_macsec_ethtool_ops.get_ethtool_stats =
+ dpa_macsec_get_ethtool_stats;
+ dpa_macsec_ethtool_ops.get_strings =
+ dpa_macsec_get_strings;
+ }
+
+ net_dev->ethtool_ops = &dpa_macsec_ethtool_ops;
+}
+
+static void macsec_restore_ethtool_ops(struct net_device *net_dev)
+{
+ net_dev->ethtool_ops = dpa_ethtool_ops_prev;
+}
+
+
+static int ifname_to_id(char *ifname)
+{
+ int i;
+
+ for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
+ if (macsec_priv[i]->net_dev &&
+ (strcmp(ifname, macsec_priv[i]->net_dev->name) == 0)) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static void deinit_macsec(int macsec_id)
+{
+ struct macsec_priv_s *selected_macsec_priv;
+ int i;
+
+ selected_macsec_priv = macsec_priv[macsec_id];
+
+ if (selected_macsec_priv->en_state == SECY_ENABLED) {
+ for (i = 0; i < NUM_OF_RX_SC; i++) {
+ if (!selected_macsec_priv->rx_sc_dev[i])
+ continue;
+ fm_macsec_secy_rxsa_disable_receive(
+ selected_macsec_priv->fm_ms_secy,
+ selected_macsec_priv->rx_sc_dev[i],
+ selected_macsec_priv->an);
+ pr_debug("disable rx_sa done\n");
+
+ fm_macsec_secy_delete_rx_sa(
+ selected_macsec_priv->fm_ms_secy,
+ selected_macsec_priv->rx_sc_dev[i],
+ selected_macsec_priv->an);
+ pr_debug("delete rx_sa done\n");
+
+ fm_macsec_secy_delete_rxsc(
+ selected_macsec_priv->fm_ms_secy,
+ selected_macsec_priv->rx_sc_dev[i]);
+ pr_debug("delete rx_sc done\n");
+ }
+
+ fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy,
+ selected_macsec_priv->an);
+ pr_debug("delete tx_sa done\n");
+
+ fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
+ selected_macsec_priv->fm_ms_secy = NULL;
+ pr_debug("secy free done\n");
+ }
+
+ if (selected_macsec_priv->en_state != MACSEC_DISABLED) {
+ fm_macsec_disable(selected_macsec_priv->fm_macsec);
+ fm_macsec_free(selected_macsec_priv->fm_macsec);
+ selected_macsec_priv->fm_macsec = NULL;
+ pr_debug("macsec disable and free done\n");
+ }
+}
+
+static void parse_ifs(void)
+{
+	char *token, *strpos = ifs;
+
+	while ((token = strsep(&strpos, ","))) {
+		if (strlen(token) == 0)
+			return;
+		/* don't overrun macsec_ifs[] on overly long lists */
+		if (macsec_ifs_cnt >= FM_MAX_NUM_OF_MACS)
+			return;
+		macsec_ifs[macsec_ifs_cnt] = token;
+		macsec_ifs_cnt++;
+	}
+}
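+
+/* Usage sketch (the module and interface names are only examples):
+ *
+ *	insmod fsl_dpa_macsec.ko ifs=eth0,eth1
+ *
+ * strsep() splits the list in place, so every macsec_ifs[] entry points
+ * into the ifs[] module parameter buffer.
+ */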
+
+static void macsec_exception(handle_t _macsec_priv_s,
+ fm_macsec_exception exception)
+{
+ struct macsec_priv_s *priv;
+ priv = (struct macsec_priv_s *)_macsec_priv_s;
+
+ switch (exception) {
+ case (SINGLE_BIT_ECC):
+ dev_warn(priv->mac_dev->dev, "%s:%s SINGLE_BIT_ECC exception\n",
+ KBUILD_BASENAME".c", __func__);
+ break;
+ case (MULTI_BIT_ECC):
+ dev_warn(priv->mac_dev->dev, "%s:%s MULTI_BIT_ECC exception\n",
+ KBUILD_BASENAME".c", __func__);
+ break;
+ default:
+ dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n",
+ KBUILD_BASENAME".c", __func__, exception);
+ break;
+ }
+}
+
+
+static void macsec_secy_exception(handle_t _macsec_priv_s,
+ fm_macsec_secy_exception exception)
+{
+ struct macsec_priv_s *priv;
+ priv = (struct macsec_priv_s *)_macsec_priv_s;
+
+ switch (exception) {
+ case (SECY_EX_FRAME_DISCARDED):
+ dev_warn(priv->mac_dev->dev,
+ "%s:%s SECY_EX_FRAME_DISCARDED exception\n",
+ KBUILD_BASENAME".c", __func__);
+ break;
+ default:
+ dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n",
+ KBUILD_BASENAME".c", __func__, exception);
+ break;
+ }
+}
+
+static void macsec_secy_events(handle_t _macsec_priv_s,
+ fm_macsec_secy_event event)
+{
+ struct macsec_priv_s *priv;
+ priv = (struct macsec_priv_s *)_macsec_priv_s;
+
+ switch (event) {
+ case (SECY_EV_NEXT_PN):
+ dev_dbg(priv->mac_dev->dev, "%s:%s SECY_EV_NEXT_PN event\n",
+ KBUILD_BASENAME".c", __func__);
+ break;
+ default:
+ dev_dbg(priv->mac_dev->dev, "%s:%s event %d\n",
+ KBUILD_BASENAME".c", __func__, event);
+ break;
+ }
+}
+
+static struct qman_fq *macsec_get_tx_conf_queue(
+ const struct macsec_priv_s *macsec_priv,
+ struct qman_fq *tx_fq)
+{
+ int i;
+
+ for (i = 0; i < MACSEC_ETH_TX_QUEUES; i++)
+ if (macsec_priv->egress_fqs[i] == tx_fq)
+ return macsec_priv->conf_fqs[i];
+ return NULL;
+}
+
+/* Initialize qman fqs. Still need to set context_a, specifically the bits
+ * that identify the secure channel.
+ */
+static int macsec_fq_init(struct dpa_fq *dpa_fq)
+{
+ struct qman_fq *fq;
+ struct device *dev;
+ struct qm_mcc_initfq initfq;
+ uint32_t sc_phys_id;
+ int _errno, macsec_id;
+
+ dev = dpa_fq->net_dev->dev.parent;
+ macsec_id = dpa_fq->net_dev->ifindex - 1;
+
+ if (dpa_fq->fqid == 0)
+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
+
+ if (_errno) {
+ dev_err(dev, "qman_create_fq() failed\n");
+ return _errno;
+ }
+
+ fq = &dpa_fq->fq_base;
+
+ if (dpa_fq->init) {
+ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+
+ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+
+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+
+ initfq.fqd.dest.channel = dpa_fq->channel;
+ initfq.fqd.dest.wq = dpa_fq->wq;
+
+ if (dpa_fq->fq_type == FQ_TYPE_TX) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+
+ /* Obtain the TX scId from fman */
+ _errno = fm_macsec_secy_get_txsc_phys_id(
+ macsec_priv[macsec_id]->fm_ms_secy,
+ &sc_phys_id);
+ if (unlikely(_errno < 0)) {
+ dev_err(dev, "fm_macsec_secy_get_txsc_phys_id = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ /* Write the TX SC-ID in the context of the FQ.
+ * A2V=1 (use the A2 field)
+ * A0V=1 (use the A0 field)
+ * OVOM=1
+ * MCV=1 (MACsec controlled frames)
+ * MACCMD=the TX scId
+ */
+ initfq.fqd.context_a.hi = 0x1a100000 |
+ sc_phys_id << 16;
+ initfq.fqd.context_a.lo = 0x80000000;
+ }
+
+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (_errno < 0) {
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), _errno);
+ qman_destroy_fq(fq, 0);
+ return _errno;
+ }
+ }
+
+ dpa_fq->fqid = qman_fq_fqid(fq);
+
+ return 0;
+}
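+
+/* For example, with sc_phys_id == 1 the TX FQ context above encodes to:
+ *
+ *	context_a.hi = 0x1a100000 | (1 << 16) = 0x1a110000
+ *	context_a.lo = 0x80000000
+ *
+ * i.e. A2V, A0V, OVOM and MCV set, with the TX SC id in the MACCMD field.
+ */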
+
+/* Configure and enable secy. */
+static int enable_secy(struct generic_msg *gen, int *macsec_id)
+{
+ struct enable_secy *sec;
+ int _errno;
+ struct fm_macsec_secy_params secy_params;
+ struct dpa_fq *dpa_fq, *tmp;
+ struct macsec_priv_s *selected_macsec_priv;
+
+ sec = &gen->payload.secy;
+
+ if (sec->macsec_id < 0 || sec->macsec_id >= FM_MAX_NUM_OF_MACS) {
+ _errno = -EINVAL;
+ goto _return;
+ }
+ *macsec_id = sec->macsec_id;
+ selected_macsec_priv = macsec_priv[sec->macsec_id];
+
+ if (selected_macsec_priv->fm_ms_secy) {
+ pr_err("Secy has already been enabled\n");
+ return -EINVAL;
+ }
+
+ memset(&secy_params, 0, sizeof(secy_params));
+ secy_params.fm_macsec_h = selected_macsec_priv->fm_macsec;
+ secy_params.num_receive_channels = NUM_OF_RX_SC;
+ secy_params.tx_sc_params.sci = sec->sci;
+
+ /* Set encryption method */
+ secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_128;
+#if (DPAA_VERSION >= 11)
+ secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_256;
+#endif /* (DPAA_VERSION >= 11) */
+ secy_params.exception_f = macsec_secy_exception;
+ secy_params.event_f = macsec_secy_events;
+ secy_params.app_h = selected_macsec_priv;
+
+ selected_macsec_priv->fm_ms_secy =
+ fm_macsec_secy_config(&secy_params);
+
+ if (unlikely(selected_macsec_priv->fm_ms_secy == NULL)) {
+ _errno = -EINVAL;
+ goto _return;
+ }
+
+ /* Configure the insertion mode */
+ if (sec->config_insertion_mode) {
+ _errno = fm_macsec_secy_config_sci_insertion_mode(
+ selected_macsec_priv->fm_ms_secy,
+ sec->sci_insertion_mode);
+ if (unlikely(_errno < 0))
+ goto _return;
+ }
+
+ /* Configure the frame protection */
+ if (sec->config_protect_frames) {
+ _errno = fm_macsec_secy_config_protect_frames(
+ selected_macsec_priv->fm_ms_secy,
+ sec->protect_frames);
+ if (unlikely(_errno < 0))
+ goto _return;
+ }
+
+ /* Configure the replay window */
+ if (sec->config_replay_window) {
+ _errno = fm_macsec_secy_config_replay_window(
+ selected_macsec_priv->fm_ms_secy,
+ sec->replay_protect,
+ sec->replay_window);
+ if (unlikely(_errno < 0))
+ goto _return;
+ }
+
+ /* Configure the validation mode */
+ if (sec->config_validation_mode) {
+ _errno = fm_macsec_secy_config_validation_mode(
+ selected_macsec_priv->fm_ms_secy,
+ sec->validate_frames);
+ if (unlikely(_errno < 0))
+ goto _return;
+ }
+
+ /* Select the exceptions that will be signaled */
+ if (sec->config_exception) {
+ _errno = fm_macsec_secy_config_exception(
+ selected_macsec_priv->fm_ms_secy,
+ sec->exception,
+ sec->enable_exception);
+ if (unlikely(_errno < 0))
+ goto _return;
+ }
+
+ /* Select the events that will be signaled */
+ if (sec->config_event) {
+ _errno = fm_macsec_secy_config_event(
+ selected_macsec_priv->fm_ms_secy,
+ sec->event,
+ sec->enable_event);
+ if (unlikely(_errno < 0))
+ goto _return;
+ }
+
+ /* Configure a point-to-point connection */
+ if (sec->config_point_to_point) {
+ _errno = fm_macsec_secy_config_point_to_point(
+ selected_macsec_priv->fm_ms_secy);
+ if (unlikely(_errno < 0))
+ goto _return;
+ }
+
+ /* Configure the connection's confidentiality state */
+ if (sec->config_confidentiality) {
+ _errno = fm_macsec_secy_config_confidentiality(
+ selected_macsec_priv->fm_ms_secy,
+ sec->confidentiality_enable,
+ sec->confidentiality_offset);
+ if (unlikely(_errno < 0))
+ goto _return;
+ }
+
+ _errno = fm_macsec_secy_init(selected_macsec_priv->fm_ms_secy);
+ if (unlikely(_errno < 0))
+ goto _return_fm_macsec_secy_free;
+
+ list_for_each_entry_safe(dpa_fq,
+ tmp,
+ &selected_macsec_priv->dpa_fq_list,
+ list) {
+ _errno = macsec_fq_init(dpa_fq);
+ if (_errno < 0)
+ goto _return;
+ }
+
+ return 0;
+
+_return_fm_macsec_secy_free:
+ fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
+ selected_macsec_priv->fm_ms_secy = NULL;
+_return:
+ return _errno;
+}
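+
+/* Note the usual FMan two-phase pattern above: fm_macsec_secy_config()
+ * creates the SecY object, the optional fm_macsec_secy_config_*() calls
+ * adjust it, and fm_macsec_secy_init() commits it to hardware. Only then
+ * can macsec_fq_init() bind the frame queues to the SecY's TX SC id.
+ */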
+
+static int set_macsec_exception(struct generic_msg *gen)
+{
+ struct set_exception *set_ex;
+ struct macsec_priv_s *selected_macsec_priv;
+ int rv;
+
+ set_ex = &(gen->payload.set_ex);
+
+ selected_macsec_priv = macsec_priv[set_ex->macsec_id];
+
+ rv = fm_macsec_set_exception(selected_macsec_priv->fm_macsec,
+ set_ex->exception,
+ set_ex->enable_exception);
+ if (unlikely(rv < 0))
+ pr_err("error when setting the macsec exception mask\n");
+
+ return rv;
+}
+
+static int create_tx_sa(struct generic_msg *gen)
+{
+ struct create_tx_sa *c_tx_sa;
+ macsec_sa_key_t sa_key;
+ int rv;
+ struct macsec_priv_s *selected_macsec_priv;
+
+ c_tx_sa = &(gen->payload.c_tx_sa);
+
+	if (c_tx_sa->macsec_id < 0 ||
+	    c_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
+		return -EINVAL;
+ selected_macsec_priv = macsec_priv[c_tx_sa->macsec_id];
+
+ /* set macsec_priv field */
+ selected_macsec_priv->an = c_tx_sa->an;
+
+	/* the AES-GCM cipher suites used limit the key to 32 bytes */
+	if (unlikely(c_tx_sa->sak_len > 32)) {
+		pr_warn("size of secure key is greater than 32 bytes!\n");
+		return -EINVAL;
+	}
+
+ rv = copy_from_user(&sa_key,
+ c_tx_sa->sak,
+ c_tx_sa->sak_len);
+ if (unlikely(rv != 0)) {
+ pr_err("copy_from_user could not copy %i bytes\n", rv);
+ return -EFAULT;
+ }
+
+ rv = fm_macsec_secy_create_tx_sa(selected_macsec_priv->fm_ms_secy,
+ c_tx_sa->an,
+ sa_key);
+ if (unlikely(rv < 0))
+ pr_err("error when creating tx sa\n");
+
+ return rv;
+}
+
+static int modify_tx_sa_key(struct generic_msg *gen)
+{
+ struct modify_tx_sa_key *tx_sa_key;
+ struct macsec_priv_s *selected_macsec_priv;
+ macsec_sa_key_t sa_key;
+ int rv;
+
+ tx_sa_key = &(gen->payload.modify_tx_sa_key);
+
+ if (tx_sa_key->macsec_id < 0 ||
+ tx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS)
+ return -EINVAL;
+ selected_macsec_priv = macsec_priv[tx_sa_key->macsec_id];
+
+ /* set macsec_priv field */
+ selected_macsec_priv->an = tx_sa_key->an;
+
+	if (unlikely(tx_sa_key->sak_len > 32)) {
+		pr_warn("size of secure key is greater than 32 bytes!\n");
+		return -EINVAL;
+	}
+
+ rv = copy_from_user(&sa_key,
+ tx_sa_key->sak,
+ tx_sa_key->sak_len);
+ if (unlikely(rv != 0)) {
+ pr_err("copy_from_user could not copy %i bytes\n", rv);
+ return -EFAULT;
+ }
+
+ rv = fm_macsec_secy_txsa_modify_key(selected_macsec_priv->fm_ms_secy,
+ tx_sa_key->an,
+ sa_key);
+ if (unlikely(rv < 0))
+ pr_err("error while modifying the tx sa key\n");
+
+ return rv;
+}
+
+static int activate_tx_sa(struct generic_msg *gen)
+{
+ struct activate_tx_sa *a_tx_sa;
+ struct macsec_priv_s *selected_macsec_priv;
+ int rv;
+
+ a_tx_sa = &(gen->payload.a_tx_sa);
+
+	if (a_tx_sa->macsec_id < 0 ||
+	    a_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
+		return -EINVAL;
+ selected_macsec_priv = macsec_priv[a_tx_sa->macsec_id];
+
+ rv = fm_macsec_secy_txsa_set_active(selected_macsec_priv->fm_ms_secy,
+ a_tx_sa->an);
+ if (unlikely(rv < 0))
+ pr_err("error when creating tx sa\n");
+
+ return rv;
+}
+
+static int get_tx_sa_an(struct generic_msg *gen, macsec_an_t *an)
+{
+ struct macsec_priv_s *selected_macsec_priv;
+
+ if (gen->payload.macsec_id < 0 ||
+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS)
+ return -EINVAL;
+
+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
+
+ fm_macsec_secy_txsa_get_active(selected_macsec_priv->fm_ms_secy, an);
+
+ return 0;
+}
+
+static int create_rx_sc(struct generic_msg *gen)
+{
+ struct fm_macsec_secy_sc_params params;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *rx_sc_dev;
+ uint32_t sc_phys_id;
+ int i;
+
+ if (gen->payload.c_rx_sc.macsec_id < 0 ||
+ gen->payload.c_rx_sc.macsec_id >= FM_MAX_NUM_OF_MACS)
+ return -EINVAL;
+ selected_macsec_priv = macsec_priv[gen->payload.c_rx_sc.macsec_id];
+
+ for (i = 0; i < NUM_OF_RX_SC; i++)
+ if (!selected_macsec_priv->rx_sc_dev[i])
+ break;
+ if (i == NUM_OF_RX_SC) {
+ pr_err("number of maximum RX_SC's has been reached\n");
+ return -EINVAL;
+ }
+
+ params.sci = gen->payload.c_rx_sc.sci;
+ params.cipher_suite = SECY_GCM_AES_128;
+#if (DPAA_VERSION >= 11)
+ params.cipher_suite = SECY_GCM_AES_256;
+#endif /* (DPAA_VERSION >= 11) */
+
+	rx_sc_dev = fm_macsec_secy_create_rxsc(selected_macsec_priv->fm_ms_secy,
+					       &params);
+	if (unlikely(!rx_sc_dev)) {
+		pr_err("fm_macsec_secy_create_rxsc failed\n");
+		return -EBUSY;
+	}
+
+	fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy,
+					rx_sc_dev,
+					&sc_phys_id);
+
+ selected_macsec_priv->rx_sc_dev[sc_phys_id] = rx_sc_dev;
+
+ return sc_phys_id;
+}
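+
+/* The physical SC id returned above doubles as the rx_sc_id that user
+ * space passes back in later RX_SA_* requests: the handle is stored in
+ * rx_sc_dev[sc_phys_id] and looked up by that same index.
+ */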
+
+static int create_rx_sa(struct generic_msg *gen)
+{
+ struct create_rx_sa *c_rx_sa;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ macsec_sa_key_t sak;
+ int rv;
+
+ c_rx_sa = &(gen->payload.c_rx_sa);
+
+ if (unlikely(c_rx_sa->sak_len > 32)) {
+ pr_warn("size of secure key is greater than 32 bytes!\n");
+ return -EINVAL;
+ }
+ rv = copy_from_user(&sak,
+ c_rx_sa->sak,
+ c_rx_sa->sak_len);
+ if (unlikely(rv != 0)) {
+ pr_err("copy_from_user could not copy %i bytes\n", rv);
+ return -EFAULT;
+ }
+
+ if (c_rx_sa->macsec_id < 0 ||
+ c_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
+ return -EINVAL;
+
+ selected_macsec_priv = macsec_priv[c_rx_sa->macsec_id];
+
+ if (c_rx_sa->rx_sc_id < 0 || c_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+
+ selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[c_rx_sa->rx_sc_id];
+
+ rv = fm_macsec_secy_create_rx_sa(selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ c_rx_sa->an,
+ c_rx_sa->lpn,
+ sak);
+ if (unlikely(rv < 0)) {
+ pr_err("fm_macsec_secy_create_rx_sa failed\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int modify_rx_sa_key(struct generic_msg *gen)
+{
+ struct modify_rx_sa_key *rx_sa_key;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc;
+ macsec_sa_key_t sa_key;
+ int rv;
+
+ rx_sa_key = &(gen->payload.modify_rx_sa_key);
+
+ if (rx_sa_key->macsec_id < 0 ||
+ rx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS)
+ return -EINVAL;
+ selected_macsec_priv = macsec_priv[rx_sa_key->macsec_id];
+
+ if (rx_sa_key->rx_sc_id < 0 || rx_sa_key->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+ selected_rx_sc = selected_macsec_priv->rx_sc_dev[rx_sa_key->rx_sc_id];
+
+ /* set macsec_priv field */
+ selected_macsec_priv->an = rx_sa_key->an;
+
+	if (unlikely(rx_sa_key->sak_len > 32)) {
+		pr_warn("size of secure key is greater than 32 bytes!\n");
+		return -EINVAL;
+	}
+
+ rv = copy_from_user(&sa_key,
+ rx_sa_key->sak,
+ rx_sa_key->sak_len);
+ if (unlikely(rv != 0)) {
+ pr_err("copy_from_user could not copy %i bytes\n", rv);
+ return -EFAULT;
+ }
+
+ rv = fm_macsec_secy_rxsa_modify_key(selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc,
+ rx_sa_key->an,
+ sa_key);
+ if (unlikely(rv < 0))
+ pr_err("error while modifying the rx sa key\n");
+
+ return rv;
+}
+
+static int update_npn(struct generic_msg *gen)
+{
+ struct update_npn *update_npn;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ int err;
+
+ update_npn = &(gen->payload.update_npn);
+
+ if (update_npn->macsec_id < 0 ||
+ update_npn->macsec_id >= FM_MAX_NUM_OF_MACS)
+ return -EINVAL;
+ selected_macsec_priv = macsec_priv[update_npn->macsec_id];
+
+ if (update_npn->rx_sc_id < 0 || update_npn->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+
+ selected_rx_sc_dev =
+ selected_macsec_priv->rx_sc_dev[update_npn->rx_sc_id];
+
+ err = fm_macsec_secy_rxsa_update_next_pn(
+ selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ update_npn->an,
+ update_npn->pn);
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_rxsa_update_next_pn failed\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int update_lpn(struct generic_msg *gen)
+{
+ struct update_lpn *update_lpn;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ int err;
+
+ update_lpn = &(gen->payload.update_lpn);
+
+ if (update_lpn->macsec_id < 0 ||
+ update_lpn->macsec_id >= FM_MAX_NUM_OF_MACS)
+ return -EINVAL;
+ selected_macsec_priv = macsec_priv[update_lpn->macsec_id];
+
+ if (update_lpn->rx_sc_id < 0 || update_lpn->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+ selected_rx_sc_dev =
+ selected_macsec_priv->rx_sc_dev[update_lpn->rx_sc_id];
+
+ err = fm_macsec_secy_rxsa_update_lowest_pn(
+ selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ update_lpn->an,
+ update_lpn->pn);
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_rxsa_update_lowest_pn failed\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int activate_rx_sa(struct generic_msg *gen)
+{
+ struct activate_rx_sa *a_rx_sa;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ int err;
+
+ a_rx_sa = &(gen->payload.a_rx_sa);
+
+ if (a_rx_sa->macsec_id < 0 ||
+ a_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[a_rx_sa->macsec_id];
+
+ if (a_rx_sa->rx_sc_id < 0 || a_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+ selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[a_rx_sa->rx_sc_id];
+
+ err = fm_macsec_secy_rxsa_enable_receive(
+ selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ a_rx_sa->an);
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_rxsa_enable_receive failed\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int get_tx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id)
+{
+ struct macsec_priv_s *selected_macsec_priv;
+ int err;
+
+ if (gen->payload.macsec_id < 0 ||
+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
+
+ err = fm_macsec_secy_get_txsc_phys_id(selected_macsec_priv->fm_ms_secy,
+ sc_id);
+
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_get_txsc_phys_id failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int get_rx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id)
+{
+ struct get_rx_sc_id *get_rx_sc_id;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ int err;
+
+ get_rx_sc_id = &(gen->payload.get_rx_sc_id);
+
+ if (get_rx_sc_id->macsec_id < 0 ||
+ get_rx_sc_id->macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[get_rx_sc_id->macsec_id];
+
+ if (get_rx_sc_id->rx_sc_id < 0 ||
+ get_rx_sc_id->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+ selected_rx_sc_dev =
+ selected_macsec_priv->rx_sc_dev[get_rx_sc_id->rx_sc_id];
+
+ err = fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ sc_id);
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_get_rxsc_phys_id failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int get_macsec_revision(struct generic_msg *gen, int *macsec_revision)
+{
+ struct macsec_priv_s *selected_macsec_priv;
+ int err;
+
+ if (gen->payload.macsec_id < 0 ||
+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
+
+ err = fm_macsec_get_revision(selected_macsec_priv->fm_macsec,
+ macsec_revision);
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_get_revision failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int rx_sa_disable(struct generic_msg *gen)
+{
+ struct disable_rx_sa *disable_rx_sa;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ int err;
+
+ disable_rx_sa = &(gen->payload.d_rx_sa);
+
+ if (disable_rx_sa->macsec_id < 0 ||
+ disable_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[disable_rx_sa->macsec_id];
+
+ if (disable_rx_sa->rx_sc_id < 0 ||
+ disable_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+ selected_rx_sc_dev =
+ selected_macsec_priv->rx_sc_dev[disable_rx_sa->rx_sc_id];
+
+ err = fm_macsec_secy_rxsa_disable_receive(
+ selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ selected_macsec_priv->an);
+
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_rxsa_disable_receive failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int rx_sa_delete(struct generic_msg *gen)
+{
+ struct delete_rx_sa *delete_rx_sa;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ int err;
+
+ delete_rx_sa = &(gen->payload.del_rx_sa);
+
+ if (delete_rx_sa->macsec_id < 0 ||
+ delete_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[delete_rx_sa->macsec_id];
+
+ if (delete_rx_sa->rx_sc_id < 0 ||
+ delete_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+ selected_rx_sc_dev =
+ selected_macsec_priv->rx_sc_dev[delete_rx_sa->rx_sc_id];
+
+ err = fm_macsec_secy_delete_rx_sa(selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ selected_macsec_priv->an);
+
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_delete_rx_sa failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int rx_sc_delete(struct generic_msg *gen)
+{
+ struct delete_rx_sc *delete_rx_sc;
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ int err;
+
+ delete_rx_sc = &(gen->payload.del_rx_sc);
+
+ if (delete_rx_sc->macsec_id < 0 ||
+ delete_rx_sc->macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[delete_rx_sc->macsec_id];
+
+ if (delete_rx_sc->rx_sc_id < 0 ||
+ delete_rx_sc->rx_sc_id >= NUM_OF_RX_SC)
+ return -EINVAL;
+ selected_rx_sc_dev =
+ selected_macsec_priv->rx_sc_dev[delete_rx_sc->rx_sc_id];
+
+ err = fm_macsec_secy_delete_rxsc(selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev);
+
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_delete_rxsc failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int tx_sa_delete(struct generic_msg *gen)
+{
+ struct macsec_priv_s *selected_macsec_priv;
+ int err;
+
+ if (gen->payload.del_tx_sa.macsec_id < 0 ||
+ gen->payload.del_tx_sa.macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[gen->payload.del_tx_sa.macsec_id];
+
+ err = fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy,
+ selected_macsec_priv->an);
+
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_delete_tx_sa failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int disable_secy(struct generic_msg *gen, int *macsec_id)
+{
+ struct macsec_priv_s *selected_macsec_priv;
+ int err;
+
+ if (gen->payload.macsec_id < 0 ||
+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
+ *macsec_id = gen->payload.macsec_id;
+
+ err = fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
+ selected_macsec_priv->fm_ms_secy = NULL;
+
+ if (unlikely(err < 0)) {
+ pr_err("fm_macsec_secy_free failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int disable_macsec(struct generic_msg *gen, int *macsec_id)
+{
+ struct macsec_priv_s *selected_macsec_priv;
+ int err;
+
+ if (gen->payload.macsec_id < 0 ||
+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+
+ selected_macsec_priv =
+ macsec_priv[gen->payload.macsec_id];
+ *macsec_id = gen->payload.macsec_id;
+
+ err = fm_macsec_disable(selected_macsec_priv->fm_macsec);
+ err += fm_macsec_free(selected_macsec_priv->fm_macsec);
+ selected_macsec_priv->fm_macsec = NULL;
+
+ if (unlikely(err < 0)) {
+ pr_err("macsec disable failed\n");
+ return err;
+ }
+
+ return 0;
+
+}
+
+static int disable_all(struct generic_msg *gen, int *macsec_id)
+{
+ struct macsec_priv_s *selected_macsec_priv;
+ struct rx_sc_dev *selected_rx_sc_dev;
+ int err = 0, i;
+
+ if (gen->payload.macsec_id < 0 ||
+ gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
+ return -EINVAL;
+ }
+
+ selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
+ *macsec_id = gen->payload.macsec_id;
+
+ for (i = 0; i < NUM_OF_RX_SC; i++) {
+ selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[i];
+
+ if (!selected_rx_sc_dev)
+ continue;
+
+ err += fm_macsec_secy_rxsa_disable_receive(
+ selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ selected_macsec_priv->an);
+
+ err += fm_macsec_secy_delete_rx_sa(
+ selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev,
+ selected_macsec_priv->an);
+
+ err += fm_macsec_secy_delete_rxsc(
+ selected_macsec_priv->fm_ms_secy,
+ selected_rx_sc_dev);
+ }
+
+ err += fm_macsec_secy_delete_tx_sa(
+ selected_macsec_priv->fm_ms_secy,
+ selected_macsec_priv->an);
+
+ err += fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
+ selected_macsec_priv->fm_ms_secy = NULL;
+
+ err += fm_macsec_disable(selected_macsec_priv->fm_macsec);
+
+ err += fm_macsec_free(selected_macsec_priv->fm_macsec);
+ selected_macsec_priv->fm_macsec = NULL;
+
+ if (unlikely(err < 0)) {
+ pr_err("macsec disable failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void macsec_setup_ingress(struct macsec_priv_s *macsec_priv,
+ struct dpa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = macsec_priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = macsec_priv->channel;
+}
+
+static inline void macsec_setup_egress(struct macsec_priv_s *macsec_priv,
+ struct dpa_fq *fq,
+ struct fm_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = macsec_priv->net_dev;
+
+ if (port) {
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = (uint16_t)fm_get_tx_port_channel(port);
+ } else {
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ }
+}
+
+/* At the moment, we don't create recycle queues. */
+static void macsec_fq_setup(struct macsec_priv_s *macsec_priv,
+ const struct dpa_fq_cbs_t *fq_cbs,
+ struct fm_port *tx_port)
+{
+ struct dpa_fq *fq;
+ int egress_cnt = 0, conf_cnt = 0;
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) {
+ switch (fq->fq_type) {
+ /* Normal TX queues */
+ case FQ_TYPE_TX:
+ macsec_setup_egress(macsec_priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < MACSEC_ETH_TX_QUEUES)
+ macsec_priv->egress_fqs[egress_cnt++] =
+ &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ BUG_ON(!macsec_priv->mac_dev);
+ macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq);
+ break;
+ /* TX confirm multiple queues */
+ case FQ_TYPE_TX_CONF_MQ:
+ BUG_ON(!macsec_priv->mac_dev);
+ macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq);
+ macsec_priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_ERROR:
+ BUG_ON(!macsec_priv->mac_dev);
+ macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_errq);
+ break;
+ default:
+ dev_warn(macsec_priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+ break;
+ }
+ }
+
+ /* The number of Tx queues may be smaller than the number of cores, if
+ * the Tx queue range is specified in the device tree instead of being
+ * dynamically allocated.
+ * Make sure all CPUs receive a corresponding Tx queue.
+ */
+ while (egress_cnt < MACSEC_ETH_TX_QUEUES) {
+ list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ macsec_priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == MACSEC_ETH_TX_QUEUES)
+ break;
+ }
+ }
+
+}
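+
+/* For example, with MACSEC_ETH_TX_QUEUES == 4 and only two TX FQs in the
+ * device tree, the first pass fills egress_fqs[0] and egress_fqs[1]; the
+ * while loop then wraps around the list so egress_fqs[2] and egress_fqs[3]
+ * reuse the same two FQs, keeping one egress entry per CPU.
+ */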
+
+static const struct fqid_cell tx_fqids[] = {
+ {0, MACSEC_ETH_TX_QUEUES}
+};
+
+static const struct fqid_cell tx_confirm_fqids[] = {
+ {0, MACSEC_ETH_TX_QUEUES}
+};
+
+/* Allocate the percpu priv, used to keep per-CPU RX and TX packet counters.
+ * Note that the number of queues equals the number of CPUs, so there is one
+ * queue per CPU.
+ */
+static void alloc_priv(struct macsec_percpu_priv_s *percpu_priv,
+ struct macsec_priv_s *macsec_priv, struct device *dev)
+{
+	int i;
+
+	macsec_priv->percpu_priv = alloc_percpu(*macsec_priv->percpu_priv);
+
+	if (unlikely(macsec_priv->percpu_priv == NULL)) {
+		dev_err(dev, "alloc_percpu() failed\n");
+		dpa_fq_free(dev, &macsec_priv->dpa_fq_list);
+		/* don't dereference the percpu area we failed to allocate */
+		return;
+	}
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(macsec_priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+}
+
+/* On RX, we only need to note whether each frame was encrypted or not.
+ * Statistics regarding this are printed to a log file.
+ */
+static int macsec_rx_hook(void *ptr, struct net_device *net_dev, u32 fqid)
+{
+
+ struct qm_fd *rx_fd = (struct qm_fd *)ptr;
+ struct macsec_percpu_priv_s *percpu_priv_m;
+ struct macsec_priv_s *selected_macsec_priv;
+
+ selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
+
+ percpu_priv_m = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
+
+ if ((rx_fd->status & FM_FD_STAT_RX_MACSEC) != 0) {
+ if (netif_msg_hw(selected_macsec_priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%u\n",
+ rx_fd->status & FM_FD_STAT_RX_MACSEC);
+ percpu_priv_m->rx_macsec++;
+ }
+
+ return DPAA_ETH_CONTINUE;
+}
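+
+/* Returning DPAA_ETH_CONTINUE hands the frame back to the normal dpaa_eth
+ * RX path; a hook only returns DPAA_ETH_STOLEN (as the TX hook below does)
+ * after it has consumed the frame itself.
+ */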
+
+/* Split TX traffic. If encryption is enabled, send the packets on the
+ * dedicated QMAN frame queues; otherwise, let them be handled by dpa eth.
+ * Also, keep track of the number of packets that leave through the "macsec"
+ * queues.
+ */
+static enum dpaa_eth_hook_result macsec_tx_hook(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct dpa_priv_s *dpa_priv;
+ struct qm_fd fd;
+ struct macsec_percpu_priv_s *macsec_percpu_priv;
+ struct dpa_percpu_priv_s *dpa_percpu_priv;
+ int i, err = 0;
+ int *countptr, offset = 0;
+ const bool nonlinear = skb_is_nonlinear(skb);
+ struct qman_fq *egress_fq;
+ struct macsec_priv_s *selected_macsec_priv;
+
+ selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
+
+ if (!selected_macsec_priv->net_dev ||
+ (selected_macsec_priv->en_state != SECY_ENABLED) ||
+ (ntohs(skb->protocol) == ETH_P_PAE))
+ return DPAA_ETH_CONTINUE;
+
+ dpa_priv = netdev_priv(net_dev);
+ /* Non-migratable context, safe to use raw_cpu_ptr */
+ macsec_percpu_priv = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
+ dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
+
+ countptr = raw_cpu_ptr(dpa_priv->dpa_bp->percpu_count);
+
+ clear_fd(&fd);
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if (dpa_priv->tsu && dpa_priv->tsu->valid &&
+ dpa_priv->tsu->hwts_tx_en_ioctl)
+ fd.cmd |= FM_FD_CMD_UPD;
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+ if (unlikely(dpa_priv->ts_tx_en &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ fd.cmd |= FM_FD_CMD_UPD;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
+ * we don't feed FMan with more fragments than it supports.
+ * Btw, we're using the first sgt entry to store the linear part of
+ * the skb, so we're one extra frag short.
+ */
+ if (nonlinear &&
+ likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(dpa_priv, skb, &fd);
+ dpa_percpu_priv->tx_frag_skbuffs++;
+ } else {
+ /* Make sure we have enough headroom to accommodate private
+ * data, parse results, etc. Normally this shouldn't happen if
+ * we're here via the standard kernel stack.
+ */
+ if (unlikely(skb_headroom(skb) < dpa_priv->tx_headroom)) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb,
+ dpa_priv->tx_headroom);
+ if (unlikely(!skb_new)) {
+ dev_kfree_skb(skb);
+ dpa_percpu_priv->stats.tx_errors++;
+ return DPAA_ETH_STOLEN;
+ }
+ dev_kfree_skb(skb);
+ skb = skb_new;
+ }
+
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb
+ */
+
+ /* Code borrowed from skb_unshare(). */
+ if (skb_cloned(skb)) {
+ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+ kfree_skb(skb);
+ skb = nskb;
+ /* skb_copy() has now linearized the skbuff. */
+ } else if (unlikely(nonlinear)) {
+ /* We are here because the egress skb contains
+ * more fragments than we support. In this case,
+ * we have no choice but to linearize it ourselves.
+ */
+ err = __skb_linearize(skb);
+ }
+ if (unlikely(!skb || err < 0)) {
+ /* Common out-of-memory error path */
+ goto enomem;
+ }
+
+ /* Finally, create a contig FD from this skb */
+ err = skb_to_contig_fd(dpa_priv, skb, &fd, countptr, &offset);
+ }
+ if (unlikely(err < 0))
+ goto skb_to_fd_failed;
+
+ if (fd.bpid != 0xff) {
+ skb_recycle(skb);
+ /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
+ * but we need the skb to look as if returned by build_skb().
+ * We need to manually adjust the tailptr as well.
+ */
+ skb->data = skb->head + offset;
+ skb_reset_tail_pointer(skb);
+
+ (*countptr)++;
+ dpa_percpu_priv->tx_returned++;
+ }
+
+ egress_fq = selected_macsec_priv->egress_fqs[smp_processor_id()];
+ if (fd.bpid == 0xff)
+ fd.cmd |= qman_fq_fqid(macsec_get_tx_conf_queue(
+ selected_macsec_priv,
+ egress_fq));
+
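+	/* qman_enqueue() returns -EBUSY while the enqueue ring is full;
+	 * retry a bounded number of times rather than spinning forever.
+	 */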
+ for (i = 0; i < 100000; i++) {
+ err = qman_enqueue(egress_fq, &fd, 0);
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (unlikely(err < 0)) {
+ dpa_percpu_priv->stats.tx_errors++;
+ dpa_percpu_priv->stats.tx_fifo_errors++;
+ goto xmit_failed;
+ }
+
+ macsec_percpu_priv->tx_macsec++;
+ dpa_percpu_priv->stats.tx_packets++;
+ dpa_percpu_priv->stats.tx_bytes += dpa_fd_length(&fd);
+
+ net_dev->trans_start = jiffies;
+ return DPAA_ETH_STOLEN;
+
+xmit_failed:
+ if (fd.bpid != 0xff) {
+ (*countptr)--;
+ dpa_percpu_priv->tx_returned--;
+ dpa_fd_release(net_dev, &fd);
+ dpa_percpu_priv->stats.tx_errors++;
+ return DPAA_ETH_STOLEN;
+ }
+ _dpa_cleanup_tx_fd(dpa_priv, &fd);
+skb_to_fd_failed:
+enomem:
+ dpa_percpu_priv->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return DPAA_ETH_STOLEN;
+}
+
+/* Allocate and initialize the macsec priv and FQs. Also, create a debugfs
+ * entry for a specific interface. Iterate through the existing devices in
+ * order to find the ones we want to have macsec on.
+ */
+static int macsec_setup(void)
+{
+ struct net_device *net_dev;
+ struct macsec_percpu_priv_s *percpu_priv = NULL;
+ struct dpa_priv_s *dpa_priv = NULL;
+ struct dpa_fq *dpa_fq;
+ struct device *dev = NULL;
+ int err, i, j, macsec_id;
+
+ pr_debug("Entering: %s\n", __func__);
+
+ for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
+ macsec_priv[i] = kzalloc(sizeof(*(macsec_priv[i])), GFP_KERNEL);
+
+ if (unlikely(macsec_priv[i] == NULL)) {
+ int j;
+ for (j = 0; j < i; j++)
+ kfree(macsec_priv[j]);
+ pr_err("could not allocate\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < macsec_ifs_cnt; i++) {
+ net_dev = first_net_device(&init_net);
+ macsec_id = net_dev->ifindex - 1;
+ while (net_dev) {
+ macsec_id = net_dev->ifindex - 1;
+
+			/* keep the code readable and the lines under
+			 * 80 characters
+			 */
+ if (strcmp(net_dev->name, macsec_ifs[i]) != 0) {
+ net_dev = next_net_device(net_dev);
+ continue;
+ }
+
+ /* strcmp(net_dev->name, macsec_ifs[i]) == 0 */
+ macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
+ macsec_priv[macsec_id]->net_dev = net_dev;
+ dpa_priv = netdev_priv(net_dev);
+ macsec_priv[macsec_id]->mac_dev = dpa_priv->mac_dev;
+ macsec_priv[macsec_id]->channel = dpa_priv->channel;
+ dev = net_dev->dev.parent;
+
+ INIT_LIST_HEAD(&macsec_priv[macsec_id]->dpa_fq_list);
+
+ dpa_fq = dpa_fq_alloc(dev,
+ tx_fqids->start, tx_fqids->count,
+ &macsec_priv[macsec_id]->dpa_fq_list,
+ FQ_TYPE_TX);
+ if (unlikely(dpa_fq == NULL)) {
+ dev_err(dev, "dpa_fq_alloc() failed\n");
+ return -ENOMEM;
+ }
+
+ dpa_fq = dpa_fq_alloc(dev,
+ tx_confirm_fqids->start,
+ tx_confirm_fqids->count,
+ &macsec_priv[macsec_id]->dpa_fq_list,
+ FQ_TYPE_TX_CONF_MQ);
+ if (unlikely(dpa_fq == NULL)) {
+ dev_err(dev, "dpa_fq_alloc() failed\n");
+ return -ENOMEM;
+ }
+
+ macsec_fq_setup(macsec_priv[macsec_id], &private_fq_cbs,
+ macsec_priv[macsec_id]->mac_dev->port_dev[TX]);
+
+ alloc_priv(percpu_priv, macsec_priv[macsec_id], dev);
+
+ break;
+ }
+ if (macsec_priv[macsec_id]->net_dev == NULL) {
+ pr_err("Interface unknown\n");
+ err = -EINVAL;
+ goto _error;
+ }
+
+ /* setup specific ethtool ops for macsec */
+ macsec_setup_ethtool_ops(net_dev);
+ }
+ return 0;
+
+_error:
+ for (j = 0; j < i; i++) {
+ net_dev = first_net_device(&init_net);
+ while (net_dev) {
+ macsec_id = net_dev->ifindex - 1;
+ if (strcmp(net_dev->name, macsec_ifs[j]) != 0) {
+ net_dev = next_net_device(net_dev);
+ continue;
+ }
+ dpa_fq_free(net_dev->dev.parent,
+ &macsec_priv[macsec_id]->dpa_fq_list);
+ break;
+ }
+ macsec_restore_ethtool_ops(macsec_priv[j]->net_dev);
+ kfree(macsec_priv[j]);
+ }
+ for (j = i; j < FM_MAX_NUM_OF_MACS; j++)
+ kfree(macsec_priv[j]);
+ return err;
+}
+
+static int enable_macsec(struct generic_msg *gen)
+{
+ struct fm_macsec_params macsec_params;
+ int rv, macsec_id;
+ void __iomem *mac_dev_base_addr;
+ uintptr_t macsec_reg_addr;
+ struct macsec_data *mdata;
+ char if_name[IFNAMSIZ];
+ struct macsec_priv_s *selected_macsec_priv;
+
+ mdata = &gen->payload.en_macsec;
+
+ if (unlikely(mdata->if_name_length > IFNAMSIZ)) {
+ pr_err("interface name too long\n");
+ return -EINVAL;
+ }
+
+ rv = copy_from_user(if_name, mdata->if_name, mdata->if_name_length);
+ if (unlikely(rv != 0)) {
+ pr_err("copy_from_user could not copy %i bytes\n", rv);
+ return -EFAULT;
+ }
+
+ macsec_id = ifname_to_id(if_name);
+ if (macsec_id < 0 || macsec_id >= FM_MAX_NUM_OF_MACS) {
+ pr_err("error on converting to macsec_id\n");
+ return -ENXIO;
+ }
+
+ selected_macsec_priv = macsec_priv[macsec_id];
+
+ if (selected_macsec_priv->fm_macsec) {
+ pr_err("macsec has already been configured\n");
+ return -EINVAL;
+ }
+
+ mac_dev_base_addr = selected_macsec_priv->mac_dev->vaddr;
+
+ macsec_reg_addr = (uintptr_t)(mac_dev_base_addr + MACSEC_REG_OFFSET);
+
+ memset(&macsec_params, 0, sizeof(macsec_params));
+ macsec_params.fm_h = (handle_t)selected_macsec_priv->mac_dev->fm;
+ macsec_params.guest_mode = FALSE;
+ /* The MACsec offset relative to the memory mapped MAC device */
+ macsec_params.non_guest_params.base_addr = macsec_reg_addr;
+ macsec_params.non_guest_params.fm_mac_h =
+ (handle_t)selected_macsec_priv->mac_dev->get_mac_handle(
+ selected_macsec_priv->mac_dev);
+ macsec_params.non_guest_params.exception_f = macsec_exception;
+ macsec_params.non_guest_params.app_h = selected_macsec_priv->mac_dev;
+
+ selected_macsec_priv->fm_macsec = fm_macsec_config(&macsec_params);
+ if (unlikely(selected_macsec_priv->fm_macsec == NULL))
+ return -EINVAL;
+
+ if (mdata->config_unknown_sci_treatment) {
+ rv = fm_macsec_config_unknown_sci_frame_treatment(
+ selected_macsec_priv->fm_macsec,
+ mdata->unknown_sci_treatment);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+ }
+
+ if (mdata->config_invalid_tag_treatment) {
+ rv = fm_macsec_config_invalid_tags_frame_treatment(
+ selected_macsec_priv->fm_macsec,
+ mdata->deliver_uncontrolled);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+ }
+
+ if (mdata->config_kay_frame_treatment) {
+ rv = fm_macsec_config_kay_frame_treatment(
+ selected_macsec_priv->fm_macsec,
+ mdata->discard_uncontrolled);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+ }
+
+ if (mdata->config_untag_treatment) {
+ rv = fm_macsec_config_untag_frame_treatment(
+ selected_macsec_priv->fm_macsec,
+ mdata->untag_treatment);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+ }
+
+ if (mdata->config_pn_exhaustion_threshold) {
+ rv = fm_macsec_config_pn_exhaustion_threshold(
+ selected_macsec_priv->fm_macsec,
+ mdata->pn_threshold);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+ }
+
+ if (mdata->config_keys_unreadable) {
+ rv = fm_macsec_config_keys_unreadable(
+ selected_macsec_priv->fm_macsec);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+ }
+
+ if (mdata->config_sectag_without_sci) {
+ rv = fm_macsec_config_sectag_without_sci(
+ selected_macsec_priv->fm_macsec);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+ }
+
+ if (mdata->config_exception) {
+ rv = fm_macsec_config_exception(selected_macsec_priv->fm_macsec,
+ mdata->exception,
+ mdata->enable_exception);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+ }
+
+ rv = fm_macsec_init(selected_macsec_priv->fm_macsec);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+
+ rv = fm_macsec_enable(selected_macsec_priv->fm_macsec);
+ if (unlikely(rv < 0))
+ goto _return_fm_macsec_free;
+
+ return macsec_id;
+
+_return_fm_macsec_free:
+ fm_macsec_free(selected_macsec_priv->fm_macsec);
+ selected_macsec_priv->fm_macsec = NULL;
+ return rv;
+}
+
+static int send_result(struct nlmsghdr *nlh, int pid, int result)
+{
+ int res;
+ struct sk_buff *skb_out;
+ size_t msg_size = sizeof(result);
+
+ skb_out = nlmsg_new(msg_size, 0);
+ if (unlikely(!skb_out)) {
+ pr_err("Failed to allocate new skb\n");
+ goto _ret_err;
+ }
+
+	nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
+	if (unlikely(!nlh)) {
+		pr_err("Failed to add the netlink header\n");
+		nlmsg_free(skb_out);
+		goto _ret_err;
+	}
+
+ NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */
+ memcpy(nlmsg_data(nlh), &result, msg_size);
+
+ res = nlmsg_unicast(nl_sk, skb_out, pid);
+ if (unlikely(res < 0)) {
+ pr_err("Error while sending back to user\n");
+ goto _ret_err;
+ }
+
+ return 0;
+
+_ret_err:
+ return -1;
+}
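+
+/* A minimal user space sketch for reading the reply (assumes the same
+ * NETLINK_USER protocol number the kernel socket was created with):
+ *
+ *	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
+ *	struct { struct nlmsghdr nlh; int result; } reply;
+ *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USER);
+ *
+ *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+ *	// ... send a request, then:
+ *	recv(fd, &reply, sizeof(reply), 0);
+ *	// reply.result holds ACK/NACK or the requested id
+ */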
+
+/* The kernel communicates with user space through netlink sockets. This
+ * function implements the kernel's responses. The generic message struct
+ * keeps the command handling in one place that would otherwise have been
+ * duplicated.
+ */
+static void switch_messages(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ int pid, rv;
+ enum msg_type cmd;
+
+ struct dpa_fq *dpa_fq, *tmp;
+ struct device *dev;
+
+ struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
+
+ struct generic_msg *check;
+ int macsec_id = 0;
+ uint32_t sc_id, macsec_revision;
+ macsec_an_t ret_an;
+ int i;
+
+ pr_debug("Entering: %s\n", __func__);
+
+ if (unlikely(!skb)) {
+ pr_err("skb null\n");
+ return;
+ }
+
+ nlh = (struct nlmsghdr *)skb->data;
+	check = kmalloc(sizeof(*check), GFP_KERNEL);
+	if (unlikely(!check))
+		return;
+	memcpy(check, nlmsg_data(nlh), sizeof(*check));
+	pid = nlh->nlmsg_pid; /* pid of the sending process */
+ cmd = check->chf;
+
+ switch (cmd) {
+ case ENABLE_MACSEC:
+ pr_debug("ENABLE_MACSEC\n");
+
+ macsec_id = enable_macsec(check);
+
+ if (macsec_id >= 0)
+ macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
+
+ rv = send_result(nlh, pid, (macsec_id < 0) ? NACK : macsec_id);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case SET_EXCEPTION:
+ pr_debug("SET_EXCEPTION\n");
+
+ rv = set_macsec_exception(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case ENABLE_SECY:
+ pr_debug("ENABLE_SECY\n");
+
+ rv = enable_secy(check, &macsec_id);
+
+ if (rv == 0)
+ macsec_priv[macsec_id]->en_state = SECY_ENABLED;
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case GET_REVISION:
+ pr_debug("GET_REVISION\n");
+
+ rv = get_macsec_revision(check, &macsec_revision);
+
+ rv = send_result(nlh, pid,
+ (rv < 0) ? NACK : (int)macsec_revision);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case GET_TXSC_PHYS_ID:
+ pr_debug("GET_TXSC_PHYS_ID\n");
+
+ rv = get_tx_sc_phys_id(check, &sc_id);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case TX_SA_CREATE:
+ pr_debug("TX_SA_CREATE\n");
+
+ rv = create_tx_sa(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case MODIFY_TXSA_KEY:
+ pr_debug("MODIFY_TXSA_KEY\n");
+
+ rv = modify_tx_sa_key(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case TX_SA_ACTIVATE:
+ pr_debug("TX_SA_ACTIVATE\n");
+
+ rv = activate_tx_sa(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case GET_TXSA_AN:
+ pr_debug("GET_TXSA_AN\n");
+
+ rv = get_tx_sa_an(check, &ret_an);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)ret_an);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case RX_SC_CREATE:
+ pr_debug("RX_SC_CREATE\n");
+
+		/* create_rx_sc() returns a negative errno or the SC id;
+		 * keep the result signed so failures are detected.
+		 */
+		rv = create_rx_sc(check);
+
+		rv = send_result(nlh, pid, (rv < 0) ? NACK : rv);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case GET_RXSC_PHYS_ID:
+ pr_debug("GET_RXSC_PHYS_ID\n");
+
+ rv = get_rx_sc_phys_id(check, &sc_id);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case RX_SA_CREATE:
+ pr_debug("RX_SA_CREATE\n");
+
+ rv = create_rx_sa(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case MODIFY_RXSA_KEY:
+ pr_debug("MODIFY_RXSA_KEY\n");
+
+ rv = modify_rx_sa_key(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case UPDATE_NPN:
+ pr_debug("UPDATE_NPN\n");
+
+ rv = update_npn(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case UPDATE_LPN:
+ pr_debug("UPDATE_LPN\n");
+
+ rv = update_lpn(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case RX_SA_ACTIVATE:
+ pr_debug("RX_SA_ACTIVATE\n");
+
+ rv = activate_rx_sa(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case RX_SA_DISABLE:
+ pr_debug("RX_SA_DISABLE\n");
+
+ rv = rx_sa_disable(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case RX_SA_DELETE:
+ pr_debug("RX_SA_DELETE\n");
+
+ rv = rx_sa_delete(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case RX_SC_DELETE:
+ pr_debug("RX_SC_DELETE\n");
+
+ rv = rx_sc_delete(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case TX_SA_DELETE:
+ pr_debug("TX_SA_DELETE\n");
+
+ rv = tx_sa_delete(check);
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case DISABLE_SECY:
+ pr_debug("DISABLE_SECY\n");
+
+ rv = disable_secy(check, &macsec_id);
+
+ if (unlikely(rv < 0))
+ macsec_priv[macsec_id]->en_state = SECY_ENABLED;
+ else
+ macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case DISABLE_MACSEC:
+ pr_debug("DISABLE_MACSEC\n");
+
+ rv = disable_macsec(check, &macsec_id);
+
+ macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+
+ break;
+
+ case DISABLE_ALL:
+ pr_debug("DISABLE_ALL\n");
+
+ rv = disable_all(check, &macsec_id);
+
+ macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
+
+ rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
+ if (unlikely(rv < 0))
+ goto _release;
+ break;
+
+ default:
+ /* should never get here */
+ pr_err("not a state\n");
+ break;
+ }
+
+	kfree(check);
+	return;
+
+_release:
+ for (i = 0; i < FM_MAX_NUM_OF_MACS; i++)
+ deinit_macsec(i);
+
+ /* Reset the TX hooks */
+ memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
+ fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
+
+ for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
+
+ if (!macsec_priv[i]->net_dev)
+ continue;
+
+ free_percpu(macsec_priv[i]->percpu_priv);
+
+ /* Delete the fman queues */
+ list_for_each_entry_safe(dpa_fq,
+ tmp,
+ &macsec_priv[i]->dpa_fq_list,
+ list) {
+ dev = dpa_fq->net_dev->dev.parent;
+ rv = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
+ if (unlikely(rv < 0))
+ pr_err("_dpa_fq_fre=%d\n", rv);
+ }
+
+ macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
+ kfree(macsec_priv[i]);
+ macsec_priv[i] = NULL;
+ }
+
+ kfree(check);
+
+ netlink_kernel_release(nl_sk);
+}
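+
+/* A typical user space session walks the cases above in order:
+ * ENABLE_MACSEC, ENABLE_SECY, TX_SA_CREATE, TX_SA_ACTIVATE, then
+ * RX_SC_CREATE/RX_SA_CREATE/RX_SA_ACTIVATE per peer, and finally
+ * DISABLE_ALL (or the individual DISABLE_* commands) at teardown.
+ */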
+
+static struct netlink_kernel_cfg ms_cfg = {
+ .groups = 1,
+ .input = switch_messages,
+};
+
+static int __init macsec_init(void)
+{
+ struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
+ int ret, i;
+
+ pr_debug("Entering: %s\n", __func__);
+
+ /* If there is no interface we want macsec on, just exit. */
+ parse_ifs();
+ for (i = 0; i < macsec_ifs_cnt; i++) {
+ if (!macsec_ifs[i]) {
+ pr_err("Interface unknown\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Actually send the info to the user through a given socket. */
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &ms_cfg);
+	if (unlikely(!nl_sk)) {
+		pr_err("Error creating socket.\n");
+		/* No socket and no hooks to undo yet, so fail directly
+		 * instead of going through _release.
+		 */
+		return -ENOMEM;
+	}
+
+ ret = macsec_setup();
+ if (unlikely(ret != 0)) {
+ pr_err("Setup of macsec failed\n");
+ goto _release;
+ }
+
+ /* set dpaa hooks for default queues */
+ memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
+ macsec_dpaa_eth_hooks.tx = (dpaa_eth_egress_hook_t)(macsec_tx_hook);
+ macsec_dpaa_eth_hooks.rx_default =
+ (dpaa_eth_ingress_hook_t)(macsec_rx_hook);
+
+ fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
+
+ return 0;
+
+_release:
+ memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
+ fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
+ netlink_kernel_release(nl_sk);
+ return ret;
+}
+
+static void __exit macsec_exit(void)
+{
+ int _errno;
+ struct dpa_fq *dpa_fq, *tmp;
+ struct device *dev;
+ struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
+ int i;
+
+ pr_debug("exiting macsec module\n");
+
+ for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
+		/* If an error path in switch_messages() has already released
+		 * this interface, there is nothing left to tear down here.
+		 */
+		if (!macsec_priv[i] || !macsec_priv[i]->net_dev) {
+ pr_debug("no release needed\n");
+ continue;
+ }
+ deinit_macsec(i);
+ }
+
+ /* Reset the TX hooks before exiting */
+ memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
+ fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
+
+ for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
+		if (!macsec_priv[i] || !macsec_priv[i]->net_dev) {
+ pr_debug("no release needed\n");
+ continue;
+ }
+
+ free_percpu(macsec_priv[i]->percpu_priv);
+
+ /* Delete the fman queues */
+ list_for_each_entry_safe(dpa_fq, tmp,
+ &macsec_priv[i]->dpa_fq_list, list) {
+ if (dpa_fq) {
+ dev = dpa_fq->net_dev->dev.parent;
+ _errno = _dpa_fq_free(dev,
+ (struct qman_fq *)dpa_fq);
+ if (unlikely(_errno < 0))
+ pr_err("_dpa_fq_fre=%d\n", _errno);
+ }
+ }
+
+ /* restore ethtool ops to the previous private ones */
+ macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
+
+ kfree(macsec_priv[i]);
+ }
+
+ netlink_kernel_release(nl_sk);
+
+ pr_debug("exited macsec module\n");
+}
+
+module_init(macsec_init);
+module_exit(macsec_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
@@ -0,0 +1,294 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_ETH_MACSEC_H
+#define __DPAA_ETH_MACSEC_H
+
+#include "mac.h"
+
+#define NETLINK_USER 31
+#define MAX_NUM_OF_SECY 1
+#define MAX_LEN 100
+#define FM_FD_STAT_RX_MACSEC 0x00800000
+#define MACSEC_ETH_TX_QUEUES NR_CPUS
+#define MACSEC_REG_OFFSET 0x800
+#define ACK 0
+#define NACK -1
+
+extern const struct dpa_fq_cbs_t private_fq_cbs;
+
+extern int dpa_macsec_get_sset_count(struct net_device *net_dev, int type);
+extern void
+dpa_macsec_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats, u64 *data);
+extern void
+dpa_macsec_get_strings(struct net_device *net_dev,
+ u32 stringset, u8 *data);
+
+enum msg_type {ENABLE_MACSEC,
+ SET_EXCEPTION,
+ ENABLE_SECY,
+ TX_SA_CREATE,
+ TX_SA_ACTIVATE,
+ RX_SC_CREATE,
+ RX_SA_CREATE,
+ RX_SA_ACTIVATE,
+ RX_SA_DISABLE,
+ RX_SA_DELETE,
+ RX_SC_DELETE,
+ TX_SA_DELETE,
+ DISABLE_MACSEC,
+ DISABLE_SECY,
+ DISABLE_ALL,
+ GET_REVISION,
+ UPDATE_NPN,
+ UPDATE_LPN,
+ GET_TXSC_PHYS_ID,
+ GET_RXSC_PHYS_ID,
+ GET_TXSA_AN,
+ MODIFY_TXSA_KEY,
+ MODIFY_RXSA_KEY,
+};
+
+enum macsec_enablement {MACSEC_DISABLED, MACSEC_ENABLED, SECY_ENABLED};
+
+struct enable_secy {
+ int macsec_id;
+
+ u64 sci; /* MAC address(48b) + port_id(16b) */
+
+ bool config_insertion_mode;
+ fm_macsec_sci_insertion_mode sci_insertion_mode;
+
+ bool config_protect_frames;
+ bool protect_frames;
+
+ bool config_replay_window;
+ bool replay_protect;
+ uint32_t replay_window;
+
+ bool config_validation_mode;
+ fm_macsec_valid_frame_behavior validate_frames;
+
+ bool config_confidentiality;
+ bool confidentiality_enable;
+ uint32_t confidentiality_offset;
+
+ bool config_point_to_point;
+
+ bool config_exception;
+ bool enable_exception;
+ fm_macsec_secy_exception exception;
+
+ bool config_event;
+ bool enable_event;
+ fm_macsec_secy_event event;
+};
+
+struct macsec_data {
+ char *if_name;
+ size_t if_name_length; /* including string terminator */
+
+ bool config_unknown_sci_treatment;
+ fm_macsec_unknown_sci_frame_treatment unknown_sci_treatment;
+
+ bool config_invalid_tag_treatment;
+ bool deliver_uncontrolled;
+
+ bool config_kay_frame_treatment;
+ bool discard_uncontrolled;
+
+ bool config_untag_treatment;
+ fm_macsec_untag_frame_treatment untag_treatment;
+
+ bool config_pn_exhaustion_threshold;
+ uint32_t pn_threshold;
+
+ bool config_keys_unreadable;
+
+ bool config_sectag_without_sci;
+
+ bool config_exception;
+ bool enable_exception;
+ fm_macsec_exception exception;
+};
+
+struct set_exception {
+ int macsec_id;
+ bool enable_exception;
+ fm_macsec_exception exception;
+};
+
+struct create_tx_sa {
+ int macsec_id;
+ u8 an; /* association number */
+ u8 *sak; /* secure assoc key */
+ u32 sak_len; /* assoc key length */
+};
+
+struct modify_tx_sa_key {
+ int macsec_id;
+ u8 an; /* association number */
+ u8 *sak; /* secure assoc key */
+ u32 sak_len; /* assoc key length */
+};
+
+struct activate_tx_sa {
+ int macsec_id;
+ u8 an; /* association number */
+};
+
+struct create_rx_sc {
+ int macsec_id;
+ u64 sci;
+};
+
+struct delete_rx_sc {
+ int macsec_id;
+ u32 rx_sc_id;
+};
+
+struct get_rx_sc_id {
+ int macsec_id;
+ u32 rx_sc_id;
+};
+
+struct create_rx_sa {
+ int macsec_id;
+ u32 rx_sc_id;
+ u8 an;
+ u32 lpn;
+ u8 *sak;
+ u32 sak_len;
+};
+
+struct activate_rx_sa {
+ int macsec_id;
+ u32 rx_sc_id;
+ u8 an;
+};
+
+struct disable_rx_sa {
+ int macsec_id;
+ u32 rx_sc_id;
+ u8 an;
+};
+
+struct delete_rx_sa {
+ int macsec_id;
+ u32 rx_sc_id;
+ u8 an;
+};
+
+struct delete_tx_sa {
+ int macsec_id;
+ u32 rx_sc_id;
+ u8 an;
+};
+
+struct update_npn {
+ int macsec_id;
+ u32 rx_sc_id;
+ u8 an;
+ u32 pn;
+};
+
+struct update_lpn {
+ int macsec_id;
+ u32 rx_sc_id;
+ u8 an;
+ u32 pn;
+};
+
+struct modify_rx_sa_key {
+ int macsec_id;
+ u32 rx_sc_id;
+ u8 an;
+ u8 *sak;
+ u32 sak_len;
+};
+
+struct generic_msg {
+ enum msg_type chf;
+ union {
+ int macsec_id;
+ struct macsec_data en_macsec;
+ struct enable_secy secy;
+ struct create_tx_sa c_tx_sa;
+ struct activate_tx_sa a_tx_sa;
+ struct create_rx_sc c_rx_sc;
+ struct get_rx_sc_id get_rx_sc_id;
+ struct create_rx_sa c_rx_sa;
+ struct activate_rx_sa a_rx_sa;
+ struct disable_rx_sa d_rx_sa;
+ struct delete_rx_sa del_rx_sa;
+ struct delete_rx_sc del_rx_sc;
+ struct delete_tx_sa del_tx_sa;
+ struct update_npn update_npn;
+ struct update_lpn update_lpn;
+ struct modify_tx_sa_key modify_tx_sa_key;
+ struct modify_rx_sa_key modify_rx_sa_key;
+ struct set_exception set_ex;
+ } payload;
+};
+
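+/* Illustrative request flow (a sketch only, not part of this driver):
+ * a userspace control application binds a raw NETLINK_USER socket and
+ * sends a struct generic_msg immediately after the netlink header; the
+ * reply payload is a single int, ACK (0) or NACK (-1). The socket
+ * boilerplate below is an assumption, for illustration only:
+ *
+ *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USER);
+ *	struct sockaddr_nl src = { .nl_family = AF_NETLINK,
+ *				   .nl_pid = getpid() };
+ *	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
+ *	struct {
+ *		struct nlmsghdr nlh;
+ *		struct generic_msg msg;
+ *	} req = {
+ *		.nlh = {
+ *			.nlmsg_len = NLMSG_LENGTH(sizeof(struct generic_msg)),
+ *			.nlmsg_pid = getpid(),
+ *		},
+ *		.msg = { .chf = DISABLE_ALL, .payload.macsec_id = 0 },
+ *	};
+ *
+ *	bind(fd, (struct sockaddr *)&src, sizeof(src));
+ *	sendto(fd, &req, req.nlh.nlmsg_len, 0,
+ *	       (struct sockaddr *)&dst, sizeof(dst));
+ */
+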
+struct macsec_percpu_priv_s {
+ u64 rx_macsec;
+ u64 tx_macsec;
+};
+
+struct macsec_priv_s {
+ struct macsec_percpu_priv_s __percpu *percpu_priv;
+
+ struct net_device *net_dev;
+ struct mac_device *mac_dev;
+
+ struct qman_fq *egress_fqs[MACSEC_ETH_TX_QUEUES];
+ struct qman_fq *conf_fqs[MACSEC_ETH_TX_QUEUES];
+ struct list_head dpa_fq_list;
+ uint32_t msg_enable; /* net_device message level */
+ uint16_t channel;
+ struct fm_macsec_dev *fm_macsec;
+
+ struct fm_macsec_secy_dev *fm_ms_secy;
+ uint8_t an;
+
+ struct rx_sc_dev *rx_sc_dev[NUM_OF_RX_SC];
+ uint8_t *sa_key;
+ enum macsec_enablement en_state;
+
+ uintptr_t vaddr;
+ struct resource *fman_resource;
+};
+
+struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev);
+
+#endif /* __DPAA_ETH_MACSEC_H */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
@@ -0,0 +1,381 @@
+/* Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_eth_base.h"
+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
+#include "mac.h"
+
+#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+
+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
+#ifdef CONFIG_PM
+
+static int proxy_suspend(struct device *dev)
+{
+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
+ struct mac_device *mac_dev = proxy_dev->mac_dev;
+ int err = 0;
+
+ err = fm_port_suspend(mac_dev->port_dev[RX]);
+ if (err)
+ goto port_suspend_failed;
+
+ err = fm_port_suspend(mac_dev->port_dev[TX]);
+ if (err)
+ err = fm_port_resume(mac_dev->port_dev[RX]);
+
+port_suspend_failed:
+ return err;
+}
+
+static int proxy_resume(struct device *dev)
+{
+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
+ struct mac_device *mac_dev = proxy_dev->mac_dev;
+ int err = 0;
+
+ err = fm_port_resume(mac_dev->port_dev[TX]);
+ if (err)
+ goto port_resume_failed;
+
+ err = fm_port_resume(mac_dev->port_dev[RX]);
+ if (err)
+ err = fm_port_suspend(mac_dev->port_dev[TX]);
+
+port_resume_failed:
+ return err;
+}
+
+static const struct dev_pm_ops proxy_pm_ops = {
+ .suspend = proxy_suspend,
+ .resume = proxy_resume,
+};
+
+#define PROXY_PM_OPS (&proxy_pm_ops)
+
+#else /* CONFIG_PM */
+
+#define PROXY_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
+static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
+{
+ int err = 0, i;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct list_head proxy_fq_list;
+ size_t count;
+ struct fm_port_fqs port_fqs;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+ struct mac_device *mac_dev;
+ struct proxy_device *proxy_dev;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ /* Get the buffer pools assigned to this interface */
+ dpa_bp = dpa_bp_probe(_of_dev, &count);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+ mac_dev = dpa_mac_probe(_of_dev);
+ if (IS_ERR(mac_dev))
+ return PTR_ERR(mac_dev);
+
+ proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
+ if (!proxy_dev) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ proxy_dev->mac_dev = mac_dev;
+ dev_set_drvdata(dev, proxy_dev);
+
+ /* We have physical ports, so we need to establish
+ * the buffer layout.
+ */
+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+ GFP_KERNEL);
+ if (!buf_layout) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+ dpa_set_buffers_layout(mac_dev, buf_layout);
+
+ INIT_LIST_HEAD(&proxy_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
+ if (!err)
+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
+ TX);
+ if (err < 0) {
+ devm_kfree(dev, buf_layout);
+ return err;
+ }
+
+ /* Proxy initializer - Just configures the MAC on behalf of
+ * another partition.
+ */
+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+ buf_layout, dev);
+
+	/* The ports are enabled below; the memory allocated at probe time
+	 * is no longer needed and can be freed.
+	 */
+ devm_kfree(dev, buf_layout);
+ devm_kfree(dev, dpa_bp);
+
+ /* Free FQ structures */
+ devm_kfree(dev, port_fqs.rx_defq);
+ devm_kfree(dev, port_fqs.rx_errq);
+ devm_kfree(dev, port_fqs.tx_defq);
+ devm_kfree(dev, port_fqs.tx_errq);
+
+ for_each_port_device(i, mac_dev->port_dev) {
+ err = fm_port_enable(mac_dev->port_dev[i]);
+ if (err)
+ goto port_enable_fail;
+ }
+
+ dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
+
+ return 0; /* Proxy interface initialization ended */
+
+port_enable_fail:
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_disable(mac_dev->port_dev[i]);
+ dpa_eth_proxy_remove(_of_dev);
+
+ return err;
+}
+
+int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
+ struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ int _errno;
+
+ mac_dev = proxy_dev->mac_dev;
+
+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
+ net_dev->dev_addr);
+ if (_errno < 0)
+ return _errno;
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_proxy_set_mac_address);
+
+int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
+ struct net_device *net_dev)
+{
+ struct mac_device *mac_dev = proxy_dev->mac_dev;
+ int _errno;
+
+ if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
+ mac_dev->promisc = !mac_dev->promisc;
+ _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
+ mac_dev->promisc);
+ if (unlikely(_errno < 0))
+ netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
+ _errno);
+ }
+
+ _errno = mac_dev->set_multi(net_dev, mac_dev);
+ if (unlikely(_errno < 0))
+ return _errno;
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
+
+int dpa_proxy_start(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ const struct dpa_priv_s *priv;
+ struct proxy_device *proxy_dev;
+ int _errno;
+ int i;
+
+ priv = netdev_priv(net_dev);
+ proxy_dev = (struct proxy_device *)priv->peer;
+ mac_dev = proxy_dev->mac_dev;
+
+ _errno = mac_dev->init_phy(net_dev, mac_dev);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev, "init_phy() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ for_each_port_device(i, mac_dev->port_dev) {
+ _errno = fm_port_enable(mac_dev->port_dev[i]);
+ if (_errno)
+ goto port_enable_fail;
+ }
+
+ _errno = mac_dev->start(mac_dev);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev, "mac_dev->start() = %d\n",
+ _errno);
+ goto port_enable_fail;
+ }
+
+ return _errno;
+
+port_enable_fail:
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_disable(mac_dev->port_dev[i]);
+
+ return _errno;
+}
+EXPORT_SYMBOL(dpa_proxy_start);
+
+int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
+{
+ struct mac_device *mac_dev = proxy_dev->mac_dev;
+ const struct dpa_priv_s *priv = netdev_priv(net_dev);
+ int _errno, i, err;
+
+ _errno = mac_dev->stop(mac_dev);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev, "mac_dev->stop() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ for_each_port_device(i, mac_dev->port_dev) {
+ err = fm_port_disable(mac_dev->port_dev[i]);
+ _errno = err ? err : _errno;
+ }
+
+ if (mac_dev->phy_dev)
+ phy_disconnect(mac_dev->phy_dev);
+ mac_dev->phy_dev = NULL;
+
+ return _errno;
+}
+EXPORT_SYMBOL(dpa_proxy_stop);
+
+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
+{
+ struct device *dev = &of_dev->dev;
+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
+
+ kfree(proxy_dev);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id dpa_proxy_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-init"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_proxy_match);
+
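+/* For illustration only: a device tree node that would bind to this
+ * driver might look as below. The exact property set is board-specific
+ * and the properties shown are assumptions, not defined by this patch:
+ *
+ *	ethernet@10 {
+ *		compatible = "fsl,dpa-ethernet-init";
+ *		fsl,fman-mac = <&enet0>;
+ *		fsl,qman-frame-queues-rx = <0x50 8>;
+ *		fsl,qman-frame-queues-tx = <0x70 8>;
+ *	};
+ */
+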
+static struct platform_driver dpa_proxy_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME "-proxy",
+ .of_match_table = dpa_proxy_match,
+ .owner = THIS_MODULE,
+ .pm = PROXY_PM_OPS,
+ },
+ .probe = dpaa_eth_proxy_probe,
+ .remove = dpa_eth_proxy_remove
+};
+
+static int __init __cold dpa_proxy_load(void)
+{
+ int _errno;
+
+ pr_info(DPA_DESCRIPTION "\n");
+
+ /* Initialize dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+
+ _errno = platform_driver_register(&dpa_proxy_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(dpa_proxy_load);
+
+static void __exit __cold dpa_proxy_unload(void)
+{
+ platform_driver_unregister(&dpa_proxy_driver);
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(dpa_proxy_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
@@ -0,0 +1,1128 @@
+/* Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/highmem.h>
+#include <linux/fsl_bman.h>
+
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#ifdef CONFIG_FSL_DPAA_1588
+#include "dpaa_1588.h"
+#endif
+#ifdef CONFIG_FSL_DPAA_CEETM
+#include "dpaa_eth_ceetm.h"
+#endif
+
+/* DMA map and add a page frag back into the bpool.
+ * @vaddr fragment must have been allocated with netdev_alloc_frag(),
+ * specifically for fitting into @dpa_bp.
+ */
+static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
+ int *count_ptr)
+{
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+
+ memset(&bmb, 0, sizeof(struct bm_buffer));
+
+ addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ return;
+ }
+
+ bm_buffer_set64(&bmb, addr);
+
+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+ cpu_relax();
+
+ (*count_ptr)++;
+}
+
+static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
+{
+ struct bm_buffer bmb[8];
+ void *new_buf;
+ dma_addr_t addr;
+ uint8_t i;
+ struct device *dev = dpa_bp->dev;
+ struct sk_buff *skb, **skbh;
+
+ memset(bmb, 0, sizeof(struct bm_buffer) * 8);
+
+ for (i = 0; i < 8; i++) {
+ /* We'll prepend the skb back-pointer; can't use the DPA
+ * priv space, because FMan will overwrite it (from offset 0)
+ * if it ends up being the second, third, etc. fragment
+ * in a S/G frame.
+ *
+ * We only need enough space to store a pointer, but allocate
+ * an entire cacheline for performance reasons.
+ */
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+ new_buf = page_address(alloc_page(GFP_ATOMIC));
+#else
+ new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
+#endif
+ if (unlikely(!new_buf))
+ goto netdev_alloc_failed;
+ new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
+
+ skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ put_page(virt_to_head_page(new_buf));
+ goto build_skb_failed;
+ }
+ DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
+
+ addr = dma_map_single(dev, new_buf,
+ dpa_bp->size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto dma_map_failed;
+
+ bm_buffer_set64(&bmb[i], addr);
+ }
+
+release_bufs:
+ /* Release the buffers. In case bman is busy, keep trying
+ * until successful. bman_release() is guaranteed to succeed
+ * in a reasonable amount of time
+ */
+ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
+ cpu_relax();
+ return i;
+
+dma_map_failed:
+ kfree_skb(skb);
+
+build_skb_failed:
+netdev_alloc_failed:
+ net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
+ WARN_ONCE(1, "Memory allocation failure on Rx\n");
+
+ bm_buffer_set64(&bmb[i], 0);
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buffer.
+ */
+ if (likely(i))
+ goto release_bufs;
+
+ return 0;
+}
+
+/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
+static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
+{
+ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
+ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
+}
+
+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
+{
+ int i;
+
+ /* Give each CPU an allotment of "config_count" buffers */
+ for_each_possible_cpu(i) {
+ int j;
+
+ /* Although we access another CPU's counters here
+ * we do it at boot time so it is safe
+ */
+ for (j = 0; j < dpa_bp->config_count; j += 8)
+ dpa_bp_add_8_bufs(dpa_bp, i);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dpa_bp_priv_seed);
+
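+/* A note on the seeding arithmetic: buffers are always released in
+ * batches of 8, so each CPU effectively receives roundup(config_count, 8)
+ * buffers; e.g. config_count = 100 yields 13 batches, i.e. 104 buffers.
+ */
+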
+/* Add buffers (pages) for Rx processing whenever the bpool count falls
+ * below REFILL_THRESHOLD.
+ */
+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
+{
+ int count = *countptr;
+ int new_bufs;
+
+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+ do {
+ new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
+ if (unlikely(!new_bufs)) {
+ /* Avoid looping forever if we've temporarily
+ * run out of memory. We'll try again at the
+ * next NAPI cycle.
+ */
+ break;
+ }
+ count += new_bufs;
+ } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+ *countptr = count;
+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dpaa_eth_refill_bpools);
+
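+/* A worked example of the refill logic, assuming for illustration
+ * REFILL_THRESHOLD = 80 and MAX_BUF_COUNT = 128: a per-CPU count of 75
+ * triggers ceil((128 - 75) / 8) = 7 batches of 8 buffers, leaving
+ * 75 + 56 = 131 buffers in the pool (the loop only stops once the count
+ * reaches at least MAX_BUF_COUNT, or an allocation fails).
+ */
+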
+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases when not all fd relevant fields were filled in.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd)
+{
+ const struct qm_sg_entry *sgt;
+ int i;
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ dma_addr_t addr = qm_fd_addr(fd);
+ dma_addr_t sg_addr;
+ struct sk_buff **skbh;
+ struct sk_buff *skb = NULL;
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ int nr_frags;
+ int sg_len;
+
+ /* retrieve skb back pointer */
+ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
+
+ if (unlikely(fd->format == qm_fd_sg)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+		/* Working around the 4K DMA issue can yield more S/G
+		 * fragments than the skb had, so unmap the maximum
+		 * SGT size.
+		 */
+ dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
+ sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES,
+ dma_dir);
+#else
+ dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+#endif
+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
+ * it's from lowmem.
+ */
+ sgt = phys_to_virt(addr + dpa_fd_offset(fd));
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid &&
+ priv->tsu->hwts_tx_en_ioctl)
+ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+ if (unlikely(priv->ts_tx_en &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
+ sg_addr = qm_sg_addr(&sgt[0]);
+ sg_len = qm_sg_entry_get_len(&sgt[0]);
+ dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+ i = 1;
+ do {
+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_len = qm_sg_entry_get_len(&sgt[i]);
+ dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
+ } while (!qm_sg_entry_get_final(&sgt[i++]));
+#else
+ /* remaining pages were mapped with dma_map_page() */
+ for (i = 1; i <= nr_frags; i++) {
+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_len = qm_sg_entry_get_len(&sgt[i]);
+ dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
+ }
+#endif
+
+ /* Free the page frag that we allocated on Tx */
+ put_page(virt_to_head_page(sgt));
+ } else {
+ dma_unmap_single(dpa_bp->dev, addr,
+ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+#ifdef CONFIG_FSL_DPAA_TS
+ /* get the timestamp for non-SG frames */
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid &&
+ priv->tsu->hwts_tx_en_ioctl)
+ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
+#endif
+ if (unlikely(priv->ts_tx_en &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+#endif
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
+
+#ifndef CONFIG_FSL_DPAA_TS
+bool dpa_skb_is_recyclable(struct sk_buff *skb)
+{
+ /* No recycling possible if skb buffer is kmalloc'ed */
+ if (skb->head_frag == 0)
+ return false;
+
+	/* or if it's a userspace buffer */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+ return false;
+
+ /* or if it's cloned or shared */
+ if (skb_shared(skb) || skb_cloned(skb) ||
+ skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(dpa_skb_is_recyclable);
+
+bool dpa_buf_is_recyclable(struct sk_buff *skb,
+ uint32_t min_size,
+ uint16_t min_offset,
+ unsigned char **new_buf_start)
+{
+ unsigned char *new;
+
+ /* In order to recycle a buffer, the following conditions must be met:
+ * - buffer size no less than the buffer pool size
+ * - buffer size no higher than an upper limit (to avoid moving too much
+ * system memory to the buffer pools)
+ * - buffer address aligned to cacheline bytes
+ * - offset of data from start of buffer no lower than a minimum value
+ * - offset of data from start of buffer no higher than a maximum value
+ */
+ new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
+
+ /* left align to the nearest cacheline */
+ new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
+
+ if (likely(new >= skb->head &&
+ new >= (skb->data - DPA_MAX_FD_OFFSET) &&
+ skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
+ *new_buf_start = new;
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(dpa_buf_is_recyclable);
+#endif
+
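+/* A worked example of the recycling checks above, with assumed numbers:
+ * take min_size = 0x800, min_offset = 0x70 and 64-byte cachelines, and a
+ * buffer whose skb_end_pointer() sits at offset 0x900 and skb->data at
+ * offset 0x180 within its page. Then
+ * new = min(0x900 - 0x800, 0x180 - 0x70) = 0x100, which is already
+ * cacheline-aligned. The buffer is recyclable if 0x100 is not below
+ * skb->head, the resulting FD offset (0x180 - 0x100 = 0x80) fits in
+ * DPA_MAX_FD_OFFSET, and the resulting buffer size
+ * (0x900 - 0x100 = 0x800) does not exceed DPA_RECYCLE_MAX_SIZE.
+ */
+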
+/* Build a linear skb around the received buffer.
+ * We are guaranteed there is enough room at the end of the data buffer to
+ * accommodate the shared info area of the skb.
+ */
+static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd, int *use_gro)
+{
+ dma_addr_t addr = qm_fd_addr(fd);
+ ssize_t fd_off = dpa_fd_offset(fd);
+ void *vaddr;
+ const fm_prs_result_t *parse_results;
+ struct sk_buff *skb = NULL, **skbh;
+
+ vaddr = phys_to_virt(addr);
+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ /* Retrieve the skb and adjust data and tail pointers, to make sure
+ * forwarded skbs will have enough space on Tx if extra headers
+ * are added.
+ */
+ DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
+
+#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
+ /* When using jumbo Rx buffers, we risk having frames dropped due to
+ * the socket backlog reaching its maximum allowed size.
+ * Use the frame length for the skb truesize instead of the buffer
+ * size, as this is the size of the data that actually gets copied to
+ * userspace.
+ */
+ skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
+#endif
+
+ DPA_BUG_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, dpa_fd_length(fd));
+
+ /* Peek at the parse results for csum validation */
+ parse_results = (const fm_prs_result_t *)(vaddr +
+ DPA_RX_PRIV_DATA_SIZE);
+ _dpa_process_parse_results(parse_results, fd, skb, use_gro);
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
+ dpa_ptp_store_rxstamp(priv, skb, vaddr);
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+ if (priv->ts_rx_en)
+ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ return skb;
+}
+
+/* Build an skb with the data of the first S/G entry in the linear portion and
+ * the rest of the frame as skb fragments.
+ *
+ * The page fragment holding the S/G Table is recycled here.
+ */
+static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd, int *use_gro,
+ int *count_ptr)
+{
+ const struct qm_sg_entry *sgt;
+ dma_addr_t addr = qm_fd_addr(fd);
+ ssize_t fd_off = dpa_fd_offset(fd);
+ dma_addr_t sg_addr;
+ void *vaddr, *sg_vaddr;
+ struct dpa_bp *dpa_bp;
+ struct page *page, *head_page;
+ int frag_offset, frag_len;
+ int page_offset;
+ int i;
+ const fm_prs_result_t *parse_results;
+ struct sk_buff *skb = NULL, *skb_tmp, **skbh;
+
+ vaddr = phys_to_virt(addr);
+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ dpa_bp = priv->dpa_bp;
+ /* Iterate through the SGT entries and add data buffers to the skb */
+ sgt = vaddr + fd_off;
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ /* Extension bit is not supported */
+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
+
+ /* We use a single global Rx pool */
+ DPA_BUG_ON(dpa_bp !=
+ dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+ SMP_CACHE_BYTES));
+
+ dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+ if (i == 0) {
+ DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
+ DPA_BUG_ON(skb->head != sg_vaddr);
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid &&
+ priv->tsu->hwts_rx_en_ioctl)
+ dpa_ptp_store_rxstamp(priv, skb, vaddr);
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+ if (priv->ts_rx_en)
+ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ /* In the case of a SG frame, FMan stores the Internal
+ * Context in the buffer containing the sgt.
+ * Inspect the parse results before anything else.
+ */
+ parse_results = (const fm_prs_result_t *)(vaddr +
+ DPA_RX_PRIV_DATA_SIZE);
+ _dpa_process_parse_results(parse_results, fd, skb,
+ use_gro);
+
+ /* Make sure forwarded skbs will have enough space
+ * on Tx, if extra headers are added.
+ */
+ DPA_BUG_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
+ } else {
+ /* Not the first S/G entry; all data from buffer will
+ * be added in an skb fragment; fragment index is offset
+ * by one since first S/G entry was incorporated in the
+ * linear part of the skb.
+ *
+ * Caution: 'page' may be a tail page.
+ */
+ DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Free (only) the skbuff shell because its data buffer
+ * is already a frag in the main skb.
+ */
+ get_page(head_page);
+ dev_kfree_skb(skb_tmp);
+
+ /* Compute offset in (possibly tail) page */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+ /* page_offset only refers to the beginning of sgt[i];
+ * but the buffer itself may have an internal offset.
+ */
+ frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
+ page_offset;
+ frag_len = qm_sg_entry_get_len(&sgt[i]);
+ /* skb_add_rx_frag() does no checking on the page; if
+ * we pass it a tail page, we'll end up with
+			 * bad page accounting and eventually with segfaults.
+ */
+ skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
+ frag_len, dpa_bp->size);
+ }
+ /* Update the pool count for the current {cpu x bpool} */
+ (*count_ptr)--;
+
+ if (qm_sg_entry_get_final(&sgt[i]))
+ break;
+ }
+ WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
+
+ /* recycle the SGT fragment */
+ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
+ dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
+ return skb;
+}
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
+ struct sk_buff *skb)
+{
+ if (unlikely(priv->loop_to < 0))
+ return 0; /* loop disabled by default */
+
+ skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
+ dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
+
+ return 1; /* Frame Tx on the selected interface */
+}
+#endif
+
+void __hot _dpa_rx(struct net_device *net_dev,
+ struct qman_portal *portal,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid,
+ int *count_ptr)
+{
+ struct dpa_bp *dpa_bp;
+ struct sk_buff *skb;
+ dma_addr_t addr = qm_fd_addr(fd);
+ u32 fd_status = fd->status;
+ unsigned int skb_len;
+ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
+ int use_gro = net_dev->features & NETIF_F_GRO;
+
+	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd_status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_stats->rx_errors++;
+ goto _release_frame;
+ }
+
+ dpa_bp = priv->dpa_bp;
+ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
+
+ /* prefetch the first 64 bytes of the frame or the SGT start */
+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
+ prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
+
+ /* The only FD types that we may receive are contig and S/G */
+ DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
+
+ if (likely(fd->format == qm_fd_contig)) {
+#ifdef CONFIG_FSL_DPAA_HOOKS
+ /* Execute the Rx processing hook, if it exists. */
+ if (dpaa_eth_hooks.rx_default &&
+ dpaa_eth_hooks.rx_default((void *)fd, net_dev,
+ fqid) == DPAA_ETH_STOLEN) {
+ /* won't count the rx bytes in */
+ return;
+ }
+#endif
+ skb = contig_fd_to_skb(priv, fd, &use_gro);
+ } else {
+ skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
+ percpu_priv->rx_sg++;
+ }
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ (*count_ptr)--;
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ /* IP Reassembled frames are allowed to be larger than MTU */
+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
+ !(fd_status & FM_FD_IPR))) {
+ percpu_stats->rx_dropped++;
+ goto drop_bad_frame;
+ }
+
+ skb_len = skb->len;
+
+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
+ if (dpa_skb_loop(priv, skb)) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += skb_len;
+ return;
+ }
+#endif
+
+ if (use_gro) {
+ gro_result_t gro_result;
+ const struct qman_portal_config *pc =
+ qman_p_get_portal_config(portal);
+ struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
+
+ np->p = portal;
+ gro_result = napi_gro_receive(&np->napi, skb);
+ /* If frame is dropped by the stack, rx_dropped counter is
+ * incremented automatically, so no need for us to update it
+ */
+ if (unlikely(gro_result == GRO_DROP))
+ goto packet_dropped;
+ } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ goto packet_dropped;
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += skb_len;
+
+packet_dropped:
+ return;
+
+drop_bad_frame:
+ dev_kfree_skb(skb);
+ return;
+
+_release_frame:
+ dpa_fd_release(net_dev, fd);
+}
+
+int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd,
+ int *count_ptr, int *offset)
+{
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ struct net_device *net_dev = priv->net_dev;
+ int err;
+ enum dma_data_direction dma_dir;
+ unsigned char *buffer_start;
+
+#ifndef CONFIG_FSL_DPAA_TS
+ /* Check recycling conditions; only if timestamp support is not
+ * enabled, otherwise we need the fd back on tx confirmation
+ */
+
+ /* We can recycle the buffer if:
+ * - the pool is not full
+ * - the buffer meets the skb recycling conditions
+ * - the buffer meets our own (size, offset, align) conditions
+ */
+ if (likely((*count_ptr < dpa_bp->target_count) &&
+ dpa_skb_is_recyclable(skb) &&
+ dpa_buf_is_recyclable(skb, dpa_bp->size,
+ priv->tx_headroom, &buffer_start))) {
+ /* Buffer is recyclable; use the new start address
+ * and set fd parameters and DMA mapping direction
+ */
+ fd->bpid = dpa_bp->bpid;
+ DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
+ fd->offset = (uint16_t)(skb->data - buffer_start);
+ dma_dir = DMA_BIDIRECTIONAL;
+
+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
+ *offset = skb_headroom(skb) - fd->offset;
+ } else
+#endif
+ {
+ /* Not recyclable.
+ * We are guaranteed to have at least tx_headroom bytes
+ * available, so just use that for offset.
+ */
+ fd->bpid = 0xff;
+ buffer_start = skb->data - priv->tx_headroom;
+ fd->offset = priv->tx_headroom;
+ dma_dir = DMA_TO_DEVICE;
+
+ /* The buffer will be Tx-confirmed, but the TxConf cb must
+ * necessarily look at our Tx private data to retrieve the
+ * skbuff. (In short: can't use DPA_WRITE_SKB_PTR() here.)
+ */
+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
+ }
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpa_enable_tx_csum(priv, skb, fd,
+ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "HW csum error: %d\n", err);
+ return err;
+ }
+
+ /* Fill in the rest of the FD fields */
+ fd->format = qm_fd_contig;
+ fd->length20 = skb->len;
+ fd->cmd |= FM_FD_CMD_FCO;
+
+ /* Map the entire buffer size that may be seen by FMan, but no more */
+ addr = dma_map_single(dpa_bp->dev, skbh,
+ skb_tail_pointer(skb) - buffer_start, dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "dma_map_single() failed\n");
+ return -EINVAL;
+ }
+ fd->addr = addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(skb_to_contig_fd);
+
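+/* The FD produced by skb_to_contig_fd() is contiguous: fd->addr is the
+ * buffer start, fd->offset the data offset within it, fd->length20 the
+ * frame length, and fd->bpid either the pool a recyclable buffer is
+ * released back to after transmission, or 0xff when the buffer must
+ * come back through Tx confirmation instead.
+ */
+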
+int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ dma_addr_t addr;
+ dma_addr_t sg_addr;
+ struct sk_buff **skbh;
+ struct net_device *net_dev = priv->net_dev;
+ int sg_len;
+ int err;
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+ unsigned long boundary;
+ int k;
+#endif
+
+ struct qm_sg_entry *sgt;
+ void *sgt_buf;
+ void *buffer_start;
+ skb_frag_t *frag;
+ int i, j;
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ const int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ fd->format = qm_fd_sg;
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+ /* get a page frag to store the SGTable */
+ sgt_buf = netdev_alloc_frag(priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES);
+ if (unlikely(!sgt_buf)) {
+ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
+ return -ENOMEM;
+ }
+
+	/* netdev_alloc_frag() does not zero the allocated memory */
+ memset(sgt_buf, 0, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES);
+#else
+ /* get a page frag to store the SGTable */
+ sgt_buf = netdev_alloc_frag(priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags));
+ if (unlikely(!sgt_buf)) {
+ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
+ return -ENOMEM;
+ }
+
+ memset(sgt_buf, 0, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags));
+#endif
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpa_enable_tx_csum(priv, skb, fd,
+ sgt_buf + DPA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "HW csum error: %d\n", err);
+ goto csum_failed;
+ }
+
+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ sg_len = skb_headlen(skb);
+ qm_sg_entry_set_bpid(&sgt[0], 0xff);
+ qm_sg_entry_set_offset(&sgt[0], 0);
+ qm_sg_entry_set_len(&sgt[0], sg_len);
+ qm_sg_entry_set_ext(&sgt[0], 0);
+ qm_sg_entry_set_final(&sgt[0], 0);
+
+ addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg0_map_failed;
+
+ }
+
+ qm_sg_entry_set64(&sgt[0], addr);
+
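+	/* A sketch of the split below, assuming HAS_DMA_ISSUE() flags
+	 * buffers that cross a 4 KiB boundary and BOUNDARY_4K() returns
+	 * that boundary's address (both defined elsewhere in this patch
+	 * set): a linear part at 0x...F80 with sg_len 0x100 crosses
+	 * 0x...1000, so sgt[0] is trimmed to 0x80 bytes and a second
+	 * entry covers the remaining 0x80 bytes at addr + 0x80.
+	 */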
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+ j = 0;
+ if (unlikely(HAS_DMA_ISSUE(skb->data, sg_len))) {
+ boundary = BOUNDARY_4K(skb->data, sg_len);
+ qm_sg_entry_set_len(&sgt[j], boundary -
+ (unsigned long)skb->data);
+
+ j++;
+ qm_sg_entry_set_bpid(&sgt[j], 0xff);
+ qm_sg_entry_set_offset(&sgt[j], 0);
+ qm_sg_entry_set_len(&sgt[j],
+ ((unsigned long)skb->data + (unsigned long)sg_len) -
+ boundary);
+ qm_sg_entry_set_ext(&sgt[j], 0);
+ qm_sg_entry_set_final(&sgt[j], 0);
+
+ /* keep the offset in the address */
+ qm_sg_entry_set64(&sgt[j], addr +
+ (boundary -
+ (unsigned long)skb->data));
+ }
+ j++;
+
+ /* populate the rest of SGT entries */
+ for (i = 1; i <= nr_frags; i++, j++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ qm_sg_entry_set_bpid(&sgt[j], 0xff);
+ qm_sg_entry_set_offset(&sgt[j], 0);
+ qm_sg_entry_set_len(&sgt[j], frag->size);
+ qm_sg_entry_set_ext(&sgt[j], 0);
+
+ DPA_BUG_ON(!skb_frag_page(frag));
+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
+ dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg_map_failed;
+ }
+
+ /* keep the offset in the address */
+ qm_sg_entry_set64(&sgt[j], addr);
+
+ if (unlikely(HAS_DMA_ISSUE(frag, frag->size))) {
+ boundary = BOUNDARY_4K(frag, frag->size);
+ qm_sg_entry_set_len(&sgt[j], boundary -
+ (unsigned long)frag);
+
+ j++;
+ qm_sg_entry_set_bpid(&sgt[j], 0xff);
+ qm_sg_entry_set_offset(&sgt[j], 0);
+ qm_sg_entry_set_len(&sgt[j],
+ ((unsigned long)frag->size -
+ (boundary - (unsigned long)frag)));
+ qm_sg_entry_set_ext(&sgt[j], 0);
+
+ /* keep the offset in the address */
+ qm_sg_entry_set64(&sgt[j], addr +
+ (boundary - (unsigned long)frag));
+ }
+
+ if (i == nr_frags)
+ qm_sg_entry_set_final(&sgt[j], 1);
+ else
+ qm_sg_entry_set_final(&sgt[j], 0);
+#else
+
+ /* populate the rest of SGT entries */
+ for (i = 1; i <= nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ qm_sg_entry_set_bpid(&sgt[i], 0xff);
+ qm_sg_entry_set_offset(&sgt[i], 0);
+ qm_sg_entry_set_len(&sgt[i], frag->size);
+ qm_sg_entry_set_ext(&sgt[i], 0);
+
+ if (i == nr_frags)
+ qm_sg_entry_set_final(&sgt[i], 1);
+ else
+ qm_sg_entry_set_final(&sgt[i], 0);
+
+ DPA_BUG_ON(!skb_frag_page(frag));
+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
+ dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg_map_failed;
+ }
+
+ /* keep the offset in the address */
+ qm_sg_entry_set64(&sgt[i], addr);
+#endif
+ }
+
+ fd->length20 = skb->len;
+ fd->offset = priv->tx_headroom;
+
+ /* DMA map the SGT page */
+ buffer_start = (void *)sgt - priv->tx_headroom;
+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+ addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES,
+ dma_dir);
+#else
+ addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+#endif
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sgt_map_failed;
+ }
+
+ fd->bpid = 0xff;
+ fd->cmd |= FM_FD_CMD_FCO;
+ fd->addr = addr;
+
+ return 0;
+
+sgt_map_failed:
+sg_map_failed:
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+ for (k = 0; k < j; k++) {
+ sg_addr = qm_sg_addr(&sgt[k]);
+ dma_unmap_page(dpa_bp->dev, sg_addr,
+ qm_sg_entry_get_len(&sgt[k]), dma_dir);
+ }
+#else
+ for (j = 0; j < i; j++) {
+ sg_addr = qm_sg_addr(&sgt[j]);
+ dma_unmap_page(dpa_bp->dev, sg_addr,
+ qm_sg_entry_get_len(&sgt[j]), dma_dir);
+ }
+#endif
+sg0_map_failed:
+csum_failed:
+ put_page(virt_to_head_page(sgt_buf));
+
+ return err;
+}
+EXPORT_SYMBOL(skb_to_sg_fd);
+
+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+ const int queue_mapping = dpa_get_queue_mapping(skb);
+ struct qman_fq *egress_fq, *conf_fq;
+
+#ifdef CONFIG_FSL_DPAA_HOOKS
+ /* If there is a Tx hook, run it. */
+ if (dpaa_eth_hooks.tx &&
+ dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
+ /* won't update any Tx stats */
+ return NETDEV_TX_OK;
+#endif
+
+ priv = netdev_priv(net_dev);
+
+#ifdef CONFIG_FSL_DPAA_CEETM
+ if (priv->ceetm_en)
+ return ceetm_tx(skb, net_dev);
+#endif
+
+ egress_fq = priv->egress_fqs[queue_mapping];
+ conf_fq = priv->conf_fqs[queue_mapping];
+
+ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
+}
+
+int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
+ struct qman_fq *egress_fq, struct qman_fq *conf_fq)
+{
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct rtnl_link_stats64 *percpu_stats;
+ int err = 0;
+ const bool nonlinear = skb_is_nonlinear(skb);
+ int *countptr, offset = 0;
+
+ priv = netdev_priv(net_dev);
+ /* Non-migratable context, safe to use raw_cpu_ptr */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+ countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+
+ clear_fd(&fd);
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
+ fd.cmd |= FM_FD_CMD_UPD;
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+ if (unlikely(priv->ts_tx_en &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ fd.cmd |= FM_FD_CMD_UPD;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif /* CONFIG_FSL_DPAA_TS */
+
+	/* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
+	 * we don't feed FMan with more fragments than it supports. Note
+	 * that the first sgt entry stores the linear part of the skb, so
+	 * we have one entry fewer available for the actual frags.
+	 */
+ if (nonlinear &&
+ likely(skb_shinfo(skb)->nr_frags < DPA_SGT_ENTRIES_THRESHOLD)) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(priv, skb, &fd);
+ percpu_priv->tx_frag_skbuffs++;
+ } else {
+ /* Make sure we have enough headroom to accommodate private
+ * data, parse results, etc. Normally this shouldn't happen if
+ * we're here via the standard kernel stack.
+ */
+ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
+ if (unlikely(!skb_new)) {
+ dev_kfree_skb(skb);
+ percpu_stats->tx_errors++;
+ return NETDEV_TX_OK;
+ }
+ dev_kfree_skb(skb);
+ skb = skb_new;
+ }
+
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb
+ */
+
+ /* Code borrowed from skb_unshare(). */
+ if (skb_cloned(skb)) {
+ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+ kfree_skb(skb);
+ skb = nskb;
+ /* skb_copy() has now linearized the skbuff. */
+ } else if (unlikely(nonlinear)) {
+ /* We are here because the egress skb contains
+ * more fragments than we support. In this case,
+ * we have no choice but to linearize it ourselves.
+ */
+ err = __skb_linearize(skb);
+ }
+ if (unlikely(!skb || err < 0))
+ /* Common out-of-memory error path */
+ goto enomem;
+
+#ifdef DPAA_LS1043A_DMA_4K_ISSUE
+ if (unlikely(HAS_DMA_ISSUE(skb->data, skb->len))) {
+ err = skb_to_sg_fd(priv, skb, &fd);
+ percpu_priv->tx_frag_skbuffs++;
+ } else {
+ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
+ }
+#else
+ /* Finally, create a contig FD from this skb */
+ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
+#endif
+ }
+ if (unlikely(err < 0))
+ goto skb_to_fd_failed;
+
+ if (fd.bpid != 0xff) {
+ skb_recycle(skb);
+ /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
+ * but we need the skb to look as if returned by build_skb().
+ * We need to manually adjust the tailptr as well.
+ */
+ skb->data = skb->head + offset;
+ skb_reset_tail_pointer(skb);
+
+ (*countptr)++;
+ percpu_priv->tx_returned++;
+ }
+
+ if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
+ goto xmit_failed;
+
+ net_dev->trans_start = jiffies;
+ return NETDEV_TX_OK;
+
+xmit_failed:
+ if (fd.bpid != 0xff) {
+ (*countptr)--;
+ percpu_priv->tx_returned--;
+ dpa_fd_release(net_dev, &fd);
+ percpu_stats->tx_errors++;
+ return NETDEV_TX_OK;
+ }
+ _dpa_cleanup_tx_fd(priv, &fd);
+skb_to_fd_failed:
+enomem:
+ percpu_stats->tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL(dpa_tx_extended);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
@@ -0,0 +1,914 @@
+/* Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/highmem.h>
+#include <linux/fsl_qman.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_eth_base.h"
+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
+#include "mac.h"
+
+/* forward declarations */
+static enum qman_cb_dqrr_result __hot
+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static enum qman_cb_dqrr_result __hot
+shared_tx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static enum qman_cb_dqrr_result
+shared_tx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static void shared_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+
+#define DPA_DESCRIPTION "FSL DPAA Shared Ethernet driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+
+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
+static uint16_t shared_tx_timeout = 1000;
+module_param(shared_tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(shared_tx_timeout, "The Tx timeout in ms");
+
+static const struct of_device_id dpa_shared_match[];
+
+static const struct net_device_ops dpa_shared_ops = {
+ .ndo_open = dpa_start,
+ .ndo_start_xmit = dpa_shared_tx,
+ .ndo_stop = dpa_stop,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats64 = dpa_get_stats64,
+ .ndo_set_mac_address = dpa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ .ndo_select_queue = dpa_select_queue,
+#endif
+ .ndo_change_mtu = dpa_change_mtu,
+ .ndo_set_rx_mode = dpa_set_rx_mode,
+ .ndo_init = dpa_ndo_init,
+ .ndo_set_features = dpa_set_features,
+ .ndo_fix_features = dpa_fix_features,
+ .ndo_do_ioctl = dpa_ioctl,
+};
+
+const struct dpa_fq_cbs_t shared_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = shared_rx_dqrr } },
+ .tx_defq = { .cb = { .dqrr = shared_tx_default_dqrr } },
+ .rx_errq = { .cb = { .dqrr = shared_rx_dqrr } },
+ .tx_errq = { .cb = { .dqrr = shared_tx_error_dqrr } },
+ .egress_ern = { .cb = { .ern = shared_ern } }
+};
+EXPORT_SYMBOL(shared_fq_cbs);
+
+static inline void * __must_check __attribute__((nonnull))
+dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr)
+{
+ return dpa_bp->vaddr + (addr - dpa_bp->paddr);
+}
+
+static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ if ((size + priv->tx_headroom) <= priv->dpa_bp[i].size)
+ return dpa_bpid2pool(priv->dpa_bp[i].bpid);
+ return ERR_PTR(-ENODEV);
+}
+
+/* Copy to a memory region that requires kmapping from a linear buffer,
+ * taking into account page boundaries in the destination
+ */
+static void
+copy_to_unmapped_area(dma_addr_t phys_start, void *src, size_t buf_size)
+{
+ struct page *page;
+ size_t size, offset;
+ void *page_vaddr;
+
+ while (buf_size > 0) {
+ offset = offset_in_page(phys_start);
+ size = (offset + buf_size > PAGE_SIZE) ?
+ PAGE_SIZE - offset : buf_size;
+
+ page = pfn_to_page(phys_start >> PAGE_SHIFT);
+ page_vaddr = kmap_atomic(page);
+
+ memcpy(page_vaddr + offset, src, size);
+
+ kunmap_atomic(page_vaddr);
+
+ phys_start += size;
+ src += size;
+ buf_size -= size;
+ }
+}
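+
+/* Worked example (assuming 4 KiB pages): a 5000-byte copy whose destination
+ * starts 3000 bytes into a page is split into a 1096-byte chunk that
+ * finishes the first page, then a 3904-byte chunk in the next page, each
+ * done under its own short-lived kmap_atomic() mapping.
+ */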
+
+/* Copy from a memory region that requires kmapping to a linear buffer,
+ * taking into account page boundaries in the source
+ */
+static void
+copy_from_unmapped_area(void *dest, dma_addr_t phys_start, size_t buf_size)
+{
+ struct page *page;
+ size_t size, offset;
+ void *page_vaddr;
+
+ while (buf_size > 0) {
+ offset = offset_in_page(phys_start);
+ size = (offset + buf_size > PAGE_SIZE) ?
+ PAGE_SIZE - offset : buf_size;
+
+ page = pfn_to_page(phys_start >> PAGE_SHIFT);
+ page_vaddr = kmap_atomic(page);
+
+ memcpy(dest, page_vaddr + offset, size);
+
+ kunmap_atomic(page_vaddr);
+
+ phys_start += size;
+ dest += size;
+ buf_size -= size;
+ }
+}
+
+static void
+dpa_fd_release_sg(const struct net_device *net_dev,
+ const struct qm_fd *fd)
+{
+ const struct dpa_priv_s *priv;
+ struct qm_sg_entry *sgt;
+ struct dpa_bp *_dpa_bp;
+ struct bm_buffer _bmb;
+
+ priv = netdev_priv(net_dev);
+
+ _bmb.hi = fd->addr_hi;
+ _bmb.lo = fd->addr_lo;
+
+ _dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(!_dpa_bp);
+
+ if (_dpa_bp->vaddr) {
+ sgt = dpa_phys2virt(_dpa_bp, bm_buf_addr(&_bmb)) +
+ dpa_fd_offset(fd);
+ dpa_release_sgt(sgt);
+ } else {
+ sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), GFP_ATOMIC);
+ if (sgt == NULL) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev,
+ "Memory allocation failed\n");
+ return;
+ }
+
+ copy_from_unmapped_area(sgt, bm_buf_addr(&_bmb) +
+ dpa_fd_offset(fd),
+ min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ _dpa_bp->size));
+ dpa_release_sgt(sgt);
+ kfree(sgt);
+ }
+
+ while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
+ cpu_relax();
+}
+
+static enum qman_cb_dqrr_result __hot
+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+ struct dpa_bp *dpa_bp;
+ struct sk_buff *skb;
+ struct qm_sg_entry *sgt;
+ int i;
+ void *frag_addr;
+ u32 frag_length;
+ u32 offset;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(!dpa_bp);
+
+	if (unlikely((fd->status & FM_FD_STAT_RX_ERRORS) != 0)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ goto out;
+ }
+
+ skb = __netdev_alloc_skb(net_dev,
+ priv->tx_headroom + dpa_fd_length(fd),
+ GFP_ATOMIC);
+ if (unlikely(skb == NULL)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "Could not alloc skb\n");
+
+ percpu_priv->stats.rx_dropped++;
+
+ goto out;
+ }
+
+ skb_reserve(skb, priv->tx_headroom);
+
+ if (fd->format == qm_fd_sg) {
+ if (dpa_bp->vaddr) {
+ sgt = dpa_phys2virt(dpa_bp,
+ qm_fd_addr(fd)) + dpa_fd_offset(fd);
+
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ offset = qm_sg_entry_get_offset(&sgt[i]);
+ frag_addr = dpa_phys2virt(dpa_bp,
+ qm_sg_addr(&sgt[i]) +
+ offset);
+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
+ frag_length = qm_sg_entry_get_len(&sgt[i]);
+
+ /* copy from sgt[i] */
+ memcpy(skb_put(skb, frag_length), frag_addr,
+ frag_length);
+ if (qm_sg_entry_get_final(&sgt[i]))
+ break;
+ }
+ } else {
+ sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ GFP_ATOMIC);
+			if (unlikely(sgt == NULL)) {
+				if (netif_msg_rx_err(priv) && net_ratelimit())
+					netdev_err(net_dev,
+						"Memory allocation failed\n");
+				percpu_priv->stats.rx_dropped++;
+				dev_kfree_skb_any(skb);
+				goto out;
+			}
+
+ copy_from_unmapped_area(sgt,
+ qm_fd_addr(fd) + dpa_fd_offset(fd),
+ min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ dpa_bp->size));
+
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
+ frag_length = qm_sg_entry_get_len(&sgt[i]);
+ copy_from_unmapped_area(
+ skb_put(skb, frag_length),
+ qm_sg_addr(&sgt[i]) +
+ qm_sg_entry_get_offset(&sgt[i]),
+ frag_length);
+
+ if (qm_sg_entry_get_final(&sgt[i]))
+ break;
+ }
+
+ kfree(sgt);
+ }
+ goto skb_copied;
+ }
+
+ /* otherwise fd->format == qm_fd_contig */
+ if (dpa_bp->vaddr) {
+ /* Fill the SKB */
+ memcpy(skb_put(skb, dpa_fd_length(fd)),
+ dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) +
+ dpa_fd_offset(fd), dpa_fd_length(fd));
+ } else {
+ copy_from_unmapped_area(skb_put(skb, dpa_fd_length(fd)),
+ qm_fd_addr(fd) + dpa_fd_offset(fd),
+ dpa_fd_length(fd));
+ }
+
+skb_copied:
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ /* IP Reassembled frames are allowed to be larger than MTU */
+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
+ !(fd->status & FM_FD_IPR))) {
+ percpu_priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
+		percpu_priv->stats.rx_packets++;
+		percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
+	}
+
+out:
+ if (fd->format == qm_fd_sg)
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+shared_tx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_bp *dpa_bp;
+ const struct qm_fd *fd = &dq->fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(!dpa_bp);
+
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_TX_ERRORS);
+
+ if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ percpu_priv->stats.tx_errors++;
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result __hot
+shared_tx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_bp *dpa_bp;
+ const struct qm_fd *fd = &dq->fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(!dpa_bp);
+
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+	if (unlikely((fd->status & FM_FD_STAT_TX_ERRORS) != 0)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_TX_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+ }
+
+ if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ percpu_priv->tx_confirm++;
+
+ return qman_cb_dqrr_consume;
+}
+
+static void shared_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ const struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_fq *dpa_fq = (struct dpa_fq *)fq;
+
+ net_dev = dpa_fq->net_dev;
+ priv = netdev_priv(net_dev);
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ dpa_fd_release(net_dev, &msg->ern.fd);
+
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+ count_ern(percpu_priv, msg);
+}
+
+int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ int queue_mapping;
+ int err;
+ void *dpa_bp_vaddr;
+ fm_prs_result_t parse_results;
+ fm_prs_result_t *parse_results_ref;
+ struct qman_fq *egress_fq, *conf_fq;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ memset(&fd, 0, sizeof(fd));
+ fd.format = qm_fd_contig;
+
+ queue_mapping = smp_processor_id();
+
+ dpa_bp = dpa_size2pool(priv, skb_headlen(skb));
+	if (unlikely(IS_ERR(dpa_bp))) {
+ percpu_priv->stats.tx_errors++;
+ err = PTR_ERR(dpa_bp);
+ goto bpools_too_small_error;
+ }
+
+ err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
+ if (unlikely(err <= 0)) {
+ percpu_priv->stats.tx_errors++;
+ if (err == 0)
+ err = -ENOMEM;
+ goto buf_acquire_failed;
+ }
+ fd.bpid = dpa_bp->bpid;
+
+ fd.length20 = skb_headlen(skb);
+ fd.addr_hi = (uint8_t)bmb.hi;
+ fd.addr_lo = bmb.lo;
+ fd.offset = priv->tx_headroom;
+
+ /* The virtual address of the buffer pool is expected to be NULL
+ * in scenarios like MAC-less or Shared-MAC between Linux and
+ * USDPAA. In this case the buffers are dynamically mapped/unmapped.
+ */
+ if (dpa_bp->vaddr) {
+ dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb));
+
+ /* Copy the packet payload */
+ skb_copy_from_linear_data(skb,
+ dpa_bp_vaddr + dpa_fd_offset(&fd),
+ dpa_fd_length(&fd));
+
+		/* No MAC device, or a peer is set: the interface is MAC-less */
+ if (!priv->mac_dev || priv->peer) {
+ parse_results_ref = (fm_prs_result_t *) (dpa_bp_vaddr +
+ DPA_TX_PRIV_DATA_SIZE);
+ /* Default values; FMan will not generate/validate
+ * CSUM;
+ */
+ parse_results_ref->l3r = 0;
+ parse_results_ref->l4r = 0;
+ parse_results_ref->ip_off[0] = 0xff;
+ parse_results_ref->ip_off[1] = 0xff;
+ parse_results_ref->l4_off = 0xff;
+
+ fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
+ } else {
+ /* Enable L3/L4 hardware checksum computation,
+ * if applicable
+ */
+ err = dpa_enable_tx_csum(priv, skb, &fd,
+ dpa_bp_vaddr + DPA_TX_PRIV_DATA_SIZE);
+
+			if (unlikely(err < 0)) {
+				if (netif_msg_tx_err(priv) && net_ratelimit())
+					netdev_err(net_dev,
+						"Tx HW csum error: %d\n", err);
+				percpu_priv->stats.tx_errors++;
+				/* return the acquired buffer to the pool */
+				while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+					cpu_relax();
+				goto l3_l4_csum_failed;
+			}
+ }
+
+ } else {
+ if (!priv->mac_dev || priv->peer) {
+ /* Default values; FMan will not generate/validate
+ * CSUM;
+ */
+ parse_results.l3r = 0;
+ parse_results.l4r = 0;
+ parse_results.ip_off[0] = 0xff;
+ parse_results.ip_off[1] = 0xff;
+ parse_results.l4_off = 0xff;
+
+ fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
+ } else {
+ /* Enable L3/L4 hardware checksum computation,
+ * if applicable
+ */
+ err = dpa_enable_tx_csum(priv, skb, &fd,
+ (char *)&parse_results);
+
+			if (unlikely(err < 0)) {
+				if (netif_msg_tx_err(priv) && net_ratelimit())
+					netdev_err(net_dev,
+						"Tx HW csum error: %d\n", err);
+				percpu_priv->stats.tx_errors++;
+				/* return the acquired buffer to the pool */
+				while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+					cpu_relax();
+				goto l3_l4_csum_failed;
+			}
+
+ }
+
+ copy_to_unmapped_area(bm_buf_addr(&bmb) + DPA_TX_PRIV_DATA_SIZE,
+ &parse_results,
+ DPA_PARSE_RESULTS_SIZE);
+
+ copy_to_unmapped_area(bm_buf_addr(&bmb) + dpa_fd_offset(&fd),
+ skb->data,
+ dpa_fd_length(&fd));
+ }
+
+ egress_fq = priv->egress_fqs[queue_mapping];
+ conf_fq = priv->conf_fqs[queue_mapping];
+
+ err = dpa_xmit(priv, &percpu_priv->stats, &fd, egress_fq, conf_fq);
+
+l3_l4_csum_failed:
+bpools_too_small_error:
+buf_acquire_failed:
+ /* We're done with the skb */
+ dev_kfree_skb(skb);
+
+ /* err remains unused, NETDEV_TX_OK must be returned here */
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL(dpa_shared_tx);
+
+static int dpa_shared_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ const uint8_t *mac_addr;
+
+ net_dev->netdev_ops = &dpa_shared_ops;
+
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ mac_addr = priv->mac_dev->addr;
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
+
+ return dpa_netdev_init(net_dev, mac_addr, shared_tx_timeout);
+}
+
+#ifdef CONFIG_PM
+
+static int dpa_shared_suspend(struct device *dev)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ int err = 0;
+
+ net_dev = dev_get_drvdata(dev);
+ if (net_dev->flags & IFF_UP) {
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ err = fm_port_suspend(mac_dev->port_dev[RX]);
+ if (err)
+ goto port_suspend_failed;
+
+ err = fm_port_suspend(mac_dev->port_dev[TX]);
+ if (err)
+ err = fm_port_resume(mac_dev->port_dev[RX]);
+ }
+
+port_suspend_failed:
+ return err;
+}
+
+static int dpa_shared_resume(struct device *dev)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ int err = 0;
+
+ net_dev = dev_get_drvdata(dev);
+ if (net_dev->flags & IFF_UP) {
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ err = fm_port_resume(mac_dev->port_dev[TX]);
+ if (err)
+ goto port_resume_failed;
+
+ err = fm_port_resume(mac_dev->port_dev[RX]);
+ if (err)
+ err = fm_port_suspend(mac_dev->port_dev[TX]);
+ }
+
+port_resume_failed:
+ return err;
+}
+
+static const struct dev_pm_ops shared_pm_ops = {
+ .suspend = dpa_shared_suspend,
+ .resume = dpa_shared_resume,
+};
+
+#define SHARED_PM_OPS (&shared_pm_ops)
+
+#else /* CONFIG_PM */
+
+#define SHARED_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
+static int
+dpaa_eth_shared_probe(struct platform_device *_of_dev)
+{
+ int err = 0, i, channel;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct dpa_fq *dpa_fq, *tmp;
+ size_t count;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct fm_port_fqs port_fqs;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+ struct mac_device *mac_dev;
+ struct task_struct *kth;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ /* Get the buffer pools assigned to this interface */
+ dpa_bp = dpa_bp_probe(_of_dev, &count);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+ for (i = 0; i < count; i++)
+ dpa_bp[i].seed_cb = dpa_bp_shared_port_seed;
+
+ /* Allocate this early, so we can store relevant information in
+ * the private area (needed by 1588 code in dpa_mac_probe)
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ strcpy(priv->if_type, "shared");
+
+ priv->msg_enable = netif_msg_init(advanced_debug, -1);
+
+ mac_dev = dpa_mac_probe(_of_dev);
+	if (IS_ERR_OR_NULL(mac_dev)) {
+		err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
+ goto mac_probe_failed;
+ }
+
+ /* We have physical ports, so we need to establish
+ * the buffer layout.
+ */
+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+ GFP_KERNEL);
+	if (!buf_layout) {
+		dev_err(dev, "devm_kzalloc() failed\n");
+		err = -ENOMEM;
+		goto alloc_failed;
+ }
+ dpa_set_buffers_layout(mac_dev, buf_layout);
+
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs,
+ false, RX);
+ if (!err)
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
+ &port_fqs, false, TX);
+ if (err < 0)
+ goto fq_probe_failed;
+
+ /* bp init */
+ priv->bp_count = count;
+ err = dpa_bp_create(net_dev, dpa_bp, count);
+ if (err < 0)
+ goto bp_create_failed;
+
+ priv->mac_dev = mac_dev;
+
+ channel = dpa_get_channel();
+
+ if (channel < 0) {
+ err = channel;
+ goto get_channel_failed;
+ }
+
+ priv->channel = (uint16_t)channel;
+
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", net_dev, priv->channel);
+	if (IS_ERR(kth)) {
+ err = -ENOMEM;
+ goto add_channel_failed;
+ }
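+	/* A minimal sketch (an assumption, not part of this patch) of what
+	 * a walker like dpaa_eth_add_channel() does: visit each CPU owning
+	 * an affine QMan portal and add the pool channel to that portal's
+	 * static dequeue mask:
+	 *
+	 *	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
+	 *	int cpu;
+	 *
+	 *	for_each_cpu(cpu, qman_affine_cpus()) {
+	 *		set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
+	 *		qman_static_dequeue_add(pool);
+	 *	}
+	 */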
+
+ dpa_fq_setup(priv, &shared_fq_cbs, priv->mac_dev->port_dev[TX]);
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+ * Must be executed after probing the MAC, but before
+ * assigning the egress FQs to the CGRs.
+ */
+ err = dpaa_eth_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing CGR\n");
+ goto cgr_init_failed;
+ }
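+	/* A minimal sketch, assuming the fsl_qman.h CGR API, of the steps an
+	 * init routine like dpaa_eth_cgr_init() performs (threshold constant
+	 * illustrative):
+	 *
+	 *	struct qm_mcc_initcgr initcgr = {};
+	 *
+	 *	err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+	 *	if (err < 0)
+	 *		return err;
+	 *	initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+	 *	initcgr.cgr.cscn_en = QM_CGR_EN;
+	 *	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres,
+	 *			      DPA_CS_THRESHOLD_1G);
+	 *	err = qman_create_cgr(&priv->cgr_data.cgr,
+	 *			      QMAN_CGR_FLAG_USE_INIT, &initcgr);
+	 */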
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+ err = dpa_fq_init(dpa_fq, false);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ priv->buf_layout = buf_layout;
+ priv->tx_headroom =
+ dpa_get_headroom(&priv->buf_layout[TX]);
+
+ /* All real interfaces need their ports initialized */
+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+ buf_layout, dev);
+
+ /* Now we need to initialize either a private or shared interface */
+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
+
+ if (priv->percpu_priv == NULL) {
+ dev_err(dev, "devm_alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ err = dpa_shared_netdev_init(dpa_node, net_dev);
+
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+ pr_info("fsl_dpa_shared: Probed shared interface %s\n",
+ net_dev->name);
+
+ return 0;
+
+netdev_init_failed:
+alloc_percpu_failed:
+fq_alloc_failed:
+ if (net_dev) {
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+		qman_delete_cgr(&priv->cgr_data.cgr);
+		qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ }
+cgr_init_failed:
+add_channel_failed:
+get_channel_failed:
+ if (net_dev)
+ dpa_bp_free(priv);
+bp_create_failed:
+fq_probe_failed:
+ devm_kfree(dev, buf_layout);
+alloc_failed:
+mac_probe_failed:
+ dev_set_drvdata(dev, NULL);
+ if (net_dev)
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static const struct of_device_id dpa_shared_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-shared"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_shared_match);
+
+static struct platform_driver dpa_shared_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME "-shared",
+ .of_match_table = dpa_shared_match,
+ .owner = THIS_MODULE,
+ .pm = SHARED_PM_OPS,
+ },
+ .probe = dpaa_eth_shared_probe,
+ .remove = dpa_remove
+};
+
+static int __init __cold dpa_shared_load(void)
+{
+ int _errno;
+
+ pr_info(DPA_DESCRIPTION "\n");
+
+ /* Initialize dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+
+ _errno = platform_driver_register(&dpa_shared_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(dpa_shared_load);
+
+static void __exit __cold dpa_shared_unload(void)
+{
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+
+ platform_driver_unregister(&dpa_shared_driver);
+}
+module_exit(dpa_shared_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
@@ -0,0 +1,278 @@
+/* Copyright 2008-2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#include <linux/of_net.h>
+#include "dpaa_eth.h"
+#include "mac.h" /* struct mac_device */
+#ifdef CONFIG_FSL_DPAA_1588
+#include "dpaa_1588.h"
+#endif
+
+static ssize_t dpaa_eth_show_addr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev)
+ return sprintf(buf, "%llx",
+ (unsigned long long)mac_dev->res->start);
+ else
+ return sprintf(buf, "none");
+}
+
+static ssize_t dpaa_eth_show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t res = 0;
+
+ if (priv)
+ res = sprintf(buf, "%s", priv->if_type);
+
+ return res;
+}
+
+static ssize_t dpaa_eth_show_fqids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t bytes = 0;
+ int i = 0;
+ char *str;
+ struct dpa_fq *fq;
+ struct dpa_fq *tmp;
+ struct dpa_fq *prev = NULL;
+ u32 first_fqid = 0;
+ u32 last_fqid = 0;
+ char *prevstr = NULL;
+
+ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ str = "Rx default";
+ break;
+ case FQ_TYPE_RX_ERROR:
+ str = "Rx error";
+ break;
+ case FQ_TYPE_RX_PCD:
+ str = "Rx PCD";
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ str = "Tx default confirmation";
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ str = "Tx confirmation (mq)";
+ break;
+ case FQ_TYPE_TX_ERROR:
+ str = "Tx error";
+ break;
+ case FQ_TYPE_TX:
+ str = "Tx";
+ break;
+ case FQ_TYPE_RX_PCD_HI_PRIO:
+			str = "Rx PCD High Priority";
+ break;
+ default:
+ str = "Unknown";
+ }
+
+ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
+ str != prevstr)) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes,
+ "%s: %d\n", prevstr, prev->fqid);
+ else
+ bytes += sprintf(buf + bytes,
+ "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
+ last_fqid = fq->fqid;
+ else
+ first_fqid = last_fqid = fq->fqid;
+
+ prev = fq;
+ prevstr = str;
+ i++;
+ }
+
+ if (prev) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
+ prev->fqid);
+ else
+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ return bytes;
+}
+
+static ssize_t dpaa_eth_show_bpids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t bytes = 0;
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ int i = 0;
+
+	for (i = 0; i < priv->bp_count; i++)
+		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+				dpa_bp[i].bpid);
+
+ return bytes;
+}
+
+static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+ int n = 0;
+
+ if (mac_dev)
+ n = fm_mac_dump_regs(mac_dev, buf, n);
+ else
+ return sprintf(buf, "no mac registers\n");
+
+ return n;
+}
+
+static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+ int n = 0;
+
+ if (mac_dev)
+ n = fm_mac_dump_rx_stats(mac_dev, buf, n);
+ else
+ return sprintf(buf, "no mac rx stats\n");
+
+ return n;
+}
+
+static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+ int n = 0;
+
+ if (mac_dev)
+ n = fm_mac_dump_tx_stats(mac_dev, buf, n);
+ else
+ return sprintf(buf, "no mac tx stats\n");
+
+ return n;
+}
+
+#ifdef CONFIG_FSL_DPAA_1588
+static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+
+ if (priv->tsu && priv->tsu->valid)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ unsigned int num;
+ unsigned long flags;
+
+ if (kstrtouint(buf, 0, &num) < 0)
+ return -EINVAL;
+
+ local_irq_save(flags);
+
+ if (num) {
+ if (priv->tsu)
+ priv->tsu->valid = TRUE;
+ } else {
+ if (priv->tsu)
+ priv->tsu->valid = FALSE;
+ }
+
+ local_irq_restore(flags);
+
+ return count;
+}
+#endif
+
+static struct device_attribute dpaa_eth_attrs[] = {
+ __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
+ __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
+ __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
+ __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
+ __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
+ __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
+ __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
+#ifdef CONFIG_FSL_DPAA_1588
+ __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
+ dpaa_eth_set_ptp_1588),
+#endif
+};
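+
+/* With the standard sysfs layout, these attributes surface under the
+ * interface's class directory, e.g. (interface name assumed):
+ *
+ *	cat /sys/class/net/eth0/fqids
+ *	cat /sys/class/net/eth0/bpids
+ */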
+
+void dpaa_eth_sysfs_init(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+ if (device_create_file(dev, &dpaa_eth_attrs[i])) {
+ dev_err(dev, "Error creating sysfs file\n");
+ while (i > 0)
+ device_remove_file(dev, &dpaa_eth_attrs[--i]);
+ return;
+ }
+}
+EXPORT_SYMBOL(dpaa_eth_sysfs_init);
+
+void dpaa_eth_sysfs_remove(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+ device_remove_file(dev, &dpaa_eth_attrs[i]);
+}
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
@@ -0,0 +1,144 @@
+/* Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpaa_eth
+
+#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "dpaa_eth.h"
+#include <linux/tracepoint.h>
+
+#define fd_format_name(format) { qm_fd_##format, #format }
+#define fd_format_list \
+ fd_format_name(contig), \
+ fd_format_name(sg)
+#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
+ " status=0x%08x"
+
+/* This is used to declare a class of events.
+ * Individual events of this type are defined below.
+ */
+
+/* Store details about a frame descriptor and the FQ on which it was
+ * transmitted/received.
+ */
+DECLARE_EVENT_CLASS(dpaa_eth_fd,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, fq, fd),
+
+ /* A structure containing the relevant information we want to record.
+ * Declare name and type for each normal element, name, type and size
+ * for arrays. Use __string for variable length strings.
+ */
+ TP_STRUCT__entry(
+ __field(u32, fqid)
+ __field(u64, fd_addr)
+ __field(u8, fd_format)
+ __field(u16, fd_offset)
+ __field(u32, fd_length)
+ __field(u32, fd_status)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared fields */
+ TP_fast_assign(
+ __entry->fqid = fq->fqid;
+ __entry->fd_addr = qm_fd_addr_get64(fd);
+ __entry->fd_format = fd->format;
+ __entry->fd_offset = dpa_fd_offset(fd);
+ __entry->fd_length = dpa_fd_length(fd);
+ __entry->fd_status = fd->status;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is triggered */
+ /* TODO: print the status using __print_flags() */
+ TP_printk(TR_FMT,
+ __get_str(name), __entry->fqid, __entry->fd_addr,
+ __print_symbolic(__entry->fd_format, fd_format_list),
+ __entry->fd_offset, __entry->fd_length, __entry->fd_status)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
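+
+/* When a .c file defines CREATE_TRACE_POINTS before including this header,
+ * the definitions above expand into trace_dpa_tx_fd(), trace_dpa_rx_fd()
+ * and trace_dpa_tx_conf_fd(); a hot-path caller then fires, e.g.:
+ *
+ *	trace_dpa_tx_fd(priv->net_dev, egress_fq, &fd);
+ */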
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpaa_eth_trace
+#include <trace/define_trace.h>
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
@@ -0,0 +1,544 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/string.h>
+
+#include "dpaa_eth.h"
+#include "mac.h" /* struct mac_device */
+#include "dpaa_eth_common.h"
+
+static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
+ "interrupts",
+ "rx packets",
+ "tx packets",
+ "tx recycled",
+ "tx confirm",
+ "tx S/G",
+ "rx S/G",
+ "tx error",
+ "rx error",
+ "bp count"
+};
+
+static char dpa_stats_global[][ETH_GSTRING_LEN] = {
+ /* dpa rx errors */
+ "rx dma error",
+ "rx frame physical error",
+ "rx frame size error",
+ "rx header error",
+ "rx csum error",
+
+ /* demultiplexing errors */
+ "qman cg_tdrop",
+ "qman wred",
+ "qman error cond",
+ "qman early window",
+ "qman late window",
+ "qman fq tdrop",
+ "qman fq retired",
+ "qman orp disabled",
+
+ /* congestion related stats */
+ "congestion time (ms)",
+ "entered congestion",
+ "congested (0/1)"
+};
+
+#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
+#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
+
+static int __cold dpa_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *et_cmd)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ netdev_dbg(net_dev, "phy device not initialized\n");
+ return 0;
+ }
+
+ _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
+ if (unlikely(_errno < 0))
+ netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
+
+ return _errno;
+}
+
+static int __cold dpa_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *et_cmd)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
+ if (unlikely(_errno < 0))
+ netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
+
+ return _errno;
+}
+
+static void __cold dpa_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ int _errno;
+
+ strncpy(drvinfo->driver, KBUILD_MODNAME,
+ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
+ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%X", 0);
+
+ if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
+ /* Truncated output */
+ netdev_notice(net_dev, "snprintf() = %d\n", _errno);
+ } else if (unlikely(_errno < 0)) {
+ netdev_warn(net_dev, "snprintf() = %d\n", _errno);
+ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
+ }
+ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
+}
+
+static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
+{
+ return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
+}
+
+static void __cold dpa_set_msglevel(struct net_device *net_dev,
+ uint32_t msg_enable)
+{
+ ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
+}
+
+static int __cold dpa_nway_reset(struct net_device *net_dev)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ _errno = 0;
+ if (priv->mac_dev->phy_dev->autoneg) {
+ _errno = phy_start_aneg(priv->mac_dev->phy_dev);
+ if (unlikely(_errno < 0))
+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
+ _errno);
+ }
+
+ return _errno;
+}
+
+static void __cold dpa_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ struct phy_device *phy_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ if (mac_dev == NULL) {
+ netdev_info(net_dev, "This is a MAC-less interface\n");
+ return;
+ }
+
+ phy_dev = mac_dev->phy_dev;
+ if (unlikely(phy_dev == NULL)) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return;
+ }
+
+ epause->autoneg = mac_dev->autoneg_pause;
+ epause->rx_pause = mac_dev->rx_pause_active;
+ epause->tx_pause = mac_dev->tx_pause_active;
+}
+
+static int __cold dpa_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ struct phy_device *phy_dev;
+ int _errno;
+ u32 newadv, oldadv;
+ bool rx_pause, tx_pause;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ if (mac_dev == NULL) {
+ netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+
+ phy_dev = mac_dev->phy_dev;
+ if (unlikely(phy_dev == NULL)) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ if (!(phy_dev->supported & SUPPORTED_Pause) ||
+ (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ /* The MAC should know how to handle PAUSE frame autonegotiation before
+ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
+ * settings.
+ */
+ mac_dev->autoneg_pause = !!epause->autoneg;
+ mac_dev->rx_pause_req = !!epause->rx_pause;
+ mac_dev->tx_pause_req = !!epause->tx_pause;
+
+	/* Determine the sym/asym advertised PAUSE capabilities from the
+	 * desired rx/tx pause settings: per IEEE 802.3 Annex 28B, Rx pause
+	 * requires advertising both Pause and Asym_Pause, while Tx-only
+	 * pause is advertised through Asym_Pause alone.
+	 */
+ newadv = 0;
+ if (epause->rx_pause)
+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ if (epause->tx_pause)
+ newadv |= ADVERTISED_Asym_Pause;
+
+ oldadv = phy_dev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ /* If there are differences between the old and the new advertised
+ * values, restart PHY autonegotiation and advertise the new values.
+ */
+ if (oldadv != newadv) {
+ phy_dev->advertising &= ~(ADVERTISED_Pause
+ | ADVERTISED_Asym_Pause);
+ phy_dev->advertising |= newadv;
+ if (phy_dev->autoneg) {
+ _errno = phy_start_aneg(phy_dev);
+ if (unlikely(_errno < 0))
+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
+ _errno);
+ }
+ }
+
+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (unlikely(_errno < 0))
+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
+
+ return _errno;
+}
+
+#ifdef CONFIG_PM
+static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
+ return;
+
+ if (priv->wol & DPAA_WOL_MAGIC) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = WAKE_MAGIC;
+ }
+}
+
+static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ netdev_dbg(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ if (!device_can_wakeup(net_dev->dev.parent) ||
+ (wol->wolopts & ~WAKE_MAGIC))
+ return -EOPNOTSUPP;
+
+ priv->wol = 0;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ priv->wol = DPAA_WOL_MAGIC;
+ device_set_wakeup_enable(net_dev->dev.parent, 1);
+ } else {
+ device_set_wakeup_enable(net_dev->dev.parent, 0);
+ }
+
+ return 0;
+}
+#endif
+
+static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
+{
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+ if (priv->mac_dev == NULL) {
+ netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
+}
+
+static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
+{
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+ if (priv->mac_dev == NULL) {
+ netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
+}
+
+static int dpa_get_sset_count(struct net_device *net_dev, int type)
+{
+ unsigned int total_stats, num_stats;
+
+ num_stats = num_online_cpus() + 1;
+ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
+
+ switch (type) {
+ case ETH_SS_STATS:
+ return total_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
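+
+/* The per-CPU counters are laid out row-major in the ethtool data array:
+ * one row per entry of dpa_stats_percpu[], one column per online CPU plus
+ * a trailing TOTAL column, so
+ *
+ *	data[stat * (num_cpus + 1) + cpu]
+ *
+ * lines up with the strings emitted by dpa_get_strings() below.
+ */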
+
+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
+ int crr_cpu, u64 bp_count, u64 *data)
+{
+ int num_stat_values = num_cpus + 1;
+ int crr_stat = 0;
+
+ /* update current CPU's stats and also add them to the total values */
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
+
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
+
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
+
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
+
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
+
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
+
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
+
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
+
+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
+
+ data[crr_stat * num_stat_values + crr_cpu] = bp_count;
+ data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
+}
+
+static void dpa_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 bp_count, cg_time, cg_num, cg_status;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct qm_mcr_querycgr query_cgr;
+ struct dpa_rx_errors rx_errors;
+ struct dpa_ern_cnt ern_cnt;
+ struct dpa_priv_s *priv;
+ unsigned int num_cpus, offset;
+ struct dpa_bp *dpa_bp;
+ int total_stats, i;
+
+ total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
+ priv = netdev_priv(net_dev);
+ dpa_bp = priv->dpa_bp;
+ num_cpus = num_online_cpus();
+ bp_count = 0;
+
+ memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
+ memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
+ memset(data, 0, total_stats * sizeof(u64));
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ if (dpa_bp->percpu_count)
+ bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
+
+ rx_errors.dme += percpu_priv->rx_errors.dme;
+ rx_errors.fpe += percpu_priv->rx_errors.fpe;
+ rx_errors.fse += percpu_priv->rx_errors.fse;
+ rx_errors.phe += percpu_priv->rx_errors.phe;
+ rx_errors.cse += percpu_priv->rx_errors.cse;
+
+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
+ ern_cnt.wred += percpu_priv->ern_cnt.wred;
+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
+
+ copy_stats(percpu_priv, num_cpus, i, bp_count, data);
+ }
+
+ offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
+ memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
+
+ offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
+ memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
+
+ /* gather congestion related counters */
+ cg_num = 0;
+ cg_status = 0;
+ cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
+ if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
+ cg_num = priv->cgr_data.cgr_congested_count;
+ cg_status = query_cgr.cgr.cs;
+
+		/* reset congestion stats (like the QMan API does) */
+ priv->cgr_data.congested_jiffies = 0;
+ priv->cgr_data.cgr_congested_count = 0;
+ }
+
+ offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
+ data[offset++] = cg_time;
+ data[offset++] = cg_num;
+ data[offset++] = cg_status;
+}
+
+static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
+{
+ unsigned int i, j, num_cpus, size;
+ char stat_string_cpu[ETH_GSTRING_LEN];
+ u8 *strings;
+
+ strings = data;
+ num_cpus = num_online_cpus();
+ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+
+	for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
+		for (j = 0; j < num_cpus; j++) {
+			snprintf(stat_string_cpu, ETH_GSTRING_LEN,
+				 "%s [CPU %d]", dpa_stats_percpu[i], j);
+			memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
+			strings += ETH_GSTRING_LEN;
+		}
+		snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+			 dpa_stats_percpu[i]);
+		memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
+		strings += ETH_GSTRING_LEN;
+	}
+ memcpy(strings, dpa_stats_global, size);
+}
+
+const struct ethtool_ops dpa_ethtool_ops = {
+ .get_settings = dpa_get_settings,
+ .set_settings = dpa_set_settings,
+ .get_drvinfo = dpa_get_drvinfo,
+ .get_msglevel = dpa_get_msglevel,
+ .set_msglevel = dpa_set_msglevel,
+ .nway_reset = dpa_nway_reset,
+ .get_pauseparam = dpa_get_pauseparam,
+ .set_pauseparam = dpa_set_pauseparam,
+ .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
+ .get_link = ethtool_op_get_link,
+ .get_eee = dpa_get_eee,
+ .set_eee = dpa_set_eee,
+ .get_sset_count = dpa_get_sset_count,
+ .get_ethtool_stats = dpa_get_ethtool_stats,
+ .get_strings = dpa_get_strings,
+#ifdef CONFIG_PM
+ .get_wol = dpa_get_wol,
+ .set_wol = dpa_set_wol,
+#endif
+};
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
@@ -0,0 +1,286 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/string.h>
+
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_eth_generic.h"
+
+static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
+ "interrupts",
+ "rx packets",
+ "tx packets",
+ "tx recycled",
+ "tx confirm",
+ "tx S/G",
+ "rx S/G (N/A)",
+ "tx error",
+ "rx error",
+ "bp count",
+ "bp draining count"
+};
+
+static char dpa_stats_global[][ETH_GSTRING_LEN] = {
+ /* dpa rx errors */
+ "rx dma error",
+ "rx frame physical error",
+ "rx frame size error",
+ "rx header error",
+ "rx csum error",
+
+ /* demultiplexing errors */
+ "qman cg_tdrop",
+ "qman wred",
+ "qman error cond",
+ "qman early window",
+ "qman late window",
+ "qman fq tdrop",
+ "qman fq retired",
+ "qman orp disabled",
+};
+
+#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
+#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
+
+static int __cold dpa_generic_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *et_cmd)
+{
+ netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
+ return -ENODEV;
+}
+
+static int __cold dpa_generic_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *et_cmd)
+{
+ netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
+ return -ENODEV;
+}
+
+static void __cold dpa_generic_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ int _errno;
+
+ strncpy(drvinfo->driver, KBUILD_MODNAME,
+ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
+ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%X", 0);
+
+ if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
+ /* Truncated output */
+ netdev_notice(net_dev, "snprintf() = %d\n", _errno);
+ } else if (unlikely(_errno < 0)) {
+ netdev_warn(net_dev, "snprintf() = %d\n", _errno);
+ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
+ }
+ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
+}
+
+static uint32_t __cold dpa_generic_get_msglevel(struct net_device *net_dev)
+{
+ return ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable;
+}
+
+static void __cold dpa_generic_set_msglevel(struct net_device *net_dev,
+ uint32_t msg_enable)
+{
+ ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable =
+ msg_enable;
+}
+
+static int __cold dpa_generic_nway_reset(struct net_device *net_dev)
+{
+ netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
+ return -ENODEV;
+}
+
+static int dpa_generic_get_sset_count(struct net_device *net_dev, int type)
+{
+ unsigned int total_stats, num_stats;
+
+ num_stats = num_online_cpus() + 1;
+ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
+
+ switch (type) {
+ case ETH_SS_STATS:
+ return total_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv,
+ int num_cpus, int crr_cpu, u64 bp_count,
+ u64 bp_drain_count, u64 *data)
+{
+ int num_values = num_cpus + 1;
+ int crr = 0;
+
+ /* update current CPU's stats and also add them to the total values */
+ data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
+ data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_returned;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
+	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
+
+	/* "rx S/G" is not applicable here; skip over its (zeroed) row so the
+	 * values stay aligned with the labels in dpa_generic_get_strings()
+	 */
+	crr++;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
+
+ data[crr * num_values + crr_cpu] = bp_count;
+ data[crr++ * num_values + num_cpus] += bp_count;
+
+ data[crr * num_values + crr_cpu] = bp_drain_count;
+ data[crr++ * num_values + num_cpus] += bp_drain_count;
+}
+
+static void dpa_generic_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_bp *dpa_bp, *drain_bp;
+ struct dpa_generic_priv_s *priv;
+ struct dpa_rx_errors rx_errors;
+ struct dpa_ern_cnt ern_cnt;
+ unsigned int num_cpus, offset;
+ u64 bp_cnt, drain_cnt;
+ int total_stats, i;
+
+ total_stats = dpa_generic_get_sset_count(net_dev, ETH_SS_STATS);
+ priv = netdev_priv(net_dev);
+ drain_bp = priv->draining_tx_bp;
+ dpa_bp = priv->rx_bp;
+ num_cpus = num_online_cpus();
+ drain_cnt = 0;
+ bp_cnt = 0;
+
+ memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
+ memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
+ memset(data, 0, total_stats * sizeof(u64));
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ if (dpa_bp->percpu_count)
+ bp_cnt = *(per_cpu_ptr(dpa_bp->percpu_count, i));
+
+ if (drain_bp->percpu_count)
+ drain_cnt = *(per_cpu_ptr(drain_bp->percpu_count, i));
+
+ rx_errors.dme += percpu_priv->rx_errors.dme;
+ rx_errors.fpe += percpu_priv->rx_errors.fpe;
+ rx_errors.fse += percpu_priv->rx_errors.fse;
+ rx_errors.phe += percpu_priv->rx_errors.phe;
+ rx_errors.cse += percpu_priv->rx_errors.cse;
+
+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
+ ern_cnt.wred += percpu_priv->ern_cnt.wred;
+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
+
+ copy_stats(percpu_priv, num_cpus, i, bp_cnt, drain_cnt, data);
+ }
+
+ offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
+ memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
+
+ offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
+ memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
+}
+
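+/* The order of the strings must match the order in which copy_stats()
+ * and dpa_generic_get_ethtool_stats() lay down the values.
+ */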
+static void dpa_generic_get_strings(struct net_device *net_dev,
+ u32 stringset, u8 *data)
+{
+ unsigned int i, j, num_cpus, size;
+ char string_cpu[ETH_GSTRING_LEN];
+ u8 *strings;
+
+ strings = data;
+ num_cpus = num_online_cpus();
+ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+
+ for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
+ dpa_stats_percpu[i], j);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+ dpa_stats_percpu[i]);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ memcpy(strings, dpa_stats_global, size);
+}
+
+const struct ethtool_ops dpa_generic_ethtool_ops = {
+ .get_settings = dpa_generic_get_settings,
+ .set_settings = dpa_generic_set_settings,
+ .get_drvinfo = dpa_generic_get_drvinfo,
+ .get_msglevel = dpa_generic_get_msglevel,
+ .set_msglevel = dpa_generic_set_msglevel,
+ .nway_reset = dpa_generic_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_sset_count = dpa_generic_get_sset_count,
+ .get_ethtool_stats = dpa_generic_get_ethtool_stats,
+ .get_strings = dpa_generic_get_strings,
+};
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
@@ -0,0 +1,250 @@
+/* Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/string.h>
+
+#include "dpaa_eth.h"
+#include "dpaa_eth_macsec.h"
+
+static const char dpa_macsec_stats_percpu[][ETH_GSTRING_LEN] = {
+ "interrupts",
+ "rx packets",
+ "tx packets",
+ "tx recycled",
+ "tx confirm",
+ "tx S/G",
+ "rx S/G",
+ "tx error",
+ "rx error",
+ "bp count",
+ "tx macsec",
+ "rx macsec"
+};
+
+static char dpa_macsec_stats_global[][ETH_GSTRING_LEN] = {
+ /* dpa rx errors */
+ "rx dma error",
+ "rx frame physical error",
+ "rx frame size error",
+ "rx header error",
+ "rx csum error",
+
+ /* demultiplexing errors */
+ "qman cg_tdrop",
+ "qman wred",
+ "qman error cond",
+ "qman early window",
+ "qman late window",
+ "qman fq tdrop",
+ "qman fq retired",
+ "qman orp disabled",
+
+ /* congestion related stats */
+ "congestion time (ms)",
+ "entered congestion",
+ "congested (0/1)"
+};
+
+#define DPA_MACSEC_STATS_PERCPU_LEN ARRAY_SIZE(dpa_macsec_stats_percpu)
+#define DPA_MACSEC_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_macsec_stats_global)
+
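+/* Same per-CPU/TOTAL matrix layout as the generic ethtool stats, with
+ * two extra per-CPU rows for the tx/rx MACsec frame counters.
+ */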
+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
+ int crr_cpu, u64 bp_count, u64 tx_macsec,
+ u64 rx_macsec, u64 *data)
+{
+ int num_values = num_cpus + 1;
+ int crr = 0;
+
+ /* update current CPU's stats and also add them to the total values */
+ data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
+ data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_returned;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->rx_sg;
+ data[crr++ * num_values + num_cpus] += percpu_priv->rx_sg;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
+
+ data[crr * num_values + crr_cpu] = bp_count;
+ data[crr++ * num_values + num_cpus] += bp_count;
+
+ data[crr * num_values + crr_cpu] = tx_macsec;
+ data[crr++ * num_values + num_cpus] += tx_macsec;
+
+ data[crr * num_values + crr_cpu] = rx_macsec;
+ data[crr++ * num_values + num_cpus] += rx_macsec;
+}
+
+int dpa_macsec_get_sset_count(struct net_device *net_dev, int type)
+{
+ unsigned int total_stats, num_stats;
+
+ num_stats = num_online_cpus() + 1;
+ total_stats = num_stats * DPA_MACSEC_STATS_PERCPU_LEN +
+ DPA_MACSEC_STATS_GLOBAL_LEN;
+
+ switch (type) {
+ case ETH_SS_STATS:
+ return total_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void dpa_macsec_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 bp_count, bp_total, cg_time, cg_num, cg_status;
+ struct macsec_percpu_priv_s *percpu_priv_macsec;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct macsec_priv_s *macsec_priv;
+ struct qm_mcr_querycgr query_cgr;
+ struct dpa_rx_errors rx_errors;
+ struct dpa_ern_cnt ern_cnt;
+ struct dpa_priv_s *priv;
+ unsigned int num_cpus, offset;
+ struct dpa_bp *dpa_bp;
+ int total_stats, i;
+
+ macsec_priv = dpa_macsec_get_priv(net_dev);
+ if (unlikely(!macsec_priv)) {
+ pr_err("selected macsec_priv is NULL\n");
+ return;
+ }
+
+ total_stats = dpa_macsec_get_sset_count(net_dev, ETH_SS_STATS);
+ priv = netdev_priv(net_dev);
+ dpa_bp = priv->dpa_bp;
+ num_cpus = num_online_cpus();
+ bp_count = 0;
+ bp_total = 0;
+
+ memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
+ memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
+ memset(data, 0, total_stats * sizeof(u64));
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ percpu_priv_macsec = per_cpu_ptr(macsec_priv->percpu_priv, i);
+
+ if (dpa_bp->percpu_count)
+ bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
+
+ rx_errors.dme += percpu_priv->rx_errors.dme;
+ rx_errors.fpe += percpu_priv->rx_errors.fpe;
+ rx_errors.fse += percpu_priv->rx_errors.fse;
+ rx_errors.phe += percpu_priv->rx_errors.phe;
+ rx_errors.cse += percpu_priv->rx_errors.cse;
+
+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
+ ern_cnt.wred += percpu_priv->ern_cnt.wred;
+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
+
+ copy_stats(percpu_priv, num_cpus, i, bp_count,
+ percpu_priv_macsec->tx_macsec,
+ percpu_priv_macsec->rx_macsec,
+ data);
+ }
+
+ offset = (num_cpus + 1) * DPA_MACSEC_STATS_PERCPU_LEN;
+ memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
+
+ offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
+ memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
+
+ /* gather congestion related counters */
+ cg_num = 0;
+ cg_status = 0;
+ cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
+ if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
+ cg_num = priv->cgr_data.cgr_congested_count;
+ cg_status = query_cgr.cgr.cs;
+
+		/* reset congestion stats (like the QMan API does) */
+ priv->cgr_data.congested_jiffies = 0;
+ priv->cgr_data.cgr_congested_count = 0;
+ }
+
+ offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
+ data[offset++] = cg_time;
+ data[offset++] = cg_num;
+ data[offset++] = cg_status;
+}
+
+void dpa_macsec_get_strings(struct net_device *net_dev,
+ u32 stringset, u8 *data)
+{
+ unsigned int i, j, num_cpus, size;
+ char string_cpu[ETH_GSTRING_LEN];
+ u8 *strings;
+
+ strings = data;
+ num_cpus = num_online_cpus();
+ size = DPA_MACSEC_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+
+ for (i = 0; i < DPA_MACSEC_STATS_PERCPU_LEN; i++) {
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
+ dpa_macsec_stats_percpu[i], j);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+ dpa_macsec_stats_percpu[i]);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ memcpy(strings, dpa_macsec_stats_global, size);
+}
+
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
@@ -0,0 +1,287 @@
+/*
+ * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
+ *
+ * Author: Yangbo Lu <yangbo.lu@freescale.com>
+ *
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <linux/ptp_clock_kernel.h>
+
+#include "dpaa_eth.h"
+#include "mac.h"
+
+struct ptp_clock *clock;
+
+static struct mac_device *mac_dev;
+static u32 freqCompensation;
+
+/* Bit definitions for the TMR_CTRL register */
+#define ALM1P (1<<31) /* Alarm1 output polarity */
+#define ALM2P (1<<30) /* Alarm2 output polarity */
+#define FS (1<<28) /* FIPER start indication */
+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
+#define TCLK_PERIOD_MASK (0x3ff)
+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
+#define FRD (1<<14) /* FIPER Realignment Disable */
+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
+#define COPH (1<<7) /* Generated clock output phase. */
+#define CIPH (1<<6) /* External oscillator input clock phase */
+#define TMSR (1<<5) /* Timer soft reset. */
+#define BYP (1<<3) /* Bypass drift compensated clock */
+#define TE (1<<2) /* 1588 timer enable. */
+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
+#define CKSEL_MASK (0x3)
+
+/* Bit definitions for the TMR_TEVENT register */
+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
+
+/* Bit definitions for the TMR_TEMASK register */
+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
+
+/* Bit definitions for the TMR_PEVENT register */
+#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
+#define RXP (1<<0) /* PTP frame has been received */
+
+/* Bit definitions for the TMR_PEMASK register */
+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
+#define RXPEN (1<<0) /* Receive PTP packet event enable */
+
+/* Bit definitions for the TMR_STAT register */
+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
+#define STAT_VEC_MASK (0x3f)
+
+/* Bit definitions for the TMR_PRSC register */
+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
+#define PRSC_OCK_MASK (0xffff)
+
+
+#define N_EXT_TS 2
+
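+/* Arm the alarm on the next whole-second boundary: advance the current
+ * count by 1.5 s, round down to a full second, then back off by one
+ * nominal clock period so the FIPER pulse lines up with the 1 PPS edge.
+ */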
+static void set_alarm(void)
+{
+ u64 ns;
+
+ if (mac_dev->fm_rtc_get_cnt)
+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
+ ns += 1500000000ULL;
+ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
+ ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
+ if (mac_dev->fm_rtc_set_alarm)
+ mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
+}
+
+static void set_fipers(void)
+{
+ u64 fiper;
+
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(mac_dev->fm_dev);
+
+ set_alarm();
+ fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
+ if (mac_dev->fm_rtc_set_fiper)
+ mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
+
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(mac_dev->fm_dev);
+}
+
+/* PTP clock operations */
+
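+/* Scale the nominal drift compensation value by |ppb| parts per billion
+ * and program the result, adding or subtracting the delta according to
+ * the sign of the requested adjustment.
+ */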
+static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, tmr_add;
+ int neg_adj = 0;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ tmr_add = freqCompensation;
+ adj = tmr_add;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+
+ if (mac_dev->fm_rtc_set_drift)
+ mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
+
+ return 0;
+}
+
+static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	u64 now;
+
+ if (mac_dev->fm_rtc_get_cnt)
+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
+
+ now += delta;
+
+ if (mac_dev->fm_rtc_set_cnt)
+ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
+ set_fipers();
+
+ return 0;
+}
+
+static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ u64 ns;
+ u32 remainder;
+
+ if (mac_dev->fm_rtc_get_cnt)
+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+ return 0;
+}
+
+static int ptp_dpa_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ u64 ns;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ if (mac_dev->fm_rtc_set_cnt)
+ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
+ set_fipers();
+ return 0;
+}
+
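+/* External timestamp requests map onto the ETS1/ETS2 trigger interrupts;
+ * the PPS request maps onto the FIPER1 periodic pulse interrupt.
+ */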
+static int ptp_dpa_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ u32 bit;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = ETS1EN;
+ break;
+ case 1:
+ bit = ETS2EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (on) {
+ if (mac_dev->fm_rtc_enable_interrupt)
+ mac_dev->fm_rtc_enable_interrupt(
+ mac_dev->fm_dev, bit);
+ } else {
+ if (mac_dev->fm_rtc_disable_interrupt)
+ mac_dev->fm_rtc_disable_interrupt(
+ mac_dev->fm_dev, bit);
+ }
+ return 0;
+
+ case PTP_CLK_REQ_PPS:
+ if (on) {
+ if (mac_dev->fm_rtc_enable_interrupt)
+ mac_dev->fm_rtc_enable_interrupt(
+ mac_dev->fm_dev, PP1EN);
+ } else {
+ if (mac_dev->fm_rtc_disable_interrupt)
+ mac_dev->fm_rtc_disable_interrupt(
+ mac_dev->fm_dev, PP1EN);
+ }
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_dpa_caps = {
+ .owner = THIS_MODULE,
+ .name = "dpaa clock",
+ .max_adj = 512000,
+ .n_alarm = 0,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
+ .pps = 1,
+ .adjfreq = ptp_dpa_adjfreq,
+ .adjtime = ptp_dpa_adjtime,
+ .gettime = ptp_dpa_gettime,
+ .settime = ptp_dpa_settime,
+ .enable = ptp_dpa_enable,
+};
+
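+/* On module load, snapshot the current drift compensation as the baseline
+ * for adjfreq, set the 1588 timer to wall-clock time and register the
+ * PTP clock.
+ */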
+static int __init __cold dpa_ptp_load(void)
+{
+ struct device *ptp_dev;
+ struct timespec now;
+ int dpa_phc_index;
+ int err;
+
+ ptp_dev = &ptp_priv.of_dev->dev;
+ mac_dev = ptp_priv.mac_dev;
+
+ if (mac_dev->fm_rtc_get_drift)
+ mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
+
+ getnstimeofday(&now);
+ ptp_dpa_settime(&ptp_dpa_caps, &now);
+
+ clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
+ if (IS_ERR(clock)) {
+ err = PTR_ERR(clock);
+ return err;
+ }
+ dpa_phc_index = ptp_clock_index(clock);
+ return 0;
+}
+module_init(dpa_ptp_load);
+
+static void __exit __cold dpa_ptp_unload(void)
+{
+ if (mac_dev->fm_rtc_disable_interrupt)
+ mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
+ ptp_clock_unregister(clock);
+}
+module_exit(dpa_ptp_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
@@ -0,0 +1,915 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#include "dpaa_eth.h"
+#include "mac.h"
+#include "lnxwrp_fsl_fman.h"
+
+#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
+
+#include "fsl_fman_dtsec.h"
+#include "fsl_fman_tgec.h"
+#include "fsl_fman_memac.h"
+#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
+
+#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
+
+MODULE_DESCRIPTION(MAC_DESCRIPTION);
+
+struct mac_priv_s {
+ struct fm_mac_dev *fm_mac;
+};
+
+const char *mac_driver_description __initconst = MAC_DESCRIPTION;
+const size_t mac_sizeof_priv[] = {
+ [DTSEC] = sizeof(struct mac_priv_s),
+ [XGMAC] = sizeof(struct mac_priv_s),
+ [MEMAC] = sizeof(struct mac_priv_s)
+};
+
+static const enet_mode_t _100[] = {
+ [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
+ [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
+};
+
+static const enet_mode_t _1000[] = {
+ [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
+ [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
+ [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
+ [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
+ [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
+};
+
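+/* Map the MAC's maximum speed and PHY interface type onto the FMD enet
+ * mode enum; unknown combinations fall back to 100 Mbit/s MII.
+ */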
+static enet_mode_t __cold __attribute__((nonnull))
+macdev2enetinterface(const struct mac_device *mac_dev)
+{
+ switch (mac_dev->max_speed) {
+ case SPEED_100:
+ return _100[mac_dev->phy_if];
+ case SPEED_1000:
+ return _1000[mac_dev->phy_if];
+ case SPEED_2500:
+ return e_ENET_MODE_SGMII_2500;
+ case SPEED_10000:
+ return e_ENET_MODE_XGMII_10000;
+ default:
+ return e_ENET_MODE_MII_100;
+ }
+}
+
+static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
+{
+ struct mac_device *mac_dev;
+
+ mac_dev = (struct mac_device *)_mac_dev;
+
+ if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
+ /* don't flag RX FIFO after the first */
+ fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
+ e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
+ exception);
+ }
+
+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
+ exception);
+}
+
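+/* Common dTSEC/TGEC initialization: configure an FMD MAC object from the
+ * device-tree parameters, apply the 1G (pad/CRC, half-duplex) or 10G
+ * (reset-on-init) specifics, then initialize the MAC.
+ */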
+static int __cold init(struct mac_device *mac_dev)
+{
+ int _errno;
+ struct mac_priv_s *priv;
+ t_FmMacParams param;
+ uint32_t version;
+
+ priv = macdev_priv(mac_dev);
+
+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
+ mac_dev->dev, mac_dev->res->start, 0x2000);
+ param.enetMode = macdev2enetinterface(mac_dev);
+ memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
+ sizeof(mac_dev->addr)));
+ param.macId = mac_dev->cell_index;
+ param.h_Fm = (handle_t)mac_dev->fm;
+ param.mdioIrq = NO_IRQ;
+ param.f_Exception = mac_exception;
+ param.f_Event = mac_exception;
+ param.h_App = mac_dev;
+
+ priv->fm_mac = fm_mac_config(&param);
+ if (unlikely(priv->fm_mac == NULL)) {
+ _errno = -EINVAL;
+ goto _return;
+ }
+
+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
+ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
+
+ _errno = fm_mac_config_max_frame_length(priv->fm_mac,
+ fm_get_max_frm());
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+
+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
+ /* 10G always works with pad and CRC */
+ _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+
+ _errno = fm_mac_config_half_duplex(priv->fm_mac,
+ mac_dev->half_duplex);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+ } else {
+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+ }
+
+ _errno = fm_mac_init(priv->fm_mac);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+
+#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
+ e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+ }
+#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
+
+ /* For 10G MAC, disable Tx ECC exception */
+ if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
+ e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+ }
+
+ _errno = fm_mac_get_version(priv->fm_mac, &version);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+
+ dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
+ ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
+ "dTSEC" : "XGEC"), version);
+
+ goto _return;
+
+_return_fm_mac_free:
+ fm_mac_free(mac_dev->get_mac_handle(mac_dev));
+
+_return:
+ return _errno;
+}
+
+static int __cold memac_init(struct mac_device *mac_dev)
+{
+ int _errno;
+ struct mac_priv_s *priv;
+ t_FmMacParams param;
+
+ priv = macdev_priv(mac_dev);
+
+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
+ mac_dev->dev, mac_dev->res->start, 0x2000);
+ param.enetMode = macdev2enetinterface(mac_dev);
+ memcpy(&param.addr, mac_dev->addr, sizeof(mac_dev->addr));
+ param.macId = mac_dev->cell_index;
+ param.h_Fm = (handle_t)mac_dev->fm;
+ param.mdioIrq = NO_IRQ;
+ param.f_Exception = mac_exception;
+ param.f_Event = mac_exception;
+ param.h_App = mac_dev;
+
+ priv->fm_mac = fm_mac_config(&param);
+ if (unlikely(priv->fm_mac == NULL)) {
+ _errno = -EINVAL;
+ goto _return;
+ }
+
+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
+ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
+
+ _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+
+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+
+ _errno = fm_mac_init(priv->fm_mac);
+ if (unlikely(_errno < 0))
+ goto _return_fm_mac_free;
+
+ dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+ goto _return;
+
+_return_fm_mac_free:
+ fm_mac_free(priv->fm_mac);
+
+_return:
+ return _errno;
+}
+
+static int __cold start(struct mac_device *mac_dev)
+{
+ int _errno;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+
+ _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
+
+ if (!_errno && phy_dev) {
+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)
+ phy_start(phy_dev);
+ else if (phy_dev->drv->read_status)
+ phy_dev->drv->read_status(phy_dev);
+ }
+
+ return _errno;
+}
+
+static int __cold stop(struct mac_device *mac_dev)
+{
+ if (mac_dev->phy_dev &&
+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000))
+ phy_stop(mac_dev->phy_dev);
+
+ return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
+}
+
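+/* Keep a shadow list of the multicast addresses currently programmed, so
+ * that the previous filter set can be removed from the MAC hash table
+ * before the new one from the net_device is installed.
+ */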
+static int __cold set_multi(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ struct mac_priv_s *mac_priv;
+ struct mac_address *old_addr, *tmp;
+ struct netdev_hw_addr *ha;
+ int _errno;
+
+ mac_priv = macdev_priv(mac_dev);
+
+ /* Clear previous address list */
+ list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
+ _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
+ (t_EnetAddr *)old_addr->addr);
+ if (_errno < 0)
+ return _errno;
+
+ list_del(&old_addr->list);
+ kfree(old_addr);
+ }
+
+ /* Add all the addresses from the new list */
+ netdev_for_each_mc_addr(ha, net_dev) {
+ _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
+ (t_EnetAddr *)ha->addr);
+ if (_errno < 0)
+ return _errno;
+
+ tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
+ if (!tmp) {
+ dev_err(mac_dev->dev, "Out of memory\n");
+ return -ENOMEM;
+ }
+ memcpy(tmp->addr, ha->addr, ETH_ALEN);
+ list_add(&tmp->list, &mac_dev->mc_addr_list);
+ }
+ return 0;
+}
+
+/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
+ * active PAUSE settings. Otherwise, the new active settings should be reflected
+ * in FMan.
+ */
+int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
+{
+ struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
+ int _errno = 0;
+
+ if (unlikely(rx != mac_dev->rx_pause_active)) {
+ _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
+ if (likely(_errno == 0))
+ mac_dev->rx_pause_active = rx;
+ }
+
+ if (unlikely(tx != mac_dev->tx_pause_active)) {
+ _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
+ if (likely(_errno == 0))
+ mac_dev->tx_pause_active = tx;
+ }
+
+ return _errno;
+}
+EXPORT_SYMBOL(set_mac_active_pause);
+
+/* Determine the MAC RX/TX PAUSE frames settings based on PHY
+ * autonegotiation or values set by ethtool.
+ */
+void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
+{
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+
+ *rx_pause = *tx_pause = false;
+
+ if (!phy_dev->duplex)
+ return;
+
+ /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
+ * are those set by ethtool.
+ */
+ if (!mac_dev->autoneg_pause) {
+ *rx_pause = mac_dev->rx_pause_req;
+ *tx_pause = mac_dev->tx_pause_req;
+ return;
+ }
+
+ /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
+ * settings depend on the result of the link negotiation.
+ */
+
+ /* get local capabilities */
+ lcl_adv = 0;
+ if (phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phy_dev->pause)
+ rmt_adv |= LPA_PAUSE_CAP;
+ if (phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ /* Calculate TX/RX settings based on local and peer advertised
+ * symmetric/asymmetric PAUSE capabilities.
+ */
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_RX)
+ *rx_pause = true;
+ if (flowctrl & FLOW_CTRL_TX)
+ *tx_pause = true;
+}
+EXPORT_SYMBOL(get_pause_cfg);
+
+static void adjust_link(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fm_mac_dev *fm_mac_dev;
+ bool rx_pause, tx_pause;
+ int _errno;
+
+ fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
+ fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
+ phy_dev->duplex);
+
+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (unlikely(_errno < 0))
+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int dtsec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ struct phy_device *phy_dev;
+
+ if (!mac_dev->phy_node)
+ phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
+ &adjust_link, mac_dev->phy_if);
+ else
+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
+ &adjust_link, 0, mac_dev->phy_if);
+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
+ netdev_err(net_dev, "Could not connect to PHY %s\n",
+ mac_dev->phy_node ?
+ mac_dev->phy_node->full_name :
+ mac_dev->fixed_bus_id);
+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
+ }
+
+ /* Remove any features not supported by the controller */
+ phy_dev->supported &= mac_dev->if_support;
+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
+ * as most of the PHY drivers do not enable them by default.
+ */
+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phy_dev->advertising = phy_dev->supported;
+
+ mac_dev->phy_dev = phy_dev;
+
+ return 0;
+}
+
+static int xgmac_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ struct phy_device *phy_dev;
+
+ if (!mac_dev->phy_node)
+ phy_dev = phy_attach(net_dev, mac_dev->fixed_bus_id,
+ mac_dev->phy_if);
+ else
+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
+ mac_dev->phy_if);
+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
+ netdev_err(net_dev, "Could not attach to PHY %s\n",
+ mac_dev->phy_node ?
+ mac_dev->phy_node->full_name :
+ mac_dev->fixed_bus_id);
+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
+ }
+
+ phy_dev->supported &= mac_dev->if_support;
+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
+ * as most of the PHY drivers do not enable them by default.
+ */
+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phy_dev->advertising = phy_dev->supported;
+
+ mac_dev->phy_dev = phy_dev;
+
+ return 0;
+}
+
+static int memac_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ struct phy_device *phy_dev;
+
+ if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
+	    (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)) {
+		if (!mac_dev->phy_node) {
+			mac_dev->phy_dev = NULL;
+			return 0;
+		}
+		phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
+					mac_dev->phy_if);
+ } else {
+ if (!mac_dev->phy_node)
+ phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
+ &adjust_link, mac_dev->phy_if);
+ else
+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
+ &adjust_link, 0,
+ mac_dev->phy_if);
+ }
+
+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
+ netdev_err(net_dev, "Could not connect to PHY %s\n",
+ mac_dev->phy_node ?
+ mac_dev->phy_node->full_name :
+ mac_dev->fixed_bus_id);
+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
+ }
+
+ /* Remove any features not supported by the controller */
+ phy_dev->supported &= mac_dev->if_support;
+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
+ * as most of the PHY drivers do not enable them by default.
+ */
+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phy_dev->advertising = phy_dev->supported;
+
+ mac_dev->phy_dev = phy_dev;
+
+ return 0;
+}
+
+static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
+{
+ int _errno, __errno;
+
+ _errno = fm_mac_disable(fm_mac_dev);
+ __errno = fm_mac_free(fm_mac_dev);
+
+ if (unlikely(__errno < 0))
+ _errno = __errno;
+
+ return _errno;
+}
+
+static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
+{
+ const struct mac_priv_s *priv;
+ priv = macdev_priv(mac_dev);
+ return priv->fm_mac;
+}
+
+static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
+{
+ struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
+ int i = 0, n = nn;
+
+ FM_DMP_SUBTITLE(buf, n, "\n");
+
+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
+
+ FM_DMP_V32(buf, n, p_mm, tsec_id);
+ FM_DMP_V32(buf, n, p_mm, tsec_id2);
+ FM_DMP_V32(buf, n, p_mm, ievent);
+ FM_DMP_V32(buf, n, p_mm, imask);
+ FM_DMP_V32(buf, n, p_mm, ecntrl);
+ FM_DMP_V32(buf, n, p_mm, ptv);
+ FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
+ FM_DMP_V32(buf, n, p_mm, tmr_pevent);
+ FM_DMP_V32(buf, n, p_mm, tmr_pemask);
+ FM_DMP_V32(buf, n, p_mm, tctrl);
+ FM_DMP_V32(buf, n, p_mm, rctrl);
+ FM_DMP_V32(buf, n, p_mm, maccfg1);
+ FM_DMP_V32(buf, n, p_mm, maccfg2);
+ FM_DMP_V32(buf, n, p_mm, ipgifg);
+ FM_DMP_V32(buf, n, p_mm, hafdup);
+ FM_DMP_V32(buf, n, p_mm, maxfrm);
+
+ FM_DMP_V32(buf, n, p_mm, macstnaddr1);
+ FM_DMP_V32(buf, n, p_mm, macstnaddr2);
+
+ for (i = 0; i < 7; ++i) {
+ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
+ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
+ }
+
+ FM_DMP_V32(buf, n, p_mm, car1);
+ FM_DMP_V32(buf, n, p_mm, car2);
+
+ return n;
+}
+
+static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
+{
+ struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
+ int n = nn;
+
+ FM_DMP_SUBTITLE(buf, n, "\n");
+	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC-%d", h_mac->cell_index);
+
+ FM_DMP_V32(buf, n, p_mm, tgec_id);
+ FM_DMP_V32(buf, n, p_mm, command_config);
+ FM_DMP_V32(buf, n, p_mm, mac_addr_0);
+ FM_DMP_V32(buf, n, p_mm, mac_addr_1);
+ FM_DMP_V32(buf, n, p_mm, maxfrm);
+ FM_DMP_V32(buf, n, p_mm, pause_quant);
+ FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
+ FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
+ FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
+ FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
+ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
+ FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
+ FM_DMP_V32(buf, n, p_mm, mdio_command);
+ FM_DMP_V32(buf, n, p_mm, mdio_data);
+ FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
+ FM_DMP_V32(buf, n, p_mm, status);
+ FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
+ FM_DMP_V32(buf, n, p_mm, mac_addr_2);
+ FM_DMP_V32(buf, n, p_mm, mac_addr_3);
+ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
+ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
+ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
+ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
+ FM_DMP_V32(buf, n, p_mm, imask);
+ FM_DMP_V32(buf, n, p_mm, ievent);
+
+ return n;
+}
+
+static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
+{
+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
+ int i = 0, n = nn;
+
+ FM_DMP_SUBTITLE(buf, n, "\n");
+	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d", h_mac->cell_index);
+
+ FM_DMP_V32(buf, n, p_mm, command_config);
+ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
+ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
+ FM_DMP_V32(buf, n, p_mm, maxfrm);
+ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
+ FM_DMP_V32(buf, n, p_mm, ievent);
+ FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
+ FM_DMP_V32(buf, n, p_mm, imask);
+
+ for (i = 0; i < 4; ++i)
+ FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
+
+ for (i = 0; i < 4; ++i)
+ FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
+
+ FM_DMP_V32(buf, n, p_mm, rx_pause_status);
+
+ for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
+ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
+ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
+ }
+
+ FM_DMP_V32(buf, n, p_mm, lpwake_timer);
+ FM_DMP_V32(buf, n, p_mm, sleep_timer);
+ FM_DMP_V32(buf, n, p_mm, statn_config);
+ FM_DMP_V32(buf, n, p_mm, if_mode);
+ FM_DMP_V32(buf, n, p_mm, if_status);
+ FM_DMP_V32(buf, n, p_mm, hg_config);
+ FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
+ FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
+ FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
+ FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
+ FM_DMP_V32(buf, n, p_mm, rhm);
+ FM_DMP_V32(buf, n, p_mm, thm);
+
+ return n;
+}
+
+static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
+{
+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
+ int n = nn;
+
+ FM_DMP_SUBTITLE(buf, n, "\n");
+	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d Rx stats", h_mac->cell_index);
+
+ /* Rx Statistics Counter */
+ FM_DMP_V32(buf, n, p_mm, reoct_l);
+ FM_DMP_V32(buf, n, p_mm, reoct_u);
+ FM_DMP_V32(buf, n, p_mm, roct_l);
+ FM_DMP_V32(buf, n, p_mm, roct_u);
+ FM_DMP_V32(buf, n, p_mm, raln_l);
+ FM_DMP_V32(buf, n, p_mm, raln_u);
+ FM_DMP_V32(buf, n, p_mm, rxpf_l);
+ FM_DMP_V32(buf, n, p_mm, rxpf_u);
+ FM_DMP_V32(buf, n, p_mm, rfrm_l);
+ FM_DMP_V32(buf, n, p_mm, rfrm_u);
+ FM_DMP_V32(buf, n, p_mm, rfcs_l);
+ FM_DMP_V32(buf, n, p_mm, rfcs_u);
+ FM_DMP_V32(buf, n, p_mm, rvlan_l);
+ FM_DMP_V32(buf, n, p_mm, rvlan_u);
+ FM_DMP_V32(buf, n, p_mm, rerr_l);
+ FM_DMP_V32(buf, n, p_mm, rerr_u);
+ FM_DMP_V32(buf, n, p_mm, ruca_l);
+ FM_DMP_V32(buf, n, p_mm, ruca_u);
+ FM_DMP_V32(buf, n, p_mm, rmca_l);
+ FM_DMP_V32(buf, n, p_mm, rmca_u);
+ FM_DMP_V32(buf, n, p_mm, rbca_l);
+ FM_DMP_V32(buf, n, p_mm, rbca_u);
+ FM_DMP_V32(buf, n, p_mm, rdrp_l);
+ FM_DMP_V32(buf, n, p_mm, rdrp_u);
+ FM_DMP_V32(buf, n, p_mm, rpkt_l);
+ FM_DMP_V32(buf, n, p_mm, rpkt_u);
+ FM_DMP_V32(buf, n, p_mm, rund_l);
+ FM_DMP_V32(buf, n, p_mm, rund_u);
+ FM_DMP_V32(buf, n, p_mm, r64_l);
+ FM_DMP_V32(buf, n, p_mm, r64_u);
+ FM_DMP_V32(buf, n, p_mm, r127_l);
+ FM_DMP_V32(buf, n, p_mm, r127_u);
+ FM_DMP_V32(buf, n, p_mm, r255_l);
+ FM_DMP_V32(buf, n, p_mm, r255_u);
+ FM_DMP_V32(buf, n, p_mm, r511_l);
+ FM_DMP_V32(buf, n, p_mm, r511_u);
+ FM_DMP_V32(buf, n, p_mm, r1023_l);
+ FM_DMP_V32(buf, n, p_mm, r1023_u);
+ FM_DMP_V32(buf, n, p_mm, r1518_l);
+ FM_DMP_V32(buf, n, p_mm, r1518_u);
+ FM_DMP_V32(buf, n, p_mm, r1519x_l);
+ FM_DMP_V32(buf, n, p_mm, r1519x_u);
+ FM_DMP_V32(buf, n, p_mm, rovr_l);
+ FM_DMP_V32(buf, n, p_mm, rovr_u);
+ FM_DMP_V32(buf, n, p_mm, rjbr_l);
+ FM_DMP_V32(buf, n, p_mm, rjbr_u);
+ FM_DMP_V32(buf, n, p_mm, rfrg_l);
+ FM_DMP_V32(buf, n, p_mm, rfrg_u);
+ FM_DMP_V32(buf, n, p_mm, rcnp_l);
+ FM_DMP_V32(buf, n, p_mm, rcnp_u);
+ FM_DMP_V32(buf, n, p_mm, rdrntp_l);
+ FM_DMP_V32(buf, n, p_mm, rdrntp_u);
+
+ return n;
+}
+
+static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
+{
+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
+ int n = nn;
+
+ FM_DMP_SUBTITLE(buf, n, "\n");
+	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d Tx stats", h_mac->cell_index);
+
+ /* Tx Statistics Counter */
+ FM_DMP_V32(buf, n, p_mm, teoct_l);
+ FM_DMP_V32(buf, n, p_mm, teoct_u);
+ FM_DMP_V32(buf, n, p_mm, toct_l);
+ FM_DMP_V32(buf, n, p_mm, toct_u);
+ FM_DMP_V32(buf, n, p_mm, txpf_l);
+ FM_DMP_V32(buf, n, p_mm, txpf_u);
+ FM_DMP_V32(buf, n, p_mm, tfrm_l);
+ FM_DMP_V32(buf, n, p_mm, tfrm_u);
+ FM_DMP_V32(buf, n, p_mm, tfcs_l);
+ FM_DMP_V32(buf, n, p_mm, tfcs_u);
+ FM_DMP_V32(buf, n, p_mm, tvlan_l);
+ FM_DMP_V32(buf, n, p_mm, tvlan_u);
+ FM_DMP_V32(buf, n, p_mm, terr_l);
+ FM_DMP_V32(buf, n, p_mm, terr_u);
+ FM_DMP_V32(buf, n, p_mm, tuca_l);
+ FM_DMP_V32(buf, n, p_mm, tuca_u);
+ FM_DMP_V32(buf, n, p_mm, tmca_l);
+ FM_DMP_V32(buf, n, p_mm, tmca_u);
+ FM_DMP_V32(buf, n, p_mm, tbca_l);
+ FM_DMP_V32(buf, n, p_mm, tbca_u);
+ FM_DMP_V32(buf, n, p_mm, tpkt_l);
+ FM_DMP_V32(buf, n, p_mm, tpkt_u);
+ FM_DMP_V32(buf, n, p_mm, tund_l);
+ FM_DMP_V32(buf, n, p_mm, tund_u);
+ FM_DMP_V32(buf, n, p_mm, t64_l);
+ FM_DMP_V32(buf, n, p_mm, t64_u);
+ FM_DMP_V32(buf, n, p_mm, t127_l);
+ FM_DMP_V32(buf, n, p_mm, t127_u);
+ FM_DMP_V32(buf, n, p_mm, t255_l);
+ FM_DMP_V32(buf, n, p_mm, t255_u);
+ FM_DMP_V32(buf, n, p_mm, t511_l);
+ FM_DMP_V32(buf, n, p_mm, t511_u);
+ FM_DMP_V32(buf, n, p_mm, t1023_l);
+ FM_DMP_V32(buf, n, p_mm, t1023_u);
+ FM_DMP_V32(buf, n, p_mm, t1518_l);
+ FM_DMP_V32(buf, n, p_mm, t1518_u);
+ FM_DMP_V32(buf, n, p_mm, t1519x_l);
+ FM_DMP_V32(buf, n, p_mm, t1519x_u);
+ FM_DMP_V32(buf, n, p_mm, tcnp_l);
+ FM_DMP_V32(buf, n, p_mm, tcnp_u);
+
+ return n;
+}
+
+int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
+{
+ int n = nn;
+
+ n = h_mac->dump_mac_regs(h_mac, buf, n);
+
+ return n;
+}
+EXPORT_SYMBOL(fm_mac_dump_regs);
+
+int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
+{
+ int n = nn;
+
+	if (h_mac->dump_mac_rx_stats)
+ n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
+
+ return n;
+}
+EXPORT_SYMBOL(fm_mac_dump_rx_stats);
+
+int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
+{
+ int n = nn;
+
+	if (h_mac->dump_mac_tx_stats)
+ n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
+
+ return n;
+}
+EXPORT_SYMBOL(fm_mac_dump_tx_stats);
+
+static void __cold setup_dtsec(struct mac_device *mac_dev)
+{
+ mac_dev->init_phy = dtsec_init_phy;
+ mac_dev->init = init;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+ mac_dev->set_promisc = fm_mac_set_promiscuous;
+ mac_dev->change_addr = fm_mac_modify_mac_addr;
+ mac_dev->set_multi = set_multi;
+ mac_dev->uninit = uninit;
+ mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
+ mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
+ mac_dev->get_mac_handle = get_mac_handle;
+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
+ mac_dev->fm_rtc_enable = fm_rtc_enable;
+ mac_dev->fm_rtc_disable = fm_rtc_disable;
+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
+ mac_dev->set_wol = fm_mac_set_wol;
+ mac_dev->dump_mac_regs = dtsec_dump_regs;
+}
+
+static void __cold setup_xgmac(struct mac_device *mac_dev)
+{
+ mac_dev->init_phy = xgmac_init_phy;
+ mac_dev->init = init;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+ mac_dev->set_promisc = fm_mac_set_promiscuous;
+ mac_dev->change_addr = fm_mac_modify_mac_addr;
+ mac_dev->set_multi = set_multi;
+ mac_dev->uninit = uninit;
+ mac_dev->get_mac_handle = get_mac_handle;
+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
+ mac_dev->set_wol = fm_mac_set_wol;
+ mac_dev->dump_mac_regs = xgmac_dump_regs;
+}
+
+static void __cold setup_memac(struct mac_device *mac_dev)
+{
+ mac_dev->init_phy = memac_init_phy;
+ mac_dev->init = memac_init;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+ mac_dev->set_promisc = fm_mac_set_promiscuous;
+ mac_dev->change_addr = fm_mac_modify_mac_addr;
+ mac_dev->set_multi = set_multi;
+ mac_dev->uninit = uninit;
+ mac_dev->get_mac_handle = get_mac_handle;
+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
+ mac_dev->fm_rtc_enable = fm_rtc_enable;
+ mac_dev->fm_rtc_disable = fm_rtc_disable;
+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
+ mac_dev->set_wol = fm_mac_set_wol;
+ mac_dev->dump_mac_regs = memac_dump_regs;
+ mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
+ mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
+}
+
+void (*const mac_setup[])(struct mac_device *mac_dev) = {
+ [DTSEC] = setup_dtsec,
+ [XGMAC] = setup_xgmac,
+ [MEMAC] = setup_memac
+};
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
@@ -0,0 +1,470 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+
+#include "lnxwrp_fm_ext.h"
+
+#include "mac.h"
+
+#define DTSEC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg \
+ | SUPPORTED_Pause \
+ | SUPPORTED_Asym_Pause \
+ | SUPPORTED_MII)
+
+static const char phy_str[][11] = {
+ [PHY_INTERFACE_MODE_MII] = "mii",
+ [PHY_INTERFACE_MODE_GMII] = "gmii",
+ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
+ [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
+ [PHY_INTERFACE_MODE_TBI] = "tbi",
+ [PHY_INTERFACE_MODE_RMII] = "rmii",
+ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
+ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
+ [PHY_INTERFACE_MODE_XGMII] = "xgmii",
+ [PHY_INTERFACE_MODE_QSGMII] = "sgmii-2500"
+};
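+
+/* Note: PHY_INTERFACE_MODE_QSGMII is initialized twice, both here and in
+ * phy2speed below; with designated initializers the later "sgmii-2500"
+ * entry is the one that takes effect for that slot.
+ */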
+
+static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phy_str); i++)
+ if (strcmp(str, phy_str[i]) == 0)
+ return (phy_interface_t)i;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+static const uint16_t phy2speed[] = {
+ [PHY_INTERFACE_MODE_MII] = SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_2500
+};
+
+static struct mac_device * __cold
+alloc_macdev(struct device *dev, size_t sizeof_priv,
+ void (*setup)(struct mac_device *mac_dev))
+{
+ struct mac_device *mac_dev;
+
+ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
+ if (unlikely(mac_dev == NULL))
+ mac_dev = ERR_PTR(-ENOMEM);
+ else {
+ mac_dev->dev = dev;
+ dev_set_drvdata(dev, mac_dev);
+ setup(mac_dev);
+ }
+
+ return mac_dev;
+}
+
+static int __cold free_macdev(struct mac_device *mac_dev)
+{
+ dev_set_drvdata(mac_dev->dev, NULL);
+
+ return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
+}
+
+static const struct of_device_id mac_match[] = {
+ [DTSEC] = {
+ .compatible = "fsl,fman-1g-mac"
+ },
+ [XGMAC] = {
+ .compatible = "fsl,fman-10g-mac"
+ },
+ [MEMAC] = {
+ .compatible = "fsl,fman-memac"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mac_match);
+
+static int __cold mac_probe(struct platform_device *_of_dev)
+{
+ int _errno, i;
+ struct device *dev;
+ struct device_node *mac_node, *dev_node;
+ struct mac_device *mac_dev;
+ struct platform_device *of_dev;
+ struct resource res;
+ const uint8_t *mac_addr;
+	const char *char_prop = NULL;
+ int nph;
+ u32 cell_index;
+ const struct of_device_id *match;
+
+ dev = &_of_dev->dev;
+ mac_node = dev->of_node;
+
+ match = of_match_device(mac_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
+ i++)
+ ;
+ BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
+
+ mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
+ if (IS_ERR(mac_dev)) {
+ _errno = PTR_ERR(mac_dev);
+ dev_err(dev, "alloc_macdev() = %d\n", _errno);
+ goto _return;
+ }
+
+ INIT_LIST_HEAD(&mac_dev->mc_addr_list);
+
+ /* Get the FM node */
+ dev_node = of_get_parent(mac_node);
+ if (unlikely(dev_node == NULL)) {
+ dev_err(dev, "of_get_parent(%s) failed\n",
+ mac_node->full_name);
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (unlikely(of_dev == NULL)) {
+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
+ dev_node->full_name);
+ _errno = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->fm_dev = fm_bind(&of_dev->dev);
+ if (unlikely(mac_dev->fm_dev == NULL)) {
+ dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
+ _errno = -ENODEV;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
+ of_node_put(dev_node);
+
+ /* Get the address of the memory mapped registers */
+ _errno = of_address_to_resource(mac_node, 0, &res);
+ if (unlikely(_errno < 0)) {
+ dev_err(dev, "of_address_to_resource(%s) = %d\n",
+ mac_node->full_name, _errno);
+ goto _return_dev_set_drvdata;
+ }
+
+ mac_dev->res = __devm_request_region(
+ dev,
+ fm_get_mem_region(mac_dev->fm_dev),
+ res.start, res.end + 1 - res.start, "mac");
+ if (unlikely(mac_dev->res == NULL)) {
+		dev_err(dev, "__devm_request_region(mac) failed\n");
+ _errno = -EBUSY;
+ goto _return_dev_set_drvdata;
+ }
+
+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ mac_dev->res->end + 1
+ - mac_dev->res->start);
+ if (unlikely(mac_dev->vaddr == NULL)) {
+ dev_err(dev, "devm_ioremap() failed\n");
+ _errno = -EIO;
+ goto _return_dev_set_drvdata;
+ }
+
+#define TBIPA_OFFSET 0x1c
+#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
+ mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (mac_dev->tbi_node) {
+ u32 tbiaddr = TBIPA_DEFAULT_ADDR;
+ const __be32 *tbi_reg;
+ void __iomem *addr;
+
+ tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
+ if (tbi_reg)
+ tbiaddr = be32_to_cpup(tbi_reg);
+ addr = mac_dev->vaddr + TBIPA_OFFSET;
+ /* TODO: out_be32 does not exist on ARM */
+ out_be32(addr, tbiaddr);
+ }
+
+ if (!of_device_is_available(mac_node)) {
+ devm_iounmap(dev, mac_dev->vaddr);
+ __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
+ res.start, res.end + 1 - res.start);
+ fm_unbind(mac_dev->fm_dev);
+ devm_kfree(dev, mac_dev);
+ dev_set_drvdata(dev, NULL);
+ return -ENODEV;
+ }
+
+ /* Get the cell-index */
+ _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
+ if (unlikely(_errno)) {
+ dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
+ mac_node->full_name);
+ goto _return_dev_set_drvdata;
+ }
+ mac_dev->cell_index = (uint8_t)cell_index;
+
+ /* Get the MAC address */
+ mac_addr = of_get_mac_address(mac_node);
+ if (unlikely(mac_addr == NULL)) {
+ dev_err(dev, "of_get_mac_address(%s) failed\n",
+ mac_node->full_name);
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+
+ /* Verify the number of port handles */
+ nph = of_count_phandle_with_args(mac_node, "fsl,port-handles", NULL);
+ if (unlikely(nph < 0)) {
+ dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
+ mac_node->full_name);
+ _errno = nph;
+ goto _return_dev_set_drvdata;
+ }
+
+ if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
+ dev_err(dev, "Not supported number of port handles of mac node %s from device tree\n",
+ mac_node->full_name);
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+
+ for_each_port_device(i, mac_dev->port_dev) {
+ dev_node = of_parse_phandle(mac_node, "fsl,port-handles", i);
+ if (unlikely(dev_node == NULL)) {
+ dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
+ mac_node->full_name);
+ _errno = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (unlikely(of_dev == NULL)) {
+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
+ dev_node->full_name);
+ _errno = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
+ if (unlikely(mac_dev->port_dev[i] == NULL)) {
+			dev_err(dev, "fm_port_bind(%s) failed\n",
+ dev_node->full_name);
+ _errno = -EINVAL;
+ goto _return_of_node_put;
+ }
+ of_node_put(dev_node);
+ }
+
+ /* Get the PHY connection type */
+ _errno = of_property_read_string(mac_node, "phy-connection-type",
+ &char_prop);
+ if (unlikely(_errno)) {
+ dev_warn(dev,
+ "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
+ mac_node->full_name);
+ mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
+ } else
+ mac_dev->phy_if = str2phy(char_prop);
+
+ mac_dev->link = false;
+ mac_dev->half_duplex = false;
+ mac_dev->speed = phy2speed[mac_dev->phy_if];
+ mac_dev->max_speed = mac_dev->speed;
+ mac_dev->if_support = DTSEC_SUPPORTED;
+	/* We don't support half-duplex in the (Q)SGMII modes; note that
+	 * the "sgmii" match below also covers "sgmii-2500"
+	 */
+	if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii"))
+		mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
+					SUPPORTED_100baseT_Half);
+
+ /* Gigabit support (no half-duplex) */
+ if (mac_dev->max_speed == 1000)
+ mac_dev->if_support |= SUPPORTED_1000baseT_Full;
+
+ /* The 10G interface only supports one mode */
+ if (strstr(char_prop, "xgmii"))
+ mac_dev->if_support = SUPPORTED_10000baseT_Full;
+
+ /* Get the rest of the PHY information */
+ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
+ if (mac_dev->phy_node == NULL) {
+ u32 phy_id;
+
+ _errno = of_property_read_u32(mac_node, "fixed-link", &phy_id);
+ if (_errno) {
+ dev_err(dev, "No PHY (or fixed link) found\n");
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+
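+		/* e.g. a fixed-link phy_id of 3 yields the bus id
+		 * "fixed-0:03" (PHY_ID_FMT is "%s:%02x")
+		 */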
+ sprintf(mac_dev->fixed_bus_id, PHY_ID_FMT, "fixed-0",
+ phy_id);
+ }
+
+ _errno = mac_dev->init(mac_dev);
+ if (unlikely(_errno < 0)) {
+ dev_err(dev, "mac_dev->init() = %d\n", _errno);
+ goto _return_dev_set_drvdata;
+ }
+
+	/* pause frame autonegotiation enabled */
+	mac_dev->autoneg_pause = true;
+
+	/* by initializing the active values to false, we force FMD to
+	 * enable PAUSE frames on both RX and TX
+	 */
+ mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
+ mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
+ _errno = set_mac_active_pause(mac_dev, true, true);
+ if (unlikely(_errno < 0))
+ dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
+
+ dev_info(dev,
+ "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
+
+ goto _return;
+
+_return_of_node_put:
+ of_node_put(dev_node);
+_return_dev_set_drvdata:
+ dev_set_drvdata(dev, NULL);
+_return:
+ return _errno;
+}
+
+static int __cold mac_remove(struct platform_device *of_dev)
+{
+ int i, _errno;
+ struct device *dev;
+ struct mac_device *mac_dev;
+
+ dev = &of_dev->dev;
+	mac_dev = dev_get_drvdata(dev);
+
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_unbind(mac_dev->port_dev[i]);
+
+ fm_unbind(mac_dev->fm_dev);
+
+ _errno = free_macdev(mac_dev);
+
+ return _errno;
+}
+
+static struct platform_driver mac_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = mac_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = mac_probe,
+ .remove = mac_remove
+};
+
+static int __init __cold mac_load(void)
+{
+ int _errno;
+
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+
+ pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
+
+	_errno = platform_driver_register(&mac_driver);
+	if (unlikely(_errno < 0))
+		pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
+		       KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(mac_load);
+
+static void __exit __cold mac_unload(void)
+{
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+
+ platform_driver_unregister(&mac_driver);
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(mac_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
@@ -0,0 +1,134 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MAC_H
+#define __MAC_H
+
+#include <linux/device.h> /* struct device, BUS_ID_SIZE */
+#include <linux/if_ether.h> /* ETH_ALEN */
+#include <linux/phy.h> /* phy_interface_t, struct phy_device */
+#include <linux/list.h>
+
+#include "lnxwrp_fsl_fman.h" /* struct port_device */
+
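+/* FMan MAC flavours: dTSEC (1G), XGMAC (10G) and mEMAC (multirate,
+ * FMan v3)
+ */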
+enum {DTSEC, XGMAC, MEMAC};
+
+struct mac_device {
+ struct device *dev;
+ void *priv;
+ uint8_t cell_index;
+ struct resource *res;
+ void __iomem *vaddr;
+ uint8_t addr[ETH_ALEN];
+ bool promisc;
+
+ struct fm *fm_dev;
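+	/* each MAC is backed by one RX and one TX FMan port */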
+ struct fm_port *port_dev[2];
+
+ phy_interface_t phy_if;
+ u32 if_support;
+ bool link;
+ bool half_duplex;
+ uint16_t speed;
+ uint16_t max_speed;
+ struct device_node *phy_node;
+ char fixed_bus_id[MII_BUS_ID_SIZE + 3];
+ struct device_node *tbi_node;
+ struct phy_device *phy_dev;
+ void *fm;
+ /* List of multicast addresses */
+ struct list_head mc_addr_list;
+
+ bool autoneg_pause;
+ bool rx_pause_req;
+ bool tx_pause_req;
+ bool rx_pause_active;
+ bool tx_pause_active;
+
+ struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
+ int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+ int (*init)(struct mac_device *mac_dev);
+ int (*start)(struct mac_device *mac_dev);
+ int (*stop)(struct mac_device *mac_dev);
+ int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
+ int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
+ int (*set_multi)(struct net_device *net_dev,
+ struct mac_device *mac_dev);
+ int (*uninit)(struct fm_mac_dev *fm_mac_dev);
+ int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
+ int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
+ int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
+ int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
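+	/* IEEE 1588 timer (FMan RTC) hooks */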
+ int (*fm_rtc_enable)(struct fm *fm_dev);
+ int (*fm_rtc_disable)(struct fm *fm_dev);
+ int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
+ int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
+ int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
+ int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
+ int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
+ int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
+ uint64_t fiper);
+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
+ int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
+ int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
+#endif
+ int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
+ bool en);
+ int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
+ int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
+ int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
+};
+
+struct mac_address {
+ uint8_t addr[ETH_ALEN];
+ struct list_head list;
+};
+
+#define get_fm_handle(net_dev) \
+ (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
+
+#define for_each_port_device(i, port_dev) \
+ for (i = 0; i < ARRAY_SIZE(port_dev); i++)
+
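+/* The MAC-type specific private area (sized by mac_sizeof_priv[]) is
+ * allocated immediately after struct mac_device; macdev_priv() returns
+ * a pointer to it.
+ */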
+static inline __attribute__((nonnull)) void *macdev_priv(
+	const struct mac_device *mac_dev)
+{
+ return (void *)mac_dev + sizeof(*mac_dev);
+}
+
+extern const char *mac_driver_description;
+extern const size_t mac_sizeof_priv[];
+extern void (*const mac_setup[])(struct mac_device *mac_dev);
+
+int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
+void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
+
+#endif /* __MAC_H */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
@@ -0,0 +1,848 @@
+/* Copyright 2011-2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
+ * Validates the device-tree configuration and sets up the offline
+ * (O/H) ports: FMan ports that are not attached to a MAC and instead
+ * recirculate frames through the FMan for further parsing/processing.
+ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+#else
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": " fmt
+#endif
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/fsl_qman.h>
+
+#include "offline_port.h"
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+
+#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
+/* Manip extra space and data alignment for fragmentation */
+#define FRAG_MANIP_SPACE 128
+#define FRAG_DATA_ALIGN 64
+
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
+MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
+
+
+static const struct of_device_id oh_port_match_table[] = {
+ {
+ .compatible = "fsl,dpa-oh"
+ },
+ {
+ .compatible = "fsl,dpa-oh-shared"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, oh_port_match_table);
+
+#ifdef CONFIG_PM
+
+static int oh_suspend(struct device *dev)
+{
+ struct dpa_oh_config_s *oh_config;
+
+ oh_config = dev_get_drvdata(dev);
+ return fm_port_suspend(oh_config->oh_port);
+}
+
+static int oh_resume(struct device *dev)
+{
+ struct dpa_oh_config_s *oh_config;
+
+ oh_config = dev_get_drvdata(dev);
+ return fm_port_resume(oh_config->oh_port);
+}
+
+static const struct dev_pm_ops oh_pm_ops = {
+ .suspend = oh_suspend,
+ .resume = oh_resume,
+};
+
+#define OH_PM_OPS (&oh_pm_ops)
+
+#else /* CONFIG_PM */
+
+#define OH_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
+/* Creates Frame Queues */
+static uint32_t oh_fq_create(struct qman_fq *fq,
+ uint32_t fq_id, uint16_t channel,
+ uint16_t wq_id)
+	struct qm_mcc_initfq fq_opts;
+	uint32_t create_flags, init_flags;
+	uint32_t ret = 0;
+
+	/* only some fq_opts fields are set below; zero the rest so no
+	 * stale stack data reaches qman_init_fq()
+	 */
+	memset(&fq_opts, 0, sizeof(fq_opts));
+
+
+ if (fq == NULL)
+ return 1;
+
+ /* Set flags for FQ create */
+ create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
+
+ /* Create frame queue */
+ ret = qman_create_fq(fq_id, create_flags, fq);
+ if (ret != 0)
+ return 1;
+
+ /* Set flags for FQ init */
+ init_flags = QMAN_INITFQ_FLAG_SCHED;
+
+ /* Set FQ init options. Specify destination WQ ID and channel */
+ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
+ fq_opts.fqd.dest.wq = wq_id;
+ fq_opts.fqd.dest.channel = channel;
+
+ /* Initialize frame queue */
+ ret = qman_init_fq(fq, init_flags, &fq_opts);
+ if (ret != 0) {
+ qman_destroy_fq(fq, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void dump_fq(struct device *dev, int fqid, uint16_t channel)
+{
+ if (channel) {
+ /* display fqs with a valid (!= 0) destination channel */
+ dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
+ }
+}
+
+static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
+ int fqs_count, uint16_t channel_id)
+{
+ int i;
+ for (i = 0; i < fqs_count; i++)
+ dump_fq(dev, (fqs + i)->fqid, channel_id);
+}
+
+static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
+{
+ struct list_head *fq_list;
+ struct fq_duple *fqd;
+
+ dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
+ dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
+
+	/* TX queues (legacy initialization path) */
+	dev_info(dev, "Initialized queues:\n");
+	dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt,
+			conf->channel);
+
+ /* initialized ingress queues */
+ list_for_each(fq_list, &conf->fqs_ingress_list) {
+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
+ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
+ }
+
+ /* initialized egress queues */
+ list_for_each(fq_list, &conf->fqs_egress_list) {
+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
+ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
+ }
+}
+
+/* Destroys Frame Queues */
+static void oh_fq_destroy(struct qman_fq *fq)
+{
+ int _errno = 0;
+
+ _errno = qman_retire_fq(fq, NULL);
+ if (unlikely(_errno < 0))
+ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__,
+ qman_fq_fqid(fq), _errno);
+
+ _errno = qman_oos_fq(fq);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__,
+ qman_fq_fqid(fq), _errno);
+ }
+
+ qman_destroy_fq(fq, 0);
+}
+
+/* PCD frame queue allocation callbacks for the OH port. They are
+ * registered via fm_port_pcd_bind() but are never expected to be
+ * called, hence the BUG().
+ */
+static int __cold oh_alloc_pcd_fqids(struct device *dev,
+ uint32_t num,
+ uint8_t alignment,
+ uint32_t *base_fqid)
+{
+ dev_crit(dev, "callback not implemented!\n");
+ BUG();
+
+ return 0;
+}
+
+static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
+{
+ dev_crit(dev, "callback not implemented!\n");
+ BUG();
+
+ return 0;
+}
+
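+/* Buffer layout requested for the OH port: reserve Tx private data
+ * room plus the parse and hash results, no timestamp; manip space and
+ * data alignment come from the port's extended parameters.
+ */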
+static void oh_set_buffer_layout(struct fm_port *port,
+ struct dpa_buffer_layout_s *layout)
+{
+ struct fm_port_params params;
+
+ layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
+ layout->parse_results = true;
+ layout->hash_results = true;
+ layout->time_stamp = false;
+
+ fm_port_get_buff_layout_ext_params(port, &params);
+ layout->manip_extra_space = params.manip_extra_space;
+ layout->data_align = params.data_align;
+}
+
+static int
+oh_port_probe(struct platform_device *_of_dev)
+{
+ struct device *dpa_oh_dev;
+ struct device_node *dpa_oh_node;
+ int lenp, _errno = 0, fq_idx, duple_idx;
+ int n_size, i, j, ret, duples_count;
+ struct platform_device *oh_of_dev;
+ struct device_node *oh_node, *bpool_node = NULL, *root_node;
+ struct device *oh_dev;
+ struct dpa_oh_config_s *oh_config = NULL;
+ const __be32 *oh_all_queues;
+ const __be32 *channel_ids;
+ const __be32 *oh_tx_queues;
+ uint32_t queues_count;
+ uint32_t crt_fqid_base;
+ uint32_t crt_fq_count;
+ bool frag_enabled = false;
+ struct fm_port_params oh_port_tx_params;
+ struct fm_port_pcd_param oh_port_pcd_params;
+ struct dpa_buffer_layout_s buf_layout;
+
+ /* True if the current partition owns the OH port. */
+ bool init_oh_port;
+
+ const struct of_device_id *match;
+ int crt_ext_pools_count;
+ u32 ext_pool_size;
+ u32 port_id;
+ u32 channel_id;
+
+ int channel_ids_count;
+ int channel_idx;
+ struct fq_duple *fqd;
+ struct list_head *fq_list, *fq_list_tmp;
+
+ const __be32 *bpool_cfg;
+ uint32_t bpid;
+
+ memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
+ dpa_oh_dev = &_of_dev->dev;
+ dpa_oh_node = dpa_oh_dev->of_node;
+ BUG_ON(dpa_oh_node == NULL);
+
+ match = of_match_device(oh_port_match_table, dpa_oh_dev);
+ if (!match)
+ return -EINVAL;
+
+ dev_dbg(dpa_oh_dev, "Probing OH port...\n");
+
+ /* Find the referenced OH node */
+ oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
+ if (oh_node == NULL) {
+ dev_err(dpa_oh_dev,
+ "Can't find OH node referenced from node %s\n",
+ dpa_oh_node->full_name);
+ return -EINVAL;
+ }
+ dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
+ match->compatible);
+
+ _errno = of_property_read_u32(oh_node, "cell-index", &port_id);
+ if (_errno) {
+ dev_err(dpa_oh_dev, "No port id found in node %s\n",
+			oh_node->full_name);
+ goto return_kfree;
+ }
+
+ _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
+ &channel_id);
+ if (_errno) {
+ dev_err(dpa_oh_dev, "No channel id found in node %s\n",
+			oh_node->full_name);
+ goto return_kfree;
+ }
+
+ oh_of_dev = of_find_device_by_node(oh_node);
+ BUG_ON(oh_of_dev == NULL);
+ oh_dev = &oh_of_dev->dev;
+
+	/* The OH port must be initialized exactly once.
+	 * The following scenarios are of interest:
+	 * - the node is Linux-private (we always initialize it);
+	 * - the node is shared between two Linux partitions
+	 *   (only one of them initializes it);
+	 * - the node is shared between a Linux and a LWE partition,
+	 *   marked "fsl,dpa-oh-shared" (the owning partition
+	 *   initializes it; see the check below)
+	 */
+
+ /* Check if the current partition owns the OH port
+ * and ought to initialize it. It may be the case that we leave this
+ * to another (also Linux) partition.
+ */
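+	/* strcmp() is non-zero (true) unless the compatible is
+	 * "fsl,dpa-oh-shared"
+	 */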
+ init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
+
+ /* If we aren't the "owner" of the OH node, we're done here. */
+ if (!init_oh_port) {
+ dev_dbg(dpa_oh_dev,
+ "Not owning the shared OH port %s, will not initialize it.\n",
+ oh_node->full_name);
+ of_node_put(oh_node);
+ return 0;
+ }
+
+ /* Allocate OH dev private data */
+ oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
+ if (oh_config == NULL) {
+ dev_err(dpa_oh_dev,
+ "Can't allocate private data for OH node %s referenced from node %s!\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ _errno = -ENOMEM;
+ goto return_kfree;
+ }
+
+ INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
+ INIT_LIST_HEAD(&oh_config->fqs_egress_list);
+
+ /* FQs that enter OH port */
+ lenp = 0;
+ oh_all_queues = of_get_property(dpa_oh_node,
+ "fsl,qman-frame-queues-ingress", &lenp);
+ if (lenp % (2 * sizeof(*oh_all_queues))) {
+ dev_warn(dpa_oh_dev,
+ "Wrong ingress queues format for OH node %s referenced from node %s!\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ /* just ignore the last unpaired value */
+ }
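+	/* The property is a list of <base count> duples; e.g. a
+	 * hypothetical
+	 *	fsl,qman-frame-queues-ingress = <0x2000 2 0x2100 1>;
+	 * would describe FQs 0x2000, 0x2001 and 0x2100.
+	 */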
+
+ duples_count = lenp / (2 * sizeof(*oh_all_queues));
+	dev_dbg(dpa_oh_dev, "Allocating %d ingress frame queue duples\n",
+ duples_count);
+ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
+ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
+ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
+
+ fqd = devm_kzalloc(dpa_oh_dev,
+ sizeof(struct fq_duple), GFP_KERNEL);
+ if (!fqd) {
+ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
+ oh_node->full_name,
+ dpa_oh_node->full_name);
+ _errno = -ENOMEM;
+ goto return_kfree;
+ }
+
+ fqd->fqs = devm_kzalloc(dpa_oh_dev,
+ crt_fq_count * sizeof(struct qman_fq),
+ GFP_KERNEL);
+ if (!fqd->fqs) {
+ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
+ oh_node->full_name,
+ dpa_oh_node->full_name);
+ _errno = -ENOMEM;
+ goto return_kfree;
+ }
+
+ for (j = 0; j < crt_fq_count; j++)
+ (fqd->fqs + j)->fqid = crt_fqid_base + j;
+ fqd->fqs_count = crt_fq_count;
+ fqd->channel_id = (uint16_t)channel_id;
+ list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
+ }
+
+ /* create the ingress queues */
+ list_for_each(fq_list, &oh_config->fqs_ingress_list) {
+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
+
+ for (j = 0; j < fqd->fqs_count; j++) {
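+			/* all OH frame queues are scheduled on work
+			 * queue 3 of their target channel
+			 */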
+ ret = oh_fq_create(fqd->fqs + j,
+ (fqd->fqs + j)->fqid,
+ fqd->channel_id, 3);
+ if (ret != 0) {
+ dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
+ (fqd->fqs + j)->fqid,
+ oh_node->full_name,
+ dpa_oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+ }
+ }
+
+ /* FQs that exit OH port */
+ lenp = 0;
+ oh_all_queues = of_get_property(dpa_oh_node,
+ "fsl,qman-frame-queues-egress", &lenp);
+ if (lenp % (2 * sizeof(*oh_all_queues))) {
+ dev_warn(dpa_oh_dev,
+ "Wrong egress queues format for OH node %s referenced from node %s!\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ /* just ignore the last unpaired value */
+ }
+
+ duples_count = lenp / (2 * sizeof(*oh_all_queues));
+	dev_dbg(dpa_oh_dev, "Allocating %d egress frame queue duples\n",
+ duples_count);
+ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
+ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
+ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
+
+ fqd = devm_kzalloc(dpa_oh_dev,
+ sizeof(struct fq_duple), GFP_KERNEL);
+ if (!fqd) {
+ dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
+ oh_node->full_name,
+ dpa_oh_node->full_name);
+ _errno = -ENOMEM;
+ goto return_kfree;
+ }
+
+ fqd->fqs = devm_kzalloc(dpa_oh_dev,
+ crt_fq_count * sizeof(struct qman_fq),
+ GFP_KERNEL);
+ if (!fqd->fqs) {
+ dev_err(dpa_oh_dev,
+ "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
+ oh_node->full_name,
+ dpa_oh_node->full_name);
+ _errno = -ENOMEM;
+ goto return_kfree;
+ }
+
+ for (j = 0; j < crt_fq_count; j++)
+ (fqd->fqs + j)->fqid = crt_fqid_base + j;
+ fqd->fqs_count = crt_fq_count;
+ /* channel ID is specified in another attribute */
+ fqd->channel_id = 0;
+		list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
+	}
+
+ /* channel_ids for FQs that exit OH port */
+ lenp = 0;
+ channel_ids = of_get_property(dpa_oh_node,
+ "fsl,qman-channel-ids-egress", &lenp);
+
+ channel_ids_count = lenp / (sizeof(*channel_ids));
+ if (channel_ids_count != duples_count) {
+ dev_warn(dpa_oh_dev,
+ "Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ /* just ignore the queues that do not have a Channel ID */
+ }
+
+ channel_idx = 0;
+ list_for_each(fq_list, &oh_config->fqs_egress_list) {
+		if (channel_idx >= channel_ids_count)
+ break;
+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
+ fqd->channel_id =
+ (uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
+ }
+
+ /* create egress queues */
+ list_for_each(fq_list, &oh_config->fqs_egress_list) {
+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
+
+ if (fqd->channel_id == 0) {
+ /* missing channel id in dts */
+ continue;
+ }
+
+ for (j = 0; j < fqd->fqs_count; j++) {
+ ret = oh_fq_create(fqd->fqs + j,
+ (fqd->fqs + j)->fqid,
+ fqd->channel_id, 3);
+ if (ret != 0) {
+ dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
+ (fqd->fqs + j)->fqid,
+ oh_node->full_name,
+ dpa_oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+ }
+ }
+
+ /* Read FQ ids/nums for the DPA OH node */
+ oh_all_queues = of_get_property(dpa_oh_node,
+ "fsl,qman-frame-queues-oh", &lenp);
+ if (oh_all_queues == NULL) {
+ dev_err(dpa_oh_dev,
+ "No frame queues have been defined for OH node %s referenced from node %s\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ /* Check that the OH error and default FQs are there */
+ BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
+ queues_count = lenp / (2 * sizeof(*oh_all_queues));
+ if (queues_count != 2) {
+ dev_err(dpa_oh_dev,
+ "Error and Default queues must be defined for OH node %s referenced from node %s\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ /* Read the FQIDs defined for this OH port */
+ dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
+ fq_idx = 0;
+
+ /* Error FQID - must be present */
+ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
+ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
+ if (crt_fq_count != 1) {
+ dev_err(dpa_oh_dev,
+ "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
+ oh_node->full_name, dpa_oh_node->full_name,
+ crt_fq_count);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+ oh_config->error_fqid = crt_fqid_base;
+ dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
+ oh_config->error_fqid, oh_node->full_name);
+
+ /* Default FQID - must be present */
+ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
+ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
+ if (crt_fq_count != 1) {
+ dev_err(dpa_oh_dev,
+ "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
+ oh_node->full_name, dpa_oh_node->full_name,
+ crt_fq_count);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+ oh_config->default_fqid = crt_fqid_base;
+ dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
+ oh_config->default_fqid, oh_node->full_name);
+
+ /* TX FQID - presence is optional */
+ oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
+ &lenp);
+ if (oh_tx_queues == NULL) {
+ dev_dbg(dpa_oh_dev,
+ "No tx queues have been defined for OH node %s referenced from node %s\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ goto config_port;
+ }
+
+ /* Check that queues-tx has only a base and a count defined */
+ BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
+ queues_count = lenp / (2 * sizeof(*oh_tx_queues));
+ if (queues_count != 1) {
+ dev_err(dpa_oh_dev,
+ "TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+	fq_idx = 0;
+	crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
+	crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
+	oh_config->egress_cnt = crt_fq_count;
+	/* record the channel so dump_oh_config() can display the TX FQs */
+	oh_config->channel = (uint16_t)channel_id;
+
+ /* Allocate TX queues */
+ dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
+ oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
+ crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
+ if (oh_config->egress_fqs == NULL) {
+ dev_err(dpa_oh_dev,
+ "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ _errno = -ENOMEM;
+ goto return_kfree;
+ }
+
+ /* Create TX queues */
+ for (i = 0; i < crt_fq_count; i++) {
+ ret = oh_fq_create(oh_config->egress_fqs + i,
+ crt_fqid_base + i, (uint16_t)channel_id, 3);
+ if (ret != 0) {
+ dev_err(dpa_oh_dev,
+ "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
+ crt_fqid_base + i, oh_node->full_name,
+ dpa_oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+ }
+
+config_port:
+ /* Get a handle to the fm_port so we can set
+ * its configuration params
+ */
+ oh_config->oh_port = fm_port_bind(oh_dev);
+ if (oh_config->oh_port == NULL) {
+ dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
+ oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
+
+ /* read the pool handlers */
+ crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
+ "fsl,bman-buffer-pools", NULL);
+ if (crt_ext_pools_count <= 0) {
+ dev_info(dpa_oh_dev,
+ "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
+ oh_node->full_name);
+ goto init_port;
+ }
+
+	/* used for reading ext_pool_size */
+ root_node = of_find_node_by_path("/");
+ if (root_node == NULL) {
+ dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ n_size = of_n_size_cells(root_node);
+ of_node_put(root_node);
+
+ dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
+ crt_ext_pools_count);
+
+ oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
+
+ for (i = 0; i < crt_ext_pools_count; i++) {
+ bpool_node = of_parse_phandle(dpa_oh_node,
+ "fsl,bman-buffer-pools", i);
+ if (bpool_node == NULL) {
+ dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
+ if (_errno) {
+ dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
+ dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
+
+ bpool_cfg = of_get_property(bpool_node,
+ "fsl,bpool-ethernet-cfg", &lenp);
+ if (bpool_cfg == NULL) {
+ dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
+ oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
+ dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
+ ext_pool_size);
+		of_node_put(bpool_node);
+		/* avoid a double of_node_put() via the error path */
+		bpool_node = NULL;
+	}
+
+ if (buf_layout.data_align != FRAG_DATA_ALIGN ||
+ buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
+ goto init_port;
+
+ frag_enabled = true;
+	dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d\n",
+		port_id);
+
+init_port:
+ of_node_put(oh_node);
+ /* Set Tx params */
+	dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
+		oh_config->error_fqid, oh_config->default_fqid, &buf_layout,
+		frag_enabled);
+ /* Set PCD params */
+ oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
+ oh_port_pcd_params.cbf = oh_free_pcd_fqids;
+ oh_port_pcd_params.dev = dpa_oh_dev;
+ fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
+
+ dev_set_drvdata(dpa_oh_dev, oh_config);
+
+ /* Enable the OH port */
+ _errno = fm_port_enable(oh_config->oh_port);
+ if (_errno)
+ goto return_kfree;
+
+ dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
+
+ /* print of all referenced & created queues */
+ dump_oh_config(dpa_oh_dev, oh_config);
+
+ return 0;
+
+return_kfree:
+	if (bpool_node)
+		of_node_put(bpool_node);
+	if (oh_node)
+		of_node_put(oh_node);
+
+	/* oh_config is still NULL if we failed before allocating it */
+	if (oh_config == NULL)
+		return _errno;
+
+	if (oh_config->egress_fqs)
+		devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
+
+	list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) {
+		fqd = list_entry(fq_list, struct fq_duple, fq_list);
+		list_del(fq_list);
+		devm_kfree(dpa_oh_dev, fqd->fqs);
+		devm_kfree(dpa_oh_dev, fqd);
+	}
+
+	list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) {
+		fqd = list_entry(fq_list, struct fq_duple, fq_list);
+		list_del(fq_list);
+		devm_kfree(dpa_oh_dev, fqd->fqs);
+		devm_kfree(dpa_oh_dev, fqd);
+	}
+
+	devm_kfree(dpa_oh_dev, oh_config);
+	return _errno;
+}
+
+static int __cold oh_port_remove(struct platform_device *_of_dev)
+{
+ int _errno = 0, i;
+ struct dpa_oh_config_s *oh_config;
+
+ pr_info("Removing OH port...\n");
+
+ oh_config = dev_get_drvdata(&_of_dev->dev);
+ if (oh_config == NULL) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): No OH config in device private data!\n",
+ KBUILD_BASENAME".c", __LINE__, __func__);
+ _errno = -ENODEV;
+ goto return_error;
+ }
+
+ if (oh_config->egress_fqs)
+ for (i = 0; i < oh_config->egress_cnt; i++)
+ oh_fq_destroy(oh_config->egress_fqs + i);
+
+ if (oh_config->oh_port == NULL) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): No fm port in device private data!\n",
+ KBUILD_BASENAME".c", __LINE__, __func__);
+ _errno = -EINVAL;
+ goto free_egress_fqs;
+ }
+
+ _errno = fm_port_disable(oh_config->oh_port);
+
+free_egress_fqs:
+ if (oh_config->egress_fqs)
+ devm_kfree(&_of_dev->dev, oh_config->egress_fqs);
+ devm_kfree(&_of_dev->dev, oh_config);
+ dev_set_drvdata(&_of_dev->dev, NULL);
+
+return_error:
+ return _errno;
+}
+
+static struct platform_driver oh_port_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = oh_port_match_table,
+ .owner = THIS_MODULE,
+ .pm = OH_PM_OPS,
+ },
+ .probe = oh_port_probe,
+ .remove = oh_port_remove
+};
+
+static int __init __cold oh_port_load(void)
+{
+ int _errno;
+
+ pr_info(OH_MOD_DESCRIPTION "\n");
+
+ _errno = platform_driver_register(&oh_port_driver);
+ if (_errno < 0) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+ return _errno;
+}
+module_init(oh_port_load);
+
+static void __exit __cold oh_port_unload(void)
+{
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+
+ platform_driver_unregister(&oh_port_driver);
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(oh_port_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
@@ -0,0 +1,59 @@
+/* Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OFFLINE_PORT_H
+#define __OFFLINE_PORT_H
+
+struct fm_port;
+struct qman_fq;
+
+/* fqs are defined in duples (base_fq, fq_count) */
+struct fq_duple {
+ struct qman_fq *fqs;
+ int fqs_count;
+ uint16_t channel_id;
+ struct list_head fq_list;
+};
+
+/* OH port configuration */
+struct dpa_oh_config_s {
+ uint32_t error_fqid;
+ uint32_t default_fqid;
+ struct fm_port *oh_port;
+ uint32_t egress_cnt;
+ struct qman_fq *egress_fqs;
+ uint16_t channel;
+
+ struct list_head fqs_ingress_list;
+ struct list_head fqs_egress_list;
+};
+
+#endif /* __OFFLINE_PORT_H */