From fb909e29d6c073f4c5777a0db75df72b726e4314 Mon Sep 17 00:00:00 2001
From: Corentin LABBE <clabbe.montjoie@gmail.com>
Date: Fri, 7 Oct 2016 10:25:48 +0200
Subject: [PATCH 1/8] ethernet: add sun8i-emac driver

This patch adds support for the sun8i-emac ethernet MAC hardware.
It can be found in Allwinner H3/A83T/A64 SoCs.

It supports 10/100/1000 Mbit/s speeds with half/full duplex.
It can use an internal PHY (MII 10/100) or an external PHY
via RGMII/RMII.

Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
---
 drivers/net/ethernet/allwinner/Kconfig      |   13 +
 drivers/net/ethernet/allwinner/Makefile     |    1 +
 drivers/net/ethernet/allwinner/sun8i-emac.c | 2266 +++++++++++++++++++++++++++
 3 files changed, 2280 insertions(+)
 create mode 100644 drivers/net/ethernet/allwinner/sun8i-emac.c

diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
index 47da7e7..060569c 100644
--- a/drivers/net/ethernet/allwinner/Kconfig
+++ b/drivers/net/ethernet/allwinner/Kconfig
@@ -33,4 +33,17 @@ config SUN4I_EMAC
           To compile this driver as a module, choose M here.  The module
           will be called sun4i-emac.
 
+config SUN8I_EMAC
+	tristate "Allwinner sun8i EMAC support"
+	depends on ARCH_SUNXI || COMPILE_TEST
+	depends on OF
+	select MII
+	select PHYLIB
+	---help---
+	  This driver supports the sun8i EMAC ethernet MAC present on
+	  H3/A83T/A64 Allwinner SoCs.
+
+	  To compile this driver as a module, choose M here.  The module
+	  will be called sun8i-emac.
+
 endif # NET_VENDOR_ALLWINNER
diff --git a/drivers/net/ethernet/allwinner/Makefile b/drivers/net/ethernet/allwinner/Makefile
index 03129f7..8bd1693c 100644
--- a/drivers/net/ethernet/allwinner/Makefile
+++ b/drivers/net/ethernet/allwinner/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_SUN4I_EMAC) += sun4i-emac.o
+obj-$(CONFIG_SUN8I_EMAC) += sun8i-emac.o
diff --git a/drivers/net/ethernet/allwinner/sun8i-emac.c b/drivers/net/ethernet/allwinner/sun8i-emac.c
new file mode 100644
index 0000000..bc74467
--- /dev/null
+++ b/drivers/net/ethernet/allwinner/sun8i-emac.c
@@ -0,0 +1,2266 @@
+/*
+ * sun8i-emac driver
+ *
+ * Copyright (C) 2015-2016 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This is the driver for the Allwinner Ethernet MAC found in H3/A83T/A64 SoCs
+ *
+ * TODO:
+ * - MAC filtering
+ * - Jumbo frames
+ * - features rx-all (NETIF_F_RXALL_BIT)
+ * - PM runtime
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define EMAC_BASIC_CTL0	0x00
+#define EMAC_BASIC_CTL1	0x04
+#define EMAC_INT_STA	0x08
+#define EMAC_INT_EN	0x0C
+#define EMAC_TX_CTL0	0x10
+#define EMAC_TX_CTL1	0x14
+#define EMAC_TX_FLOW_CTL	0x1C
+#define EMAC_RX_CTL0	0x24
+#define EMAC_RX_CTL1	0x28
+#define EMAC_RX_FRM_FLT	0x38
+#define EMAC_MDIO_CMD	0x48
+#define EMAC_MDIO_DATA	0x4C
+#define EMAC_TX_DMA_STA	0xB0
+#define EMAC_TX_CUR_DESC	0xB4
+#define EMAC_TX_CUR_BUF	0xB8
+#define EMAC_RX_DMA_STA	0xC0
+
+#define MDIO_CMD_MII_BUSY	BIT(0)
+#define MDIO_CMD_MII_WRITE	BIT(1)
+#define MDIO_CMD_MII_PHY_REG_ADDR_MASK	GENMASK(8, 4)
+#define MDIO_CMD_MII_PHY_REG_ADDR_SHIFT	4
+#define MDIO_CMD_MII_PHY_ADDR_MASK	GENMASK(16, 12)
+#define MDIO_CMD_MII_PHY_ADDR_SHIFT	12
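+/* Layout of EMAC_MDIO_CMD as used by this driver (our own summary of
+ * the definitions above plus the MDC divider programmed in
+ * sun8i_emac_set_mdc(); not copied from the datasheet):
+ *   bit 0       MII_BUSY  - set to start a transfer, polled until clear
+ *   bit 1       MII_WRITE - 1 = write, 0 = read
+ *   bits 8:4    PHY register address
+ *   bits 16:12  PHY address
+ *   bits 21:20  MDC clock divider (see sun8i_emac_set_mdc())
+ */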
+
+#define EMAC_MACADDR_HI	0x50
+#define EMAC_MACADDR_LO	0x54
+
+#define EMAC_RX_DESC_LIST 0x34
+#define EMAC_TX_DESC_LIST 0x20
+
+#define EMAC_RX_DO_CRC BIT(27)
+#define EMAC_RX_STRIP_FCS BIT(28)
+
+#define LE32_BIT(x) (cpu_to_le32(BIT(x)))
+
+#define EMAC_COULD_BE_USED_BY_DMA LE32_BIT(31)
+
+/* Used in RX_CTL1 */
+#define EMAC_RX_DMA_EN	BIT(30)
+#define EMAC_RX_DMA_START	BIT(31)
+/* Used in TX_CTL1 */
+#define EMAC_TX_DMA_EN	BIT(30)
+#define EMAC_TX_DMA_START	BIT(31)
+
+/* Used in RX_CTL0 */
+#define EMAC_RX_RECEIVER_EN		BIT(31)
+/* Used in TX_CTL0 */
+#define EMAC_TX_TRANSMITTER_EN	BIT(31)
+
+/* Basic CTL0 */
+#define EMAC_BCTL0_FD BIT(0)
+#define EMAC_BCTL0_SPEED_10		2
+#define EMAC_BCTL0_SPEED_100		3
+#define EMAC_BCTL0_SPEED_MASK	GENMASK(3, 2)
+#define EMAC_BCTL0_SPEED_SHIFT	2
+
+#define EMAC_FLOW_RX 1
+#define EMAC_FLOW_TX 2
+
+#define EMAC_TX_INT		BIT(0)
+#define EMAC_TX_DMA_STOP_INT	BIT(1)
+#define EMAC_TX_BUF_UA_INT	BIT(2)
+#define EMAC_TX_TIMEOUT_INT	BIT(3)
+#define EMAC_TX_UNDERFLOW_INT	BIT(4)
+#define EMAC_TX_EARLY_INT	BIT(5)
+#define EMAC_RX_INT		BIT(8)
+#define EMAC_RX_BUF_UA_INT	BIT(9)
+#define EMAC_RX_DMA_STOP_INT	BIT(10)
+#define EMAC_RX_TIMEOUT_INT	BIT(11)
+#define EMAC_RX_OVERFLOW_INT	BIT(12)
+#define EMAC_RX_EARLY_INT	BIT(13)
+#define EMAC_RGMII_STA_INT	BIT(16)
+
+/* Bits used in frame RX status */
+#define EMAC_DSC_RX_FIRST		BIT(9)
+#define EMAC_DSC_RX_LAST		BIT(8)
+
+/* Bits used in frame TX ctl */
+#define EMAC_MAGIC_TX_BIT		LE32_BIT(24)
+#define EMAC_TX_DO_CRC		(LE32_BIT(27) | LE32_BIT(28))
+#define EMAC_DSC_TX_FIRST		LE32_BIT(29)
+#define EMAC_DSC_TX_LAST		LE32_BIT(30)
+#define EMAC_WANT_INT			LE32_BIT(31)
+
+/* struct emac_variant - Describe an emac variant of sun8i-emac
+ * @default_syscon_value: Default value of the syscon EMAC register
+ * The default_syscon_value is also used for powering down the PHY
+ * @internal_phy:	which PHY type is internal
+ * @support_mii:	Does the SoC support MII
+ * @support_rmii:	Does the SoC support RMII
+ * @support_rgmii:	Does the SoC support RGMII
+ */
+struct emac_variant {
+	u32 default_syscon_value;
+	int internal_phy;
+	bool support_mii;
+	bool support_rmii;
+	bool support_rgmii;
+};
+
+static const struct emac_variant emac_variant_h3 = {
+	.default_syscon_value = 0x58000,
+	.internal_phy = PHY_INTERFACE_MODE_MII,
+	.support_mii = true,
+	.support_rmii = true,
+	.support_rgmii = true
+};
+
+static const struct emac_variant emac_variant_a83t = {
+	.default_syscon_value = 0,
+	.internal_phy = 0,
+	.support_mii = true,
+	.support_rgmii = true
+};
+
+static const struct emac_variant emac_variant_a64 = {
+	.default_syscon_value = 0,
+	.internal_phy = 0,
+	.support_mii = true,
+	.support_rmii = true,
+	.support_rgmii = true
+};
+
+static const char estats_str[][ETH_GSTRING_LEN] = {
+	/* errors */
+	"rx_payload_error",
+	"rx_CRC_error",
+	"rx_phy_error",
+	"rx_length_error",
+	"rx_col_error",
+	"rx_header_error",
+	"rx_overflow_error",
+	"rx_saf_error",
+	"rx_daf_error",
+	"rx_buf_error",
+	"rx_invalid_error",
+	"tx_timeout",
+	/* misc infos */
+	"tx_stop_queue",
+	"rx_dma_ua",
+	"rx_dma_stop",
+	"tx_dma_ua",
+	"tx_dma_stop",
+	"rx_hw_csum",
+	"tx_hw_csum",
+	/* interrupts */
+	"rx_int",
+	"tx_int",
+	"tx_early_int",
+	"tx_underflow_int",
+	"tx_timeout_int",
+	"rx_early_int",
+	"rx_overflow_int",
+	"rx_timeout_int",
+	"rgmii_state_int",
+	/* debug */
+	"tx_used_desc",
+	"napi_schedule",
+	"napi_underflow",
+};
+
+struct sun8i_emac_stats {
+	u64 rx_payload_error;
+	u64 rx_crc_error;
+	u64 rx_phy_error;
+	u64 rx_length_error;
+	u64 rx_col_error;
+	u64 rx_header_error;
+	u64 rx_overflow_error;
+	u64 rx_saf_fail;
+	u64 rx_daf_fail;
+	u64 rx_buf_error;
+	u64 rx_invalid_error;
+	u64 tx_timeout;
+
+	u64 tx_stop_queue;
+	u64 rx_dma_ua;
+	u64 rx_dma_stop;
+	u64 tx_dma_ua;
+	u64 tx_dma_stop;
+	u64 rx_hw_csum;
+	u64 tx_hw_csum;
+
+	u64 rx_int;
+	u64 tx_int;
+	u64 tx_early_int;
+	u64 tx_underflow_int;
+	u64 tx_timeout_int;
+	u64 rx_early_int;
+	u64 rx_overflow_int;
+	u64 rx_timeout_int;
+	u64 rgmii_state_int;
+
+	u64 tx_used_desc;
+	u64 napi_schedule;
+	u64 napi_underflow;
+};
+
+/* The datasheet says that each descriptor can transfer up to 4096 bytes,
+ * but later register documentation reduces that value to 2048.
+ * Anyway, using 2048 causes strange behaviours and even the BSP driver
+ * uses 2047.
+ */
+#define DESC_BUF_MAX 2044
+
+/* Magic value for knowing whether a descriptor is available or not */
+#define DCLEAN cpu_to_le32(BIT(16) | BIT(14) | BIT(12) | BIT(10) | BIT(9))
+
+/* struct dma_desc - Structure of DMA descriptor used by the hardware
+ * @status:	Status of the frame written by HW, so RO for the
+ *		driver (except for BIT(31) which is R/W)
+ * @ctl:	Information on the frame written by the driver (INT, len,...)
+ * @buf_addr:	physical address of the frame data
+ * @next:	physical address of next dma_desc
+ */
+struct dma_desc {
+	__le32 status;
+	__le32 ctl;
+	__le32 buf_addr;
+	__le32 next;
+};
+
+/* Describe how data from the skb is DMA mapped (used in txinfo map member) */
+#define MAP_SINGLE 1
+#define MAP_PAGE 2
+
+/* Structure for storing information about data in TX ring buffer */
+struct txinfo {
+	struct sk_buff *skb;
+	int map;
+};
+
+struct sun8i_emac_priv {
+	void __iomem *base;
+	struct regmap *regmap;
+	int irq;
+	struct device *dev;
+	struct net_device *ndev;
+	struct mii_bus *mdio;
+	struct napi_struct napi;
+	spinlock_t tx_lock; /* control the access of transmit descriptors */
+	int duplex;
+	int speed;
+	int link;
+	int phy_interface;
+	const struct emac_variant *variant;
+	struct device_node *phy_node;
+	struct device_node *mdio_node;
+	struct clk *ahb_clk;
+	struct clk *ephy_clk;
+	bool use_internal_phy;
+
+	struct reset_control *rst_mac;
+	struct reset_control *rst_ephy;
+
+	struct dma_desc *dd_rx;
+	dma_addr_t dd_rx_phy;
+	struct dma_desc *dd_tx;
+	dma_addr_t dd_tx_phy;
+	struct sk_buff **rx_skb;
+	struct txinfo *txl;
+
+	int nbdesc_tx;
+	int nbdesc_rx;
+	int tx_slot;
+	int tx_dirty;
+	int rx_dirty;
+	struct sun8i_emac_stats estats;
+	u32 msg_enable;
+	int flow_ctrl;
+	int pause;
+};
+
+static irqreturn_t sun8i_emac_dma_interrupt(int irq, void *dev_id);
+
+static void rb_inc(int *p, const int max)
+{
+	(*p)++;
+	(*p) %= max;
+}
+
+/* Locking strategy:
+ * The RX queue does not need any lock since only sun8i_emac_poll() accesses
+ * it. (All other RX modifiers (ringparam/ndo_stop) disable NAPI and so
+ * sun8i_emac_poll())
+ * The TX queue is handled by sun8i_emac_xmit(), sun8i_emac_complete_xmit()
+ * and sun8i_emac_tx_timeout()
+ * (All other TX modifiers (ringparam/ndo_stop) disable NAPI and stop the
+ * queue)
+ *
+ * sun8i_emac_xmit() can fire only once (netif_tx_lock)
+ * sun8i_emac_complete_xmit() can fire only once (called from NAPI)
+ * sun8i_emac_tx_timeout() can fire only once (netif_tx_lock) and cannot
+ * race with sun8i_emac_xmit() (due to netif_tx_lock) nor with
+ * sun8i_emac_complete_xmit() which disables NAPI.
+ *
+ * So only sun8i_emac_xmit() and sun8i_emac_complete_xmit() can fire at the
+ * same time.
+ * But they can never modify the same descriptors:
+ * - sun8i_emac_complete_xmit() modifies only descriptors with empty status
+ * - sun8i_emac_xmit() modifies only descriptors set to DCLEAN
+ * Proper memory barriers ensure that a descriptor set to DCLEAN cannot be
+ * modified later by sun8i_emac_complete_xmit().
+ */
+
+/* Return the number of contiguous free descriptors
+ * starting from tx_slot
+ */
+static int rb_tx_numfreedesc(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	if (priv->tx_slot < priv->tx_dirty)
+		return priv->tx_dirty - priv->tx_slot;
+
+	return (priv->nbdesc_tx - priv->tx_slot) + priv->tx_dirty;
+}
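+/* Worked example (illustrative numbers, not from the datasheet): with
+ * nbdesc_tx = 8, tx_slot = 6 and tx_dirty = 2, descriptors 6, 7, 0 and
+ * 1 are free, so rb_tx_numfreedesc() returns (8 - 6) + 2 = 4.
+ * When tx_slot < tx_dirty (e.g. tx_slot = 2, tx_dirty = 6), the free
+ * slots are 2..5 and it returns 6 - 2 = 4.
+ */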
+
+/* sun8i_emac_rx_skb - Allocate a skb in a DMA descriptor
+ *
+ * @ndev:	The net_device for this interface
+ * @i:		index of slot to fill
+ *
+ * Refill a DMA descriptor with a fresh skb and map it for DMA.
+ */
+static int sun8i_emac_rx_skb(struct net_device *ndev, int i)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct dma_desc *ddesc;
+	struct sk_buff *skb;
+
+	ddesc = priv->dd_rx + i;
+
+	ddesc->ctl = 0;
+
+	skb = netdev_alloc_skb_ip_align(ndev, DESC_BUF_MAX);
+	if (!skb)
+		return -ENOMEM;
+
+	/* should not happen */
+	if (unlikely(priv->rx_skb[i]))
+		dev_warn(priv->dev, "BUG: Leaking a skbuff\n");
+
+	priv->rx_skb[i] = skb;
+
+	ddesc->buf_addr = dma_map_single(priv->dev, skb->data,
+					 DESC_BUF_MAX, DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->dev, ddesc->buf_addr)) {
+		dev_err(priv->dev, "ERROR: Cannot map RX buffer for DMA\n");
+		dev_kfree_skb(skb);
+		return -EFAULT;
+	}
+	/* We cannot directly use cpu_to_le32() after dma_map_single()
+	 * since dma_mapping_error() uses it
+	 */
+	ddesc->buf_addr = cpu_to_le32(ddesc->buf_addr);
+	ddesc->ctl |= cpu_to_le32(DESC_BUF_MAX);
+	/* EMAC_COULD_BE_USED_BY_DMA must be the last value written */
+	wmb();
+	ddesc->status = EMAC_COULD_BE_USED_BY_DMA;
+
+	return 0;
+}
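+/* Descriptor ownership handshake (a summary of the code above and of
+ * sun8i_emac_poll(); the wording is ours, not from the datasheet):
+ * the driver fills ctl/buf_addr, issues a wmb() and then sets BIT(31)
+ * of status (EMAC_COULD_BE_USED_BY_DMA) to hand the descriptor to the
+ * hardware. The hardware clears that bit once the frame is received,
+ * which is what the RX loop in sun8i_emac_poll() tests for.
+ */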
+
+static void sun8i_emac_stop_tx(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	u32 v;
+
+	netif_stop_queue(ndev);
+
+	v = readl(priv->base + EMAC_TX_CTL0);
+	/* Disable transmitter after current transmission */
+	v &= ~EMAC_TX_TRANSMITTER_EN;
+	writel(v, priv->base + EMAC_TX_CTL0);
+
+	v = readl(priv->base + EMAC_TX_CTL1);
+	/* Stop TX DMA */
+	v &= ~EMAC_TX_DMA_EN;
+	writel(v, priv->base + EMAC_TX_CTL1);
+
+	/* We must be sure that all is stopped before leaving this function */
+	wmb();
+}
+
+static void sun8i_emac_stop_rx(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	u32 v;
+
+	v = readl(priv->base + EMAC_RX_CTL0);
+	/* Disable receiver after current reception */
+	v &= ~EMAC_RX_RECEIVER_EN;
+	writel(v, priv->base + EMAC_RX_CTL0);
+
+	v = readl(priv->base + EMAC_RX_CTL1);
+	/* Stop RX DMA */
+	v &= ~EMAC_RX_DMA_EN;
+	writel(v, priv->base + EMAC_RX_CTL1);
+
+	/* We must be sure that all is stopped before leaving this function */
+	wmb();
+}
+
+static void sun8i_emac_start_rx(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	u32 v;
+
+	v = readl(priv->base + EMAC_RX_CTL0);
+	/* Enable receiver */
+	v |= EMAC_RX_RECEIVER_EN;
+	writel(v, priv->base + EMAC_RX_CTL0);
+
+	v = readl(priv->base + EMAC_RX_CTL1);
+	v |= EMAC_RX_DMA_START;
+	v |= EMAC_RX_DMA_EN;
+	writel(v, priv->base + EMAC_RX_CTL1);
+}
+
+static void sun8i_emac_start_tx(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	u32 v;
+
+	v = readl(priv->base + EMAC_TX_CTL0);
+	v |= EMAC_TX_TRANSMITTER_EN;
+	writel(v, priv->base + EMAC_TX_CTL0);
+
+	v = readl(priv->base + EMAC_TX_CTL1);
+	v |= EMAC_TX_DMA_START;
+	v |= EMAC_TX_DMA_EN;
+	writel(v, priv->base + EMAC_TX_CTL1);
+}
+
+/* sun8i_emac_set_macaddr - Set MAC address for slot index
+ *
+ * @addr: the MAC address to set
+ * @index: The index of the slot where to set the address.
+ *
+ * Slot 0 is the main MAC address
+ */
+static void sun8i_emac_set_macaddr(struct sun8i_emac_priv *priv,
+				   const u8 *addr, int index)
+{
+	u32 v;
+
+	dev_info(priv->dev, "device MAC address slot %d %pM\n", index, addr);
+
+	v = (addr[5] << 8) | addr[4];
+	writel(v, priv->base + EMAC_MACADDR_HI + index * 8);
+
+	v = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	writel(v, priv->base + EMAC_MACADDR_LO + index * 8);
+}
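+/* Example of the resulting register values (illustrative address): for
+ * 00:11:22:33:44:55, addr[0] = 0x00 ... addr[5] = 0x55, so
+ * EMAC_MACADDR_HI = 0x5544 and EMAC_MACADDR_LO = 0x33221100,
+ * i.e. the hardware expects the address in little-endian byte order.
+ */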
+
+static void sun8i_emac_set_link_mode(struct sun8i_emac_priv *priv)
+{
+	u32 v;
+
+	v = readl(priv->base + EMAC_BASIC_CTL0);
+
+	if (priv->duplex)
+		v |= EMAC_BCTL0_FD;
+	else
+		v &= ~EMAC_BCTL0_FD;
+
+	v &= ~EMAC_BCTL0_SPEED_MASK;
+
+	switch (priv->speed) {
+	case 1000:
+		break;
+	case 100:
+		v |= EMAC_BCTL0_SPEED_100 << EMAC_BCTL0_SPEED_SHIFT;
+		break;
+	case 10:
+		v |= EMAC_BCTL0_SPEED_10 << EMAC_BCTL0_SPEED_SHIFT;
+		break;
+	default:
+		dev_err(priv->dev, "Unsupported speed %d\n", priv->speed);
+		return;
+	}
+
+	writel(v, priv->base + EMAC_BASIC_CTL0);
+}
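+/* Worked example (illustrative): for a 100 Mbit/s full-duplex link,
+ * the speed/duplex bits of BASIC_CTL0 become (EMAC_BCTL0_SPEED_100 <<
+ * EMAC_BCTL0_SPEED_SHIFT) | EMAC_BCTL0_FD = (3 << 2) | BIT(0) = 0xd.
+ * 1000 Mbit/s is encoded as 0 in the speed field, hence the empty
+ * "case 1000" above.
+ */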
+
+static void sun8i_emac_flow_ctrl(struct sun8i_emac_priv *priv, int duplex,
+				 int fc)
+{
+	u32 flow = 0;
+
+	flow = readl(priv->base + EMAC_RX_CTL0);
+	if (fc & EMAC_FLOW_RX)
+		flow |= BIT(16);
+	else
+		flow &= ~BIT(16);
+	writel(flow, priv->base + EMAC_RX_CTL0);
+
+	flow = readl(priv->base + EMAC_TX_FLOW_CTL);
+	if (fc & EMAC_FLOW_TX)
+		flow |= BIT(0);
+	else
+		flow &= ~BIT(0);
+	writel(flow, priv->base + EMAC_TX_FLOW_CTL);
+}
+
+/* Grab a frame into a skb from descriptor number i */
+static int sun8i_emac_rx_from_ddesc(struct net_device *ndev, int i)
+{
+	struct sk_buff *skb;
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct dma_desc *ddesc = priv->dd_rx + i;
+	int frame_len;
+	int rxcsum_done = 0;
+	u32 dstatus = le32_to_cpu(ddesc->status);
+
+	if (ndev->features & NETIF_F_RXCSUM)
+		rxcsum_done = 1;
+
+	/* bit0/bit7 work only on IPv4/IPv6 TCP traffic
+	 * (not on ARP for example) so we do not raise rx_errors/discard frame
+	 */
+	/* the checksum or length of the received frame's payload is wrong */
+	if (dstatus & BIT(0)) {
+		priv->estats.rx_payload_error++;
+		rxcsum_done = 0;
+	}
+
+	/* RX_CRC_ERR */
+	if (dstatus & BIT(1)) {
+		priv->ndev->stats.rx_errors++;
+		priv->ndev->stats.rx_crc_errors++;
+		priv->estats.rx_crc_error++;
+		goto discard_frame;
+	}
+
+	/* RX_PHY_ERR */
+	if ((dstatus & BIT(3))) {
+		priv->ndev->stats.rx_errors++;
+		priv->estats.rx_phy_error++;
+		goto discard_frame;
+	}
+
+	/* RX_LENGTH_ERR */
+	if ((dstatus & BIT(4))) {
+		priv->ndev->stats.rx_errors++;
+		priv->ndev->stats.rx_length_errors++;
+		priv->estats.rx_length_error++;
+		goto discard_frame;
+	}
+
+	/* RX_COL_ERR */
+	if ((dstatus & BIT(6))) {
+		priv->ndev->stats.rx_errors++;
+		priv->estats.rx_col_error++;
+		goto discard_frame;
+	}
+
+	/* RX_HEADER_ERR */
+	if ((dstatus & BIT(7))) {
+		priv->estats.rx_header_error++;
+		rxcsum_done = 0;
+	}
+
+	/* RX_OVERFLOW_ERR */
+	if ((dstatus & BIT(11))) {
+		priv->ndev->stats.rx_over_errors++;
+		priv->estats.rx_overflow_error++;
+		goto discard_frame;
+	}
+
+	/* RX_NO_ENOUGH_BUF_ERR */
+	if ((dstatus & BIT(14))) {
+		priv->ndev->stats.rx_errors++;
+		priv->estats.rx_buf_error++;
+		goto discard_frame;
+	}
+
+	/* BIT(9) is for the first frame, not having it is bad since we do not
+	 * handle Jumbo frames
+	 */
+	if ((dstatus & EMAC_DSC_RX_FIRST) == 0) {
+		priv->ndev->stats.rx_errors++;
+		priv->estats.rx_invalid_error++;
+		goto discard_frame;
+	}
+
+	/* this frame is not the last */
+	if ((dstatus & EMAC_DSC_RX_LAST) == 0) {
+		priv->ndev->stats.rx_errors++;
+		priv->estats.rx_invalid_error++;
+		goto discard_frame;
+	}
+
+	frame_len = (dstatus >> 16) & 0x3FFF;
+	if (!(ndev->features & NETIF_F_RXFCS))
+		frame_len -= ETH_FCS_LEN;
+
+	skb = priv->rx_skb[i];
+
+	netif_dbg(priv, rx_status, priv->ndev,
+		  "%s from %02d %p len=%d status=%x st=%x\n",
+		  __func__, i, ddesc, frame_len, dstatus,
+		  le32_to_cpu(ddesc->ctl));
+
+	skb_put(skb, frame_len);
+
+	dma_unmap_single(priv->dev, le32_to_cpu(ddesc->buf_addr), DESC_BUF_MAX,
+			 DMA_FROM_DEVICE);
+	skb->protocol = eth_type_trans(skb, priv->ndev);
+	if (rxcsum_done) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		priv->estats.rx_hw_csum++;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+
+	priv->ndev->stats.rx_packets++;
+	priv->ndev->stats.rx_bytes += frame_len;
+	priv->rx_skb[i] = NULL;
+
+	sun8i_emac_rx_skb(ndev, i);
+	napi_gro_receive(&priv->napi, skb);
+
+	return 0;
+	/* If the frame needs to be dropped, we simply reuse the buffer */
+discard_frame:
+	ddesc->ctl = cpu_to_le32(DESC_BUF_MAX);
+	/* EMAC_COULD_BE_USED_BY_DMA must be the last value written */
+	wmb();
+	ddesc->status = EMAC_COULD_BE_USED_BY_DMA;
+	return 0;
+}
+
+/* Iterate over dma_desc for finding completed xmit.
+ *
+ * The problem is: how to know that a descriptor is sent and not just in
+ * preparation.
+ * We would need status == 0 and st set, but this is the state of the first
+ * frame just before setting the owned-by-DMA bit.
+ * The solution is to use the artificial value DCLEAN.
+ */
+static int sun8i_emac_complete_xmit(struct net_device *ndev, int budget)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct dma_desc *ddesc;
+	int frame_len;
+	int work = 0;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
+	u32 dstatus;
+
+	do {
+		ddesc = priv->dd_tx + priv->tx_dirty;
+
+		if (ddesc->status & EMAC_COULD_BE_USED_BY_DMA)
+			goto xmit_end;
+
+		if (ddesc->status == DCLEAN)
+			goto xmit_end;
+
+		dstatus = le32_to_cpu(ddesc->status);
+
+		if (ddesc->status == 0 && !ddesc->ctl) {
+			dev_err(priv->dev, "BUG: reached the void %d %d\n",
+				priv->tx_dirty, priv->tx_slot);
+			goto xmit_end;
+		}
+
+		/* TX_UNDERFLOW_ERR */
+		if (dstatus & BIT(1))
+			priv->ndev->stats.tx_errors++;
+		/* TX_DEFER_ERR */
+		if (dstatus & BIT(2))
+			priv->ndev->stats.tx_errors++;
+		/* BIT 6:3 numbers of collisions */
+		if (dstatus & 0x78)
+			priv->ndev->stats.collisions +=
+				(dstatus & 0x78) >> 3;
+		/* TX_COL_ERR_1 */
+		if (dstatus & BIT(8))
+			priv->ndev->stats.tx_errors++;
+		/* TX_COL_ERR_0 */
+		if (dstatus & BIT(9))
+			priv->ndev->stats.tx_errors++;
+		/* TX_CRS_ERR */
+		if (dstatus & BIT(10))
+			priv->ndev->stats.tx_carrier_errors++;
+		/* TX_PAYLOAD_ERR */
+		if (dstatus & BIT(12))
+			priv->ndev->stats.tx_errors++;
+		/* TX_LENGTH_ERR */
+		if (dstatus & BIT(14))
+			priv->ndev->stats.tx_errors++;
+		/* TX_HEADER_ERR */
+		if (dstatus & BIT(16))
+			priv->ndev->stats.tx_errors++;
+
+		frame_len = le32_to_cpu(ddesc->ctl) & 0x3FFF;
+		bytes_compl += frame_len;
+
+		if (priv->txl[priv->tx_dirty].map == MAP_SINGLE)
+			dma_unmap_single(priv->dev,
+					 le32_to_cpu(ddesc->buf_addr),
+					 frame_len, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(priv->dev,
+				       le32_to_cpu(ddesc->buf_addr),
+				       frame_len, DMA_TO_DEVICE);
+		/* we can free the skb only on the last frame */
+		if (priv->txl[priv->tx_dirty].skb &&
+		    (ddesc->ctl & EMAC_DSC_TX_LAST)) {
+			dev_kfree_skb_irq(priv->txl[priv->tx_dirty].skb);
+			pkts_compl++;
+		}
+
+		priv->txl[priv->tx_dirty].skb = NULL;
+		priv->txl[priv->tx_dirty].map = 0;
+		ddesc->ctl = 0;
+		/* setting status to DCLEAN is the last value to be set */
+		wmb();
+		ddesc->status = DCLEAN;
+		work++;
+
+		rb_inc(&priv->tx_dirty, priv->nbdesc_tx);
+		ddesc = priv->dd_tx + priv->tx_dirty;
+	} while (ddesc->ctl &&
+		 !(ddesc->status & EMAC_COULD_BE_USED_BY_DMA) &&
+		 work < budget);
+
+xmit_end:
+	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+	/* if we have handled all pending packets, do not count the TX
+	 * work against the NAPI budget
+	 */
+	if (work < budget)
+		work = 0;
+
+	if (netif_queue_stopped(ndev) &&
+	    rb_tx_numfreedesc(ndev) > MAX_SKB_FRAGS + 1)
+		netif_wake_queue(ndev);
+	return work;
+}
+
+static int sun8i_emac_poll(struct napi_struct *napi, int budget)
+{
+	struct sun8i_emac_priv *priv =
+		container_of(napi, struct sun8i_emac_priv, napi);
+	struct net_device *ndev = priv->ndev;
+	int worked;
+	struct dma_desc *ddesc;
+
+	priv->estats.napi_schedule++;
+	worked = sun8i_emac_complete_xmit(ndev, budget);
+
+	ddesc = priv->dd_rx + priv->rx_dirty;
+	while (!(ddesc->status & EMAC_COULD_BE_USED_BY_DMA) &&
+	       worked < budget) {
+		sun8i_emac_rx_from_ddesc(ndev, priv->rx_dirty);
+		worked++;
+		rb_inc(&priv->rx_dirty, priv->nbdesc_rx);
+		ddesc = priv->dd_rx + priv->rx_dirty;
+	}
+	if (worked < budget) {
+		priv->estats.napi_underflow++;
+		napi_complete(&priv->napi);
+		writel(EMAC_RX_INT | EMAC_TX_INT, priv->base + EMAC_INT_EN);
+	}
+	return worked;
+}
+
+static int sun8i_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+	struct net_device *ndev = bus->priv;
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	int err;
+	u32 reg;
+
+	err = readl_poll_timeout(priv->base + EMAC_MDIO_CMD, reg,
+				 !(reg & MDIO_CMD_MII_BUSY), 100, 10000);
+	if (err) {
+		dev_err(priv->dev, "%s timeout %x\n", __func__, reg);
+		return err;
+	}
+
+	reg &= ~MDIO_CMD_MII_WRITE;
+	reg &= ~MDIO_CMD_MII_PHY_REG_ADDR_MASK;
+	reg |= (phy_reg << MDIO_CMD_MII_PHY_REG_ADDR_SHIFT) &
+		MDIO_CMD_MII_PHY_REG_ADDR_MASK;
+
+	reg &= ~MDIO_CMD_MII_PHY_ADDR_MASK;
+
+	reg |= (phy_addr << MDIO_CMD_MII_PHY_ADDR_SHIFT) &
+		MDIO_CMD_MII_PHY_ADDR_MASK;
+
+	reg |= MDIO_CMD_MII_BUSY;
+
+	writel(reg, priv->base + EMAC_MDIO_CMD);
+
+	err = readl_poll_timeout(priv->base + EMAC_MDIO_CMD, reg,
+				 !(reg & MDIO_CMD_MII_BUSY), 100, 10000);
+
+	if (err) {
+		dev_err(priv->dev, "%s timeout %x\n", __func__, reg);
+		return err;
+	}
+
+	return readl(priv->base + EMAC_MDIO_DATA);
+}
+
+static int sun8i_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg,
+			    u16 data)
+{
+	struct net_device *ndev = bus->priv;
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	u32 reg;
+	int err;
+
+	err = readl_poll_timeout(priv->base + EMAC_MDIO_CMD, reg,
+				 !(reg & MDIO_CMD_MII_BUSY), 100, 10000);
+	if (err) {
+		dev_err(priv->dev, "%s timeout %x\n", __func__, reg);
+		return err;
+	}
+
+	reg &= ~MDIO_CMD_MII_PHY_REG_ADDR_MASK;
+	reg |= (phy_reg << MDIO_CMD_MII_PHY_REG_ADDR_SHIFT) &
+		MDIO_CMD_MII_PHY_REG_ADDR_MASK;
+
+	reg &= ~MDIO_CMD_MII_PHY_ADDR_MASK;
+	reg |= (phy_addr << MDIO_CMD_MII_PHY_ADDR_SHIFT) &
+		MDIO_CMD_MII_PHY_ADDR_MASK;
+
+	reg |= MDIO_CMD_MII_WRITE;
+	reg |= MDIO_CMD_MII_BUSY;
+
+	writel(reg, priv->base + EMAC_MDIO_CMD);
+	writel(data, priv->base + EMAC_MDIO_DATA);
+
+	err = readl_poll_timeout(priv->base + EMAC_MDIO_CMD, reg,
+				 !(reg & MDIO_CMD_MII_BUSY), 100, 10000);
+	if (err) {
+		dev_err(priv->dev, "%s timeout %x\n", __func__, reg);
+		return err;
+	}
+
+	return 0;
+}
+
+static int sun8i_emac_mdio_register(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct mii_bus *bus;
+	int ret;
+
+	bus = mdiobus_alloc();
+	if (!bus) {
+		netdev_err(ndev, "Failed to allocate a new mdio bus\n");
+		return -ENOMEM;
+	}
+
+	bus->name = dev_name(priv->dev);
+	bus->read = &sun8i_mdio_read;
+	bus->write = &sun8i_mdio_write;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%x", bus->name, priv->dev->id);
+
+	bus->parent = priv->dev;
+	bus->priv = ndev;
+
+	ret = of_mdiobus_register(bus, priv->mdio_node);
+	if (ret) {
+		netdev_err(ndev, "Could not register a MDIO bus: %d\n", ret);
+		mdiobus_free(bus);
+		return ret;
+	}
+
+	priv->mdio = bus;
+
+	return 0;
+}
+
+static void sun8i_emac_mdio_unregister(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	mdiobus_unregister(priv->mdio);
+	mdiobus_free(priv->mdio);
+}
+
+/* Runs within phydev->lock */
+static void sun8i_emac_adjust_link(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+	int new_state = 0;
+
+	if (!phydev)
+		return;
+
+	netif_dbg(priv, link, priv->ndev,
+		  "%s link=%x duplex=%x speed=%x\n", __func__,
+		  phydev->link, phydev->duplex, phydev->speed);
+
+	if (phydev->link) {
+		if (phydev->duplex != priv->duplex) {
+			new_state = 1;
+			priv->duplex = phydev->duplex;
+		}
+		if (phydev->pause)
+			sun8i_emac_flow_ctrl(priv, phydev->duplex,
+					     priv->flow_ctrl);
+
+		if (phydev->speed != priv->speed) {
+			new_state = 1;
+			priv->speed = phydev->speed;
+		}
+
+		if (priv->link == 0) {
+			new_state = 1;
+			priv->link = phydev->link;
+		}
+
+		netif_dbg(priv, link, priv->ndev,
+			  "%s new=%d link=%d pause=%d\n",
+			  __func__, new_state, priv->link, phydev->pause);
+		if (new_state)
+			sun8i_emac_set_link_mode(priv);
+	} else if (priv->link != phydev->link) {
+		new_state = 1;
+		priv->link = 0;
+		priv->speed = 0;
+		priv->duplex = -1;
+	}
+
+	if (new_state)
+		phy_print_status(phydev);
+}
+
+/* H3 specific bits for EPHY */
+#define H3_EPHY_ADDR_SHIFT	20
+#define H3_EPHY_LED_POL		BIT(17) /* 1: active low, 0: active high */
+#define H3_EPHY_SHUTDOWN	BIT(16) /* 1: shutdown, 0: power up */
+#define H3_EPHY_SELECT		BIT(15) /* 1: internal PHY, 0: external PHY */
+
+/* H3/A64 specific bits */
+#define SYSCON_RMII_EN		BIT(13) /* 1: enable RMII (overrides EPIT) */
+
+/* Generic system control EMAC_CLK bits */
+#define SYSCON_ETXDC_MASK		GENMASK(2, 0)
+#define SYSCON_ETXDC_SHIFT		10
+#define SYSCON_ERXDC_MASK		GENMASK(4, 0)
+#define SYSCON_ERXDC_SHIFT		5
+/* EMAC PHY Interface Type */
+#define SYSCON_EPIT			BIT(2) /* 1: RGMII, 0: MII */
+#define SYSCON_ETCS_MASK		GENMASK(1, 0)
+#define SYSCON_ETCS_MII		0x0
+#define SYSCON_ETCS_EXT_GMII	0x1
+#define SYSCON_ETCS_INT_GMII	0x2
+#define SYSCON_EMAC_REG		0x30
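+/* Worked example (illustrative values): on H3, for an external RGMII
+ * PHY with "allwinner,tx-delay" = 2, sun8i_emac_set_syscon() starts
+ * from the default 0x58000, clears H3_EPHY_SELECT to use the external
+ * PHY, then sets SYSCON_EPIT | SYSCON_ETCS_INT_GMII and
+ * (2 << SYSCON_ETXDC_SHIFT), i.e. the TX delay lands in bits 12:10.
+ */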
+
+static int sun8i_emac_set_syscon(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct device_node *node = priv->dev->of_node;
+	int ret;
+	u32 reg, val;
+
+	reg = priv->variant->default_syscon_value;
+
+	if (priv->variant->internal_phy) {
+		if (!priv->use_internal_phy) {
+			/* switch to external PHY interface */
+			reg &= ~H3_EPHY_SELECT;
+		} else {
+			reg |= H3_EPHY_SELECT;
+			reg &= ~H3_EPHY_SHUTDOWN;
+
+			if (of_property_read_bool(priv->phy_node,
+						  "allwinner,leds-active-low"))
+				reg |= H3_EPHY_LED_POL;
+
+			ret = of_mdio_parse_addr(priv->dev, priv->phy_node);
+			if (ret < 0) {
+				netdev_err(ndev, "Could not parse MDIO addr\n");
+				return ret;
+			}
+			/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
+			 * address. No need to mask it again.
+			 */
+			reg |= ret << H3_EPHY_ADDR_SHIFT;
+		}
+	}
+
+	if (!of_property_read_u32(node, "allwinner,tx-delay", &val)) {
+		if (val <= SYSCON_ETXDC_MASK) {
+			reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT);
+			reg |= (val << SYSCON_ETXDC_SHIFT);
+		} else {
+			netdev_warn(ndev, "Invalid TX clock delay: %d\n", val);
+		}
+	}
+
+	if (!of_property_read_u32(node, "allwinner,rx-delay", &val)) {
+		if (val <= SYSCON_ERXDC_MASK) {
+			reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT);
+			reg |= (val << SYSCON_ERXDC_SHIFT);
+		} else {
+			netdev_warn(ndev, "Invalid RX clock delay: %d\n", val);
+		}
+	}
+
+	/* Clear interface mode bits */
+	reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
+	if (priv->variant->support_rmii)
+		reg &= ~SYSCON_RMII_EN;
+
+	switch (priv->phy_interface) {
+	case PHY_INTERFACE_MODE_MII:
+		/* default */
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+		reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
+		break;
+	case PHY_INTERFACE_MODE_RMII:
+		reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
+		break;
+	default:
+		netdev_err(ndev, "Unsupported interface mode: %s\n",
+			   phy_modes(priv->phy_interface));
+		return -EINVAL;
+	}
+
+	regmap_write(priv->regmap, SYSCON_EMAC_REG, reg);
+
+	return 0;
+}
+
+static void sun8i_emac_unset_syscon(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	u32 reg = priv->variant->default_syscon_value;
+
+	regmap_write(priv->regmap, SYSCON_EMAC_REG, reg);
+}
+
+/* Set the Management Data Clock, must be called after device reset */
+static void sun8i_emac_set_mdc(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	unsigned long rate;
+	u32 reg;
+
+	rate = clk_get_rate(priv->ahb_clk);
+	if (rate > 160000000)
+		reg = 0x3 << 20; /* AHB / 128 */
+	else if (rate > 80000000)
+		reg = 0x2 << 20; /* AHB / 64 */
+	else if (rate > 40000000)
+		reg = 0x1 << 20; /* AHB / 32 */
+	else
+		reg = 0x0 << 20; /* AHB / 16 */
+	netif_dbg(priv, link, ndev, "MDC auto : %x\n", reg);
+	writel(reg, priv->base + EMAC_MDIO_CMD);
+}
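+/* Example (illustrative clock rate): with a 200 MHz AHB clock,
+ * rate > 160000000 selects 0x3 << 20, i.e. MDC = AHB / 128, which is
+ * about 1.56 MHz and thus below the usual 2.5 MHz MDIO limit.
+ */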
+
+/* "Power" the device, by enabling clocks/resets/regulators */
+static int sun8i_emac_power(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	ret = clk_prepare_enable(priv->ahb_clk);
+	if (ret) {
+		netdev_err(ndev, "Could not enable AHB clock\n");
+		return ret;
+	}
+
+	if (priv->rst_mac) {
+		ret = reset_control_deassert(priv->rst_mac);
+		if (ret) {
+			netdev_err(ndev, "Could not deassert reset\n");
+			goto err_reset;
+		}
+	}
+
+	if (priv->ephy_clk) {
+		ret = clk_prepare_enable(priv->ephy_clk);
+		if (ret) {
+			netdev_err(ndev, "Could not enable EPHY clock\n");
+			goto err_ephy_clk;
+		}
+	}
+
+	if (priv->rst_ephy) {
+		ret = reset_control_deassert(priv->rst_ephy);
+		if (ret) {
+			netdev_err(ndev, "Could not deassert EPHY reset\n");
+			goto err_ephy_reset;
+		}
+	}
+
+	return 0;
+
+err_ephy_reset:
+	if (priv->ephy_clk)
+		clk_disable_unprepare(priv->ephy_clk);
+err_ephy_clk:
+	if (priv->rst_mac)
+		reset_control_assert(priv->rst_mac);
+err_reset:
+	clk_disable_unprepare(priv->ahb_clk);
+	return ret;
+}
+
+/* "Unpower" the device, disabling clocks and regulators, asserting resets */
+static void sun8i_emac_unpower(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	if (priv->rst_ephy)
+		reset_control_assert(priv->rst_ephy);
+
+	if (priv->ephy_clk)
+		clk_disable_unprepare(priv->ephy_clk);
+
+	if (priv->rst_mac)
+		reset_control_assert(priv->rst_mac);
+
+	clk_disable_unprepare(priv->ahb_clk);
+}
+
+static int sun8i_emac_init(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct device_node *node = priv->dev->of_node;
+	const u8 *addr;
+
+	/* Try to get the MAC address from DT, or assign a random one */
+	addr = of_get_mac_address(node);
+	if (addr)
+		ether_addr_copy(ndev->dev_addr, addr);
+	else
+		eth_hw_addr_random(ndev);
+
+	return 0;
+}
+
+static int sun8i_emac_mdio_probe(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct phy_device *phydev = NULL;
+
+	phydev = of_phy_connect(ndev, priv->phy_node, &sun8i_emac_adjust_link,
+				0, priv->phy_interface);
+
+	if (!phydev) {
+		netdev_err(ndev, "Could not attach to PHY\n");
+		return -ENODEV;
+	}
+
+	phy_attached_info(phydev);
+
+	/* mask with MAC supported features */
+	phydev->supported &= PHY_GBIT_FEATURES;
+	phydev->advertising = phydev->supported;
+
+	priv->link = 0;
+	priv->speed = 0;
+	priv->duplex = -1;
+
+	return 0;
+}
+
+/* Allocate both RX and TX ring buffers and init them.
+ * This function also writes the start address of those rings to the device.
+ * All structures that help manage those rings are also handled
+ * by this function (rx_skb/txl)
+ */
+static int sun8i_emac_alloc_rings(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct dma_desc *ddesc;
+	int err, i;
+
+	priv->rx_skb = kcalloc(priv->nbdesc_rx, sizeof(struct sk_buff *),
+			      GFP_KERNEL);
+	if (!priv->rx_skb) {
+		err = -ENOMEM;
+		goto rx_skb_error;
+	}
+	priv->txl = kcalloc(priv->nbdesc_tx, sizeof(struct txinfo), GFP_KERNEL);
+	if (!priv->txl) {
+		err = -ENOMEM;
+		goto tx_error;
+	}
+
+	/* allocate/init RX ring */
+	priv->dd_rx = dma_zalloc_coherent(priv->dev,
+			priv->nbdesc_rx * sizeof(struct dma_desc),
+			&priv->dd_rx_phy, GFP_KERNEL);
+	if (!priv->dd_rx) {
+		dev_err(priv->dev, "ERROR: cannot allocate DMA RX buffer");
+		err = -ENOMEM;
+		goto dma_rx_error;
+	}
+	ddesc = priv->dd_rx;
+	for (i = 0; i < priv->nbdesc_rx; i++) {
+		sun8i_emac_rx_skb(ndev, i);
+		ddesc->next = cpu_to_le32(priv->dd_rx_phy + (i + 1)
+			* sizeof(struct dma_desc));
+		ddesc++;
+	}
+	/* the last descriptor points back to the first one */
+	ddesc--;
+	ddesc->next = cpu_to_le32(priv->dd_rx_phy);
+
+	/* allocate/init TX ring */
+	priv->dd_tx = dma_zalloc_coherent(priv->dev,
+			priv->nbdesc_tx * sizeof(struct dma_desc),
+			&priv->dd_tx_phy, GFP_KERNEL);
+	if (!priv->dd_tx) {
+		dev_err(priv->dev, "ERROR: cannot allocate DMA TX buffer");
+		err = -ENOMEM;
+		goto dma_tx_error;
+	}
+	ddesc = priv->dd_tx;
+	for (i = 0; i < priv->nbdesc_tx; i++) {
+		ddesc->status = DCLEAN;
+		ddesc->ctl = 0;
+		ddesc->next = cpu_to_le32(priv->dd_tx_phy + (i + 1)
+			* sizeof(struct dma_desc));
+		ddesc++;
+	}
+	/* the last descriptor points back to the first one */
+	ddesc--;
+	ddesc->next = cpu_to_le32(priv->dd_tx_phy);
+
+	priv->tx_slot = 0;
+	priv->tx_dirty = 0;
+	priv->rx_dirty = 0;
+
+	/* write start of RX ring descriptor */
+	writel(priv->dd_rx_phy, priv->base + EMAC_RX_DESC_LIST);
+	/* write start of TX ring descriptor */
+	writel(priv->dd_tx_phy, priv->base + EMAC_TX_DESC_LIST);
+
+	return 0;
+dma_tx_error:
+	dma_free_coherent(priv->dev, priv->nbdesc_rx * sizeof(struct dma_desc),
+			  priv->dd_rx, priv->dd_rx_phy);
+dma_rx_error:
+	kfree(priv->txl);
+tx_error:
+	kfree(priv->rx_skb);
+rx_skb_error:
+	return err;
+}
+
+static int sun8i_emac_open(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	int err;
+	u32 v;
+
+	err = sun8i_emac_power(ndev);
+	if (err)
+		return err;
+
+	err = request_irq(priv->irq, sun8i_emac_dma_interrupt, 0,
+			  dev_name(priv->dev), ndev);
+	if (err) {
+		dev_err(priv->dev, "Cannot request IRQ: %d\n", err);
+		goto err_power;
+	}
+
+	/* Set interface mode (and configure internal PHY on H3) */
+	err = sun8i_emac_set_syscon(ndev);
+	if (err)
+		goto err_irq;
+
+	/* Do SOFT RST */
+	v = readl(priv->base + EMAC_BASIC_CTL1);
+	writel(v | 0x01, priv->base + EMAC_BASIC_CTL1);
+
+	err = readl_poll_timeout(priv->base + EMAC_BASIC_CTL1, v,
+				 !(v & 0x01), 100, 10000);
+	if (err) {
+		dev_err(priv->dev, "EMAC reset timeout\n");
+		err = -EFAULT;
+		goto err_syscon;
+	}
+
+	sun8i_emac_set_mdc(ndev);
+
+	err = sun8i_emac_mdio_register(ndev);
+	if (err)
+		goto err_syscon;
+
+	err = sun8i_emac_mdio_probe(ndev);
+	if (err)
+		goto err_syscon;
+
+	/* DMA */
+	v = (8 << 24); /* burst len */
+	writel(v, priv->base + EMAC_BASIC_CTL1);
+
+	writel(EMAC_RX_INT | EMAC_TX_INT, priv->base + EMAC_INT_EN);
+
+	v = readl(priv->base + EMAC_RX_CTL0);
+	/* CHECK_CRC */
+	if (ndev->features & NETIF_F_RXCSUM)
+		v |= EMAC_RX_DO_CRC;
+	else
+		v &= ~EMAC_RX_DO_CRC;
+	/* STRIP_FCS */
+	if (ndev->features & NETIF_F_RXFCS)
+		v &= ~EMAC_RX_STRIP_FCS;
+	else
+		v |= EMAC_RX_STRIP_FCS;
+	writel(v, priv->base + EMAC_RX_CTL0);
+
+	v = readl(priv->base + EMAC_TX_CTL1);
+	/* TX_MD: transmission starts after a full frame is in TX DMA FIFO */
+	v |= BIT(1);
+	/* Undocumented bit (called TX_NEXT_FRM in BSP), the original comment is
+	 * "Operating on second frame increase the performance
+	 * especially when transmit store-and-forward is used."
+	 */
+	v |= BIT(2);
+	writel(v, priv->base + EMAC_TX_CTL1);
+
+	v = readl(priv->base + EMAC_RX_CTL1);
+	/* RX_MD: RX DMA reads data from RX DMA FIFO to host memory after a
+	 * complete frame has been written to RX DMA FIFO
+	 */
+	v |= BIT(1);
+	writel(v, priv->base + EMAC_RX_CTL1);
+
+	sun8i_emac_set_macaddr(priv, ndev->dev_addr, 0);
+
+	err = sun8i_emac_alloc_rings(ndev);
+	if (err) {
+		netdev_err(ndev, "Failed to allocate rings\n");
+		goto err_mdio;
+	}
+
+	phy_start(ndev->phydev);
+
+	sun8i_emac_start_rx(ndev);
+	sun8i_emac_start_tx(ndev);
+
+	netif_napi_add(ndev, &priv->napi, sun8i_emac_poll, 64);
+	napi_enable(&priv->napi);
+	netif_start_queue(ndev);
+
+	return 0;
+err_mdio:
+	phy_disconnect(ndev->phydev);
+err_syscon:
+	sun8i_emac_unset_syscon(ndev);
+err_irq:
+	free_irq(priv->irq, ndev);
+err_power:
+	sun8i_emac_unpower(ndev);
+	return err;
+}
+
+/* Clean the TX ring of any accepted skb for xmit */
+static void sun8i_emac_tx_clean(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	int i;
+	struct dma_desc *ddesc;
+	int frame_len;
+
+	for (i = 0; i < priv->nbdesc_tx; i++) {
+		if (priv->txl[i].skb) {
+			ddesc = priv->dd_tx + i;
+			frame_len = le32_to_cpu(ddesc->ctl) & 0x3FFF;
+			switch (priv->txl[i].map) {
+			case MAP_SINGLE:
+				dma_unmap_single(priv->dev,
+						 le32_to_cpu(ddesc->buf_addr),
+						 frame_len, DMA_TO_DEVICE);
+				break;
+			case MAP_PAGE:
+				dma_unmap_page(priv->dev,
+					       le32_to_cpu(ddesc->buf_addr),
+					       frame_len, DMA_TO_DEVICE);
+				break;
+			default:
+				dev_err(priv->dev, "Trying to free an empty slot\n");
+				continue;
+			}
+			dev_kfree_skb_any(priv->txl[i].skb);
+			priv->txl[i].skb = NULL;
+			ddesc->ctl = 0;
+			ddesc->status = DCLEAN;
+		}
+	}
+	priv->tx_slot = 0;
+	priv->tx_dirty = 0;
+}
+
+/* Clean the RX ring */
+static void sun8i_emac_rx_clean(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	int i;
+	struct dma_desc *ddesc;
+
+	/* clean RX ring */
+	for (i = 0; i < priv->nbdesc_rx; i++)
+		if (priv->rx_skb[i]) {
+			ddesc = priv->dd_rx + i;
+			dma_unmap_single(priv->dev,
+					 le32_to_cpu(ddesc->buf_addr),
+					 DESC_BUF_MAX, DMA_FROM_DEVICE);
+			dev_kfree_skb_any(priv->rx_skb[i]);
+			priv->rx_skb[i] = NULL;
+		}
+}
+
+static int sun8i_emac_stop(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	napi_disable(&priv->napi);
+
+	sun8i_emac_stop_tx(ndev);
+	sun8i_emac_stop_rx(ndev);
+
+	phy_stop(ndev->phydev);
+	phy_disconnect(ndev->phydev);
+
+	sun8i_emac_mdio_unregister(ndev);
+
+	sun8i_emac_unset_syscon(ndev);
+
+	free_irq(priv->irq, ndev);
+
+	sun8i_emac_rx_clean(ndev);
+	sun8i_emac_tx_clean(ndev);
+
+	kfree(priv->rx_skb);
+	kfree(priv->txl);
+
+	dma_free_coherent(priv->dev, priv->nbdesc_rx * sizeof(struct dma_desc),
+			  priv->dd_rx, priv->dd_rx_phy);
+	dma_free_coherent(priv->dev, priv->nbdesc_tx * sizeof(struct dma_desc),
+			  priv->dd_tx, priv->dd_tx_phy);
+
+	sun8i_emac_unpower(ndev);
+
+	return 0;
+}
+
+static netdev_tx_t sun8i_emac_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct dma_desc *ddesc;
+	struct dma_desc *first;
+	int i = 0, rbd_first;
+	unsigned int len, fraglen, tlen;
+	u32 v;
+	int n;
+	int nf;
+	const skb_frag_t *frag;
+	int do_csum = 0;
+
+	if (skb_put_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+	len = skb_headlen(skb);
+
+	n = skb_shinfo(skb)->nr_frags;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		do_csum = 1;
+		priv->estats.tx_hw_csum++;
+	}
+	netif_dbg(priv, tx_queued, ndev, "%s len=%u skblen=%u %x\n", __func__,
+		  len, skb->len,
+		  (skb->ip_summed == CHECKSUM_PARTIAL));
+
+	/* check for contiguous space
+	 * We need at least 1 (skb->data) + n (numfrags) + 1 (one clean slot)
+	 */
+	if (rb_tx_numfreedesc(ndev) < n + 2) {
+		dev_err_ratelimited(priv->dev, "BUG!: TX is full %d %d\n",
+				    priv->tx_dirty, priv->tx_slot);
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+	i = priv->tx_slot;
+
+	ddesc = priv->dd_tx + i;
+	first = priv->dd_tx + i;
+	rbd_first = i;
+
+	ddesc->buf_addr = dma_map_single(priv->dev, skb->data, len,
+					 DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, ddesc->buf_addr)) {
+		dev_err(priv->dev, "ERROR: Cannot map buffer for DMA\n");
+		goto xmit_error;
+	}
+	/* We cannot directly use cpu_to_le32() after dma_map_single()
+	 * since dma_mapping_error() uses it
+	 */
+	ddesc->buf_addr = cpu_to_le32(ddesc->buf_addr);
+	priv->txl[i].map = MAP_SINGLE;
+	priv->txl[i].skb = skb;
+
+	tlen = len;
+	ddesc->ctl = cpu_to_le32(len);
+	/* Undocumented bit that makes it work
+	 * Without it, packets are never sent on the H3 SoC
+	 */
+	ddesc->ctl |= EMAC_MAGIC_TX_BIT;
+	if (do_csum)
+		ddesc->ctl |= EMAC_TX_DO_CRC;
+
+	/* handle fragmented skb, one descriptor per fragment */
+	for (nf = 0; nf < n; nf++) {
+		frag = &skb_shinfo(skb)->frags[nf];
+		rb_inc(&i, priv->nbdesc_tx);
+		priv->txl[i].skb = skb;
+		ddesc = priv->dd_tx + i;
+		fraglen = skb_frag_size(frag);
+		ddesc->ctl = cpu_to_le32(fraglen);
+		tlen += fraglen;
+		ddesc->ctl |= EMAC_MAGIC_TX_BIT;
+		if (do_csum)
+			ddesc->ctl |= EMAC_TX_DO_CRC;
+
+		ddesc->buf_addr = skb_frag_dma_map(priv->dev, frag, 0,
+				fraglen, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, ddesc->buf_addr)) {
+			dev_err(priv->dev, "Cannot map buffer for DMA\n");
+			goto xmit_error;
+		}
+		/* Cannot directly use cpu_to_le32() after skb_frag_dma_map()
+		 * since dma_mapping_error() uses it
+		 */
+		ddesc->buf_addr = cpu_to_le32(ddesc->buf_addr);
+		priv->txl[i].map = MAP_PAGE;
+		ddesc->status = EMAC_COULD_BE_USED_BY_DMA;
+	}
+
+	/* frame end */
+	ddesc->ctl |= EMAC_DSC_TX_LAST;
+	/* We want an interrupt after transmission */
+	ddesc->ctl |= EMAC_WANT_INT;
+
+	rb_inc(&i, priv->nbdesc_tx);
+
+	/* This line was previously after the DMA start, but with that we hit
+	 * a small race with complete_xmit() where we completed more data than
+	 * was sent: the packet could be sent just after the
+	 * EMAC_COULD_BE_USED_BY_DMA flag was set, and complete_xmit() could
+	 * fire just before netdev_sent_queue().
+	 * This race was observed only when overflowing a gigabit link.
+	 */
+	netdev_sent_queue(ndev, skb->len);
+
+	/* frame begin */
+	first->ctl |= EMAC_DSC_TX_FIRST;
+	wmb(); /* EMAC_COULD_BE_USED_BY_DMA must be the last value written */
+	first->status = EMAC_COULD_BE_USED_BY_DMA;
+	priv->tx_slot = i;
+
+	/* Trying to optimize this (recording DMA start/stop) seems
+	 * to lead to errors. So we always start DMA.
+	 */
+	v = readl(priv->base + EMAC_TX_CTL1);
+	v |= EMAC_TX_DMA_START;
+	v |= EMAC_TX_DMA_EN;
+	writel_relaxed(v, priv->base + EMAC_TX_CTL1);
+
+	if (rb_tx_numfreedesc(ndev) < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(ndev);
+		priv->estats.tx_stop_queue++;
+	}
+	priv->estats.tx_used_desc = rb_tx_numfreedesc(ndev);
+	priv->ndev->stats.tx_packets++;
+	priv->ndev->stats.tx_bytes += tlen;
+
+	return NETDEV_TX_OK;
+
+xmit_error:
+	/* Destroy the skb and return NETDEV_TX_OK
+	 * (see Documentation/DMA-API-HOWTO.txt)
+	 */
+	/* clean descriptors from rbd_first to i */
+	ddesc->ctl = 0;
+	/* setting status to DCLEAN is the last value to be set */
+	wmb();
+	ddesc->status = DCLEAN;
+	do {
+		ddesc = priv->dd_tx + rbd_first;
+		ddesc->ctl = 0;
+		/* setting status to DCLEAN is the last value to be set */
+		wmb();
+		ddesc->status = DCLEAN;
+		rb_inc(&rbd_first, priv->nbdesc_tx);
+	} while (rbd_first != i);
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
c0853e1
+
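+/* The MTU upper bound used below comes from the RX scheme: each received
+ * frame must fit in one linearly-allocated skb, hence the use of
+ * SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN) as the limit.
+ */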
+static int sun8i_emac_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	int max_mtu;
+
+	dev_info(priv->dev, "%s set MTU to %d\n", __func__, new_mtu);
+
+	if (netif_running(ndev)) {
+		dev_err(priv->dev, "%s: must be stopped to change its MTU\n",
+			ndev->name);
+		return -EBUSY;
+	}
+
+	max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+
+	/* 68 is the minimum IPv4 MTU required by RFC 791 */
+	if (new_mtu < 68 || new_mtu > max_mtu) {
+		dev_err(priv->dev, "%s: invalid MTU, max MTU is: %d\n",
+			ndev->name, max_mtu);
+		return -EINVAL;
+	}
+
+	ndev->mtu = new_mtu;
+	netdev_update_features(ndev);
+	return 0;
+}
+
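+/* Feature toggles map directly onto EMAC registers: NETIF_F_LOOPBACK sets
+ * the loopback bit in EMAC_BASIC_CTL0, while NETIF_F_RXCSUM and
+ * NETIF_F_RXFCS control RX checksumming and FCS stripping via EMAC_RX_CTL0.
+ * They can be flipped at runtime, e.g. "ethtool -K eth0 rx off".
+ */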
+static int sun8i_emac_set_features(struct net_device *ndev,
+				   netdev_features_t features)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	u32 v;
+
+	v = readl(priv->base + EMAC_BASIC_CTL0);
+	if (features & NETIF_F_LOOPBACK && netif_running(ndev)) {
+		netif_info(priv, hw, ndev, "Set loopback features\n");
+		v |= BIT(1);
+	} else {
+		netif_info(priv, hw, ndev, "Unset loopback features\n");
+		v &= ~BIT(1);
+	}
+	writel(v, priv->base + EMAC_BASIC_CTL0);
+
+	v = readl(priv->base + EMAC_RX_CTL0);
+	if (features & NETIF_F_RXCSUM) {
+		v |= EMAC_RX_DO_CRC;
+		netif_info(priv, hw, ndev, "Doing RX CRC check by hardware\n");
+	} else {
+		v &= ~EMAC_RX_DO_CRC;
+		netif_info(priv, hw, ndev, "No RX CRC check by hardware\n");
+	}
+	if (features & NETIF_F_RXFCS) {
+		v &= ~EMAC_RX_STRIP_FCS;
+		netif_info(priv, hw, ndev, "Keep FCS\n");
+	} else {
+		v |= EMAC_RX_STRIP_FCS;
+		netif_info(priv, hw, ndev, "Strip FCS\n");
+	}
+	writel(v, priv->base + EMAC_RX_CTL0);
+
+	netif_dbg(priv, drv, ndev, "%s %llx %x\n", __func__, features, v);
+
+	return 0;
+}
+
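+/* Program the EMAC_RX_FRM_FLT frame filter. As used below: BIT(16)
+ * receives all multicast frames, BIT(13) receives control frames and
+ * BIT(1) enables promiscuous mode.
+ */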
+static void sun8i_emac_set_rx_mode(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	u32 v = 0;
+	int i = 0;
+	struct netdev_hw_addr *ha;
+
+	/* Receive all multicast frames */
+	v |= BIT(16);
+	/* Receive all control frames */
+	v |= BIT(13);
+	if (ndev->flags & IFF_PROMISC)
+		v |= BIT(1);
+	if (netdev_uc_count(ndev) > 7) {
+		/* More unicast addresses than MAC filter slots: fall back
+		 * to promiscuous mode
+		 */
+		v |= BIT(1);
+	} else {
+		netdev_for_each_uc_addr(ha, ndev) {
+			i++;
+			sun8i_emac_set_macaddr(priv, ha->addr, i);
+		}
+	}
+	writel(v, priv->base + EMAC_RX_FRM_FLT);
+}
+
+static void sun8i_emac_tx_timeout(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	netdev_err(ndev, "%s\n", __func__);
+
+	/* Stop the TX DMA, reclaim the whole ring and restart from a
+	 * clean state
+	 */
+	sun8i_emac_stop_tx(ndev);
+
+	sun8i_emac_tx_clean(ndev);
+
+	/* write start of the new TX ring descriptor */
+	writel(priv->dd_tx_phy, priv->base + EMAC_TX_DESC_LIST);
+
+	sun8i_emac_start_tx(ndev);
+
+	netdev_reset_queue(ndev);
+
+	priv->estats.tx_timeout++;
+	ndev->stats.tx_errors++;
+	netif_wake_queue(ndev);
+}
+
+static int sun8i_emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(phydev, rq, cmd);
+}
+
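+/* Wired up as the ethtool_ops .begin hook below: the ethtool core invokes
+ * .begin before each operation, so every handler that follows is rejected
+ * with -EINVAL while the interface is down.
+ */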
+static int sun8i_emac_check_if_running(struct net_device *ndev)
+{
+	if (!netif_running(ndev))
+		return -EINVAL;
+	return 0;
+}
+
+static int sun8i_emac_get_sset_count(struct net_device *ndev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(estats_str);
+	}
+	return -EOPNOTSUPP;
+}
+
+static int sun8i_emac_ethtool_get_settings(struct net_device *ndev,
+					   struct ethtool_cmd *cmd)
+{
+	struct phy_device *phy = ndev->phydev;
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	if (!phy) {
+		netdev_err(ndev, "%s: %s: PHY is not registered\n",
+			   __func__, ndev->name);
+		return -ENODEV;
+	}
+
+	if (!netif_running(ndev)) {
+		dev_err(priv->dev, "interface disabled: cannot report link speed / duplex setting\n");
+		return -EBUSY;
+	}
+
+	return phy_ethtool_gset(phy, cmd);
+}
+
+static int sun8i_emac_ethtool_set_settings(struct net_device *ndev,
+					   struct ethtool_cmd *cmd)
+{
+	struct phy_device *phy = ndev->phydev;
+
+	if (!phy)
+		return -ENODEV;
+
+	return phy_ethtool_sset(phy, cmd);
+}
+
+static void sun8i_emac_ethtool_getdrvinfo(struct net_device *ndev,
+					  struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, "sun8i_emac", sizeof(info->driver));
+	strlcpy(info->version, "00", sizeof(info->version));
+	info->fw_version[0] = '\0';
+}
+
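+/* Extra statistics, exposed via "ethtool -S <iface>". The memcpy() below
+ * relies on the u64 counters in priv->estats being laid out in exactly
+ * the same order as the names in estats_str[].
+ */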
+static void sun8i_emac_ethtool_stats(struct net_device *ndev,
+				     struct ethtool_stats *dummy, u64 *data)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	memcpy(data, &priv->estats,
+	       sun8i_emac_get_sset_count(ndev, ETH_SS_STATS) * sizeof(u64));
+}
+
+static void sun8i_emac_ethtool_strings(struct net_device *ndev, u32 stringset,
+				       u8 *buffer)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buffer, &estats_str, sizeof(estats_str));
+		break;
+	}
+}
+
+static u32 sun8i_emac_ethtool_getmsglevel(struct net_device *ndev)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+static void sun8i_emac_ethtool_setmsglevel(struct net_device *ndev, u32 level)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	priv->msg_enable = level;
+}
+
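+/* Flow control (pause frame) configuration, reachable through
+ * "ethtool -A <iface> rx on|off tx on|off autoneg on|off".
+ */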
+static void sun8i_emac_get_pauseparam(struct net_device *ndev,
+				      struct ethtool_pauseparam *pause)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	pause->rx_pause = 0;
+	pause->tx_pause = 0;
+	pause->autoneg = ndev->phydev->autoneg;
+
+	if (priv->flow_ctrl & EMAC_FLOW_RX)
+		pause->rx_pause = 1;
+	if (priv->flow_ctrl & EMAC_FLOW_TX)
+		pause->tx_pause = 1;
+}
+
+static int sun8i_emac_set_pauseparam(struct net_device *ndev,
+				     struct ethtool_pauseparam *pause)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	struct phy_device *phy = ndev->phydev;
+	int new_pause = 0;
+	int ret = 0;
+
+	if (pause->rx_pause)
+		new_pause |= EMAC_FLOW_RX;
+	if (pause->tx_pause)
+		new_pause |= EMAC_FLOW_TX;
+
+	priv->flow_ctrl = new_pause;
+	phy->autoneg = pause->autoneg;
+
+	if (phy->autoneg) {
+		if (netif_running(ndev))
+			ret = phy_start_aneg(phy);
+	} else {
+		sun8i_emac_flow_ctrl(priv, phy->duplex, priv->flow_ctrl);
+	}
+	return ret;
+}
+
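+/* Ring sizes can be changed at runtime with "ethtool -G <iface> rx N tx N";
+ * this stops the DMA, frees both rings and reallocates them at the new
+ * size before restarting.
+ */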
+static void sun8i_emac_ethtool_get_ringparam(struct net_device *ndev,
+					     struct ethtool_ringparam *ring)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+
+	ring->rx_pending = priv->nbdesc_rx;
+	ring->tx_pending = priv->nbdesc_tx;
+}
+
+static int sun8i_emac_ethtool_set_ringparam(struct net_device *ndev,
+					    struct ethtool_ringparam *ring)
+{
+	struct sun8i_emac_priv *priv = netdev_priv(ndev);
+	int err;
+
+	if (ring->rx_max_pending || ring->rx_mini_max_pending ||
+	    ring->rx_jumbo_max_pending || ring->rx_mini_pending ||
+	    ring->rx_jumbo_pending || ring->tx_max_pending)
+		return -EINVAL;
+
+	if (ring->tx_pending < MAX_SKB_FRAGS + 1) {
+		netdev_err(ndev, "The number of TX descriptors is too low\n");
+		return -EINVAL;
+	}
+
+	sun8i_emac_stop_tx(ndev);
+	sun8i_emac_stop_rx(ndev);
+
+	sun8i_emac_rx_clean(ndev);
+	sun8i_emac_tx_clean(ndev);
+
+	kfree(priv->rx_skb);
+	kfree(priv->txl);
+
+	dma_free_coherent(priv->dev, priv->nbdesc_rx * sizeof(struct dma_desc),
+			  priv->dd_rx, priv->dd_rx_phy);
+	dma_free_coherent(priv->dev, priv->nbdesc_tx * sizeof(struct dma_desc),
+			  priv->dd_tx, priv->dd_tx_phy);
+
+	priv->nbdesc_rx = ring->rx_pending;
+	priv->nbdesc_tx = ring->tx_pending;
+	err = sun8i_emac_alloc_rings(ndev);
+	if (err) {
+		/* Fatal error: the old rings are gone, we cannot restart */
+		netdev_err(ndev, "Failed to allocate rings\n");
+		return -ENOMEM;
+	}
+
+	sun8i_emac_start_rx(ndev);
+	sun8i_emac_start_tx(ndev);
+
+	netif_start_queue(ndev);
+
+	netdev_info(ndev, "Ring Param settings: rx: %d, tx %d\n",
+		    ring->rx_pending, ring->tx_pending);
+	return 0;
+}
+
+static const struct ethtool_ops sun8i_emac_ethtool_ops = {
+	.begin = sun8i_emac_check_if_running,
+	.get_settings = sun8i_emac_ethtool_get_settings,
+	.set_settings = sun8i_emac_ethtool_set_settings,
+	.get_link = ethtool_op_get_link,
+	.get_pauseparam = sun8i_emac_get_pauseparam,
+	.set_pauseparam = sun8i_emac_set_pauseparam,
+	.get_ethtool_stats = sun8i_emac_ethtool_stats,
+	.get_strings = sun8i_emac_ethtool_strings,
+	.get_sset_count = sun8i_emac_get_sset_count,
+	.get_drvinfo = sun8i_emac_ethtool_getdrvinfo,
+	.get_msglevel = sun8i_emac_ethtool_getmsglevel,
+	.set_msglevel = sun8i_emac_ethtool_setmsglevel,
+	.get_ringparam = sun8i_emac_ethtool_get_ringparam,
+	.set_ringparam = sun8i_emac_ethtool_set_ringparam,
+};
+
+static const struct net_device_ops sun8i_emac_netdev_ops = {
+	.ndo_init = sun8i_emac_init,
+	.ndo_open = sun8i_emac_open,
+	.ndo_start_xmit = sun8i_emac_xmit,
+	.ndo_stop = sun8i_emac_stop,
+	.ndo_change_mtu = sun8i_emac_change_mtu,
+	.ndo_set_features = sun8i_emac_set_features,
+	.ndo_set_rx_mode = sun8i_emac_set_rx_mode,
+	.ndo_tx_timeout = sun8i_emac_tx_timeout,
+	.ndo_do_ioctl = sun8i_emac_ioctl,
+	.ndo_set_mac_address = eth_mac_addr,
+};