--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -34,6 +34,17 @@ struct clk *devm_clk_get(struct device *
 }
 EXPORT_SYMBOL(devm_clk_get);
 
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+	struct clk *clk = devm_clk_get(dev, id);
+
+	if (clk == ERR_PTR(-ENOENT))
+		return NULL;
+
+	return clk;
+}
+EXPORT_SYMBOL(devm_clk_get_optional);
+
 struct clk_bulk_devres {
 	struct clk_bulk_data *clks;
 	int num_clks;
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -15,6 +15,7 @@
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/msi.h>
+#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
@@ -162,6 +163,7 @@ struct mtk_pcie_soc {
  * @phy: pointer to PHY control block
  * @lane: lane count
  * @slot: port slot
+ * @irq: GIC irq
  * @irq_domain: legacy INTx IRQ domain
  * @inner_domain: inner IRQ domain
  * @msi_domain: MSI IRQ domain
@@ -182,6 +184,7 @@ struct mtk_pcie_port {
 	struct phy *phy;
 	u32 lane;
 	u32 slot;
+	int irq;
 	struct irq_domain *irq_domain;
 	struct irq_domain *inner_domain;
 	struct irq_domain *msi_domain;
@@ -225,10 +228,8 @@ static void mtk_pcie_subsys_powerdown(st
 
 	clk_disable_unprepare(pcie->free_ck);
 
-	if (dev->pm_domain) {
-		pm_runtime_put_sync(dev);
-		pm_runtime_disable(dev);
-	}
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
 }
 
 static void mtk_pcie_port_free(struct mtk_pcie_port *port)
@@ -394,75 +395,6 @@ static struct pci_ops mtk_pcie_ops_v2 =
 	.write = mtk_pcie_config_write,
 };
 
-static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
-{
-	struct mtk_pcie *pcie = port->pcie;
-	struct resource *mem = &pcie->mem;
-	const struct mtk_pcie_soc *soc = port->pcie->soc;
-	u32 val;
-	size_t size;
-	int err;
-
-	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
-	if (pcie->base) {
-		val = readl(pcie->base + PCIE_SYS_CFG_V2);
-		val |= PCIE_CSR_LTSSM_EN(port->slot) |
-		       PCIE_CSR_ASPM_L1_EN(port->slot);
-		writel(val, pcie->base + PCIE_SYS_CFG_V2);
-	}
-
-	/* Assert all reset signals */
-	writel(0, port->base + PCIE_RST_CTRL);
-
-	/*
-	 * Enable PCIe link down reset, if link status changed from link up to
-	 * link down, this will reset MAC control registers and configuration
-	 * space.
-	 */
-	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
-
-	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
-	val = readl(port->base + PCIE_RST_CTRL);
-	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
-	       PCIE_MAC_SRSTB | PCIE_CRSTB;
-	writel(val, port->base + PCIE_RST_CTRL);
-
-	/* Set up vendor ID and class code */
-	if (soc->need_fix_class_id) {
-		val = PCI_VENDOR_ID_MEDIATEK;
-		writew(val, port->base + PCIE_CONF_VEND_ID);
-
-		val = PCI_CLASS_BRIDGE_HOST;
-		writew(val, port->base + PCIE_CONF_CLASS_ID);
-	}
-
-	/* 100ms timeout value should be enough for Gen1/2 training */
-	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
-				 !!(val & PCIE_PORT_LINKUP_V2), 20,
-				 100 * USEC_PER_MSEC);
-	if (err)
-		return -ETIMEDOUT;
-
-	/* Set INTx mask */
-	val = readl(port->base + PCIE_INT_MASK);
-	val &= ~INTX_MASK;
-	writel(val, port->base + PCIE_INT_MASK);
-
-	/* Set AHB to PCIe translation windows */
-	size = mem->end - mem->start;
-	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
-	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
-
-	val = upper_32_bits(mem->start);
-	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
-
-	/* Set PCIe to AXI translation memory space.*/
-	val = fls(0xffffffff) | WIN_ENABLE;
-	writel(val, port->base + PCIE_AXI_WINDOW0);
-
-	return 0;
-}
-
 static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 {
 	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -601,6 +533,27 @@ static void mtk_pcie_enable_msi(struct m
 	writel(val, port->base + PCIE_INT_MASK);
 }
 
+static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
+{
+	struct mtk_pcie_port *port, *tmp;
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+		irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+		if (port->irq_domain)
+			irq_domain_remove(port->irq_domain);
+
+		if (IS_ENABLED(CONFIG_PCI_MSI)) {
+			if (port->msi_domain)
+				irq_domain_remove(port->msi_domain);
+			if (port->inner_domain)
+				irq_domain_remove(port->inner_domain);
+		}
+
+		irq_dispose_mapping(port->irq);
+	}
+}
+
 static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 			     irq_hw_number_t hwirq)
 {
@@ -630,6 +583,7 @@ static int mtk_pcie_init_irq_domain(stru
 
 	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
 						 &intx_domain_ops, port);
+	of_node_put(pcie_intc_node);
 	if (!port->irq_domain) {
 		dev_err(dev, "failed to get INTx IRQ domain\n");
 		return -ENODEV;
@@ -639,8 +593,6 @@ static int mtk_pcie_init_irq_domain(stru
 		ret = mtk_pcie_allocate_msi_domains(port);
 		if (ret)
 			return ret;
-
-		mtk_pcie_enable_msi(port);
 	}
 
 	return 0;
@@ -693,7 +645,7 @@ static int mtk_pcie_setup_irq(struct mtk
 	struct mtk_pcie *pcie = port->pcie;
 	struct device *dev = pcie->dev;
 	struct platform_device *pdev = to_platform_device(dev);
-	int err, irq;
+	int err;
 
 	err = mtk_pcie_init_irq_domain(port, node);
 	if (err) {
@@ -701,8 +653,81 @@ static int mtk_pcie_setup_irq(struct mtk
 		return err;
 	}
 
-	irq = platform_get_irq(pdev, port->slot);
-	irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+	port->irq = platform_get_irq(pdev, port->slot);
+	irq_set_chained_handler_and_data(port->irq,
+					 mtk_pcie_intr_handler, port);
+
+	return 0;
+}
+
+static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	struct resource *mem = &pcie->mem;
+	const struct mtk_pcie_soc *soc = port->pcie->soc;
+	u32 val;
+	size_t size;
+	int err;
+
+	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+	if (pcie->base) {
+		val = readl(pcie->base + PCIE_SYS_CFG_V2);
+		val |= PCIE_CSR_LTSSM_EN(port->slot) |
+		       PCIE_CSR_ASPM_L1_EN(port->slot);
+		writel(val, pcie->base + PCIE_SYS_CFG_V2);
+	}
+
+	/* Assert all reset signals */
+	writel(0, port->base + PCIE_RST_CTRL);
+
+	/*
+	 * Enable PCIe link down reset, if link status changed from link up to
+	 * link down, this will reset MAC control registers and configuration
+	 * space.
+	 */
+	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+
+	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
+	val = readl(port->base + PCIE_RST_CTRL);
+	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+	       PCIE_MAC_SRSTB | PCIE_CRSTB;
+	writel(val, port->base + PCIE_RST_CTRL);
+
+	/* Set up vendor ID and class code */
+	if (soc->need_fix_class_id) {
+		val = PCI_VENDOR_ID_MEDIATEK;
+		writew(val, port->base + PCIE_CONF_VEND_ID);
+
+		val = PCI_CLASS_BRIDGE_PCI;
+		writew(val, port->base + PCIE_CONF_CLASS_ID);
+	}
+
+	/* 100ms timeout value should be enough for Gen1/2 training */
+	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
+				 !!(val & PCIE_PORT_LINKUP_V2), 20,
+				 100 * USEC_PER_MSEC);
+	if (err)
+		return -ETIMEDOUT;
+
+	/* Set INTx mask */
+	val = readl(port->base + PCIE_INT_MASK);
+	val &= ~INTX_MASK;
+	writel(val, port->base + PCIE_INT_MASK);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		mtk_pcie_enable_msi(port);
+
+	/* Set AHB to PCIe translation windows */
+	size = mem->end - mem->start;
+	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
+	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
+
+	val = upper_32_bits(mem->start);
+	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
+
+	/* Set PCIe to AXI translation memory space.*/
+	val = fls(0xffffffff) | WIN_ENABLE;
+	writel(val, port->base + PCIE_AXI_WINDOW0);
 
 	return 0;
 }
@@ -903,49 +928,29 @@ static int mtk_pcie_parse_port(struct mt
 
 	/* sys_ck might be divided into the following parts in some chips */
 	snprintf(name, sizeof(name), "ahb_ck%d", slot);
-	port->ahb_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->ahb_ck)) {
-		if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->ahb_ck = NULL;
-	}
+	port->ahb_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->ahb_ck))
+		return PTR_ERR(port->ahb_ck);
 
 	snprintf(name, sizeof(name), "axi_ck%d", slot);
-	port->axi_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->axi_ck)) {
-		if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->axi_ck = NULL;
-	}
+	port->axi_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->axi_ck))
+		return PTR_ERR(port->axi_ck);
 
 	snprintf(name, sizeof(name), "aux_ck%d", slot);
-	port->aux_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->aux_ck)) {
-		if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->aux_ck = NULL;
-	}
+	port->aux_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->aux_ck))
+		return PTR_ERR(port->aux_ck);
 
 	snprintf(name, sizeof(name), "obff_ck%d", slot);
-	port->obff_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->obff_ck)) {
-		if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->obff_ck = NULL;
-	}
+	port->obff_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->obff_ck))
+		return PTR_ERR(port->obff_ck);
 
 	snprintf(name, sizeof(name), "pipe_ck%d", slot);
-	port->pipe_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->pipe_ck)) {
-		if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->pipe_ck = NULL;
-	}
+	port->pipe_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->pipe_ck))
+		return PTR_ERR(port->pipe_ck);
 
 	snprintf(name, sizeof(name), "pcie-rst%d", slot);
 	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
@@ -998,10 +1003,8 @@ static int mtk_pcie_subsys_powerup(struc
 		pcie->free_ck = NULL;
 	}
 
-	if (dev->pm_domain) {
-		pm_runtime_enable(dev);
-		pm_runtime_get_sync(dev);
-	}
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
 
 	/* enable top level clock */
 	err = clk_prepare_enable(pcie->free_ck);
@@ -1013,10 +1016,8 @@ static int mtk_pcie_subsys_powerup(struc
 	return 0;
 
 err_free_ck:
-	if (dev->pm_domain) {
-		pm_runtime_put_sync(dev);
-		pm_runtime_disable(dev);
-	}
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
 
 	return err;
 }
@@ -1122,8 +1122,6 @@
 		return err;
 
 	err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
-	if (err)
-		return err;
 
 	return 0;
 }
@@ -1127,34 +1128,6 @@ static int mtk_pcie_request_resources(st
 	return 0;
 }
 
-static int mtk_pcie_register_host(struct pci_host_bridge *host)
-{
-	struct mtk_pcie *pcie = pci_host_bridge_priv(host);
-	struct pci_bus *child;
-	int err;
-
-	host->busnr = pcie->busn.start;
-	host->dev.parent = pcie->dev;
-	host->ops = pcie->soc->ops;
-	host->map_irq = of_irq_parse_and_map_pci;
-	host->swizzle_irq = pci_common_swizzle;
-	host->sysdata = pcie;
-
-	err = pci_scan_root_bus_bridge(host);
-	if (err < 0)
-		return err;
-
-	pci_bus_size_bridges(host->bus);
-	pci_bus_assign_resources(host->bus);
-
-	list_for_each_entry(child, &host->bus->children, node)
-		pcie_bus_configure_settings(child);
-
-	pci_bus_add_devices(host->bus);
-
-	return 0;
-}
-
 static int mtk_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -1181,7 +1154,14 @@ static int mtk_pcie_probe(struct platfor
 	if (err)
 		goto put_resources;
 
-	err = mtk_pcie_register_host(host);
+	host->busnr = pcie->busn.start;
+	host->dev.parent = pcie->dev;
+	host->ops = pcie->soc->ops;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+	host->sysdata = pcie;
+
+	err = pci_host_probe(host);
 	if (err)
 		goto put_resources;
 
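For reference, the pci_host_probe() call above takes over the enumeration steps that the removed mtk_pcie_register_host() performed by hand once the bridge fields are filled in. A rough sketch of those steps, mirroring the removed helper (the exact generic implementation may differ slightly, for example in PCI_PROBE_ONLY handling); the function name is made up for illustration:

    static int pci_host_probe_sketch(struct pci_host_bridge *host)
    {
            struct pci_bus *child;
            int err;

            /* Scan the root bus using the ops/sysdata set up in probe */
            err = pci_scan_root_bus_bridge(host);
            if (err < 0)
                    return err;

            /* Size and assign bridge resources, then add the devices */
            pci_bus_size_bridges(host->bus);
            pci_bus_assign_resources(host->bus);

            list_for_each_entry(child, &host->bus->children, node)
                    pcie_bus_configure_settings(child);

            pci_bus_add_devices(host->bus);

            return 0;
    }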
@@ -1194,6 +1174,80 @@ put_resources:
 	return err;
 }
 
+
+static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+
+	pci_free_resource_list(windows);
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+	pci_stop_root_bus(host->bus);
+	pci_remove_root_bus(host->bus);
+	mtk_pcie_free_resources(pcie);
+
+	mtk_pcie_irq_teardown(pcie);
+
+	mtk_pcie_put_resources(pcie);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+	struct mtk_pcie *pcie = dev_get_drvdata(dev);
+	struct mtk_pcie_port *port;
+
+	if (list_empty(&pcie->ports))
+		return 0;
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		clk_disable_unprepare(port->pipe_ck);
+		clk_disable_unprepare(port->obff_ck);
+		clk_disable_unprepare(port->axi_ck);
+		clk_disable_unprepare(port->aux_ck);
+		clk_disable_unprepare(port->ahb_ck);
+		clk_disable_unprepare(port->sys_ck);
+		phy_power_off(port->phy);
+		phy_exit(port->phy);
+	}
+
+	clk_disable_unprepare(pcie->free_ck);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+	struct mtk_pcie *pcie = dev_get_drvdata(dev);
+	struct mtk_pcie_port *port, *tmp;
+
+	if (list_empty(&pcie->ports))
+		return 0;
+
+	clk_prepare_enable(pcie->free_ck);
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		mtk_pcie_enable_port(port);
+
+	/* In case of EP was removed while system suspend. */
+	if (list_empty(&pcie->ports))
+		clk_disable_unprepare(pcie->free_ck);
+
+	return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+				      mtk_pcie_resume_noirq)
+};
+
 static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
 	.ops = &mtk_pcie_ops,
 	.startup = mtk_pcie_startup_port,
@@ -1222,10 +1276,13 @@ static const struct of_device_id mtk_pci
 
 static struct platform_driver mtk_pcie_driver = {
 	.probe = mtk_pcie_probe,
+	.remove = mtk_pcie_remove,
 	.driver = {
 		.name = "mtk-pcie",
 		.of_match_table = mtk_pcie_ids,
 		.suppress_bind_attrs = true,
+		.pm = &mtk_pcie_pm_ops,
 	},
 };
-builtin_platform_driver(mtk_pcie_driver);
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
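A side note on the __maybe_unused annotations on the suspend/resume callbacks wired into .pm above: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is enabled, so without the annotation a !CONFIG_PM_SLEEP build would warn about unused functions. When CONFIG_PM_SLEEP is set, the initializer expands to roughly the following (illustrative sketch, not the exact kernel macro definition):

    static const struct dev_pm_ops mtk_pcie_pm_ops_expanded = {
            /* system sleep, hibernation freeze/thaw and poweroff/restore
             * all reuse the same noirq-phase callbacks
             */
            .suspend_noirq  = mtk_pcie_suspend_noirq,
            .resume_noirq   = mtk_pcie_resume_noirq,
            .freeze_noirq   = mtk_pcie_suspend_noirq,
            .thaw_noirq     = mtk_pcie_resume_noirq,
            .poweroff_noirq = mtk_pcie_suspend_noirq,
            .restore_noirq  = mtk_pcie_resume_noirq,
    };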
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -349,6 +349,17 @@ int __must_check devm_clk_bulk_get(struc
 struct clk *devm_clk_get(struct device *dev, const char *id);
 
 /**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ *			   clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as devm_clk_get() except where there is no clock producer.
+ * In this case, instead of returning -ENOENT, the function returns NULL.
+ */
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
+/**
  * devm_get_clk_from_child - lookup and obtain a managed reference to a
  *			     clock producer from child node.
  * @dev: device for clock "consumer"
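As a usage note for the helper documented above (a minimal, hypothetical consumer sketch, not part of the patch): the caller only needs to check for a real error, because an absent clock comes back as NULL and the clk consumer API treats a NULL clk as a dummy, so calls such as clk_prepare_enable(NULL) succeed as no-ops. The "bus" consumer ID and function name below are made up for illustration:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int foo_enable_optional_clock(struct device *dev)
    {
            struct clk *clk;

            clk = devm_clk_get_optional(dev, "bus");
            if (IS_ERR(clk))        /* real error, including -EPROBE_DEFER */
                    return PTR_ERR(clk);

            /* clk may be NULL here; clk_prepare_enable(NULL) returns 0 */
            return clk_prepare_enable(clk);
    }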