From 29eb0af20ede95aefdd6fa93136eccf0111c4403 Mon Sep 17 00:00:00 2001 From: Baikal Electronics Date: Tue, 13 Jun 2023 18:30:20 +0300 Subject: [PATCH] Kernel from sdk6.1 --- .../bindings/dma/snps,dma-spear1340.yaml | 2 +- .../memory-controllers/baikal,bt1-ddrc.yaml | 91 + .../snps,dw-umctl2-common.yaml | 75 + .../snps,dw-umctl2-ddrc.yaml | 57 +- .../bindings/mfd/baikal,bt1-boot-con.yaml | 112 + .../bindings/mfd/baikal,bt1-sys-con.yaml | 133 + .../devicetree/bindings/mfd/syscon.yaml | 3 +- .../devicetree/bindings/mtd/mtd-physmap.yaml | 4 + .../devicetree/bindings/mtd/mtd.yaml | 2 +- .../bindings/pci/baikal,bt1-pcie.yaml | 168 + .../bindings/pci/rockchip-dw-pcie.yaml | 4 +- .../bindings/pci/snps,dw-pcie-common.yaml | 266 ++ .../bindings/pci/snps,dw-pcie-ep.yaml | 212 +- .../devicetree/bindings/pci/snps,dw-pcie.yaml | 260 +- .../bindings/spi/snps,dw-apb-ssi.yaml | 6 + .../devicetree/bindings/sram/sram.yaml | 57 +- MAINTAINERS | 38 +- arch/mips/Kbuild.platforms | 1 + arch/mips/Kconfig | 57 + arch/mips/Kconfig.debug | 3 + arch/mips/baikal-t1/Kconfig | 117 + arch/mips/baikal-t1/Makefile | 9 + arch/mips/baikal-t1/Platform | 16 + arch/mips/baikal-t1/early_printk.c | 37 + arch/mips/baikal-t1/init.c | 315 ++ arch/mips/baikal-t1/irq.c | 58 + arch/mips/boot/compressed/Makefile | 1 + arch/mips/boot/compressed/uart-bt1.c | 2 + arch/mips/boot/dts/Makefile | 1 + arch/mips/boot/dts/baikal-t1/Makefile | 11 + arch/mips/boot/dts/baikal-t1/bfk3.dts | 358 ++ arch/mips/boot/dts/baikal-t1/generic.dts | 288 ++ arch/mips/boot/dts/baikal-t1/krkx4.dtsi | 65 + arch/mips/boot/dts/baikal-t1/oclk.dtsi | 72 + arch/mips/boot/dts/baikal-t1/soc.dtsi | 1154 ++++++ arch/mips/configs/baikal_t1_defconfig | 433 +++ arch/mips/configs/bfk3_defconfig | 414 +++ arch/mips/include/asm/cpu-info.h | 2 +- arch/mips/include/asm/io.h | 20 +- arch/mips/include/asm/kvm_host.h | 2 +- .../mach-baikal-t1/cpu-feature-overrides.h | 143 + arch/mips/include/asm/mach-baikal-t1/irq.h | 60 + .../asm/mach-baikal-t1/kernel-entry-init.h | 253 ++ arch/mips/include/asm/mach-baikal-t1/memory.h | 117 + .../include/asm/mach-baikal-t1/platform.h | 23 + arch/mips/include/asm/mach-baikal-t1/spaces.h | 18 + arch/mips/include/asm/mips-cm.h | 15 + arch/mips/include/asm/mipsregs.h | 3 + arch/mips/include/asm/mmu_context.h | 61 + arch/mips/include/asm/smp-ops.h | 2 +- arch/mips/kernel/setup.c | 4 +- arch/mips/mm/tlbex.c | 1 + drivers/dma/dw-edma/Kconfig | 2 +- drivers/dma/dw-edma/dw-edma-core.c | 192 +- drivers/dma/dw-edma/dw-edma-core.h | 10 +- drivers/dma/dw-edma/dw-edma-pcie.c | 56 +- drivers/dma/dw-edma/dw-edma-v0-core.c | 98 +- drivers/dma/dw-edma/dw-edma-v0-core.h | 1 - drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 372 +- drivers/dma/dw-edma/dw-edma-v0-debugfs.h | 5 - drivers/edac/Kconfig | 8 +- drivers/edac/Makefile | 1 + drivers/edac/edac_mc.c | 137 +- drivers/edac/edac_mc.h | 4 + drivers/edac/synopsys_edac.c | 3203 +++++++++++------ drivers/edac/zynq_edac.c | 501 +++ drivers/net/ethernet/amd/Kconfig | 2 +- drivers/net/ethernet/amd/xgbe/Makefile | 3 +- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 10 + drivers/net/ethernet/amd/xgbe/xgbe-phy-v3.c | 693 ++++ drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 321 +- drivers/net/ethernet/amd/xgbe/xgbe.h | 8 + drivers/net/ethernet/stmicro/stmmac/Kconfig | 9 + drivers/net/ethernet/stmicro/stmmac/Makefile | 1 + .../net/ethernet/stmicro/stmmac/dwmac-bt1.c | 237 ++ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 12 +- .../net/ethernet/stmicro/stmmac/stmmac_mdio.c | 16 +- .../ethernet/stmicro/stmmac/stmmac_platform.c | 10 +- 
drivers/net/phy/Kconfig | 6 + drivers/net/phy/Makefile | 1 + drivers/net/phy/marvell-88x2222-kr.c | 442 +++ drivers/net/phy/marvell-88x2222.c | 1 + drivers/net/phy/realtek.c | 37 + drivers/pci/bus.c | 26 +- drivers/pci/controller/dwc/Kconfig | 9 + drivers/pci/controller/dwc/Makefile | 1 + drivers/pci/controller/dwc/pcie-bt1.c | 650 ++++ .../pci/controller/dwc/pcie-designware-ep.c | 41 +- .../pci/controller/dwc/pcie-designware-host.c | 71 +- drivers/pci/controller/dwc/pcie-designware.c | 446 ++- drivers/pci/controller/dwc/pcie-designware.h | 91 +- drivers/pci/controller/dwc/pcie-visconti.c | 37 +- drivers/pci/setup-bus.c | 16 +- drivers/usb/host/xhci-hub.c | 4 + include/asm-generic/mmu_context.h | 10 + include/dt-bindings/soc/bt1-boot-mode.h | 14 + include/linux/dma/edma.h | 25 +- include/linux/dmaengine.h | 2 +- include/linux/edac.h | 6 + include/linux/marvell_phy.h | 1 + include/linux/stmmac.h | 2 + mm/slab.c | 2 + 102 files changed, 11585 insertions(+), 1894 deletions(-) create mode 100644 Documentation/devicetree/bindings/memory-controllers/baikal,bt1-ddrc.yaml create mode 100644 Documentation/devicetree/bindings/memory-controllers/snps,dw-umctl2-common.yaml create mode 100644 Documentation/devicetree/bindings/mfd/baikal,bt1-boot-con.yaml create mode 100644 Documentation/devicetree/bindings/mfd/baikal,bt1-sys-con.yaml create mode 100644 Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml create mode 100644 Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml create mode 100644 arch/mips/baikal-t1/Kconfig create mode 100644 arch/mips/baikal-t1/Makefile create mode 100644 arch/mips/baikal-t1/Platform create mode 100644 arch/mips/baikal-t1/early_printk.c create mode 100644 arch/mips/baikal-t1/init.c create mode 100644 arch/mips/baikal-t1/irq.c create mode 100644 arch/mips/boot/compressed/uart-bt1.c create mode 100644 arch/mips/boot/dts/baikal-t1/Makefile create mode 100644 arch/mips/boot/dts/baikal-t1/bfk3.dts create mode 100644 arch/mips/boot/dts/baikal-t1/generic.dts create mode 100644 arch/mips/boot/dts/baikal-t1/krkx4.dtsi create mode 100644 arch/mips/boot/dts/baikal-t1/oclk.dtsi create mode 100644 arch/mips/boot/dts/baikal-t1/soc.dtsi create mode 100644 arch/mips/configs/baikal_t1_defconfig create mode 100644 arch/mips/configs/bfk3_defconfig create mode 100644 arch/mips/include/asm/mach-baikal-t1/cpu-feature-overrides.h create mode 100644 arch/mips/include/asm/mach-baikal-t1/irq.h create mode 100644 arch/mips/include/asm/mach-baikal-t1/kernel-entry-init.h create mode 100644 arch/mips/include/asm/mach-baikal-t1/memory.h create mode 100644 arch/mips/include/asm/mach-baikal-t1/platform.h create mode 100644 arch/mips/include/asm/mach-baikal-t1/spaces.h create mode 100644 drivers/edac/zynq_edac.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-phy-v3.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-bt1.c create mode 100644 drivers/net/phy/marvell-88x2222-kr.c create mode 100644 drivers/pci/controller/dwc/pcie-bt1.c create mode 100644 include/dt-bindings/soc/bt1-boot-mode.h diff --git a/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml b/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml index c13649bf7f19e..475cfb5d7f117 100644 --- a/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml +++ b/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml @@ -21,7 +21,7 @@ properties: - enum: - renesas,r9a06g032-dma - const: renesas,rzn1-dma - + - const: baikal,bt1-dmac "#dma-cells": minimum: 3 diff --git 
a/Documentation/devicetree/bindings/memory-controllers/baikal,bt1-ddrc.yaml b/Documentation/devicetree/bindings/memory-controllers/baikal,bt1-ddrc.yaml new file mode 100644 index 0000000000000..80353a0a676f0 --- /dev/null +++ b/Documentation/devicetree/bindings/memory-controllers/baikal,bt1-ddrc.yaml @@ -0,0 +1,91 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/memory-controllers/baikal,bt1-ddrc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Baikal-T1 DDR Controller + +maintainers: + - Serge Semin + +description: + Baikal-T1 DDRC is based on the DW uMCTL2 DDRC IP-core v2.51a with DDR2 + and DDR3 protocol capability, 32-bit data bus + 8-bit ECC + up to 2 + SDRAM ranks. There are individual IRQs for each ECC and DFI events. + The dedicated scrubber clock source is absent since it's fully + synchronous to the core clock. + +allOf: + - $ref: /schemas/memory-controllers/snps,dw-umctl2-common.yaml# + +properties: + compatible: + const: baikal,bt1-ddrc + + reg: + maxItems: 1 + + interrupts: + maxItems: 4 + + interrupt-names: + items: + - const: dfi_e + - const: ecc_ce + - const: ecc_ue + - const: ecc_sbr + + clocks: + maxItems: 3 + + clock-names: + items: + - const: pclk + - const: aclk + - const: core + + resets: + maxItems: 2 + + reset-names: + items: + - const: arst + - const: core + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + - interrupt-names + +additionalProperties: false + +examples: + - | + #include + #include + #include + + memory-controller@1f042000 { + compatible = "baikal,bt1-ddrc"; + reg = <0x1f042000 0x1000>; + + interrupts = , + , + , + ; + interrupt-names = "dfi_e", "ecc_ce", "ecc_ue", "ecc_sbr"; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_DDR_CLK>, + <&ccu_pll CCU_DDR_PLL>; + clock-names = "pclk", "aclk", "core"; + + resets = <&ccu_axi CCU_AXI_DDR_RST>, + <&ccu_sys CCU_SYS_DDR_INIT_RST>; + reset-names = "arst", "core"; + }; +... diff --git a/Documentation/devicetree/bindings/memory-controllers/snps,dw-umctl2-common.yaml b/Documentation/devicetree/bindings/memory-controllers/snps,dw-umctl2-common.yaml new file mode 100644 index 0000000000000..115fe5e8339a0 --- /dev/null +++ b/Documentation/devicetree/bindings/memory-controllers/snps,dw-umctl2-common.yaml @@ -0,0 +1,75 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/memory-controllers/snps,dw-umctl2-common.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Synopsys DesignWare Universal Multi-Protocol Memory Controller + +maintainers: + - Krzysztof Kozlowski + - Manish Narani + - Michal Simek + +description: + Synopsys DesignWare Enhanced uMCTL2 DDR Memory Controller is capable of + working with the memory devices supporting up to (LP)DDR4 protocol. It can + be equipped with SEC/DEC ECC feature if DRAM data bus width is either + 16-bits or 32-bits or 64-bits wide. + +select: false + +properties: + interrupts: + description: + DW uMCTL2 DDRC IP-core provides individual IRQ signal for each event":" + ECC Corrected Error, ECC Uncorrected Error, ECC Address Protection, + Scrubber-Done signal, DFI Parity/CRC Error. Some platforms may have the + signals merged before they reach the IRQ controller or have some of them + absent in case if the corresponding feature is unavailable/disabled. 
+ minItems: 1 + maxItems: 5 + + interrupt-names: + minItems: 1 + maxItems: 5 + oneOf: + - description: Common ECC CE/UE/Scrubber/DFI Errors IRQ + items: + - const: ecc + - description: Individual ECC CE/UE/Scrubber/DFI Errors IRQs + items: + enum: [ ecc_ce, ecc_ue, ecc_ap, ecc_sbr, dfi_e ] + + reg: + maxItems: 1 + + clocks: + description: + A standard set of the clock sources contains CSRs bus clock, AXI-ports + reference clock, DDRC core clock, Scrubber standalone clock + (synchronous to the DDRC clock). + minItems: 1 + maxItems: 4 + + clock-names: + minItems: 1 + maxItems: 4 + items: + enum: [ pclk, aclk, core, sbr ] + + resets: + description: + Each clock domain can have separate reset signal. + minItems: 1 + maxItems: 4 + + reset-names: + minItems: 1 + maxItems: 4 + items: + enum: [ prst, arst, core, sbr ] + +additionalProperties: true + +... diff --git a/Documentation/devicetree/bindings/memory-controllers/snps,dw-umctl2-ddrc.yaml b/Documentation/devicetree/bindings/memory-controllers/snps,dw-umctl2-ddrc.yaml index e68c4306025a1..eee5271684e8a 100644 --- a/Documentation/devicetree/bindings/memory-controllers/snps,dw-umctl2-ddrc.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/snps,dw-umctl2-ddrc.yaml @@ -21,6 +21,11 @@ description: | controller. It has an optional SEC/DEC ECC support in 64- and 32-bits bus width configurations. +allOf: + - $ref: /schemas/memory-controllers/snps,dw-umctl2-common.yaml# + +# Please create a separate DT-schema for your DW uMCTL2 DDR controller +# with more detailed properties definition. properties: compatible: oneOf: @@ -32,62 +37,12 @@ properties: - description: Xilinx ZynqMP DDR controller v2.40a const: xlnx,zynqmp-ddrc-2.40a - interrupts: - description: - DW uMCTL2 DDRC IP-core provides individual IRQ signal for each event":" - ECC Corrected Error, ECC Uncorrected Error, ECC Address Protection, - Scrubber-Done signal, DFI Parity/CRC Error. Some platforms may have the - signals merged before they reach the IRQ controller or have some of them - absent in case if the corresponding feature is unavailable/disabled. - minItems: 1 - maxItems: 5 - - interrupt-names: - minItems: 1 - maxItems: 5 - oneOf: - - description: Common ECC CE/UE/Scrubber/DFI Errors IRQ - items: - - const: ecc - - description: Individual ECC CE/UE/Scrubber/DFI Errors IRQs - items: - enum: [ ecc_ce, ecc_ue, ecc_ap, ecc_sbr, dfi_e ] - - reg: - maxItems: 1 - - clocks: - description: - A standard set of the clock sources contains CSRs bus clock, AXI-ports - reference clock, DDRC core clock, Scrubber standalone clock - (synchronous to the DDRC clock). - minItems: 1 - maxItems: 4 - - clock-names: - minItems: 1 - maxItems: 4 - items: - enum: [ pclk, aclk, core, sbr ] - - resets: - description: - Each clock domain can have separate reset signal. 
- minItems: 1 - maxItems: 4 - - reset-names: - minItems: 1 - maxItems: 4 - items: - enum: [ prst, arst, core, sbr ] - required: - compatible - reg - interrupts -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/mfd/baikal,bt1-boot-con.yaml b/Documentation/devicetree/bindings/mfd/baikal,bt1-boot-con.yaml new file mode 100644 index 0000000000000..91aa8ecbaa114 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/baikal,bt1-boot-con.yaml @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright (C) 2022 BAIKAL ELECTRONICS, JSC +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/mfd/baikal,bt1-boot-con.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Baikal-T1 SoC Boot Controller + +maintainers: + - Serge Semin + +description: + Baikal-T1 SoC is equipped with a Boot Controller which is responsible for + the SoC proper boot up procedure. Depending on the external pin state the + system can boot up either from the internal ROM or from the externally attached + SPI flash (at least of 16MB) or from the internal SRAM (the 64KB of executional + code is pre-loaded from the external SPI flash). + +allOf: + - $ref: /schemas/mfd/syscon.yaml# + +properties: + compatible: + items: + - const: baikal,bt1-boot-con + - const: syscon + - const: simple-mfd + + reg: + items: + - description: + Baikal-T1 Boot Controller CSR space. It doesn't include many + settings':' corrent boot mode, SPI controller access mux, SRAM + access mux and device ID. + - description: Mirrored first 4MB of the boot SPI flash memory + + reg-names: + items: + - const: boot + - const: mirror + + "#address-cells": + const: 1 + + "#size-cells": + const: 1 + + ranges: true + + little-endian: true + + mux-controller: + $ref: /schemas/mux/reg-mux.yaml# + + rom@1bfc0000: + $ref: /schemas/mtd/mtd-physmap.yaml# + + spi@1f040100: + $ref: /schemas/spi/snps,dw-apb-ssi.yaml# + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + syscon@1f04d000 { + compatible = "baikal,bt1-boot-con", "syscon", "simple-mfd"; + reg = <0x1f040000 0x1000>, + <0x1fc00000 0x400000>; + reg-names = "boot", "mirror"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + little-endian; + reg-io-width = <4>; + + mux-controller { + compatible = "mmio-mux"; + #mux-control-cells = <1>; + + mux-reg-masks = <0x0 0x100>, <0x4 0x1>; + idle-states = <0x1>, <0x0>; + }; + + rom@1bfc0000 { + compatible = "baikal,bt1-int-rom", "mtd-rom"; + reg = <0x1bfc0000 0x10000>; + + no-unaligned-direct-access; + bank-width = <4>; + }; + + spi@1f040100 { + compatible = "baikal,bt1-sys-ssi"; + reg = <0x1f040100 0x900>, + <0x1c000000 0x1000000>; + reg-names = "config", "map"; + #address-cells = <1>; + #size-cells = <0>; + + mux-controls = <&boot_mux 0>; + + clocks = <&ccu_sys 1>; + clock-names = "ssi_clk"; + }; + }; +... 
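
Since the boot controller is exported as a syscon, platform code can grab its regmap and, for example, query the latched boot mode. The sketch below is illustrative only and is not part of this patch: syscon_regmap_lookup_by_compatible() and regmap_read() are the standard kernel helpers, while BT1_BOOT_MODE_REG and BT1_BOOT_MODE_MASK are assumed placeholder values (the real layout is defined by the SoC documentation and include/dt-bindings/soc/bt1-boot-mode.h).

/*
 * Consumer sketch, not part of this patch: read the latched boot mode
 * through the "baikal,bt1-boot-con" syscon regmap.  The register offset
 * and field mask below are assumed placeholders for illustration only.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/types.h>

#define BT1_BOOT_MODE_REG	0x0		/* assumed offset */
#define BT1_BOOT_MODE_MASK	GENMASK(1, 0)	/* assumed field */

static int bt1_get_boot_mode(u32 *mode)
{
	struct regmap *map;
	u32 val;
	int ret;

	map = syscon_regmap_lookup_by_compatible("baikal,bt1-boot-con");
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, BT1_BOOT_MODE_REG, &val);
	if (ret)
		return ret;

	*mode = FIELD_GET(BT1_BOOT_MODE_MASK, val);

	return 0;
}
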
diff --git a/Documentation/devicetree/bindings/mfd/baikal,bt1-sys-con.yaml b/Documentation/devicetree/bindings/mfd/baikal,bt1-sys-con.yaml new file mode 100644 index 0000000000000..1aa593f6c9c81 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/baikal,bt1-sys-con.yaml @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright (C) 2022 BAIKAL ELECTRONICS, JSC +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/mfd/baikal,bt1-sys-con.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Baikal-T1 SoC System Controller + +maintainers: + - Serge Semin + +description: + Baikal-T1 SoC is equipped with a System Controller which is responsible for + the SoC components setting up and consists of the next sub-blocks':' + PLL/AXI-bus/System devices Clocks Control Units, P5600 CM2 L2-RAM controller, + CPU cores reboot flag, persistent across reboots register, indirectly + accessible DW APB I2C controller, Boot Controller with a pre-installed memory + mapped firmware and a resource limited DW APB SSI, which is also can be used + to transparently access an external SPI flash by means of a dedicated IO + memory region. + +allOf: + - $ref: /schemas/mfd/syscon.yaml# + +properties: + compatible: + items: + - const: baikal,bt1-sys-con + - const: syscon + - const: simple-mfd + + reg: + description: + Baikal-T1 System Controller CSR space. It includes CCU (Clock Control + Unit), L2 settings, Reboot flag and Reboot tolerant register, System I2C + controller CSRs. + maxItems: 1 + + reg-names: + const: sys + + "#address-cells": + const: 1 + + "#size-cells": + const: 1 + + ranges: true + + little-endian: true + + clock-controller@1f04d000: + $ref: /schemas/clock/baikal,bt1-ccu-pll.yaml# + + clock-controller@1f04d030: + $ref: /schemas/clock/baikal,bt1-ccu-div.yaml# + + clock-controller@1f04d060: + $ref: /schemas/clock/baikal,bt1-ccu-div.yaml# + + l2@1f04d028: + $ref: /schemas/memory-controllers/baikal,bt1-l2-ctl.yaml# + + reboot: + $ref: /schemas/power/reset/syscon-reboot.yaml# + + reboot-mode: + $ref: /schemas/power/reset/syscon-reboot-mode.yaml# + + i2c@1f04d100: + $ref: /schemas/i2c/snps,designware-i2c.yaml# + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + syscon@1f04d000 { + compatible = "baikal,bt1-sys-con", "syscon", "simple-mfd"; + reg = <0x1f04d000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + little-endian; + reg-io-width = <4>; + + clock-controller@1f04d000 { + compatible = "baikal,bt1-ccu-pll"; + reg = <0x1f04d000 0x028>; + #clock-cells = <1>; + + clocks = <&clk25m>; + clock-names = "ref_clk"; + }; + + clock-controller@1f04d030 { + compatible = "baikal,bt1-ccu-axi"; + reg = <0x1f04d030 0x030>; + #clock-cells = <1>; + #reset-cells = <1>; + + clocks = <&ccu_pll 1>, + <&ccu_pll 2>, + <&ccu_pll 3>; + clock-names = "sata_clk", "pcie_clk", "eth_clk"; + }; + + l2@1f04d028 { + compatible = "baikal,bt1-l2-ctl"; + reg = <0x1f04d028 0x004>; + + baikal,l2-ws-latency = <0>; + baikal,l2-tag-latency = <0>; + baikal,l2-data-latency = <1>; + }; + + i2c@1f04d100 { + compatible = "baikal,bt1-sys-i2c"; + reg = <0x1f04d100 0x010>; + #address-cells = <1>; + #size-cells = <0>; + + interrupts = <0 32 4>; + + clocks = <&ccu_sys 1>; + }; + }; +... 
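
The clock, reset, L2 and I2C blocks in the example above are all children of a "syscon", "simple-mfd" node, so their drivers conventionally share the parent's register map instead of mapping the CSR space themselves. A minimal sketch of that lookup pattern follows; the probe function is hypothetical, while syscon_node_to_regmap() and of_get_parent() are the usual kernel helpers.

/*
 * Typical simple-mfd child pattern, shown for illustration only (not code
 * from this patch): a sub-device gets the shared regmap from its parent
 * "baikal,bt1-sys-con" syscon node.
 */
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int bt1_sys_child_probe(struct platform_device *pdev)
{
	struct device_node *parent = of_get_parent(pdev->dev.of_node);
	struct regmap *sys_regs;

	/* The parent node is the system controller itself. */
	sys_regs = syscon_node_to_regmap(parent);
	of_node_put(parent);
	if (IS_ERR(sys_regs))
		return PTR_ERR(sys_regs);

	/* ... program L2 latencies, reboot flags, etc. via sys_regs ... */

	return 0;
}
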
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml index 4e4baf53796de..a06d5a2e46af0 100644 --- a/Documentation/devicetree/bindings/mfd/syscon.yaml +++ b/Documentation/devicetree/bindings/mfd/syscon.yaml @@ -77,8 +77,7 @@ properties: minItems: 2 maxItems: 5 # Should be enough - reg: - maxItems: 1 + reg: true reg-io-width: description: | diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml b/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml index 82eb4e0f453b9..8c3192266835c 100644 --- a/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml +++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml @@ -52,6 +52,10 @@ properties: - cypress,cy7c1019dv33-10zsxi - arm,vexpress-psram - const: mtd-ram + - items: + - enum: + - baikal,bt1-int-rom + - const: mtd-rom - enum: - cfi-flash - jedec-flash diff --git a/Documentation/devicetree/bindings/mtd/mtd.yaml b/Documentation/devicetree/bindings/mtd/mtd.yaml index 3498e485679b1..7958c0480853c 100644 --- a/Documentation/devicetree/bindings/mtd/mtd.yaml +++ b/Documentation/devicetree/bindings/mtd/mtd.yaml @@ -12,7 +12,7 @@ maintainers: properties: $nodename: - pattern: "^flash(@.*)?$" + pattern: "^(flash|rom|sram-controller|.*sram)(@.*)?$" label: description: diff --git a/Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml b/Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml new file mode 100644 index 0000000000000..8eaa07ae97743 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml @@ -0,0 +1,168 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/baikal,bt1-pcie.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Baikal-T1 PCIe Root Port Controller + +maintainers: + - Serge Semin + +description: + Embedded into Baikal-T1 SoC Root Complex controller with a single port + activated. It's based on the DWC RC PCIe v4.60a IP-core, which is configured + to have just a single Root Port function and is capable of establishing the + link up to Gen.3 speed on x4 lanes. It doesn't have embedded clock and reset + control module, so the proper interface initialization is supposed to be + performed by software. There four in- and four outbound iATU regions + which can be used to emit all required TLP types on the PCIe bus. + +allOf: + - $ref: /schemas/pci/snps,dw-pcie.yaml# + +properties: + compatible: + const: baikal,bt1-pcie + + reg: + description: + DBI, DBI2 and at least 4KB outbound iATU-capable region for the + peripheral devices CFG-space access. + maxItems: 3 + + reg-names: + items: + - const: dbi + - const: dbi2 + - const: config + + interrupts: + description: + MSI, AER, PME, Hot-plug, Link Bandwidth Management, Link Equalization + request and eight Read/Write eDMA IRQ lines are available. + maxItems: 14 + + interrupt-names: + items: + - const: dma0 + - const: dma1 + - const: dma2 + - const: dma3 + - const: dma4 + - const: dma5 + - const: dma6 + - const: dma7 + - const: msi + - const: aer + - const: pme + - const: hp + - const: bw_mg + - const: l_eq + + clocks: + description: + DBI (attached to the APB bus), AXI-bus master and slave interfaces + are fed up by the dedicated application clocks. A common reference + clock signal is supposed to be attached to the corresponding Ref-pad + of the SoC. It will be redistributed amongst the controller core + sub-modules (pipe, core, aux, etc). 
+ maxItems: 4 + + clock-names: + items: + - const: dbi + - const: mstr + - const: slv + - const: ref + + resets: + description: + A comprehensive controller reset logic is supposed to be implemented + by software, so almost all the possible application and core reset + signals are exposed via the system CCU module. + maxItems: 9 + + reset-names: + items: + - const: mstr + - const: slv + - const: pwr + - const: hot + - const: phy + - const: core + - const: pipe + - const: sticky + - const: non-sticky + + baikal,bt1-syscon: + $ref: /schemas/types.yaml#/definitions/phandle + description: + Phandle to the Baikal-T1 System Controller DT node. It's required to + access some additional PM, Reset-related and LTSSM signals. + + num-lanes: + maximum: 4 + + max-link-speed: + maximum: 3 + +required: + - compatible + - reg + - reg-names + - interrupts + - interrupt-names + +unevaluatedProperties: false + +examples: + - | + #include + #include + + pcie@1f052000 { + compatible = "baikal,bt1-pcie"; + device_type = "pci"; + reg = <0x1f052000 0x1000>, <0x1f053000 0x1000>, <0x1bdbf000 0x1000>; + reg-names = "dbi", "dbi2", "config"; + #address-cells = <3>; + #size-cells = <2>; + ranges = <0x81000000 0 0x00000000 0x1bdb0000 0 0x00008000>, + <0x82000000 0 0x20000000 0x08000000 0 0x13db0000>; + bus-range = <0x0 0xff>; + + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "dma0", "dma1", "dma2", "dma3", + "dma4", "dma5", "dma6", "dma7", + "msi", "aer", "pme", "hp", "bw_mg", + "l_eq"; + + clocks = <&ccu_sys 1>, <&ccu_axi 6>, <&ccu_axi 7>, <&clk_pcie>; + clock-names = "dbi", "mstr", "slv", "ref"; + + resets = <&ccu_axi 6>, <&ccu_axi 7>, <&ccu_sys 7>, <&ccu_sys 10>, + <&ccu_sys 4>, <&ccu_sys 6>, <&ccu_sys 5>, <&ccu_sys 8>, + <&ccu_sys 9>; + reset-names = "mstr", "slv", "pwr", "hot", "phy", "core", "pipe", + "sticky", "non-sticky"; + + reset-gpios = <&port0 0 GPIO_ACTIVE_LOW>; + + num-lanes = <4>; + max-link-speed = <3>; + }; +... diff --git a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml index bc0a9d1db750b..2be72ae1169f9 100644 --- a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml @@ -14,10 +14,10 @@ maintainers: description: |+ RK3568 SoC PCIe host controller is based on the Synopsys DesignWare PCIe IP and thus inherits all the common properties defined in - designware-pcie.txt. + snps,dw-pcie.yaml. allOf: - - $ref: /schemas/pci/pci-bus.yaml# + - $ref: /schemas/pci/snps,dw-pcie.yaml# properties: compatible: diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml new file mode 100644 index 0000000000000..d87e13496834a --- /dev/null +++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml @@ -0,0 +1,266 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/snps,dw-pcie-common.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Synopsys DWC PCIe RP/EP controller + +maintainers: + - Jingoo Han + - Gustavo Pimentel + +description: + Generic Synopsys DesignWare PCIe Root Port and Endpoint controller + properties. + +select: false + +properties: + reg: + description: + DWC PCIe CSR space is normally accessed over the dedicated Data Bus + Interface - DBI. 
In accordance with the reference manual the register + configuration space belongs to the Configuration-Dependent Module (CDM) + and is split up into several sub-parts Standard PCIe configuration + space, Port Logic Registers (PL), Shadow Config-space Registers, + iATU/eDMA registers. The particular sub-space is selected by the + CDM/ELBI (dbi_cs) and CS2 (dbi_cs2) signals (selector bits). Such + configuration provides a flexible interface for the system engineers to + either map the particular space at a desired MMIO address or just leave + them in a contiguous memory space if pure Native or AXI Bridge DBI access + is selected. Note the PCIe CFG-space, PL and Shadow registers are + specific for each activated function, while the rest of the sub-spaces + are common for all of them (if there are more than one). + minItems: 2 + maxItems: 6 + + reg-names: + minItems: 2 + maxItems: 6 + + interrupts: + description: + There are two main sub-blocks which are normally capable of + generating interrupts. It's System Information Interface and MSI + interface. While the former one has some common for the Host and + Endpoint controllers IRQ-signals, the later interface is obviously + Root Complex specific since it's responsible for the incoming MSI + messages signalling. The System Information IRQ signals are mainly + responsible for reporting the generic PCIe hierarchy and Root + Complex events like VPD IO request, general AER, PME, Hot-plug, link + bandwidth change, link equalization request, INTx asserted/deasserted + Message detection, embedded DMA Tx/Rx/Error. + minItems: 1 + maxItems: 26 + + interrupt-names: + minItems: 1 + maxItems: 26 + + clocks: + description: + DWC PCIe reference manual explicitly defines a set of the clocks required + to get the controller working correctly. In general all of them can + be divided into two groups':' application and core clocks. Note the + platforms may have some of the clock sources unspecified in case if the + corresponding domains are fed up from a common clock source. + minItems: 1 + maxItems: 7 + + clock-names: + minItems: 1 + maxItems: 7 + items: + oneOf: + - description: + Data Bus Interface (DBI) clock. Clock signal for the AXI-bus + interface of the Configuration-Dependent Module, which is + basically the set of the controller CSRs. + const: dbi + - description: + Application AXI-bus Master interface clock. Basically this is + a clock for the controller DMA interface (PCI-to-CPU). + const: mstr + - description: + Application AXI-bus Slave interface clock. This is a clock for + the CPU-to-PCI memory IO interface. + const: slv + - description: + Controller Core-PCS PIPE interface clock. It's normally + supplied by an external PCS-PHY. + const: pipe + - description: + Controller Primary clock. It's assumed that all controller input + signals (except resets) are synchronous to this clock. + const: core + - description: + Auxiliary clock for the controller PMC domain. The controller + partitioning implies having some parts to operate with this + clock in some power management states. + const: aux + - description: + Generic reference clock. In case if there are several + interfaces fed up with a common clock source it's advisable to + define it with this name (for instance pipe, core and aux can + be connected to a single source of the periodic signal). + const: ref + - description: + Clock for the PHY registers interface. Originally this is + a PHY-viewport-based interface, but some platform may have + specifically designed one. 
+ const: phy_reg + - description: + Vendor-specific clock names. Consider using the generic names + above for new bindings. + oneOf: + - description: See native 'dbi' clock for details + enum: [ pcie, pcie_apb_sys, aclk_dbi ] + - description: See native 'mstr/slv' clock for details + enum: [ pcie_bus, pcie_inbound_axi, pcie_aclk, aclk_mst, aclk_slv ] + - description: See native 'pipe' clock for details + enum: [ pcie_phy, pcie_phy_ref, link ] + - description: See native 'aux' clock for details + enum: [ pcie_aux ] + - description: See native 'ref' clock for details. + enum: [ gio ] + - description: See nativs 'phy_reg' clock for details + enum: [ pcie_apb_phy, pclk ] + + resets: + description: + DWC PCIe reference manual explicitly defines a set of the reset + signals required to be de-asserted to properly activate the controller + sub-parts. All of these signals can be divided into two sub-groups':' + application and core resets with respect to the main sub-domains they + are supposed to reset. Note the platforms may have some of these signals + unspecified in case if they are automatically handled or aggregated into + a comprehensive control module. + minItems: 1 + maxItems: 10 + + reset-names: + minItems: 1 + maxItems: 10 + items: + oneOf: + - description: Data Bus Interface (DBI) domain reset + const: dbi + - description: AXI-bus Master interface reset + const: mstr + - description: AXI-bus Slave interface reset + const: slv + - description: Application-dependent interface reset + const: app + - description: Controller Non-sticky CSR flags reset + const: non-sticky + - description: Controller sticky CSR flags reset + const: sticky + - description: PIPE-interface (Core-PCS) logic reset + const: pipe + - description: + Controller primary reset (resets everything except PMC module) + const: core + - description: PCS/PHY block reset + const: phy + - description: PMC hot reset signal + const: hot + - description: Cold reset signal + const: pwr + - description: + Vendor-specific reset names. Consider using the generic names + above for new bindings. + oneOf: + - description: See native 'app' reset for details + enum: [ apps, gio, apb ] + - description: See native 'phy' reset for details + enum: [ pciephy, link ] + - description: See native 'pwr' reset for details + enum: [ turnoff ] + + phys: + description: + There can be up to the number of possible lanes PHYs specified placed in + the phandle array in the line-based order. Obviously each the specified + PHYs are supposed to be able to work in the PCIe mode with a speed + implied by the DWC PCIe controller they are attached to. + minItems: 1 + maxItems: 16 + + phy-names: + minItems: 1 + maxItems: 16 + oneOf: + - description: Generic PHY names + items: + pattern: '^pcie[0-9]+$' + - description: + Vendor-specific PHY names. Consider using the generic + names above for new bindings. + items: + oneOf: + - pattern: '^pcie(-?phy[0-9]*)?$' + - pattern: '^p2u-[0-7]$' + + reset-gpio: + deprecated: true + description: + Reference to the GPIO-controlled PERST# signal. It is used to reset all + the peripheral devices available on the PCIe bus. + maxItems: 1 + + reset-gpios: + description: + Reference to the GPIO-controlled PERST# signal. It is used to reset all + the peripheral devices available on the PCIe bus. + maxItems: 1 + + max-link-speed: + maximum: 5 + + num-lanes: + description: + Number of PCIe link lanes to use. Can be omitted if the already brought + up link is supposed to be preserved. 
+ maximum: 16 + + num-ob-windows: + $ref: /schemas/types.yaml#/definitions/uint32 + deprecated: true + description: + Number of outbound address translation windows. This parameter can be + auto-detected based on the iATU memory writability. So there is no + point in having a dedicated DT-property for it. + maximum: 256 + + num-ib-windows: + $ref: /schemas/types.yaml#/definitions/uint32 + deprecated: true + description: + Number of inbound address translation windows. In the same way as + for the outbound AT windows, this parameter can be auto-detected based + on the iATU memory writability. There is no point having a dedicated + DT-property for it either. + maximum: 256 + + num-viewport: + $ref: /schemas/types.yaml#/definitions/uint32 + deprecated: true + description: + Number of outbound view ports configured in hardware. It's the same as + the number of outbound AT windows. + maximum: 256 + + snps,enable-cdm-check: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable automatic checking of CDM (Configuration Dependent Module) + registers for data corruption. CDM registers include standard PCIe + configuration space registers, Port Logic registers, DMA and iATU + registers. This feature has been available since DWC PCIe v4.80a. + + dma-coherent: true + +additionalProperties: true + +... diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml index b78535040f04c..8fc2151691a47 100644 --- a/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml +++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml @@ -13,76 +13,182 @@ maintainers: description: | Synopsys DesignWare PCIe host controller endpoint +# Please create a separate DT-schema for your DWC PCIe Endpoint controller +# and make sure it's assigned with the vendor-specific compatible string. +select: + properties: + compatible: + const: snps,dw-pcie-ep + required: + - compatible + allOf: - $ref: /schemas/pci/pci-ep.yaml# + - $ref: /schemas/pci/snps,dw-pcie-common.yaml# properties: - compatible: - anyOf: - - {} - - const: snps,dw-pcie-ep - reg: - description: | - It should contain Data Bus Interface (dbi) and config registers for all - versions. - For designware core version >= 4.80, it may contain ATU address space. + description: + DBI, DBI2 reg-spaces and outbound memory window are required for the + normal controller functioning. iATU memory IO region is also required + if the space is unrolled (IP-core version >= 4.80a). minItems: 2 - maxItems: 4 + maxItems: 5 reg-names: minItems: 2 - maxItems: 4 + maxItems: 5 items: - enum: [dbi, dbi2, config, atu, addr_space, link, atu_dma, appl] - - reset-gpio: - description: GPIO pin number of PERST# signal - maxItems: 1 - deprecated: true - - reset-gpios: - description: GPIO controlled connection to PERST# signal - maxItems: 1 - - snps,enable-cdm-check: - type: boolean - description: | - This is a boolean property and if present enables - automatic checking of CDM (Configuration Dependent Module) registers - for data corruption. CDM registers include standard PCIe configuration - space registers, Port Logic registers, DMA and iATU (internal Address - Translation Unit) registers. 
- - num-ib-windows: - $ref: /schemas/types.yaml#/definitions/uint32 - maximum: 256 - description: number of inbound address translation windows - deprecated: true - - num-ob-windows: - $ref: /schemas/types.yaml#/definitions/uint32 - maximum: 256 - description: number of outbound address translation windows - deprecated: true + oneOf: + - description: + Basic DWC PCIe controller configuration-space accessible over + the DBI interface. This memory space is either activated with + CDM/ELBI = 0 and CS2 = 0 or is a contiguous memory region + with all spaces. Note iATU/eDMA CSRs are indirectly accessible + via the PL viewports on the DWC PCIe controllers older than + v4.80a. + const: dbi + - description: + Shadow DWC PCIe config-space registers. This space is selected + by setting CDM/ELBI = 0 and CS2 = 1. This is an intermix of + the PCI-SIG PCIe CFG-space with the shadow registers for some + PCI Header space, PCI Standard and Extended Structures. It's + mainly relevant for the end-point controller configuration, + but still there are some shadow registers available for the + Root Port mode too. + const: dbi2 + - description: + External Local Bus registers. It's an application-dependent + registers normally defined by the platform engineers. The space + can be selected by setting CDM/ELBI = 1 and CS2 = 0 wires or can + be accessed over some platform-specific means (for instance + as a part of a system controller). + enum: [ elbi, app ] + - description: + iATU/eDMA registers common for all device functions. It's an + unrolled memory space with the internal Address Translation + Unit and Enhanced DMA, which is selected by setting CDM/ELBI = 1 + and CS2 = 1. For IP-core releases prior v4.80a, these registers + have been programmed via an indirect addressing scheme using a + set of viewport CSRs mapped into the PL space. Note iATU is + normally mapped to the 0x0 address of this region, while eDMA + is available at 0x80000 base address. + const: atu + - description: + Platform-specific eDMA registers. Some platforms may have eDMA + CSRs mapped in a non-standard base address. The registers offset + can be changed or the MS/LS-bits of the address can be attached + in an additional RTL block before the MEM-IO transactions reach + the DW PCIe slave interface. + const: dma + - description: + PHY/PCS configuration registers. Some platforms can have the + PCS and PHY CSRs accessible over a dedicated memory mapped + region, but mainly these registers are indirectly accessible + either by means of the embedded PHY viewport schema or by some + platform-specific method. + const: phy + - description: + Outbound iATU-capable memory-region which will be used to + generate various application-specific traffic on the PCIe bus + hierarchy. It's usage scenario depends on the endpoint + functionality, for instance it can be used to create MSI(X) + messages. + const: addr_space + - description: + Vendor-specific CSR names. Consider using the generic names above + for new bindings. + oneOf: + - description: See native 'elbi/app' CSR region for details. + enum: [ link, appl ] + - description: See native 'atu' CSR region for details. + enum: [ atu_dma ] + allOf: + - contains: + const: dbi + - contains: + const: addr_space + + interrupts: + description: + There is no mandatory IRQ signals for the normal controller functioning, + but in addition to the native set the platforms may have a link- or + PM-related IRQs specified. 
+ minItems: 1 + maxItems: 20 + + interrupt-names: + minItems: 1 + maxItems: 20 + items: + oneOf: + - description: + Controller request to read or write virtual product data + from/to the VPD capability registers. + const: vpd + - description: + Link Equalization Request flag is set in the Link Status 2 + register (applicable if the corresponding IRQ is enabled in + the Link Control 3 register). + const: l_eq + - description: + Indicates that the eDMA Tx/Rx transfer is complete or that an + error has occurred on the corresponding channel. eDMA can have + eight Tx (Write) and Rx (Read) eDMA channels thus supporting up + to 16 IRQ signals all together. Write eDMA channels shall go + first in the ordered row as per default edma_int[*] bus setup. + pattern: '^dma([0-9]|1[0-5])?$' + - description: + PCIe protocol correctable error or a Data Path protection + correctable error is detected by the automotive/safety + feature. + const: sft_ce + - description: + Indicates that the internal safety mechanism has detected an + uncorrectable error. + const: sft_ue + - description: + Application-specific IRQ raised depending on the vendor-specific + events basis. + const: app + - description: + Vendor-specific IRQ names. Consider using the generic names above + for new bindings. + oneOf: + - description: See native "app" IRQ for details + enum: [ intr ] + + max-functions: + maximum: 32 required: + - compatible - reg - reg-names - - compatible additionalProperties: true examples: - | - bus { - #address-cells = <1>; - #size-cells = <1>; - pcie-ep@dfd00000 { - compatible = "snps,dw-pcie-ep"; - reg = <0xdfc00000 0x0001000>, /* IP registers 1 */ - <0xdfc01000 0x0001000>, /* IP registers 2 */ - <0xd0000000 0x2000000>; /* Configuration space */ - reg-names = "dbi", "dbi2", "addr_space"; - }; + pcie-ep@dfd00000 { + compatible = "snps,dw-pcie-ep"; + reg = <0xdfc00000 0x0001000>, /* IP registers 1 */ + <0xdfc01000 0x0001000>, /* IP registers 2 */ + <0xd0000000 0x2000000>; /* Configuration space */ + reg-names = "dbi", "dbi2", "addr_space"; + + interrupts = <23>, <24>; + interrupt-names = "dma0", "dma1"; + + clocks = <&sys_clk 12>, <&sys_clk 24>; + clock-names = "dbi", "ref"; + + resets = <&sys_rst 12>, <&sys_rst 24>; + reset-names = "dbi", "phy"; + + phys = <&pcie_phy0>, <&pcie_phy1>, <&pcie_phy2>, <&pcie_phy3>; + phy-names = "pcie0", "pcie1", "pcie2", "pcie3"; + + max-link-speed = <3>; + max-functions = /bits/ 8 <4>; }; diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml index 7287d395e1b65..c62c8fe517aef 100644 --- a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml @@ -13,20 +13,25 @@ maintainers: description: | Synopsys DesignWare PCIe host controller +# Please create a separate DT-schema for your DWC PCIe Root Port controller +# and make sure it's assigned with the vendor-specific compatible string. +select: + properties: + compatible: + const: snps,dw-pcie + required: + - compatible + allOf: - $ref: /schemas/pci/pci-bus.yaml# + - $ref: /schemas/pci/snps,dw-pcie-common.yaml# properties: - compatible: - anyOf: - - {} - - const: snps,dw-pcie - reg: - description: | - It should contain Data Bus Interface (dbi) and config registers for all - versions. - For designware core version >= 4.80, it may contain ATU address space. + description: + At least DBI reg-space and peripheral devices CFG-space outbound window + are required for the normal controller work. 
iATU memory IO region is + also required if the space is unrolled (IP-core version >= 4.80a). minItems: 2 maxItems: 5 @@ -34,71 +39,192 @@ properties: minItems: 2 maxItems: 5 items: - enum: [ dbi, dbi2, config, atu, atu_dma, app, appl, elbi, mgmt, ctrl, - parf, cfg, link, ulreg, smu, mpu, apb, phy ] - - num-lanes: - description: | - number of lanes to use (this property should be specified unless - the link is brought already up in firmware) - maximum: 16 - - reset-gpio: - description: GPIO pin number of PERST# signal - maxItems: 1 - deprecated: true - - reset-gpios: - description: GPIO controlled connection to PERST# signal - maxItems: 1 - - interrupts: true - - interrupt-names: true - - clocks: true - - snps,enable-cdm-check: - type: boolean - description: | - This is a boolean property and if present enables - automatic checking of CDM (Configuration Dependent Module) registers - for data corruption. CDM registers include standard PCIe configuration - space registers, Port Logic registers, DMA and iATU (internal Address - Translation Unit) registers. - - num-viewport: - $ref: /schemas/types.yaml#/definitions/uint32 - maximum: 256 - description: | - number of view ports configured in hardware. If a platform - does not specify it, the driver autodetects it. - deprecated: true + oneOf: + - description: + Basic DWC PCIe controller configuration-space accessible over + the DBI interface. This memory space is either activated with + CDM/ELBI = 0 and CS2 = 0 or is a contiguous memory region + with all spaces. Note iATU/eDMA CSRs are indirectly accessible + via the PL viewports on the DWC PCIe controllers older than + v4.80a. + const: dbi + - description: + Shadow DWC PCIe config-space registers. This space is selected + by setting CDM/ELBI = 0 and CS2 = 1. This is an intermix of + the PCI-SIG PCIe CFG-space with the shadow registers for some + PCI Header space, PCI Standard and Extended Structures. It's + mainly relevant for the end-point controller configuration, + but still there are some shadow registers available for the + Root Port mode too. + const: dbi2 + - description: + External Local Bus registers. It's an application-dependent + registers normally defined by the platform engineers. The space + can be selected by setting CDM/ELBI = 1 and CS2 = 0 wires or can + be accessed over some platform-specific means (for instance + as a part of a system controller). + enum: [ elbi, app ] + - description: + iATU/eDMA registers common for all device functions. It's an + unrolled memory space with the internal Address Translation + Unit and Enhanced DMA, which is selected by setting CDM/ELBI = 1 + and CS2 = 1. For IP-core releases prior v4.80a, these registers + have been programmed via an indirect addressing scheme using a + set of viewport CSRs mapped into the PL space. Note iATU is + normally mapped to the 0x0 address of this region, while eDMA + is available at 0x80000 base address. + const: atu + - description: + Platform-specific eDMA registers. Some platforms may have eDMA + CSRs mapped in a non-standard base address. The registers offset + can be changed or the MS/LS-bits of the address can be attached + in an additional RTL block before the MEM-IO transactions reach + the DW PCIe slave interface. + const: dma + - description: + PHY/PCS configuration registers. 
Some platforms can have the + PCS and PHY CSRs accessible over a dedicated memory mapped + region, but mainly these registers are indirectly accessible + either by means of the embedded PHY viewport schema or by some + platform-specific method. + const: phy + - description: + Outbound iATU-capable memory-region which will be used to access + the peripheral PCIe devices configuration space. + const: config + - description: + Vendor-specific CSR names. Consider using the generic names above + for new bindings. + oneOf: + - description: See native 'elbi/app' CSR region for details. + enum: [ apb, mgmt, link, ulreg, appl ] + - description: See native 'atu' CSR region for details. + enum: [ atu_dma ] + - description: Syscon-related CSR regions. + enum: [ smu, mpu ] + allOf: + - contains: + const: dbi + - contains: + const: config + + interrupts: + description: + DWC PCIe Root Port/Complex specific IRQ signals. At least MSI interrupt + signal is supposed to be specified for the host controller. + minItems: 1 + maxItems: 26 + + interrupt-names: + minItems: 1 + maxItems: 26 + items: + oneOf: + - description: + Controller request to read or write virtual product data + from/to the VPD capability registers. + const: vpd + - description: + Link Equalization Request flag is set in the Link Status 2 + register (applicable if the corresponding IRQ is enabled in + the Link Control 3 register). + const: l_eq + - description: + Indicates that the eDMA Tx/Rx transfer is complete or that an + error has occurred on the corresponding channel. eDMA can have + eight Tx (Write) and Rx (Read) eDMA channels thus supporting up + to 16 IRQ signals all together. Write eDMA channels shall go + first in the ordered row as per default edma_int[*] bus setup. + pattern: '^dma([0-9]|1[0-5])?$' + - description: + PCIe protocol correctable error or a Data Path protection + correctable error is detected by the automotive/safety + feature. + const: sft_ce + - description: + Indicates that the internal safety mechanism has detected an + uncorrectable error. + const: sft_ue + - description: + Application-specific IRQ raised depending on the vendor-specific + events basis. + const: app + - description: + DSP AXI MSI Interrupt detected. It gets de-asserted when there is + no more MSI interrupt pending. The interrupt is relevant to the + iMSI-RX - Integrated MSI Receiver (AXI bridge). + const: msi + - description: + Legacy A/B/C/D interrupt signal. Basically it's triggered by + receiving a Assert_INT{A,B,C,D}/Desassert_INT{A,B,C,D} message + from the downstream device. + pattern: "^int(a|b|c|d)$" + - description: + Error condition detected and a flag is set in the Root Error Status + register of the AER capability. It's asserted when the RC + internally generated an error or an error message is received by + the RC. + const: aer + - description: + PME message is received by the port. That means having the PME + status bit set in the Root Status register (the event is + supposed to be unmasked in the Root Control register). + const: pme + - description: + Hot-plug event is detected. That is a bit has been set in the + Slot Status register and the corresponding event is enabled in + the Slot Control register. + const: hp + - description: + Link Autonomous Bandwidth Status flag has been set in the Link + Status register (the event is supposed to be unmasked in the + Link Control register). 
+ const: bw_au + - description: + Bandwidth Management Status flag has been set in the Link + Status register (the event is supposed to be unmasked in the + Link Control register). + const: bw_mg + - description: + Vendor-specific IRQ names. Consider using the generic names above + for new bindings. + oneOf: + - description: See native "app" IRQ for details + enum: [ intr ] + allOf: + - contains: + const: msi additionalProperties: true required: + - compatible - reg - reg-names - - compatible examples: - | - bus { - #address-cells = <1>; - #size-cells = <1>; - pcie@dfc00000 { - device_type = "pci"; - compatible = "snps,dw-pcie"; - reg = <0xdfc00000 0x0001000>, /* IP registers */ - <0xd0000000 0x0002000>; /* Configuration space */ - reg-names = "dbi", "config"; - #address-cells = <3>; - #size-cells = <2>; - ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>, - <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>; - interrupts = <25>, <24>; - #interrupt-cells = <1>; - num-lanes = <1>; - }; + pcie@dfc00000 { + compatible = "snps,dw-pcie"; + device_type = "pci"; + reg = <0xdfc00000 0x0001000>, /* IP registers */ + <0xd0000000 0x0002000>; /* Configuration space */ + reg-names = "dbi", "config"; + #address-cells = <3>; + #size-cells = <2>; + ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>, + <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>; + bus-range = <0x0 0xff>; + + interrupts = <25>, <24>; + interrupt-names = "msi", "hp"; + #interrupt-cells = <1>; + + reset-gpios = <&port0 0 1>; + + phys = <&pcie_phy>; + phy-names = "pcie"; + + num-lanes = <1>; + max-link-speed = <3>; }; diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml index d33b72fabc5d8..20e42dcb23e09 100644 --- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml +++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml @@ -32,8 +32,13 @@ allOf: properties: mux-controls: maxItems: 1 + reg-names: + items: + - const: config + - const: map required: - mux-controls + - reg-names else: required: - interrupts @@ -177,6 +182,7 @@ examples: compatible = "baikal,bt1-sys-ssi"; reg = <0x1f040100 0x900>, <0x1c000000 0x1000000>; + reg-names = "config", "map"; #address-cells = <1>; #size-cells = <0>; mux-controls = <&boot_mux>; diff --git a/Documentation/devicetree/bindings/sram/sram.yaml b/Documentation/devicetree/bindings/sram/sram.yaml index 993430be355b4..c19e95859da96 100644 --- a/Documentation/devicetree/bindings/sram/sram.yaml +++ b/Documentation/devicetree/bindings/sram/sram.yaml @@ -36,6 +36,7 @@ properties: - nvidia,tegra234-sysram - qcom,rpm-msg-ram - rockchip,rk3288-pmu-sram + - baikal,bt1-sram reg: maxItems: 1 @@ -46,6 +47,15 @@ properties: A list of phandle and clock specifier pair that controls the single SRAM clock. + clock-names: true + + resets: + description: + A list of phandle and reset specifier pair that controls the SRAM + state reset. 
+ + reset-names: true + "#address-cells": const: 1 @@ -94,6 +104,7 @@ patternProperties: - samsung,exynos4210-sysram - samsung,exynos4210-sysram-ns - socionext,milbeaut-smp-sram + - baikal,bt1-boot-sram reg: description: @@ -135,21 +146,37 @@ required: - compatible - reg -if: - not: - properties: - compatible: - contains: - enum: - - qcom,rpm-msg-ram - - rockchip,rk3288-pmu-sram -then: - required: - - "#address-cells" - - "#size-cells" - - ranges - -additionalProperties: false +allOf: + - $ref: /schemas/mux/mux-consumer.yaml# + - if: + properties: + compatible: + contains: + const: baikal,bt1-sram + then: + properties: + mux-controls: + maxItems: 1 + required: + - mux-controls + else: + properties: + mux-controls: false + - if: + properties: + compatible: + not: + contains: + enum: + - qcom,rpm-msg-ram + - rockchip,rk3288-pmu-sram + then: + required: + - "#address-cells" + - "#size-cells" + - ranges + +unevaluatedProperties: false examples: - | diff --git a/MAINTAINERS b/MAINTAINERS index 350d7e3ba94f9..0e39f38df983a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3121,6 +3121,7 @@ F: arch/arm/mach-zynq/ F: drivers/clocksource/timer-cadence-ttc.c F: drivers/cpuidle/cpuidle-zynq.c F: drivers/edac/synopsys_edac.c +F: drivers/edac/zynq_edac.c F: drivers/i2c/busses/i2c-cadence.c F: drivers/i2c/busses/i2c-xiic.c F: drivers/mmc/host/sdhci-of-arasan.c @@ -3554,6 +3555,13 @@ F: drivers/video/backlight/ F: include/linux/backlight.h F: include/linux/pwm_backlight.h +BAIKAL-T1 PVT HARDWARE MONITOR DRIVER +M: Serge Semin +L: linux-hwmon@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml +F: drivers/hwmon/bt1-pvt.* + BARCO P50 GPIO DRIVER M: Santosh Kumar Yadav M: Peter Korsgaard @@ -5913,6 +5921,7 @@ F: drivers/mtd/nand/raw/denali* DESIGNWARE EDMA CORE IP DRIVER M: Gustavo Pimentel +R: Serge Semin L: dmaengine@vger.kernel.org S: Maintained F: drivers/dma/dw-edma/ @@ -13801,6 +13810,21 @@ F: arch/mips/ F: drivers/platform/mips/ F: include/dt-bindings/mips/ +MIPS/BAIKAL-T1 PLATFORM +M: Serge Semin +L: linux-mips@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/bus/baikal,bt1-* +F: Documentation/devicetree/bindings/clock/baikal,bt1-* +F: Documentation/devicetree/bindings/mfd/baikal,bt1-* +F: Documentation/devicetree/bindings/memory-controllers/baikal,bt1-* +F: arch/mips/baikal-t1/ +F: arch/mips/boot/dts/baikal-t1/ +F: arch/mips/include/asm/mach-baikal-t1/ +F: drivers/clk/baikal-t1/ +F: drivers/bus/bt1-* +F: drivers/memory/bt1-l2-ctl.c + MIPS BOSTON DEVELOPMENT BOARD M: Paul Burton L: linux-mips@vger.kernel.org @@ -14647,8 +14671,9 @@ F: tools/testing/selftests/ntb/ NTB IDT DRIVER M: Serge Semin L: ntb@lists.linux.dev -S: Supported +S: Maintained F: drivers/ntb/hw/idt/ +F: drivers/misc/eeprom/idt_89hpesx.c NTB INTEL DRIVER M: Dave Jiang @@ -15852,10 +15877,10 @@ F: drivers/pci/controller/dwc/pci-exynos.c PCI DRIVER FOR SYNOPSYS DESIGNWARE M: Jingoo Han M: Gustavo Pimentel +R: Serge Semin L: linux-pci@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml -F: Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml +F: Documentation/devicetree/bindings/pci/snps,dw-pcie*.yaml F: drivers/pci/controller/dwc/*designware* PCI DRIVER FOR TI DRA7XX/J721E @@ -15998,6 +16023,13 @@ S: Maintained F: Documentation/devicetree/bindings/pci/axis,artpec* F: drivers/pci/controller/dwc/*artpec* +PCIE DRIVER FOR BAIKAL-T1 +M: Serge Semin +L: linux-pci@vger.kernel.org +S: Maintained +F: 
Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml +F: drivers/pci/controller/dwc/pcie-bt1.c + PCIE DRIVER FOR CAVIUM THUNDERX M: Robert Richter L: linux-pci@vger.kernel.org diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index 5d04438ee12ed..ca3ea083ef2ae 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -5,6 +5,7 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/ platform-$(CONFIG_AR7) += ar7/ platform-$(CONFIG_ATH25) += ath25/ platform-$(CONFIG_ATH79) += ath79/ +platform-$(CONFIG_MIPS_BAIKAL_T1) += baikal-t1/ platform-$(CONFIG_BCM47XX) += bcm47xx/ platform-$(CONFIG_BCM63XX) += bcm63xx/ platform-$(CONFIG_BMIPS_GENERIC) += bmips/ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index b26b77673c2cc..2815b777851b5 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -522,6 +522,54 @@ config MACH_LOONGSON64 and Loongson-2F which will be removed), developed by the Institute of Computing Technology (ICT), Chinese Academy of Sciences (CAS). +config MIPS_BAIKAL_T1 + bool "MIPS Baikal-T1 SoC" + imply MIPS_CPS + select BOOT_ELF32 + select BOOT_RAW + select USE_OF + select GENERIC_ISA_DMA + select DMA_NONCOHERENT + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT + select SCHED_HRTICK + select HAVE_PCI + select PCI_DRIVERS_GENERIC + select COMMON_CLK + select ARCH_HAS_RESET_CONTROLLER + select UHI_BOOT + select MIPS_CPU_SCACHE + select IRQ_MIPS_CPU + select MIPS_GIC + select CLKSRC_MIPS_GIC + select CEVT_R4K + select CSRC_R4K + select HARDIRQS_SW_RESEND + select DW_APB_TIMER_OF + select MIPS_EXTERNAL_TIMER + select GENERIC_CLOCKEVENTS_MIN_ADJUST + select SMP_UP if SMP + select EDAC_SUPPORT + select EDAC_ATOMIC_SCRUB + select SOC_BUS + select STRONG_UC_ORDERING + select SYS_SUPPORTS_MIPS_CPS + select SYS_HAS_CPU_MIPS32_R2 + select SYS_HAS_CPU_MIPS32_R3_5 + select SYS_HAS_CPU_MIPS32_R5 + select SYS_HAS_CPU_P5600 + select SYS_HAS_EARLY_PRINTK + select SYS_SUPPORTS_LITTLE_ENDIAN + select SYS_SUPPORTS_HIGHMEM + select SYS_SUPPORTS_32BIT_KERNEL + select SYS_SUPPORTS_RELOCATABLE + select SYS_SUPPORTS_ZBOOT + select SYS_SUPPORTS_ZBOOT_UART_PROM + select CPU_MIPSR2_IRQ_VI + select CPU_MIPSR2_IRQ_EI + select MIPS_L1_CACHE_SHIFT_5 + help + This enables support of Baikal Electronics Baikal-T1 SoC platform. 
+ config MIPS_MALTA bool "MIPS Malta board" select ARCH_MAY_HAVE_PC_FDC @@ -992,6 +1040,7 @@ endchoice source "arch/mips/alchemy/Kconfig" source "arch/mips/ath25/Kconfig" source "arch/mips/ath79/Kconfig" +source "arch/mips/baikal-t1/Kconfig" source "arch/mips/bcm47xx/Kconfig" source "arch/mips/bcm63xx/Kconfig" source "arch/mips/bmips/Kconfig" @@ -1949,6 +1998,14 @@ config WEAK_ORDERING # config WEAK_REORDERING_BEYOND_LLSC bool + +# +# CPU may not reorder reads and writes R->R, R->W, W->R, W->W within Uncached +# Cacheability and Coherency Attribute (CCA=2) +# +config STRONG_UC_ORDERING + bool + endmenu # diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index f4ae7900fcd35..85bf874f5b93f 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -138,6 +138,7 @@ config MIPS_CPS_NS16550 config MIPS_CPS_NS16550_BASE hex "UART Base Address" default 0x1b0003f8 if MIPS_MALTA + default 0x1f04a000 if MIPS_BAIKAL_T1 default 0 help The base address of the ns16550 compatible UART on which to output @@ -147,6 +148,7 @@ config MIPS_CPS_NS16550_BASE config MIPS_CPS_NS16550_SHIFT int "UART Register Shift" + default 2 if MIPS_BAIKAL_T1 default 0 help The number of bits to shift ns16550 register indices by in order to @@ -155,6 +157,7 @@ config MIPS_CPS_NS16550_SHIFT config MIPS_CPS_NS16550_WIDTH int "UART Register Width" + default 4 if MIPS_BAIKAL_T1 default 1 help ns16550 registers width. UART registers IO access methods will be diff --git a/arch/mips/baikal-t1/Kconfig b/arch/mips/baikal-t1/Kconfig new file mode 100644 index 0000000000000..4202cdce4e95c --- /dev/null +++ b/arch/mips/baikal-t1/Kconfig @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC +# +# Baikal-T1 platform configs +# +if MIPS_BAIKAL_T1 + +config BT1_DEBUG + bool "Enable SoC/kernel debug options" + select EXPERT + select DEBUG_KERNEL + select DEBUG_ZBOOT + select DEBUG_MEMORY_INIT + select DEBUG_HIGHMEM if HIGHMEM + select DEBUG_STACKOVERFLOW + select RCU_TRACE + select EDAC_DEBUG if EDAC + select SCACHE_DEBUGFS + select GENERIC_IRQ_DEBUGFS + select CMA_DEBUGFS if CMA + select MIPS_CPS_NS16550_BOOL if MIPS_CPS + help + Use this option if you are in the process of developing the kernel + drivers or platform code. + +config BT1_EARLY_UART + int "Default UART device for early printk and zboot" + range 0 1 + default 0 + help + There are two DW APB UART-based serial interfaces available on the + Baikal-T1 SoC. With this option you can select one of them to be used + to print early logs and zboot debug symbols. Note having both + EARLY_PRINTK and SERIAL_EARLYCON configs enabled is prone to + getting duplicated log messages if both of these sub-systems are + using the same console. In case you need to have the logs on both + UART devices make sure that this parameter and the 'stdout-path' DT + property point to different serial devices. + +config BT1_CPU_FEATURE_OVERRIDES + bool "Declare CPU features" + help + By default nearly all the MIPS IP-core features are detectable at + runtime. Corresponding cpu_has_* flags are constantly checked in + the code to enable/disable corresponding platform features. Since + we intend to build a Baikal-T1 CPU-specific kernel, there is no + need for such flexibility, so we can freely define these flags with + values known at build-time. By doing so we not only decrease the + kernel size, but also speed it up. + + If unsure, say N.
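For illustration of the build-time declaration mentioned above, such an override header mostly boils down to a set of constant cpu_has_* definitions. The excerpt below is only an assumed sketch; the authoritative list lives in the arch/mips/include/asm/mach-baikal-t1/cpu-feature-overrides.h header referenced by the platform cflags.

	/* Hypothetical excerpt: pin a few P5600 features at build time instead
	 * of probing them from the CP0 Config registers on every boot. */
	#define cpu_has_tlb		1
	#define cpu_has_4kex		1
	#define cpu_has_counter		1
	#define cpu_has_mips32r2	1
	#define cpu_has_mips64r2	0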
+ +config BT1_SWIOTLB_SIZE + int "SWIOTLB size in MiB" if SWIOTLB + range 4 64 + default 8 + help + Due to the Baikal-T1 main interconnect controller's invalid synthesis + parameters, SATA/USB/GMACx aren't able to access physical memory + above 4GiB. So if XPA is enabled and the bootloader reports more + than 4GiB of physical memory, we need to have the SWIOTLB declared. + Since by default SWIOTLB consumes too much memory, we create a + custom table with a compile-time configurable buffer size. + +choice + prompt "Baikal-T1 SoC based boards devicetree" + default BT1_DTB_NONE + help + Select a devicetree of the board with the Baikal-T1 SoC installed. + + config BT1_DTB_NONE + bool "None" + + config BT1_DTB_ALL + bool "All" + + config BT1_DTB_GENERIC + bool "Generic Baikal-T1 Board" + help + This option provides a dtb for a generic board. It just activates all + the Baikal-T1 SoC peripherals. So all the run-time detectable devices + will work out of the box, while undetectable platform devices will be + left untouched. + + config BT1_DTB_BFK + bool "Baikal Electronics BFK" + help + This option provides a dtb for the Baikal Electronics BFK boards. + It's a Baikal-T1 SoC evaluation board specifically designed for + SoC-based software prototyping. + +endchoice + +menu "Baikal-T1 Errata" + +config BT1_ERRATA_JR_LS_BUG + bool "Fix load/store bonding and JR prediction bug" + help + Early Baikal-T1 chips had problems when load/store bonding and JR + prediction were enabled. Switch these features off if you are using + the engineering version of the chip. + + If unsure, say N. + +config BT1_ERRATA_GMAC_SPEED_INV_BUG + bool "Fix DW GMAC 10/100Mbit link speed bug" + help + DW GMAC on early Baikal-T1 chip releases had inverted 10/100Mbit + MAC speed settings. So when a 10Mbit link is requested, the 100Mbit + MAC link speed must be set up and vice versa. + + If unsure, say N.
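To make the last erratum a bit more tangible: a workaround of this kind normally just swaps a requested 10/100Mbit speed right before the MAC is programmed, leaving gigabit untouched. The helper below is a hedged sketch with an assumed name and call site, not the actual fixup carried by this patch:

	#include <linux/ethtool.h>
	#include <linux/kconfig.h>

	/* Sketch only: invert the requested 10/100Mbit speed on the buggy
	 * engineering chip revisions. */
	static int bt1_gmac_fixup_speed(int speed)
	{
		if (!IS_ENABLED(CONFIG_BT1_ERRATA_GMAC_SPEED_INV_BUG))
			return speed;

		if (speed == SPEED_10)
			return SPEED_100;
		if (speed == SPEED_100)
			return SPEED_10;

		return speed;
	}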
+ +endmenu + +endif # MIPS_BAIKAL_T1 diff --git a/arch/mips/baikal-t1/Makefile b/arch/mips/baikal-t1/Makefile new file mode 100644 index 0000000000000..9052fa7ca6f6d --- /dev/null +++ b/arch/mips/baikal-t1/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC +# +# Baikal-T1 platform code makefile +# +obj-y += init.o irq.o + +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o diff --git a/arch/mips/baikal-t1/Platform b/arch/mips/baikal-t1/Platform new file mode 100644 index 0000000000000..089969fa8743f --- /dev/null +++ b/arch/mips/baikal-t1/Platform @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC +# +# Baikal-T1 platform makefile +# +platform-$(CONFIG_MIPS_BAIKAL_T1) += baikal-t1/ +cflags-$(CONFIG_MIPS_BAIKAL_T1) += -I$(srctree)/arch/mips/include/asm/mach-baikal-t1 +ifdef CONFIG_KVM_GUEST + load-$(CONFIG_MIPS_BAIKAL_T1) += 0x0000000040100000 + zload-$(CONFIG_MIPS_BAIKAL_T1) += 0xffffffff45100000 +else + load-$(CONFIG_MIPS_BAIKAL_T1) += 0xffffffff80100000 + zload-$(CONFIG_MIPS_BAIKAL_T1) += 0xffffffff85100000 +endif +all-$(CONFIG_MIPS_BAIKAL_T1) := $(COMPRESSION_FNAME).bin diff --git a/arch/mips/baikal-t1/early_printk.c b/arch/mips/baikal-t1/early_printk.c new file mode 100644 index 0000000000000..21cc2c7276837 --- /dev/null +++ b/arch/mips/baikal-t1/early_printk.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Alexey Malahov + * Serge Semin + * + * Baikal-T1 early printk + */ +#include +#include +#include + +#include + +#define BT1_UART_BASE(_id) \ + (void *)KSEG1ADDR(CONCATENATE(BT1_UART, CONCATENATE(_id, _BASE))) + +void prom_putchar(char c) +{ + void __iomem *uart_base = BT1_UART_BASE(CONFIG_BT1_EARLY_UART); + unsigned int timeout = 50000; + int status, bits; + + bits = UART_LSR_TEMT | UART_LSR_THRE; + + do { + status = __raw_readl(uart_base + (UART_LSR << 2)); + + if (--timeout == 0) + break; + } while ((status & bits) != bits); + + if (timeout) + __raw_writel(c, uart_base + (UART_TX << 2)); +} diff --git a/arch/mips/baikal-t1/init.c b/arch/mips/baikal-t1/init.c new file mode 100644 index 0000000000000..2909363ff0459 --- /dev/null +++ b/arch/mips/baikal-t1/init.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Alexey Malahov + * Serge Semin + * + * Baikal-T1 platform initialization + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static __initdata const void *fdt; + +/* + * The following configuration have been used to synthesize the Baikal-T1 + * MIPS Warroir P5600 core: + * 1) SI_EVAReset = 0 - boot in legacy (not EVA) memory layout mode after + * reset. + * 2) SI_UseExceptionBase = 0 - core uses legacy BEV mode, which selects + * 0xBFC00000 to be exception vector by default after reset. + * 3) SI_ExceptionBase[31:12] = 0xBFC00000 - externally set default exception + * SI_ExceptionBasePA[31:29] = 0x0 base address. It is used when + * CP0.CONFIG5.K = 1. + * 4) SI_EICPresent = 0 - even though GIC is always attached to the cores, + * this pin is hardwaired to the state of the + * GIC_VX_CTL_EIC bit. 
+ */ + +/* + * Redefine the MIPS CDMM phys base method to be used at the earliest boot + * stage before DT is parsed. + */ +#ifdef CONFIG_MIPS_EJTAG_FDC_EARLYCON + +phys_addr_t mips_cdmm_phys_base(void) +{ + return BT1_P5600_CDMM_BASE; +} + +#endif /* CONFIG_MIPS_EJTAG_FDC_EARLYCON */ + +/* + * We have to redefine the L2-sync phys base method, since the default + * region overlaps the Baikal-T1 boot memory following the CM2 GCRs. + */ +phys_addr_t mips_cm_l2sync_phys_base(void) +{ + return BT1_P5600_GCR_L2SYNC_BASE; +} + +void __init *plat_get_fdt(void) +{ + const char *str; + + /* Return already found fdt. */ + if (fdt) + return (void *)fdt; + + /* + * Generic method will search for appended, UHI and built-in DTBs. + * Some older version of Baikal-T1 bootloader could also pass DTB via + * the FW arg3 slot. So check that option too. + */ + fdt = get_fdt(); + if (fdt) { + str = (fw_arg0 == -2) ? "UHI" : "Built-in/Appended"; + } else if (fw_arg3) { + fdt = phys_to_virt(fw_arg3); + str = "Legacy position"; + } + + if (!fdt || fdt_check_header(fdt)) + panic("No valid dtb found. Can't continue."); + + pr_info("%s DTB found at %p\n", str, fdt); + + return (void *)fdt; +} + +#ifdef CONFIG_RELOCATABLE + +void __init plat_fdt_relocated(void *new_location) +{ + fdt = NULL; + + /* + * Forget about the way dtb has been passed at the system startup. Use + * UHI always. + */ + fw_arg0 = -2; + fw_arg1 = (unsigned long)new_location; +} + +#endif /* CONFIG_RELOCATABLE */ + +void __init prom_init(void) +{ + if (IS_ENABLED(CONFIG_EVA) && (read_c0_config5() & MIPS_CONF5_K)) + pr_info("Enhanced Virtual Addressing (EVA) enabled\n"); + + /* + * Disable Legacy SYNC transaction performed on the L2/Memory port. + * This shall significantly improve the concurrent MMIO access + * performance. + */ + change_gcr_control(CM_GCR_CONTROL_SYNCDIS, CM_GCR_CONTROL_SYNCDIS); + + plat_get_fdt(); +} + +void __init plat_mem_setup(void) +{ + memblock_add(BT1_LOMEM_BASE, BT1_LOMEM_SIZE); + +#ifdef CONFIG_HIGHMEM + memblock_add(BT1_HIMEM_BASE, BT1_HIMEM_SIZE); +#endif + +#ifdef CONFIG_PCI + PCIBIOS_MIN_IO = 0x100; +#endif + + __dt_setup_arch((void *)fdt); +} + +void __init device_tree_init(void) +{ + int err; + + unflatten_and_copy_device_tree(); + + mips_cpc_probe(); + + err = register_cps_smp_ops(); + if (err) + err = register_up_smp_ops(); +} + +#ifdef CONFIG_SWIOTLB + +void __init plat_swiotlb_setup(void) +{ + phys_addr_t top; + + /* + * Skip SWIOTLB initialization since there is no that much memory to + * cause the peripherals invalid access. + */ + top = memblock_end_of_DRAM(); + if (top <= SIZE_MAX) + return; + + /* + * Override the default SWIOTLB size with the configuration value. + * Note a custom size has been passed via the kernel parameter it won't + * be overwritten. + */ + swiotlb_adjust_size(CONFIG_BT1_SWIOTLB_SIZE * SZ_1M); + swiotlb_init(true, SWIOTLB_VERBOSE); +} + +#endif /* CONFIG_SWIOTLB */ + +void __init prom_free_prom_memory(void) {} + +#define HZ_TO_MHZ(_hz) (_hz / 1000000) +#define HZ_GET_KHZ(_hz) ((_hz / 1000) % 1000) +void __init plat_time_init(void) +{ + struct device_node *np; + unsigned long rate; + struct clk *clk; + + of_clk_init(NULL); + + np = of_get_cpu_node(0, NULL); + if (!np) { + pr_err("Failed to get CPU of node\n"); + goto err_timer_probe; + } + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("Failed to get CPU clock (%ld)\n", PTR_ERR(clk)); + goto err_timer_probe; + } + + /* CPU count/compare timer runs at half the CPU frequency. 
*/ + rate = clk_get_rate(clk); + mips_hpt_frequency = rate / 2; + + pr_info("MIPS CPU frequency: %lu.%03lu MHz\n", + HZ_TO_MHZ(rate), HZ_GET_KHZ(rate)); + pr_info("MIPS CPU count/compare timer frequency: %u.%03u MHz\n", + HZ_TO_MHZ(mips_hpt_frequency), HZ_GET_KHZ(mips_hpt_frequency)); + + clk_put(clk); + +err_timer_probe: + timer_probe(); +} + +const char *get_system_type(void) +{ + return "Baikal-T1 SoC"; +} + +static struct bt1_soc { + struct soc_device_attribute dev_attr; + char revision[16]; + char id[16]; +} soc; + +static int __init soc_setup(void) +{ + unsigned int cpuid = boot_cpu_data.processor_id; + struct soc_device *soc_dev; + struct device *parent = NULL; + int ret = 0; + + soc.dev_attr.machine = mips_get_machine_name(); + soc.dev_attr.family = get_system_type(); + soc.dev_attr.revision = soc.revision; + soc.dev_attr.soc_id = soc.id; + + snprintf(soc.revision, sizeof(soc.revision) - 1, "%u.%u.%u", + (cpuid >> 5) & 0x07, (cpuid >> 2) & 0x07, cpuid & 0x03); + snprintf(soc.id, sizeof(soc.id) - 1, "0x%08X", + readl(phys_to_virt(BT1_BOOT_CTRL_BASE + BT1_BOOT_CTRL_DRID))); + + soc_dev = soc_device_register(&soc.dev_attr); + if (IS_ERR(soc_dev)) { + ret = PTR_ERR(soc_dev); + goto err_return; + } + + parent = soc_device_to_device(soc_dev); + +err_return: + return ret; +} +arch_initcall(soc_setup); + +int __uncached_access(struct file *file, unsigned long addr) +{ + if (file->f_flags & O_DSYNC) + return 1; + + return addr >= __pa(high_memory) || + ((addr >= BT1_MMIO_START) && (addr < BT1_MMIO_END)); +} + +#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED + +static phys_addr_t uca_start, uca_end; + +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + phys_addr_t offset = PFN_PHYS(pfn), end = offset + size; + + if (__uncached_access(file, offset)) { + if (uca_start && (offset >= uca_start) && + (end <= uca_end)) + return __pgprot((pgprot_val(vma_prot) & + ~_CACHE_MASK) | + _CACHE_UNCACHED_ACCELERATED); + else + return pgprot_noncached(vma_prot); + } + return vma_prot; +} + +int mips_set_uca_range(phys_addr_t start, phys_addr_t end) +{ + if (end <= start || end <= BT1_MMIO_START) + return -EINVAL; + + uca_start = start; + uca_end = end; + return 0; +} + +#endif /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */ diff --git a/arch/mips/baikal-t1/irq.c b/arch/mips/baikal-t1/irq.c new file mode 100644 index 0000000000000..d38e6a44c5f26 --- /dev/null +++ b/arch/mips/baikal-t1/irq.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 IRQ initialization + */ +#include + +#include +#include +#include +#include + +int get_c0_fdc_int(void) +{ + return gic_get_c0_fdc_int(); +} + +int get_c0_perfcount_int(void) +{ + return gic_get_c0_perfcount_int(); +} + +unsigned int get_c0_compare_int(void) +{ + return gic_get_c0_compare_int(); +} + +/* + * If CP0.Cause.IV == 1 and cpu_has_veic = 1 the next method isn't supposed + * to be called ever. Otherwise we just handle a vectored interrupt, which was + * routed to the generic exception vector. 
+ */ +#if !defined(CONFIG_IRQ_MIPS_CPU) + +asmlinkage void plat_irq_dispatch(void) +{ + extern unsigned long vi_handlers[]; + unsigned int cause = (read_c0_cause() & CAUSEF_IP) >> CAUSEB_IP2; + void (*isr)(void) = (void *)vi_handlers[cause]; + + if (cause && isr) + isr(); + else if (cause && !isr) + panic("Vectored interrupt %u handler is empty\n", cause); + else + spurious_interrupt(); +} + +#endif /* !CONFIG_IRQ_MIPS_CPU */ + +void __init arch_init_irq(void) +{ + if (!cpu_has_veic) + mips_cpu_irq_init(); + + irqchip_init(); +} diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 6cc28173bee89..0965e760de390 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -48,6 +48,7 @@ vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT) += $(obj)/dbg.o vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o +vmlinuzobjs-$(CONFIG_MIPS_BAIKAL_T1) += $(obj)/uart-bt1.o vmlinuzobjs-$(CONFIG_ATH79) += $(obj)/uart-ath79.o endif diff --git a/arch/mips/boot/compressed/uart-bt1.c b/arch/mips/boot/compressed/uart-bt1.c new file mode 100644 index 0000000000000..ef555577bac42 --- /dev/null +++ b/arch/mips/boot/compressed/uart-bt1.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "../../baikal-t1/early_printk.c" diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile index 928f38a79dff9..732dba86290de 100644 --- a/arch/mips/boot/dts/Makefile +++ b/arch/mips/boot/dts/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +subdir-$(CONFIG_MIPS_BAIKAL_T1) += baikal-t1 subdir-$(CONFIG_BMIPS_GENERIC) += brcm subdir-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon subdir-$(CONFIG_FIT_IMAGE_FDT_MARDUK) += img diff --git a/arch/mips/boot/dts/baikal-t1/Makefile b/arch/mips/boot/dts/baikal-t1/Makefile new file mode 100644 index 0000000000000..a704e92769966 --- /dev/null +++ b/arch/mips/boot/dts/baikal-t1/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2022 BAIKAL ELECTRONICS, JSC +# +# Baikal-T1 dtb makefile +# +dtb-$(CONFIG_BT1_DTB_ALL) += generic.dtb bfk3.dtb +dtb-$(CONFIG_BT1_DTB_GENERIC) += generic.dtb +dtb-$(CONFIG_BT1_DTB_BFK) += bfk3.dtb + +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/baikal-t1/bfk3.dts b/arch/mips/boot/dts/baikal-t1/bfk3.dts new file mode 100644 index 0000000000000..6bb47f03e2667 --- /dev/null +++ b/arch/mips/boot/dts/baikal-t1/bfk3.dts @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC + * + * Baikal Electronics BFK v3.x evaluation board device tree + */ + +/dts-v1/; + +#include "soc.dtsi" +#include "krkx4.dtsi" + +/ { + model = "Baikal Electronics BFK v3.x Evaluation Board"; + compatible = "baikal,bfk3", "baikal,bt1"; + + chosen { + bootargs = "console=ttyS0,115200n8 earlycon maxcpus=2"; + stdout-path = "serial0:115200n8"; + + /* Bootloader may use these props to pass the initrd image */ + linux,initrd-start = <0 0>; + linux,initrd-end = <0 0>; + }; + + memory { + /* + * Assume at least 512MB of RAM: + * low memory - 128MB, high memory - 256MB. + */ + device_type = "memory"; + reg = <0 0x00000000 0 0x08000000>, + <0 0x20000000 0 0x10000000>; + }; + + clocks { + /* + * SATA/PCIe/xGMAC reference clocks are provided by the + * IDT 5P49V5901 which is out of the SoC reach and is + * initialized by the embedded BMC. 
+ */ + xgmac_ref_clk: clock-oscillator-vc5p1 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <156250000>; + clock-output-names = "xgmac156m"; + }; + + pcie_ref_clk: clock-oscillator-vc5p3 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + clock-output-names = "pcie100m"; + }; + + sata_ref_clk: clock-oscillator-vc5p4 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + clock-output-names = "sata100m"; + }; + + usb_phy_clk: clock-oscillator-usb-phy { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + clock-output-names = "usbphy24m"; + }; + + gmac0_phy_clk: clock-oscillator-gmac0-phy { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + clock-output-names = "gmac0phy25m"; + }; + + gmac1_phy_clk: clock-oscillator-gmac1-phy { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + clock-output-names = "gmac1phy25m"; + }; + }; +}; + +&l2 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; +}; + +&int_rom { + status = "okay"; +}; + +&spi0 { + num-cs = <1>; + + status = "okay"; + + /* Micron N25Q128A11 */ + boot_flash: flash@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + + spi-max-frequency = <25000000>; + m25p,fast-read; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&i2c1 { + status = "okay"; + + /* STM32F205VET-based Board Management Controller */ + bmc: bmc@8 { + compatible = "baikal,bt1-bmc"; + reg = <0x08>; + }; +}; + +&i2c2 { + status = "okay"; + + spd: eeprom@50 { + compatible = "atmel,24c02"; + reg = <0x50>; + + pagesize = <8>; + }; + + /* Might be absent */ + fw: eeprom@54 { + compatible = "atmel,24cs04"; + reg = <0x54>; + + pagesize = <8>; + }; + + rtc: rtc@56 { + compatible = "abracon,abeoz9"; + reg = <0x56>; + + trickle-resistor-ohms = <5000>; + }; +}; + +&timer_dw0 { + status = "okay"; +}; + +&timer_dw1 { + status = "okay"; +}; + +&timer_dw2 { + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&wdt { + status = "okay"; +}; + +&spi1 { + num-cs = <4>; + + /* + * XP20 port switches between CS0 and port1:0 chip-selects. + * XP21 port switches between CS1 and port1:1 chip-selects. 
+ */ + cs-gpios = <0>, <0>, + <&port1 0 GPIO_ACTIVE_LOW>, <&port1 1 GPIO_ACTIVE_LOW>; + + status = "okay"; + + /* Micron N25Q256A13EF */ + test_flash11: flash@1 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + reg = <1>; + + spi-max-frequency = <25000000>; + m25p,fast-read; + }; + + /* Micron N25Q256A13EF */ + test_flash13: flash@3 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + reg = <3>; + + spi-max-frequency = <25000000>; + m25p,fast-read; + }; +}; + +&spi2 { + /* XP19 port switches between CS0 and port1:2 chip-selects */ + cs-gpios = <0>, <&port1 2 GPIO_ACTIVE_LOW>; + + status = "okay"; +}; + +&pvt { + status = "okay"; +}; + +&efuse { + status = "okay"; +}; + +&pcie { + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_PCIE_M_CLK>, + <&ccu_axi CCU_AXI_PCIE_S_CLK>, + <&pcie_ref_clk>; + clock-names = "dbi", "mstr", "slv", "ref"; + + status = "okay"; +}; + +&sram { + status = "okay"; +}; + +&dma { + status = "okay"; +}; + +&mc { + status = "okay"; +}; + +&mc_phy { + status = "okay"; +}; + +&sata { + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_SATA_CLK>, + <&sata_ref_clk>; + clock-names = "pclk", "aclk", "ref"; + + status = "okay"; +}; + +&sata0 { + hba-port-cap = ; + + status = "okay"; +}; + +&sata1 { + hba-port-cap = ; + + status = "okay"; +}; + +&xgmac { + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_XGMAC_CLK>, + <&ccu_sys CCU_SYS_XGMAC_PTP_CLK>, + <&xgmac_ref_clk>; + clock-names = "pclk", "stmmaceth", "ptp_ref", "tx"; + + mac-address = [ 00 20 13 ba 1c a1 ]; + + status = "okay"; +}; + +&hwa { + status = "okay"; +}; + +&xpcs { + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_sys CCU_SYS_XGMAC_REF_CLK>, + <&xgmac_ref_clk>; + clock-names = "pclk", "core", "pad"; + + status = "disabled"; +}; + +&mdio0 { + reset-delay-us = <10200>; + reset-post-delay-us = <1000>; + + /* Micrel KSZ9031RNX */ + gmac0_phy: ethernet-phy@3 { + compatible = "ethernet-phy-id0022.1620"; + reg = <0x3>; + + clocks = <&gmac0_phy_clk>; + clock-names = "ref"; + }; +}; + +&gmac0 { + mac-address = [ 00 26 58 80 01 02 ]; + + phy-handle = <&gmac0_phy>; + + status = "okay"; +}; + +&mdio1 { + reset-delay-us = <10200>; + reset-post-delay-us = <1000>; + + /* Micrel KSZ9031RNX */ + gmac1_phy: ethernet-phy@3 { + compatible = "ethernet-phy-id0022.1620"; + reg = <0x3>; + + clocks = <&gmac1_phy_clk>; + clock-names = "ref"; + }; +}; + +&gmac1 { + mac-address = [ 00 26 58 80 01 03 ]; + + phy-handle = <&gmac1_phy>; + + status = "okay"; +}; + +&usb { + status = "okay"; + + ulpi { + phy { + clocks = <&usb_phy_clk>; + clock-names = "ref"; + }; + }; +}; diff --git a/arch/mips/boot/dts/baikal-t1/generic.dts b/arch/mips/boot/dts/baikal-t1/generic.dts new file mode 100644 index 0000000000000..3610d8b4c00d8 --- /dev/null +++ b/arch/mips/boot/dts/baikal-t1/generic.dts @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 generic platform device tree + */ + +/dts-v1/; + +#include "soc.dtsi" + +/ { + model = "Baikal-T1 Generic Platform"; + compatible = "baikal,bt1"; + + chosen { + /* + * Note of having both EARLY_PRINTK and SERIAL_EARLYCON + * activated at the same time. If they both refer to the same + * device, you'll end up with duplicated log messages. + * Here by passing 'earlycon' to the kernel we'll activate it + * to parse the stdout-path property to find the early console + * device. 
System console will be then activated in accordance + * with it if 'console=' parameter isn't passed. Any of the + * following consoles are valid: ttyS{0,1}/uart{0,1} (which + * alias is serial{0,1}), early_fdc (CDMM-JTAG serial iface). + */ + bootargs = "console=ttyS0,115200n8 earlycon maxcpus=2"; + stdout-path = "serial0:115200n8"; + + /* It's implied that the bootloader updates the initrd address */ + linux,initrd-start = <0 0>; + linux,initrd-end = <0 0>; + }; + + memory { + /* + * Declare required low-memory and additional 256MB of high- + * memory, which due to the DW uMCTL2 controller specific setup + * nearly always exists as being remapped upper part of the + * first memory chip. Without low-level remapping that segment + * is hidden behind the MMIO region and isn't reachable. + * NOTE. For the reason of having MMIO above the very first + * 128MB of the low memory, the second 128MB of the physical + * memory is always unavailable as being hidden behind MMIO + * and non-remappable by DW uMCTL2. + */ + device_type = "memory"; + reg = <0 0x00000000 0 0x08000000>, + <0 0x20000000 0 0x10000000>; + }; + + /* Standard xGMAC/PCIe/SATA reference clocks setup */ + clocks { + xgmac_ref_clk: clock-oscillator-xgmac { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <156250000>; + clock-output-names = "xgmac156m"; + }; + + pcie_ref_clk: clock-oscillator-pcie { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + clock-output-names = "pcie100m"; + }; + + sata_ref_clk: clock-oscillator-sata { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + clock-output-names = "sata100m"; + }; + }; +}; + +&l2 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; +}; + +&int_rom { + status = "okay"; +}; + +&spi0 { + num-cs = <1>; + + status = "okay"; + + /* + * Most likely an SPI-nor flash will be always installed on each + * device with Baikal-T1 SoC on board. There is no just better + * alternative to boot a normal system on that CPU. + * Note Baikal-T1 is able to transparently access up to 16MB flash, + * so the system bootloader size can not exceed that limit, but an + * attached SPI-flash can as long as it supports 3bytes addressing + * of the lowest partition. 
+ */ + boot_flash: flash@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + + spi-max-frequency = <25000000>; + m25p,fast-read; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&i2c1 { + status = "okay"; +}; + +&i2c2 { + status = "okay"; +}; + +&timer_dw0 { + status = "okay"; +}; + +&timer_dw1 { + status = "okay"; +}; + +&timer_dw2 { + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&wdt { + status = "okay"; +}; + +&spi1 { + num-cs = <4>; + + status = "okay"; +}; + +&spi2 { + num-cs = <4>; + + status = "okay"; +}; + +&pvt { + status = "okay"; +}; + +&efuse { + status = "okay"; +}; + +&pcie { + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_PCIE_M_CLK>, + <&ccu_axi CCU_AXI_PCIE_S_CLK>, + <&pcie_ref_clk>; + clock-names = "dbi", "mstr", "slv", "ref"; + + + status = "okay"; +}; + +&sram { + status = "okay"; +}; + +&dma { + status = "okay"; +}; + +&mc { + status = "okay"; +}; + +&mc_phy { + status = "okay"; +}; + +&sata { + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_SATA_CLK>, + <&sata_ref_clk>; + clock-names = "pclk", "aclk", "ref"; + + status = "okay"; +}; + +&sata0 { + hba-port-cap = ; + + status = "okay"; +}; + +&sata1 { + hba-port-cap = ; + + status = "okay"; +}; + +&xgmac { + mac-address = [ 00 20 13 ba 1c a1 ]; + + status = "disabled"; +}; + +&hwa { + status = "okay"; +}; + +&xpcs { + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_sys CCU_SYS_XGMAC_REF_CLK>, + <&xgmac_ref_clk>; + clock-names = "pclk", "core", "pad"; + + status = "disabled"; +}; + +&mdio0 { + reset-delay-us = <10000>; + reset-post-delay-us = <30000>; + + /* + * We don't know actual PHY address on a generic device. Let the driver + * auto scan the MDIO bus looking for the IEEE 802.3 Clause 22 + * compatible PHY. + */ + gmac0_phy: ethernet-phy { + compatible = "ethernet-phy-ieee802.3-c22"; + }; +}; + +&gmac0 { + mac-address = [ 7a 72 6c 4a 7a 07 ]; + + phy-handle = <&gmac0_phy>; + + status = "okay"; +}; + +&mdio1 { + reset-delay-us = <10000>; + reset-post-delay-us = <30000>; + + /* + * We don't know actual PHY address on a generic device. Let the driver + * auto scan the MDIO bus looking for the IEEE 802.3 Clause 22 + * compatible PHY. 
+ */ + gmac1_phy: ethernet-phy { + compatible = "ethernet-phy-ieee802.3-c22"; + }; +}; + +&gmac1 { + mac-address = [ 7a 72 6c 4a 7b 07 ]; + + phy-handle = <&gmac1_phy>; + + status = "okay"; +}; + +&usb { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/baikal-t1/krkx4.dtsi b/arch/mips/boot/dts/baikal-t1/krkx4.dtsi new file mode 100644 index 0000000000000..a9621998564d8 --- /dev/null +++ b/arch/mips/boot/dts/baikal-t1/krkx4.dtsi @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC + * + * Baikal Electronics KR/KX4 SFI Mezzanine Card device tree + */ + +#include + +/ { + aliases { + mdio-gpio2 = &mdio2; + }; + + mdio2: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + + /* PORT0.9 - MDC, PORT0.10 - MDO, GPIO0.11 - MDI */ + gpios = <&port0 9 GPIO_ACTIVE_HIGH>, <&port0 11 GPIO_ACTIVE_HIGH>, + <&port0 10 GPIO_ACTIVE_HIGH>; + + reset-gpios = <&gpio3 0 GPIO_ACTIVE_HIGH>; + reset-delay-us = <10000>; + reset-post-delay-us = <10000>; + + mv_ch0: ethernet-phy@c { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <0x0c>; + + interrupt-parent = <&port0>; + interrupts = <27 IRQ_TYPE_LEVEL_LOW>; + }; + }; +}; + +&i2c1 { + status = "okay"; + + /* Marvell PHY Reset-controller (NXP PCA9500 8-bit GPIO) */ + gpio3: gpio@20 { + compatible = "nxp,pcf8574"; + reg = <0x20>; + + gpio-controller; /* 8 */ + #gpio-cells = <2>; + + /* nc - not connected */ + gpio-line-names = "RST_PHY", "nc", "nc", "nc", + "nc", "nc", "nc", "nc"; + }; + + /* Mezzanine card firmware (NXP PCA9500 2-kbit EEPROM) */ + fw1: eeprom@50 { + compatible = "atmel,24c02"; + reg = <0x50>; + + pagesize = <4>; + }; +}; + +&xgmac { + phy-handle = <&mv_ch0>; +}; diff --git a/arch/mips/boot/dts/baikal-t1/oclk.dtsi b/arch/mips/boot/dts/baikal-t1/oclk.dtsi new file mode 100644 index 0000000000000..c9f6ec576d569 --- /dev/null +++ b/arch/mips/boot/dts/baikal-t1/oclk.dtsi @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 SoC overclocking device tree + */ + +#include + +/* + * WARNING! This file provides the SoC overclocking settings. Since the + * specified clock rates are officially unsupported there is no firm guarantee + * the system will stably work if they are applied. So use it at your own risk. + */ + +&cpu_opp { + opp-1300000000 { + opp-hz = /bits/ 64 <1300000000>; + clock-latency-ns = <20000>; + turbo-mode; + }; + + opp-1400000000 { + opp-hz = /bits/ 64 <1400000000>; + clock-latency-ns = <20000>; + turbo-mode; + }; + + opp-1500000000 { + opp-hz = /bits/ 64 <1500000000>; + clock-latency-ns = <20000>; + turbo-mode; + }; +}; + +/* + * In general the system is working well with the config bus freqs above 50MHz + * and up to 300MHz, but it hasn't been fully tested yet. For instance, DW DMA + * won't work well with APB clock being greater than 200 MHz. So if you mean to + * use the DMA-based communications over the I2C/UART/SPI interfaces don't + * exceed the 200MHz limit. 
+ */ +&apb { + assigned-clocks = <&ccu_sys CCU_SYS_APB_CLK>; + assigned-clock-rates = <200000000>; +}; + +&pcie { + assigned-clocks = <&ccu_axi CCU_AXI_PCIE_M_CLK>, + <&ccu_axi CCU_AXI_PCIE_S_CLK>; + assigned-clock-rates = <600000000>, <600000000>; +}; + +&sata { + assigned-clocks = <&ccu_axi CCU_AXI_SATA_CLK>; + assigned-clock-rates = <300000000>; +}; + +&gmac0 { + assigned-clocks = <&ccu_axi CCU_AXI_GMAC0_CLK>; + assigned-clock-rates = <250000000>; +}; + +&gmac1 { + assigned-clocks = <&ccu_axi CCU_AXI_GMAC1_CLK>; + assigned-clock-rates = <250000000>; +}; + +&usb { + assigned-clocks = <&ccu_axi CCU_AXI_USB_CLK>; + assigned-clock-rates = <300000000>; +}; diff --git a/arch/mips/boot/dts/baikal-t1/soc.dtsi b/arch/mips/boot/dts/baikal-t1/soc.dtsi new file mode 100644 index 0000000000000..91ad79576d566 --- /dev/null +++ b/arch/mips/boot/dts/baikal-t1/soc.dtsi @@ -0,0 +1,1154 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 SoC generic device tree + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +/ { + model = "Baikal-T1 SoC"; + compatible = "baikal,bt1"; + #address-cells = <2>; + #size-cells = <2>; + + interrupt-parent = <&gic>; + + aliases { + serial0 = &uart0; + serial1 = &uart1; + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + spi0 = &spi0; + spi1 = &spi1; + spi2 = &spi2; + mc0 = &mc; + ethernet0 = &gmac0; + ethernet1 = &gmac1; + ethernet2 = &xgmac; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu-map { + cluster0 { + core0 { + cpu = <&cpu0>; + }; + + core1 { + cpu = <&cpu1>; + }; + }; + }; + + cpu0: cpu@0 { + compatible = "img,p5600"; + device_type = "cpu"; + reg = <0x0>; + #cooling-cells = <2>; + + clocks = <&ccu_pll CCU_CPU_PLL>; + clock-names = "cpu_clk"; + + operating-points-v2 = <&cpu_opp>; + }; + + cpu1: cpu@1 { + compatible = "img,p5600"; + device_type = "cpu"; + reg = <0x1>; + #cooling-cells = <2>; + + clocks = <&ccu_pll CCU_CPU_PLL>; + clock-names = "cpu_clk"; + + operating-points-v2 = <&cpu_opp>; + }; + }; + + gic: gic@1bdc0000 { + compatible = "mti,gic"; + reg = <0 0x1bdc0000 0 0x20000>; + + interrupt-controller; + #interrupt-cells = <3>; + mti,reserved-ipi-vectors = <108 4>; + + timer_gic: timer { + compatible = "mti,gic-timer"; + + interrupts = ; + + clocks = <&ccu_pll CCU_CPU_PLL>; + }; + }; + + cpc: cpc@1bde0000 { + compatible = "mti,mips-cpc"; + reg = <0 0x1bde0000 0 0x8000>; + }; + + cdmm: cdmm@1bde8000 { + compatible = "mti,mips-cdmm"; + reg = <0 0x1bde8000 0 0x8000>; + }; + + cm2: cm2@1fbf8000 { + compatible = "mti,mips-cm"; + reg = <0 0x1fbf8000 0 0x8000>, + <0 0x1fbf0000 0 0x1000>; + reg-names = "gcr", "l2sync"; + }; + + /* + * Note setting up too low CPU frequency may cause time-critical + * applications not working correctly. For instance in order to have + * the DW APB SSI memory interface (EEPROM-read and Tx-only) working + * correctly with the whole CPU clock range defined below we had to + * accordingly constraint the SPI bus speed. 
+ */ + cpu_opp: opp-table { + compatible = "operating-points-v2"; + opp-shared; + + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + clock-latency-ns = <20000>; + }; + + opp-400000000 { + opp-hz = /bits/ 64 <400000000>; + clock-latency-ns = <20000>; + }; + + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + clock-latency-ns = <20000>; + }; + + opp-600000000 { + opp-hz = /bits/ 64 <600000000>; + clock-latency-ns = <20000>; + }; + + opp-700000000 { + opp-hz = /bits/ 64 <700000000>; + clock-latency-ns = <20000>; + }; + + opp-800000000 { + opp-hz = /bits/ 64 <800000000>; + clock-latency-ns = <20000>; + }; + + opp-900000000 { + opp-hz = /bits/ 64 <900000000>; + clock-latency-ns = <20000>; + }; + + opp-1000000000 { + opp-hz = /bits/ 64 <1000000000>; + clock-latency-ns = <20000>; + }; + + opp-1100000000 { + opp-hz = /bits/ 64 <1100000000>; + clock-latency-ns = <20000>; + }; + + opp-1200000000 { + opp-hz = /bits/ 64 <1200000000>; + clock-latency-ns = <20000>; + }; + }; + + thermal-zones { + cpu-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&pvt>; + + trips { + cpu_alert0: trip0 { + temperature = <80000>; + hysteresis = <2000>; + type = "active"; + }; + + cpu_alert1: trip1 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_warn: trip2 { + temperature = <100000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_crit: trip3 { + temperature = <110000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map-alert1 { + trip = <&cpu_alert1>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + }; + + /* External fixed reference clocks */ + clocks { + ref_clk: clock-oscillator-ref { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + clock-output-names = "ref25m"; + }; + }; + + apb: bus@1f059000 { + compatible = "baikal,bt1-apb", "simple-bus"; + reg = <0 0x1f059000 0 0x1000>, + <0 0x1d000000 0 0x2040000>; + reg-names = "ehb", "nodev"; + #address-cells = <1>; + #size-cells = <1>; + + ranges = <0x1bfc0000 0 0x1bfc0000 0x03c38000>, + <0x1fc00000 0 0x1fc00000 0x00400000>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "pclk"; + + resets = <&ccu_sys CCU_SYS_APB_RST>; + reset-names = "prst"; + + syscon: syscon@1f04d000 { + compatible = "baikal,bt1-sys-con", "syscon", "simple-mfd"; + reg = <0x1f04d000 0x1000>; + reg-names = "sys"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + little-endian; + reg-io-width = <4>; + + ccu_pll: clock-controller@1f04d000 { + compatible = "baikal,bt1-ccu-pll"; + reg = <0x1f04d000 0x028>; + #clock-cells = <1>; + + clocks = <&ref_clk>; + clock-names = "ref_clk"; + }; + + ccu_axi: clock-controller@1f04d030 { + compatible = "baikal,bt1-ccu-axi"; + reg = <0x1f04d030 0x030>; + #clock-cells = <1>; + #reset-cells = <1>; + + clocks = <&ccu_pll CCU_SATA_PLL>, + <&ccu_pll CCU_PCIE_PLL>, + <&ccu_pll CCU_ETH_PLL>; + clock-names = "sata_clk", "pcie_clk", "eth_clk"; + }; + + ccu_sys: clock-controller@1f04d060 { + compatible = "baikal,bt1-ccu-sys"; + reg = <0x1f04d060 0x0a0>; + #clock-cells = <1>; + #reset-cells = <1>; + + clocks = <&ref_clk>, + <&ccu_pll CCU_SATA_PLL>, + <&ccu_pll CCU_PCIE_PLL>, + <&ccu_pll CCU_ETH_PLL>; + clock-names = "ref_clk", "sata_clk", "pcie_clk", + "eth_clk"; + }; + + l2: l2@1f04d028 { + compatible = "baikal,bt1-l2-ctl"; + reg = <0x1f04d028 0x004>; + + baikal,l2-ws-latency = <0>; + baikal,l2-tag-latency = <0>; + 
baikal,l2-data-latency = <1>; + + status = "disabled"; + }; + + reboot { + compatible = "syscon-reboot"; + offset = <0x118>; + + mask = <0x1>; + value = <0x1>; + + status = "disabled"; + }; + + reboot-mode { + compatible = "syscon-reboot-mode"; + offset = <0x154>; + + mode-normal = ; + mode-loader = ; + mode-recovery = ; + }; + + i2c0: i2c@1f04d100 { + compatible = "baikal,bt1-sys-i2c"; + reg = <0x1f04d100 0x010>; + #address-cells = <1>; + #size-cells = <0>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>; + clock-frequency = <400000>; + + status = "disabled"; + }; + }; + + bootcon: syscon@1f040000 { + compatible = "baikal,bt1-boot-con", "syscon", "simple-mfd"; + reg = <0x1f040000 0x1000>, + <0x1fc00000 0x400000>; + reg-names = "boot", "mirror"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + little-endian; + reg-io-width = <4>; + + boot_mux: mux-controller { + compatible = "mmio-mux"; + #mux-control-cells = <1>; + + mux-reg-masks = <0x0 0x100>, <0x4 0x1>; + idle-states = <0x1>, <0x0>; + }; + + int_rom: rom@1bfc0000 { + compatible = "baikal,bt1-int-rom", "mtd-rom"; + reg = <0x1bfc0000 0x10000>; + + no-unaligned-direct-access; + bank-width = <4>; + + status = "disabled"; + }; + + /* + * Note that using the dirmap region stalls the APB bus + * until an IO operation is finished. It may cause + * significant lags in concurrent access to the system + * MMIO, since each SPI flash dword read operation takes + * at least 2.56 us to be finished (cmd + addr + data). + */ + spi0: spi@1f040100 { + compatible = "baikal,bt1-sys-ssi"; + reg = <0x1f040100 0x900>, + <0x1c000000 0x1000000>; + reg-names = "config", "map"; + #address-cells = <1>; + #size-cells = <0>; + + mux-controls = <&boot_mux 0>; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "ssi_clk"; + + status = "disabled"; + }; + }; + + gpio0: gpio@1f044000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x1f044000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_sys CCU_SYS_GPIO_CLK>; + clock-names = "bus", "db"; + + status = "disabled"; + + port0: gpio-port@0 { + compatible = "snps,dw-apb-gpio-port"; + reg = <0>; + + interrupts = ; + interrupt-controller; + #interrupt-cells = <2>; + + gpio-controller; + #gpio-cells = <2>; + ngpios = <32>; + }; + }; + + gpio1: gpio@1f045000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x1f045000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_sys CCU_SYS_GPIO_CLK>; + clock-names = "bus", "db"; + + status = "disabled"; + + port1: gpio-port@0 { + compatible = "snps,dw-apb-gpio-port"; + reg = <0>; + + gpio-controller; + #gpio-cells = <2>; + ngpios = <3>; + }; + }; + + i2c1: i2c@1f046000 { + compatible = "snps,designware-i2c"; + reg = <0x1f046000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_I2C1_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "ref", "pclk"; + clock-frequency = <400000>; + + dmas = <&dma 4 0 1 0xff>, <&dma 5 0 1 0xff>; + dma-names = "tx", "rx"; + + status = "disabled"; + }; + + i2c2: i2c@1f047000 { + compatible = "snps,designware-i2c"; + reg = <0x1f047000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_I2C2_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "ref", "pclk"; + clock-frequency = <400000>; + + dmas = <&dma 6 0 1 0xff>, <&dma 7 0 1 0xff>; + dma-names = "tx", "rx"; + + status = "disabled"; + }; + + timer_dw0: timer@1f049000 { + compatible 
= "snps,dw-apb-timer"; + reg = <0x1f049000 0x14>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_TIMER0_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "timer", "pclk"; + + status = "disabled"; + }; + + timer_dw1: timer@1f049014 { + compatible = "snps,dw-apb-timer"; + reg = <0x1f049014 0x14>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_TIMER1_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "timer", "pclk"; + + status = "disabled"; + }; + + timer_dw2: timer@1f049028 { + compatible = "snps,dw-apb-timer"; + reg = <0x1f049028 0x14>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_TIMER2_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "timer", "pclk"; + + status = "disabled"; + }; + + uart0: serial@1f04a000 { + compatible = "snps,dw-apb-uart"; + reg = <0x1f04a000 0x1000>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_UART_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "baudclk", "apb_pclk"; + + dmas = <&dma 0 0 1 0xff>, <&dma 1 0 1 0xff>; + dma-names = "tx", "rx"; + + dcd-override; + dsr-override; + cts-override; + ri-override; + + /* earlycon settings. */ + reg-io-width = <4>; + reg-shift = <2>; + + status = "disabled"; + }; + + uart1: serial@1f04b000 { + compatible = "snps,dw-apb-uart"; + reg = <0x1f04b000 0x1000>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_UART_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "baudclk", "apb_pclk"; + + dmas = <&dma 2 0 1 0xff>, <&dma 3 0 1 0xff>; + dma-names = "tx", "rx"; + + /* earlycon settings. */ + reg-io-width = <4>; + reg-shift = <2>; + + status = "disabled"; + }; + + wdt: watchdog@1f04c000 { + compatible = "snps,dw-wdt"; + reg = <0x1f04c000 0x1000>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_WDT_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "tclk", "pclk"; + + /* Adjust ref-clock rate for better TOPs granularity */ + assigned-clocks = <&ccu_sys CCU_SYS_WDT_CLK>; + assigned-clock-rates = <65534>; + + snps,watchdog-tops = <0x000000ff 0x000001ff 0x000003ff + 0x000007ff 0x0000ffff 0x0001ffff + 0x0003ffff 0x0007ffff 0x000fffff + 0x001fffff 0x003fffff 0x007fffff + 0x00ffffff 0x01ffffff 0x03ffffff + 0x07ffffff>; + + status = "disabled"; + }; + + /* + * It's highly recommended to use all DW APB SSI controllers + * with GPIO-based CS, due to the native CS being automatically + * asserted/de-asserted on transmissions. Such HW design isn't + * that suitable for the kernel SPI subsystem, so GPIO-based CS + * will help to prevent very nasty, hard-to-fix errors. + */ + spi1: spi@1f04e000 { + compatible = "baikal,bt1-ssi"; + reg = <0x1f04e000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "ssi_clk"; + + /* + * Make sure Rx DMA channels have higher priority. Note + * also that first two DW DMAC channels aren't suitable + * for the well-balanced Tx and Rx SPI transfers. + */ + dmas = <&dma 8 0 1 0xe0>, <&dma 9 0 1 0x1c>; + dma-names = "tx", "rx"; + + reg-io-width = <4>; + + status = "disabled"; + }; + + spi2: spi@1f04f000 { + compatible = "baikal,bt1-ssi"; + reg = <0x1f04f000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "ssi_clk"; + + /* + * Make sure Rx DMA channels have higher priority. Note + * also that first two DW DMAC channels aren't suitable + * for the well-balanced Tx and Rx SPI transfers. 
+ */ + dmas = <&dma 10 0 1 0xe0>, <&dma 11 0 1 0x1c>; + dma-names = "tx", "rx"; + + reg-io-width = <4>; + + status = "disabled"; + }; + + pvt: temperature-sensor@1f200000 { + compatible = "baikal,bt1-pvt"; + reg = <0x1f200000 0x1000>; + #thermal-sensor-cells = <0>; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_PVT_CLK>, + <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "ref", "pclk"; + + status = "disabled"; + }; + + efuse: efuse@1f201000 { + compatible = "baikal,bt1-efuse"; + reg = <0x1f201000 0x1000>; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "pclk"; + + status = "disabled"; + }; + }; + + axi: bus@1f05a000 { + compatible = "baikal,bt1-axi", "simple-bus"; + reg = <0 0x1f05a000 0 0x1000>, + <0 0x1f04d110 0 0x8>; + reg-names = "qos", "ehb"; + #address-cells = <2>; + #size-cells = <2>; + #interconnect-cells = <1>; + + /* + * CPU can find the AXI-accessible devices over the next MMIO + * ranges. + */ + ranges = <0 0x08000000 0 0x08000000 0 0x13dc0000>, + <0 0x1bf80000 0 0x1bf80000 0 0x00040000>, + <0 0x1bfc0000 0 0x1bfc0000 0 0x03c38000>; + + /* + * Not all AXI-bus DMA-capable devices can reach any address in + * the physical memory space. SATA/USB/GMACx are limited to work + * with the lowest 4GB of memory. Here we set the normal DMA + * ranges mapping, while device-specific dma-ranges or device + * driver software must make sure the devices have been + * restricted on working with the permited memory range. + */ + dma-ranges = <0 0 0 0 0x100 0>; + + interrupts = ; + + clocks = <&ccu_axi CCU_AXI_MAIN_CLK>; + clock-names = "aclk"; + + resets = <&ccu_axi CCU_AXI_MAIN_RST>; + reset-names = "arst"; + + syscon = <&syscon>; + + /* + * Note the (dma-)ranges mapping must be 64K aligned due to + * iATU constraints (lowest 16 bits aren't writable). Also + * note that we have to split the MEM-range up into two so + * one of them would be 256MB-aligned as some of the PCIe + * peripherals require. It can be done since AXI-interconnect + * doesn't permit the PCIe-master to access the MMIO-range + * anyway, so we can freely use the memory range above + * 0x1bfc0000 locally within the PCIe space. 
+ */ + pcie: pcie@1f052000 { + compatible = "baikal,bt1-pcie"; + device_type = "pci"; + reg = <0 0x1f052000 0 0x1000>, + <0 0x1f053000 0 0x1000>, + <0 0x1bdb0000 0 0x10000>; + reg-names = "dbi", "dbi2", "config"; + #address-cells = <3>; + #size-cells = <2>; + ranges = <0x82000000 0 0x08000000 0 0x08000000 0 0x03da0000>, /* mem */ + <0x82000000 0 0x10000000 0 0x0bda0000 0 0x10000000>, /* mem */ + <0x81000000 0 0x0bda0000 0 0x1bda0000 0 0x00010000>; /* io */ + bus-range = <0x0 0xff>; + + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "dma0", "dma1", "dma2", "dma3", + "dma4", "dma5", "dma6", "dma7", + "msi", "aer", "pme", "hp", "bw_mg", + "l_eq"; + + /* External reference clock source must be provided */ + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_PCIE_M_CLK>, + <&ccu_axi CCU_AXI_PCIE_S_CLK>; + clock-names = "dbi", "mstr", "slv"; + + resets = <&ccu_axi CCU_AXI_PCIE_M_RST>, + <&ccu_axi CCU_AXI_PCIE_S_RST>, + <&ccu_sys CCU_SYS_PCIE_PWR_RST>, + <&ccu_sys CCU_SYS_PCIE_HOT_RST>, + <&ccu_sys CCU_SYS_PCIE_PCS_PHY_RST>, + <&ccu_sys CCU_SYS_PCIE_CORE_RST>, + <&ccu_sys CCU_SYS_PCIE_PIPE0_RST>, + <&ccu_sys CCU_SYS_PCIE_STICKY_RST>, + <&ccu_sys CCU_SYS_PCIE_NSTICKY_RST>; + reset-names = "mstr", "slv", "pwr", "hot", "phy", + "core", "pipe", "sticky", "non-sticky"; + + baikal,bt1-syscon = <&syscon>; + + num-lanes = <4>; + max-link-speed = <3>; + + status = "disabled"; + }; + + sram: sram-controller@1bf80000 { + compatible = "baikal,bt1-sram", "mmio-sram"; + reg = <0 0x1bf80000 0 0x10000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0x1bf80000 0x10000>; + + clocks = <&ccu_axi CCU_AXI_SRAM_CLK>; + clock-names = "aclk"; + + resets = <&ccu_axi CCU_AXI_SRAM_RST>; + reset-names = "arst"; + + mux-controls = <&boot_mux 1>; + + status = "disabled"; + + boot-sram@0 { + compatible = "baikal,bt1-boot-sram"; + reg = <0 0x10000>; + label="Internal SRAM"; + export; + }; + }; + + dma: dma-controller@1f041000 { + compatible = "baikal,bt1-dmac", "snps,dma-spear1340"; + reg = <0 0x1f041000 0 0x1000>; + #dma-cells = <4>; + + interrupts = ; + + /* Clock rate up to 200MHz */ + clocks = <&ccu_sys CCU_SYS_APB_CLK>; + clock-names = "hclk"; + + dma-channels = <8>; + dma-requests = <12>; + dma-masters = <2>; + + chan_allocation_order = <0>; + chan_priority = <0>; + block_size = <4095>; + data-width = <16 4>; + multi-block = <0 0 0 0 0 0 0 0>; + snps,max-burst-len = <16 16 4 4 4 4 4 4>; + + status = "disabled"; + }; + + mc: memory-controller@1f042000 { + compatible = "baikal,bt1-ddrc"; + reg = <0 0x1f042000 0 0x1000>; + + interrupts = , + , + , + ; + interrupt-names = "dfi_e", "ecc_ce", "ecc_ue", "ecc_sbr"; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_DDR_CLK>, + <&ccu_pll CCU_DDR_PLL>; + clock-names = "pclk", "aclk", "core"; + + resets = <&ccu_axi CCU_AXI_DDR_RST>, + <&ccu_sys CCU_SYS_DDR_INIT_RST>; + reset-names = "arst", "core"; + + status = "disabled"; + }; + + mc_phy: memory-controller-phy@1f043000 { + compatible = "baikal,bt1-ddrc-phy"; + reg = <0 0x1f043000 0 0x1000>; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_pll CCU_DDR_PLL>; + clock-names = "pclk", "ddr"; + + status = "disabled"; + }; + + /* + * DWC AHCI SATA controller has been configured with 32-bits + * AMBA Master Address Bus width. Make sure any buffer + * allocated above that limit is bounced down to the permitted + * memory space before being passed to the device. 
+ */ + sata: sata@1f050000 { + compatible = "baikal,bt1-ahci"; + reg = <0 0x1f050000 0 0x2000>; + #address-cells = <1>; + #size-cells = <0>; + + interrupts = ; + + /* Using an external 100MHz clock source is preferable */ + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_SATA_CLK>, + <&ccu_sys CCU_SYS_SATA_REF_CLK>; + clock-names = "pclk", "aclk", "ref"; + + resets = <&ccu_axi CCU_AXI_SATA_RST>, + <&ccu_sys CCU_SYS_SATA_REF_RST>; + reset-names = "arst", "ref"; + + ports-implemented = <0x3>; + + status = "disabled"; + + sata0: sata-port@0 { + reg = <0>; + + snps,tx-ts-max = <16>; + snps,rx-ts-max = <16>; + + status = "disabled"; + }; + + sata1: sata-port@1 { + reg = <1>; + + snps,tx-ts-max = <16>; + snps,rx-ts-max = <16>; + + status = "disabled"; + }; + }; + + xgmac: ethernet@1f054000 { + compatible = "baikal,bt1-xgmac"; + reg = <0 0x1f054000 0 0x4000>, + <0 0x1f05d000 0 0x1000>; + reg-names = "stmmaceth", "xpcs"; + + interrupts = , + , + , + , + , + ; + interrupt-names = "macirq", "dma_tx0", "dma_tx1", + "dma_rx0", "dma_rx1", "xpcs"; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_XGMAC_CLK>, + <&ccu_sys CCU_SYS_XGMAC_REF_CLK>, + <&ccu_sys CCU_SYS_XGMAC_PTP_CLK>; + clock-names = "pclk", "stmmaceth", "tx", "ptp_ref"; + + resets = <&ccu_axi CCU_AXI_XGMAC_RST>; + reset-names = "stmmaceth"; + + phy-mode = "xgmii"; + pcs-handle = <&xpcs>; + + rx-fifo-depth = <32768>; + tx-fifo-depth = <32768>; + + /* + * Actual burst length will be (32 * 8 * 16) bytes due + * to the snps,no-pbl-x8 property absence and having + * the AXI bus data width of 128 bits. + */ + snps,pbl = <32>; + snps,data-width = <16>; + + /* Enable TSO for all DMA channels */ + snps,tso; + + snps,perfect-filter-entries = <8>; + snps,multicast-filter-bins = <64>; + local-mac-address = [ 00 20 13 ba 1c a1 ]; + + status = "disabled"; + + axi-bus-config { + snps,wr_osr_lmt = <0x7>; + snps,rd_osr_lmt = <0x7>; + /* It's AXI3 bus so up to 16 xfers */ + snps,blen = <0 0 0 0 16 8 4>; + }; + }; + + hwa: hwa@1f05b000 { + compatible = "baikal,bt1-hwa"; + reg = <0 0x1f05b000 0 0x1000>, + <0 0x1f05c000 0 0x1000>; + reg-names = "core", "dma"; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_HWA_CLK>, + <&ccu_sys CCU_SYS_HWA_CLK>; + clock-names = "pclk", "aclk", "ref"; + + resets = <&ccu_axi CCU_AXI_HWA_RST>; + reset-names = "arst"; + + status = "disabled"; + }; + + xpcs: ethernet-phy@1f05d000 { + compatible = "baikal,bt1-xpcs", "snps,dwxpcs-3.11b", "snps,dwxpcs", + "ethernet-phy-id7996.ced0", "ethernet-phy-ieee802.3-c45"; + reg = <0 0x1f05d000 0 0x1000>; + reg-names = "indirect"; + + interrupts = ; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_sys CCU_SYS_XGMAC_REF_CLK>; + clock-names = "pclk", "core"; + + phy-is-integrated; + + status = "disabled"; + }; + + gmac0: ethernet@1f05e000 { + compatible = "baikal,bt1-gmac"; + reg = <0 0x1f05e000 0 0x2000>; + #address-cells = <1>; + #size-cells = <2>; + dma-ranges = <0 0 0 0x1 0>; + + interrupts = ; + interrupt-names = "macirq"; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_GMAC0_CLK>, + <&ccu_sys CCU_SYS_GMAC0_TX_CLK>, + <&ccu_sys CCU_SYS_GMAC0_PTP_CLK>; + clock-names = "pclk", "stmmaceth", "tx", "ptp_ref"; + + resets = <&ccu_axi CCU_AXI_GMAC0_RST>; + reset-names = "stmmaceth"; + + /* DW GMAC is configured to export 1xGPI and 1xGPO */ + ngpios = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + + gpio-controller; + #gpio-cells = <2>; + + /* + * MAC always adds 2ns delay of TXC with respect to TXD + * so let the PHY to add 
some RXC delay if it's + * applicable. + */ + phy-mode = "rgmii-rxid"; + tx-internal-delay-ps = <2000>; + + rx-fifo-depth = <16384>; + tx-fifo-depth = <16384>; + + /* + * Actual burst length will be (32 * 8 * 16) bytes due + * to the snps,no-pbl-x8 property absence and having + * the AXI bus data width of 128 bits. + */ + snps,pbl = <32>; + snps,data-width = <16>; + + snps,perfect-filter-entries = <8>; + snps,multicast-filter-bins = <0>; + loacl-mac-address = [ 7a 72 6c 4a 7a 07 ]; + + status = "disabled"; + + axi-bus-config { + snps,wr_osr_lmt = <0x3>; + snps,rd_osr_lmt = <0x3>; + snps,blen = <0 0 0 0 16 8 4>; + }; + + mdio0: mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + gmac1: ethernet@1f060000 { + compatible = "baikal,bt1-gmac"; + reg = <0 0x1f060000 0 0x2000>; + #address-cells = <1>; + #size-cells = <2>; + dma-ranges = <0 0 0 0x1 0>; + + interrupts = ; + interrupt-names = "macirq"; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_GMAC1_CLK>, + <&ccu_sys CCU_SYS_GMAC1_TX_CLK>, + <&ccu_sys CCU_SYS_GMAC1_PTP_CLK>; + clock-names = "pclk", "stmmaceth", "tx", "ptp_ref"; + + resets = <&ccu_axi CCU_AXI_GMAC1_RST>; + reset-names = "stmmaceth"; + + /* DW GMAC is configured to export 1xGPI and 1xGPO */ + ngpios = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + + gpio-controller; + #gpio-cells = <2>; + + /* + * MAC always adds 2ns delay of TXC with respect to TXD + * so let the PHY to add some RXC delay if it's + * applicable. + */ + phy-mode = "rgmii-rxid"; + tx-internal-delay-ps = <2000>; + + rx-fifo-depth = <16384>; + tx-fifo-depth = <16384>; + + /* + * Actual burst length will be (32 * 8 * 16) bytes due + * to the snps,no-pbl-x8 property absence and having + * the AXI bus data width of 128 bits. 
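+ * That is 32 (snps,pbl) * 8 (implicit PBLx8 scaling) * 16 bytes per
+ * 128-bit AXI beat = 4096 bytes per burst.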
+ */ + snps,pbl = <32>; + snps,data-width = <16>; + + snps,perfect-filter-entries = <8>; + snps,multicast-filter-bins = <0>; + loacl-mac-address = [ 7a 72 6c 4a 7b 07 ]; + + status = "disabled"; + + axi-bus-config { + snps,wr_osr_lmt = <0x3>; + snps,rd_osr_lmt = <0x3>; + snps,blen = <0 0 0 0 16 8 4>; + }; + + mdio1: mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + usb: usb@1f100000 { + compatible = "baikal,bt1-usb3", "snps,dwc3"; + reg = <0 0x1f100000 0 0x100000>; + #address-cells = <1>; + #size-cells = <0>; + + interrupts = ; + interrupt-names = "host"; + + clocks = <&ccu_sys CCU_SYS_APB_CLK>, + <&ccu_axi CCU_AXI_USB_CLK>, + <&ccu_sys CCU_SYS_USB_CLK>; + clock-names = "pclk", "bus_early", "ref"; + + resets = <&ccu_axi CCU_AXI_USB_RST>; + reset-names = "arst"; + + dr_mode = "host"; + phy_type = "ulpi"; + maximum-speed = "high-speed"; + + snps,incr-burst-type-adjustment = <1 4 8 16>; + + status = "disabled"; + }; + }; +}; diff --git a/arch/mips/configs/baikal_t1_defconfig b/arch/mips/configs/baikal_t1_defconfig new file mode 100644 index 0000000000000..30d30ba6a9279 --- /dev/null +++ b/arch/mips/configs/baikal_t1_defconfig @@ -0,0 +1,433 @@ +CONFIG_LOCALVERSION="-bt1" +CONFIG_DEFAULT_HOSTNAME="baikal" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_PREEMPT=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_MISC=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_MIPS_BAIKAL_T1=y +CONFIG_BT1_CPU_FEATURE_OVERRIDES=y +CONFIG_BT1_DTB_ALL=y +CONFIG_CPU_P5600=y +CONFIG_CPU_MIPS32_R5_FEATURES=y +CONFIG_CPU_MIPS32_R5_XPA=y +CONFIG_ZBOOT_LOAD_ADDRESS=0x85100000 +CONFIG_PAGE_SIZE_16KB=y +CONFIG_CPU_HAS_MSA=y +CONFIG_NR_CPUS=2 +CONFIG_IEEE754_DEFAULT_RELAXED=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPUFREQ_DT=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_SLAB=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_RAW_DIAG=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_TABLES=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_LOG=y +CONFIG_NFT_LIMIT=y +CONFIG_NFT_MASQ=y +CONFIG_NFT_REDIR=y +CONFIG_NFT_NAT=y +CONFIG_NFT_REJECT=y +CONFIG_NFT_HASH=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y 
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_CGROUP=y +CONFIG_NETFILTER_XT_MATCH_CPU=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_RECENT=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_TCPMSS=y +CONFIG_NFT_DUP_IPV4=y +CONFIG_NF_TABLES_ARP=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_SYNPROXY=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NFT_DUP_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBS=y +CONFIG_NET_SCH_ETF=y +CONFIG_NET_SCH_TAPRIO=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOWER=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_NETLINK_DIAG=y +CONFIG_CGROUP_NET_PRIO=y +# CONFIG_WIRELESS is not set +CONFIG_PAGE_POOL_STATS=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=y +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM_PERFORMANCE=y +CONFIG_PCI_MSI=y +CONFIG_PCIE_BUS_PERFORMANCE=y +CONFIG_HOTPLUG_PCI=y +CONFIG_PCIE_BT1=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DEBUG_DRIVER=y +CONFIG_DEBUG_DEVRES=y +CONFIG_BT1_APB=y +CONFIG_BT1_AXI=y +CONFIG_MIPS_CDMM=y +CONFIG_MTD=y +CONFIG_MTD_PARTITIONED_MASTER=y +CONFIG_MTD_ROM=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PHYSMAP_BT1_ROM=y +CONFIG_MTD_SPI_NOR=y +# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BLOCK=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=4 +CONFIG_BLK_DEV_NBD=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=2 +CONFIG_BLK_DEV_RAM_SIZE=32768 +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_VERBOSE_ERRORS=y +CONFIG_NVME_HWMON=y +CONFIG_SRAM=y +CONFIG_EEPROM_AT24=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_ATA=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_DWC=y +# CONFIG_ATA_SFF is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_AMD_XGBE=y +CONFIG_AMD_XGBE_DCB=y +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ASIX is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_DAVICOM is not set +# 
CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_ENGLEDER is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_FUNGIBLE is not set +# CONFIG_NET_VENDOR_GOOGLE is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_WANGXUN is not set +# CONFIG_NET_VENDOR_ADI is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MICROSOFT is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +# CONFIG_NET_VENDOR_PENSANDO is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_SELFTESTS=y +CONFIG_DWMAC_BT1=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_TEHUTI=y +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VERTEXCOM is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_SFP=y +CONFIG_MARVELL_PHY=y +CONFIG_MARVELL_10G_PHY=y +CONFIG_MARVELL_88X2222_PHY=y +CONFIG_MICREL_PHY=y +CONFIG_MICROCHIP_PHY=y +CONFIG_MICROSEMI_PHY=y +CONFIG_REALTEK_PHY=y +CONFIG_SMSC_PHY=y +CONFIG_VITESSE_PHY=y +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_GPIO=y +# CONFIG_USB_NET_DRIVERS is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +# CONFIG_VT_CONSOLE is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=2 +CONFIG_SERIAL_8250_RUNTIME_UARTS=2 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_PERICOM is not set +CONFIG_MIPS_EJTAG_FDC_TTY=y +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_DESIGNWARE_SLAVE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_SLAVE_EEPROM=y +CONFIG_SPI=y +CONFIG_SPI_DESIGNWARE=y +CONFIG_SPI_DW_DMA=y +CONFIG_SPI_DW_MMIO=y +CONFIG_SPI_DW_BT1=y +CONFIG_SPI_SPIDEV=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_PCA953X=y +CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCF857X=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_SENSORS_BT1_PVT=y +CONFIG_SENSORS_BT1_PVT_ALARMS=y +CONFIG_SENSORS_TMP102=y +CONFIG_THERMAL=y +CONFIG_THERMAL_STATISTICS=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_SYSFS=y 
+CONFIG_WATCHDOG_PRETIMEOUT_GOV=y +CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y +CONFIG_DW_WATCHDOG=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_HRTIMER=y +# CONFIG_SND_PCI is not set +# CONFIG_SND_SPI is not set +# CONFIG_SND_MIPS is not set +CONFIG_SND_USB_AUDIO=y +CONFIG_USB_ULPI_BUS=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_USB_UAS=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_ULPI=y +CONFIG_USB_HUB_USB251XB=y +CONFIG_MMC=y +CONFIG_MMC_SPI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_EDAC=y +CONFIG_EDAC_DEBUG=y +CONFIG_EDAC_SYNOPSYS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_ABEOZ9=y +CONFIG_RTC_DRV_PCF85363=y +CONFIG_RTC_DRV_PCF2127=y +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=y +CONFIG_DW_EDMA=y +CONFIG_SYNC_FILE=y +# CONFIG_VIRTIO_MENU is not set +CONFIG_COMMON_CLK_VC5=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_MEMORY=y +CONFIG_BT1_L2_CTL=y +CONFIG_NVMEM_U_BOOT_ENV=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_NTFS_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_WBUF_VERIFY=y +CONFIG_UBIFS_FS=y +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_866=y +CONFIG_NLS_CODEPAGE_1251=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_5=y +CONFIG_NLS_KOI8_R=y +CONFIG_NLS_UTF8=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_USER=y +CONFIG_CRYPTO_BLOWFISH=y +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_CRC32=y +CONFIG_CRYPTO_ANSI_CPRNG=y +# CONFIG_CRYPTO_HW is not set +CONFIG_FORCE_NR_CPUS=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SCHEDSTATS=y +# CONFIG_EARLY_PRINTK is not set +# CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/mips/configs/bfk3_defconfig b/arch/mips/configs/bfk3_defconfig new file mode 100644 index 0000000000000..7584bf01c5e34 --- /dev/null +++ b/arch/mips/configs/bfk3_defconfig @@ -0,0 +1,414 @@ +CONFIG_LOCALVERSION="-bt1" +CONFIG_DEFAULT_HOSTNAME="baikal" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_PREEMPT=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_MISC=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_MIPS_BAIKAL_T1=y +CONFIG_BT1_CPU_FEATURE_OVERRIDES=y +CONFIG_BT1_DTB_BFK=y +CONFIG_CPU_P5600=y +CONFIG_CPU_MIPS32_R5_FEATURES=y +CONFIG_CPU_MIPS32_R5_XPA=y +CONFIG_ZBOOT_LOAD_ADDRESS=0x85100000 +CONFIG_PAGE_SIZE_16KB=y +CONFIG_CPU_HAS_MSA=y +CONFIG_NR_CPUS=2 +CONFIG_IEEE754_DEFAULT_RELAXED=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y 
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPUFREQ_DT=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_SLAB=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_RAW_DIAG=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_TABLES=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_LOG=y +CONFIG_NFT_LIMIT=y +CONFIG_NFT_MASQ=y +CONFIG_NFT_REDIR=y +CONFIG_NFT_NAT=y +CONFIG_NFT_REJECT=y +CONFIG_NFT_HASH=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_CGROUP=y +CONFIG_NETFILTER_XT_MATCH_CPU=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_RECENT=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_TCPMSS=y +CONFIG_NFT_DUP_IPV4=y +CONFIG_NF_TABLES_ARP=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_SYNPROXY=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NFT_DUP_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBS=y +CONFIG_NET_SCH_ETF=y +CONFIG_NET_SCH_TAPRIO=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOWER=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_NETLINK_DIAG=y +CONFIG_CGROUP_NET_PRIO=y +# CONFIG_WIRELESS is not set +CONFIG_PAGE_POOL_STATS=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=y +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM_PERFORMANCE=y +CONFIG_PCI_MSI=y +CONFIG_PCIE_BUS_PERFORMANCE=y +CONFIG_HOTPLUG_PCI=y +CONFIG_PCIE_BT1=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DEBUG_DRIVER=y +CONFIG_DEBUG_DEVRES=y +CONFIG_BT1_APB=y +CONFIG_BT1_AXI=y +CONFIG_MIPS_CDMM=y +CONFIG_MTD=y +CONFIG_MTD_PARTITIONED_MASTER=y +CONFIG_MTD_ROM=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PHYSMAP_BT1_ROM=y +CONFIG_MTD_SPI_NOR=y +# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BLOCK=y +CONFIG_BLK_DEV_LOOP=y 
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=4 +CONFIG_BLK_DEV_NBD=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=2 +CONFIG_BLK_DEV_RAM_SIZE=32768 +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_VERBOSE_ERRORS=y +CONFIG_NVME_HWMON=y +CONFIG_SRAM=y +CONFIG_EEPROM_AT24=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_ATA=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_DWC=y +# CONFIG_ATA_SFF is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_AMD_XGBE=y +CONFIG_AMD_XGBE_DCB=y +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ASIX is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_DAVICOM is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_ENGLEDER is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_FUNGIBLE is not set +# CONFIG_NET_VENDOR_GOOGLE is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_WANGXUN is not set +# CONFIG_NET_VENDOR_ADI is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MICROSOFT is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +# CONFIG_NET_VENDOR_PENSANDO is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_SELFTESTS=y +CONFIG_DWMAC_BT1=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_TEHUTI=y +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VERTEXCOM is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_SFP=y +CONFIG_MARVELL_PHY=y +CONFIG_MARVELL_10G_PHY=y +CONFIG_MARVELL_88X2222_KR_PHY=y +CONFIG_MICREL_PHY=y +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_GPIO=y +# CONFIG_USB_NET_DRIVERS is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +# CONFIG_VT_CONSOLE is not set 
+CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=2 +CONFIG_SERIAL_8250_RUNTIME_UARTS=2 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_PERICOM is not set +CONFIG_MIPS_EJTAG_FDC_TTY=y +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_DESIGNWARE_SLAVE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_SLAVE_EEPROM=y +CONFIG_SPI=y +CONFIG_SPI_DESIGNWARE=y +CONFIG_SPI_DW_DMA=y +CONFIG_SPI_DW_MMIO=y +CONFIG_SPI_DW_BT1=y +CONFIG_SPI_SPIDEV=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_PCA953X=y +CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCF857X=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_SENSORS_BT1_PVT=y +CONFIG_SENSORS_BT1_PVT_ALARMS=y +CONFIG_SENSORS_TMP102=y +CONFIG_THERMAL=y +CONFIG_THERMAL_STATISTICS=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_WATCHDOG_PRETIMEOUT_GOV=y +CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y +CONFIG_DW_WATCHDOG=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB_ULPI_BUS=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_USB_UAS=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_ULPI=y +CONFIG_EDAC=y +CONFIG_EDAC_DEBUG=y +CONFIG_EDAC_SYNOPSYS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_ABEOZ9=y +CONFIG_RTC_DRV_PCF85363=y +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=y +CONFIG_DW_EDMA=y +CONFIG_SYNC_FILE=y +# CONFIG_VIRTIO_MENU is not set +CONFIG_COMMON_CLK_VC5=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_MEMORY=y +CONFIG_BT1_L2_CTL=y +CONFIG_NVMEM_U_BOOT_ENV=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_NTFS_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_WBUF_VERIFY=y +CONFIG_UBIFS_FS=y +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_866=y +CONFIG_NLS_CODEPAGE_1251=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_5=y +CONFIG_NLS_KOI8_R=y +CONFIG_NLS_UTF8=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_USER=y +CONFIG_CRYPTO_BLOWFISH=y +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_CRC32=y +CONFIG_CRYPTO_ANSI_CPRNG=y +# CONFIG_CRYPTO_HW is not set +CONFIG_FORCE_NR_CPUS=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SCHEDSTATS=y +# CONFIG_EARLY_PRINTK is not set +# CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index a600670d00e97..b266d48925577 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -114,7 +114,7 @@ struct cpuinfo_mips { */ u32 loongson3_cpucfg_data[3]; #endif -} __attribute__((aligned(SMP_CACHE_BYTES))); +} __aligned(SMP_CACHE_BYTES) __randomize_layout; extern struct cpuinfo_mips cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] diff --git a/arch/mips/include/asm/io.h 
b/arch/mips/include/asm/io.h index e6d5ccaa309ea..b7f97fbcefbec 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -216,7 +216,7 @@ void iounmap(const volatile void __iomem *addr); #define war_io_reorder_wmb() barrier() #endif -#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \ +#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, relax, irq) \ \ static inline void pfx##write##bwlq(type val, \ volatile void __iomem *mem) \ @@ -224,7 +224,7 @@ static inline void pfx##write##bwlq(type val, \ volatile type *__mem; \ type __val; \ \ - if (barrier) \ + if (!(relax && IS_ENABLED(CONFIG_STRONG_UC_ORDERING))) \ iobarrier_rw(); \ else \ war_io_reorder_wmb(); \ @@ -265,7 +265,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ \ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ \ - if (barrier) \ + if (!(relax && IS_ENABLED(CONFIG_STRONG_UC_ORDERING))) \ iobarrier_rw(); \ \ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ @@ -297,14 +297,14 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ return pfx##ioswab##bwlq(__mem, __val); \ } -#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p) \ +#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, relax, p) \ \ static inline void pfx##out##bwlq##p(type val, unsigned long port) \ { \ volatile type *__addr; \ type __val; \ \ - if (barrier) \ + if (!(relax && IS_ENABLED(CONFIG_STRONG_UC_ORDERING))) \ iobarrier_rw(); \ else \ war_io_reorder_wmb(); \ @@ -328,7 +328,7 @@ static inline type pfx##in##bwlq##p(unsigned long port) \ \ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ \ - if (barrier) \ + if (!(relax && IS_ENABLED(CONFIG_STRONG_UC_ORDERING))) \ iobarrier_rw(); \ \ __val = *__addr; \ @@ -341,7 +341,7 @@ static inline type pfx##in##bwlq##p(unsigned long port) \ #define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \ \ -__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1) +__BUILD_MEMORY_SINGLE(bus, bwlq, type, relax, 1) #define BUILDIO_MEM(bwlq, type) \ \ @@ -361,8 +361,8 @@ __BUILD_MEMORY_PFX(__mem_, q, u64, 0) #endif #define __BUILD_IOPORT_PFX(bus, bwlq, type) \ - __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \ - __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p) + __BUILD_IOPORT_SINGLE(bus, bwlq, type, 0,) \ + __BUILD_IOPORT_SINGLE(bus, bwlq, type, 0, _p) #define BUILDIO_IOPORT(bwlq, type) \ __BUILD_IOPORT_PFX(, bwlq, type) \ @@ -377,7 +377,7 @@ BUILDIO_IOPORT(q, u64) #define __BUILDIO(bwlq, type) \ \ -__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0) +__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0, 0) __BUILDIO(q, u64) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 5cedb28e8a408..b157b1fa48c29 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -756,7 +756,7 @@ struct kvm_mips_callbacks { int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu); int (*vcpu_run)(struct kvm_vcpu *vcpu); void (*vcpu_reenter)(struct kvm_vcpu *vcpu); -}; +} __no_randomize_layout; extern struct kvm_mips_callbacks *kvm_mips_callbacks; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); diff --git a/arch/mips/include/asm/mach-baikal-t1/cpu-feature-overrides.h b/arch/mips/include/asm/mach-baikal-t1/cpu-feature-overrides.h new file mode 100644 index 0000000000000..a35eba5cd8f0a --- /dev/null +++ b/arch/mips/include/asm/mach-baikal-t1/cpu-feature-overrides.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, 
JSC + * + * Baikal-T1 core features override + */ +#ifndef __ASM_MACH_BAIKAL_T1_CPU_FEATURE_OVERRIDES_H__ +#define __ASM_MACH_BAIKAL_T1_CPU_FEATURE_OVERRIDES_H__ + +#ifdef CONFIG_BT1_CPU_FEATURE_OVERRIDES + +#define cpu_has_tlb 1 +/* Don't override FTLB flag otherwise 'noftlb' option won't work. */ +/* #define cpu_has_ftlb 1 */ +#define cpu_has_tlbinv 1 +#define cpu_has_segments 1 +#define cpu_has_eva 1 +#define cpu_has_htw 1 +#define cpu_has_ldpte 0 +#define cpu_has_rixiex 1 +#define cpu_has_maar 1 +#define cpu_has_rw_llb 1 + +#define cpu_has_3kex 0 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_octeon_cache 0 + +/* Don't override FPU flags otherwise 'nofpu' option won't work. */ +/* #define cpu_has_fpu 1 */ +/* #define raw_cpu_has_fpu 1 */ +#define cpu_has_32fpr 1 + +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 +#define cpu_has_vce 0 +#define cpu_has_cache_cdex_p 0 +#define cpu_has_cache_cdex_s 0 +#define cpu_has_prefetch 1 +#define cpu_has_mcheck 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 +#define cpu_has_bp_ghist 0 +#define cpu_has_guestctl0ext 1 /* ? */ +#define cpu_has_guestctl1 1 /* ? */ +#define cpu_has_guestctl2 1 /* ? */ +#define cpu_has_guestid 1 +#define cpu_has_drg 0 +#define cpu_has_mips16 0 +#define cpu_has_mips16e2 0 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 + +#define cpu_has_rixi 1 + +#define cpu_has_mmips 0 +#define cpu_has_lpa 1 +#define cpu_has_mvh 1 +#define cpu_has_xpa 1 +#define cpu_has_vtag_icache 0 +#define cpu_has_dc_aliases 0 +#define cpu_has_ic_fills_f_dc 0 +#define cpu_has_pindexed_dcache 0 +/* Depends on the MIPS_CM/SMP configs. */ +/* #define cpu_icache_snoops_remote_store 1 */ + +/* + * MIPS P5600 Warrior is based on the MIPS32 Release 2 architecture, which + * makes it backward compatible with all 32bits early MIPS architecture + * releases. + */ +#define cpu_has_mips_1 1 +#define cpu_has_mips_2 1 +#define cpu_has_mips_3 0 +#define cpu_has_mips_4 0 +#define cpu_has_mips_5 0 +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips32r5 1 +#define cpu_has_mips32r6 0 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 +#define cpu_has_mips64r6 0 +#define cpu_has_mips_r2_exec_hazard 0 + +#define cpu_has_clo_clz 1 +#define cpu_has_wsbh 1 +#define cpu_has_dsp 0 +#define cpu_has_dsp2 0 +#define cpu_has_dsp3 0 +#define cpu_has_loongson_mmi 0 +#define cpu_has_loongson_cam 0 +#define cpu_has_loongson_ext 0 +#define cpu_has_loongson_ext2 0 +#define cpu_has_mipsmt 0 +#define cpu_has_vp 0 +#define cpu_has_userlocal 1 + +#define cpu_has_nofpuex 0 +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 +/* + * VINT is hardwired to 1 by P5600 design while VEIC as being SI_EICPresent + * and seeing we always have MIPS GIC available in the chip must have been set + * to 1. Alas the IP core engineers mistakenly made it to be wired with + * GIC_VX_CTL_EIC bit. Lets fix it by manually setting the flag to 1. + */ +#define cpu_has_vint 1 +#define cpu_has_veic 1 +/* Chaches line size is fixed by P5600 design. 
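+ * Hard-wiring the sizes here lets the generic cache routines work on
+ * compile-time constants instead of re-reading them from cpu_data.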
*/ +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 +#define cpu_scache_line_size() 32 +#define cpu_tcache_line_size() 0 +#define cpu_hwrena_impl_bits 0 +#define cpu_has_perf_cntr_intr_bit 1 +#define cpu_has_vz 1 +#define cpu_has_msa 1 +#define cpu_has_ufr 1 +#define cpu_has_fre 0 +#define cpu_has_cdmm 1 +#define cpu_has_small_pages 0 +#define cpu_has_nan_legacy 0 +#define cpu_has_nan_2008 1 +#define cpu_has_ebase_wg 1 +#define cpu_has_badinstr 1 +#define cpu_has_badinstrp 1 +#define cpu_has_contextconfig 1 +#define cpu_has_perf 1 +#define cpu_has_mac2008_only 0 +#define cpu_has_mmid 0 +#define cpu_has_mm_sysad 0 +#define cpu_has_mm_full 1 + +#endif /* CONFIG_BT1_CPU_FEATURE_OVERRIDES */ + +#endif /* __ASM_MACH_BAIKAL_T1_CPU_FEATURE_OVERRIDES_H__ */ diff --git a/arch/mips/include/asm/mach-baikal-t1/irq.h b/arch/mips/include/asm/mach-baikal-t1/irq.h new file mode 100644 index 0000000000000..343b200de1625 --- /dev/null +++ b/arch/mips/include/asm/mach-baikal-t1/irq.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 IRQ numbers declaration + */ +#ifndef __ASM_MACH_BAIKAL_T1_IRQ_H__ +#define __ASM_MACH_BAIKAL_T1_IRQ_H__ + +#define NR_IRQS 255 +#define MIPS_CPU_IRQ_BASE 0 + +#define BT1_APB_EHB_IRQ 16 +#define BT1_WDT_IRQ 17 +#define BT1_GPIO32_IRQ 19 +#define BT1_TIMER0_IRQ 24 +#define BT1_TIMER1_IRQ 25 +#define BT1_TIMER2_IRQ 26 +#define BT1_PVT_IRQ 31 +#define BT1_I2C1_IRQ 33 +#define BT1_I2C2_IRQ 34 +#define BT1_SPI1_IRQ 40 +#define BT1_SPI2_IRQ 41 +#define BT1_UART0_IRQ 48 +#define BT1_UART1_IRQ 49 +#define BT1_DMAC_IRQ 56 +#define BT1_SATA_IRQ 64 +#define BT1_USB_IRQ 68 +#define BT1_GMAC0_IRQ 72 +#define BT1_GMAC1_IRQ 73 +#define BT1_XGMAC_IRQ 74 +#define BT1_XGMAC_TX0_IRQ 75 +#define BT1_XGMAC_TX1_IRQ 76 +#define BT1_XGMAC_RX0_IRQ 77 +#define BT1_XGMAC_RX1_IRQ 78 +#define BT1_XGMAC_XPCS_IRQ 79 +#define BT1_PCIE_EDMA_TX0_IRQ 80 +#define BT1_PCIE_EDMA_TX1_IRQ 81 +#define BT1_PCIE_EDMA_TX2_IRQ 82 +#define BT1_PCIE_EDMA_TX3_IRQ 83 +#define BT1_PCIE_EDMA_RX0_IRQ 84 +#define BT1_PCIE_EDMA_RX1_IRQ 85 +#define BT1_PCIE_EDMA_RX2_IRQ 86 +#define BT1_PCIE_EDMA_RX3_IRQ 87 +#define BT1_PCIE_MSI_IRQ 88 +#define BT1_PCIE_AER_IRQ 89 +#define BT1_PCIE_PME_IRQ 90 +#define BT1_PCIE_HP_IRQ 91 +#define BT1_PCIE_BW_IRQ 92 +#define BT1_PCIE_L_REQ_IRQ 93 +#define BT1_DDR_DFI_E_IRQ 96 +#define BT1_DDR_ECC_CE_IRQ 97 +#define BT1_DDR_ECC_UE_IRQ 98 +#define BT1_DDR_ECC_SBR_IRQ 99 +#define BT1_HWA_IRQ 104 +#define BT1_AXI_EHB_IRQ 127 + +#include_next + +#endif /* __ASM_MACH_BAIKAL_T1_IRQ_H__ */ diff --git a/arch/mips/include/asm/mach-baikal-t1/kernel-entry-init.h b/arch/mips/include/asm/mach-baikal-t1/kernel-entry-init.h new file mode 100644 index 0000000000000..a681bd927b776 --- /dev/null +++ b/arch/mips/include/asm/mach-baikal-t1/kernel-entry-init.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 platform low-level initialization + */ +#ifndef __ASM_MACH_BAIKAL_T1_KERNEL_ENTRY_INIT_H__ +#define __ASM_MACH_BAIKAL_T1_KERNEL_ENTRY_INIT_H__ + +#include +#include + + /* + * Prepare segments for EVA boot: + * + * This is in case the processor boots in legacy configuration + * (SI_EVAReset is de-asserted and CONFIG5.K == 0) + * + * =========================== Mappings =============================== + * CFG Virtual memory Physical memory CCA Mapping + * 5 0x00000000 0x3fffffff 0x80000000 0xBffffffff K0 MUSUK (kuseg) + * 4 0x40000000 
0x7fffffff 0xC0000000 0xfffffffff K0 MUSUK (kuseg) + * Flat 2GB physical mem + * + * 3 0x80000000 0x9fffffff 0x00000000 0x1ffffffff K0 MUSUK (kseg0) + * 2 0xa0000000 0xbf000000 0x00000000 0x1ffffffff UC MUSUK (kseg1) + * 1 0xc0000000 0xdfffffff - K0 MK (kseg2) + * 0 0xe0000000 0xffffffff - K0 MK (kseg3) + * where UC = 2 Uncached non-coherent, + * WB = 3 Cacheable, non-coherent, write-back, write allocate, + * CWBE = 4 Cacheable, coherent, write-back, write-allocate, read + * misses request Exclusive, + * CWB = 5 Cacheable, coherent, write-back, write-allocate, read misses + * request Shared, + * UCA = 7 Uncached Accelerated, non-coherent. + * UK = 0 Kernel-only unmapped region, + * MK = 1 Kernel-only mapped region, + * MSK = 2 Supervisor and kernel mapped region, + * MUSK = 3 User, supervisor and kernel mapped region, + * MUSUK = 4 Used to implement a fully-mapped flat address space in + * user and supervisor modes, with unmapped regions which + * appear in kernel mode, + * USK = 5 Supervisor and kernel unmapped region, + * UUSK = 7 Unrestricted unmapped region. + * + * Note K0 = 2 by default on MIPS Warrior P5600. + * + * Lowmem is expanded to 2GB. + * + * The following code uses the t0, t1, t2 and ra registers without + * previously preserving them. + * + */ + .macro platform_eva_init + + .set push + .set reorder + /* + * Get Config.K0 value and use it to program + * the segmentation registers + */ + mfc0 t1, CP0_CONFIG + andi t1, 0x7 /* CCA */ + move t2, t1 + ins t2, t1, 16, 3 + /* SegCtl0 */ + li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \ + (0 << MIPS_SEGCFG_PA_SHIFT) | \ + (1 << MIPS_SEGCFG_EU_SHIFT)) | \ + (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \ + (0 << MIPS_SEGCFG_PA_SHIFT) | \ + (1 << MIPS_SEGCFG_EU_SHIFT)) << 16) + or t0, t2 + mtc0 t0, CP0_SEGCTL0 + + /* SegCtl1 */ + li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \ + (0 << MIPS_SEGCFG_PA_SHIFT) | \ + (2 << MIPS_SEGCFG_C_SHIFT) | \ + (1 << MIPS_SEGCFG_EU_SHIFT)) | \ + (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \ + (0 << MIPS_SEGCFG_PA_SHIFT) | \ + (1 << MIPS_SEGCFG_EU_SHIFT)) << 16) + ins t0, t1, 16, 3 + mtc0 t0, CP0_SEGCTL1 + + /* SegCtl2 */ + li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \ + (6 << MIPS_SEGCFG_PA_SHIFT) | \ + (1 << MIPS_SEGCFG_EU_SHIFT)) | \ + (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \ + (4 << MIPS_SEGCFG_PA_SHIFT) | \ + (1 << MIPS_SEGCFG_EU_SHIFT)) << 16) + or t0, t2 + mtc0 t0, CP0_SEGCTL2 + + jal mips_ihb + mfc0 t0, CP0_CONFIG5 + li t2, MIPS_CONF5_K /* K bit */ + or t0, t0, t2 + mtc0 t0, CP0_CONFIG5 + sync + jal mips_ihb + nop + + .set pop + .endm + + /* + * Prepare segments for LEGACY boot: + * + * =========================== Mappings ============================== + * CFG Virtual memory Physical memory CCA Mapping + * 5 0x00000000 0x3fffffff - CWB MUSK (kuseg) + * 4 0x40000000 0x7fffffff - CWB MUSK (kuseg) + * 3 0x80000000 0x9fffffff 0x00000000 0x1ffffffff CWB UK (kseg0) + * 2 0xa0000000 0xbf000000 0x00000000 0x1ffffffff 2 UK (kseg1) + * 1 0xc0000000 0xdfffffff - CWB MSK (kseg2) + * 0 0xe0000000 0xffffffff - CWB MK (kseg3) + * + * The following code uses the t0, t1, t2 and ra registers without + * previously preserving them. + * + */ + .macro platform_legacy_init + + .set push + .set reorder + + /* + * Directly use cacheable, coherent, write-back, write-allocate, read + * misses request shared attribute (CWB). 
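+ * CWB corresponds to CCA encoding 5 in the legend above, which is the
+ * value loaded into t1 right below.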
+ */ + li t1, 0x5 + move t2, t1 + ins t2, t1, 16, 3 + /* SegCtl0 */ + li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \ + (0 << MIPS_SEGCFG_PA_SHIFT)) | \ + (((MIPS_SEGCFG_MSK << MIPS_SEGCFG_AM_SHIFT) | \ + (0 << MIPS_SEGCFG_PA_SHIFT)) << 16) + or t0, t2 + mtc0 t0, CP0_SEGCTL0 + + /* SegCtl1 */ + li t0, ((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \ + (0 << MIPS_SEGCFG_PA_SHIFT) | \ + (2 << MIPS_SEGCFG_C_SHIFT)) | \ + (((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \ + (0 << MIPS_SEGCFG_PA_SHIFT)) << 16) + ins t0, t1, 16, 3 + mtc0 t0, CP0_SEGCTL1 + + /* SegCtl2 */ + li t0, ((MIPS_SEGCFG_MUSK << MIPS_SEGCFG_AM_SHIFT) | \ + (6 << MIPS_SEGCFG_PA_SHIFT) | \ + (1 << MIPS_SEGCFG_EU_SHIFT)) | \ + (((MIPS_SEGCFG_MUSK << MIPS_SEGCFG_AM_SHIFT) | \ + (4 << MIPS_SEGCFG_PA_SHIFT) | \ + (1 << MIPS_SEGCFG_EU_SHIFT)) << 16) + or t0, t2 + mtc0 t0, CP0_SEGCTL2 + + jal mips_ihb + nop + + mfc0 t0, CP0_CONFIG, 5 + li t2, MIPS_CONF5_K /* K bit */ + or t0, t0, t2 + mtc0 t0, CP0_CONFIG, 5 + sync + jal mips_ihb + nop + + .set pop + .endm + + /* + * Baikal-T1 engineering chip had problems when the next features + * were enabled. + */ + .macro platform_errata_jr_ls_fix + + .set push + .set reorder + + jal mips_ihb + nop + + /* Disable load/store bonding. */ + mfc0 t0, CP0_CONFIG, 6 + lui t1, (MIPS_CONF6_DLSB >> 16) + or t0, t0, t1 + /* Disable all JR prediction except JR $31. */ + ori t0, t0, MIPS_CONF6_JRCD + mtc0 t0, CP0_CONFIG, 6 + sync + jal mips_ihb + nop + + /* Disable all JR $31 prediction through return prediction stack. */ + mfc0 t0, CP0_CONFIG, 7 + ori t0, t0, MIPS_CONF7_RPS + mtc0 t0, CP0_CONFIG, 7 + sync + jal mips_ihb + nop + + .set pop + .endm + + /* + * Setup Baikal-T1 platform specific setups of the memory segments + * layout. In case if the kernel is built for engineering version + * of the chip some errata must be fixed. + */ + .macro kernel_entry_setup + + sync + ehb + +#ifdef CONFIG_EVA + platform_eva_init +#else + platform_legacy_init +#endif + +#ifdef CONFIG_BT1_ERRATA_JR_LS_BUG + platform_errata_jr_ls_fix +#endif + + .endm + + /* + * Do SMP slave processor setup necessary before we can safely execute + * C code. 
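+ * The segment and Config5.K programming is repeated here because
+ * SegCtl0-2 and Config5 are per-CPU state, so every secondary core
+ * must apply the same mapping before it can safely run kernel C code.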
+ */ + .macro smp_slave_setup + sync + ehb + +#ifdef CONFIG_EVA + platform_eva_init +#else + platform_legacy_init +#endif + +#ifdef CONFIG_BT1_ERRATA_JR_LS_BUG + platform_errata_jr_ls_fix +#endif + + .endm +#endif /* __ASM_MACH_BAIKAL_T1_KERNEL_ENTRY_INIT_H__ */ diff --git a/arch/mips/include/asm/mach-baikal-t1/memory.h b/arch/mips/include/asm/mach-baikal-t1/memory.h new file mode 100644 index 0000000000000..a2c3f6cbcd711 --- /dev/null +++ b/arch/mips/include/asm/mach-baikal-t1/memory.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 SoC Static Memory Mapping + */ +#ifndef __ASM_MACH_BAIKAL_T1_MEMORY_H__ +#define __ASM_MACH_BAIKAL_T1_MEMORY_H__ + +#include +#include + +#define BT1_LOMEM_BASE 0x00000000 +#define BT1_LOMEM_SIZE SZ_128M + +#define BT1_MMIO_START BT1_PCIE_MAP_BASE + +#define BT1_PCIE_MAP_BASE 0x08000000 +#define BT1_PCIE_MAP_SIZE 0x13DC0000 + +#define BT1_P5600_GIC_BASE 0x1BDC0000 +#define BT1_P5600_GIC_SIZE SZ_128K +#define BT1_P5600_CPC_BASE 0x1BDE0000 +#define BT1_P5600_CPC_SIZE SZ_32K +#define BT1_P5600_CDMM_BASE 0x1BDE8000 +#define BT1_P5600_CDMM_SIZE SZ_32K + +#define BT1_SRAM_BASE 0x1BF80000 +#define BT1_SRAM_SIZE SZ_64K +#define BT1_ROM_BASE 0x1BFC0000 +#define BT1_ROM_SIZE SZ_64K +#define BT1_FLASH_BASE 0x1C000000 +#define BT1_FLASH_SIZE SZ_16M + +#define BT1_BOOT_CTRL_BASE 0x1F040000 +#define BT1_BOOT_CTRL_SIZE SZ_4K +#define BT1_BOOT_CTRL_CSR 0x00 +#define BT1_BOOT_CTRL_MAR 0x04 +#define BT1_BOOT_CTRL_DRID 0x08 +#define BT1_BOOT_CTRL_VID 0x0C + +#define BT1_DMAC_BASE 0x1F041000 +#define BT1_DMAC_SIZE SZ_4K +#define BT1_DDR_UMCTL2_BASE 0x1F042000 +#define BT1_DDR_UMCTL2_SIZE SZ_4K +#define BT1_DDR_PHY_BASE 0x1F043000 +#define BT1_DDR_PHY_SIZE SZ_4K +#define BT1_GPIO32_BASE 0x1F044000 +#define BT1_GPIO32_SIZE SZ_4K +#define BT1_GPIO3_BASE 0x1F045000 +#define BT1_GPIO3_SIZE SZ_4K +#define BT1_I2C1_BASE 0x1F046000 +#define BT1_I2C1_SIZE SZ_4K +#define BT1_I2C2_BASE 0x1F047000 +#define BT1_I2C2_SIZE SZ_4K +#define BT1_TIMERS_BASE 0x1F049000 +#define BT1_TIMERS_SIZE SZ_4K +#define BT1_UART0_BASE 0x1F04A000 +#define BT1_UART0_SIZE SZ_4K +#define BT1_UART1_BASE 0x1F04B000 +#define BT1_UART1_SIZE SZ_4K +#define BT1_WDT_BASE 0x1F04C000 +#define BT1_WDT_SIZE SZ_4K +#define BT1_CCU_BASE 0x1F04D000 +#define BT1_CCU_SIZE SZ_4K +#define BT1_SPI1_BASE 0x1F04E000 +#define BT1_SPI1_SIZE SZ_4K +#define BT1_SPI2_BASE 0x1F04F000 +#define BT1_SPI2_SIZE SZ_4K +#define BT1_SATA_BASE 0x1F050000 +#define BT1_SATA_SIZE SZ_4K +#define BT1_PCIE_BASE 0x1F052000 +#define BT1_PCIE_SIZE SZ_4K +#define BT1_PCIE_DBI2_BASE 0x1F053000 +#define BT1_PCIE_DBI2_SIZE SZ_4K +#define BT1_XGMAC_BASE 0x1F054000 +#define BT1_XGMAC_SIZE SZ_16K +#define BT1_APB_EHB_BASE 0x1F059000 +#define BT1_APB_EHB_SIZE SZ_4K +#define BT1_MAIN_IC_BASE 0x1F05A000 +#define BT1_MAIN_IC_SIZE SZ_4K +#define BT1_HWA_BASE 0x1F05B000 +#define BT1_HWA_SIZE SZ_8K +#define BT1_XGMAC_XPCS_BASE 0x1F05D000 +#define BT1_XGMAC_XPCS_SIZE SZ_4K +#define BT1_GMAC0_BASE 0x1F05E000 +#define BT1_GMAC0_SIZE SZ_8K +#define BT1_GMAC1_BASE 0x1F060000 +#define BT1_GMAC1_SIZE SZ_8K +#define BT1_USB_BASE 0x1F100000 +#define BT1_USB_SIZE SZ_1M +#define BT1_PVT_BASE 0x1F200000 +#define BT1_PVT_SIZE SZ_4K +#define BT1_EFUSE_BASE 0x1F201000 +#define BT1_EFUSE_SIZE SZ_4K + +#define BT1_P5600_GCR_L2SYNC_BASE 0x1FBF0000 +#define BT1_P5600_GCR_L2SYNC_SIZE SZ_4K +#define BT1_P5600_GCB_BASE 0x1FBF8000 +#define BT1_P5600_GCB_SIZE SZ_8K +#define BT1_P5600_CLCB_BASE 0x1FBFA000 +#define 
BT1_P5600_CLCB_SIZE SZ_8K +#define BT1_P5600_COCB_BASE 0x1FBFC000 +#define BT1_P5600_COCB_SIZE SZ_8K +#define BT1_P5600_DBG_BASE 0x1FBFE000 +#define BT1_P5600_DBG_SIZE SZ_8K +#define BT1_BOOT_MAP_BASE 0x1FC00000 +#define BT1_BOOT_MAP_SIZE SZ_4M + +#define BT1_DEFAULT_BEV KSEG1ADDR(BT1_BOOT_MAP_BASE) + +#define BT1_MMIO_END BT1_HIMEM_BASE + +#define BT1_HIMEM_BASE 0x20000000 +#define BT1_HIMEM_SIZE SZ_256M + +#endif /* __ASM_MACH_BAIKAL_T1_MEMORY_H__ */ diff --git a/arch/mips/include/asm/mach-baikal-t1/platform.h b/arch/mips/include/asm/mach-baikal-t1/platform.h new file mode 100644 index 0000000000000..5c873de374f21 --- /dev/null +++ b/arch/mips/include/asm/mach-baikal-t1/platform.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 platform declarations + */ +#ifndef __ASM_MACH_BAIKAL_T1_PLATFORM_H__ +#define __ASM_MACH_BAIKAL_T1_PLATFORM_H__ + +#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED + +int mips_set_uca_range(phys_addr_t start, phys_addr_t end); + +#else /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */ + +static inline int mips_set_uca_range(phys_addr_t start, phys_addr_t end) +{ + return 0; +} + +#endif /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */ + +#endif /* __ASM_MACH_BAIKAL_T1_PLATFORM_H__ */ diff --git a/arch/mips/include/asm/mach-baikal-t1/spaces.h b/arch/mips/include/asm/mach-baikal-t1/spaces.h new file mode 100644 index 0000000000000..beccd1129cee5 --- /dev/null +++ b/arch/mips/include/asm/mach-baikal-t1/spaces.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 SoC Memory Spaces + */ +#ifndef __ASM_MACH_BAIKAL_T1_SPACES_H__ +#define __ASM_MACH_BAIKAL_T1_SPACES_H__ + +#define PCI_IOBASE mips_io_port_base +#define PCI_IOSIZE SZ_64K +#define IO_SPACE_LIMIT (PCI_IOSIZE - 1) + +#define pci_remap_iospace pci_remap_iospace + +#include + +#endif /* __ASM_MACH_BAIKAL_T1_SPACES_H__ */ diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index 23c67c0871b17..6ed1978830dd9 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -144,6 +144,21 @@ GCR_ACCESSOR_RW(64, 0x008, base) #define CM_GCR_BASE_CMDEFTGT_IOCU0 2 #define CM_GCR_BASE_CMDEFTGT_IOCU1 3 +/* GCR_CONTROL - Global CM2 Settings */ +GCR_ACCESSOR_RW(64, 0x010, control) +#define CM_GCR_CONTROL_SYNCCTL BIT(16) +#define CM_GCR_CONTROL_SYNCDIS BIT(5) +#define CM_GCR_CONTROL_IVU_EN BIT(4) +#define CM_GCR_CONTROL_SHST_EN BIT(3) +#define CM_GCR_CONTROL_PARK_EN BIT(2) +#define CM_GCR_CONTROL_MMIO_LIMIT_DIS BIT(1) +#define CM_GCR_CONTROL_SPEC_READ_EN BIT(0) + +/* GCR_CONTROL2 - Global CM2 Settings (continue) */ +GCR_ACCESSOR_RW(64, 0x018, control2) +#define CM_GCR_CONTROL2_L2_CACHEOP_LIMIT GENMASK(19, 16) +#define CM_GCR_CONTROL2_L1_CACHEOP_LIMIT GENMASK(3, 0) + /* GCR_ACCESS - Controls core/IOCU access to GCRs */ GCR_ACCESSOR_RW(32, 0x020, access) #define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0) diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 99eeafe6dcabd..560c646419beb 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -2869,6 +2869,7 @@ set_##name(unsigned int set) \ res = read_##name(); \ new = res | set; \ write_##name(new); \ + _ehb(); \ \ return res; \ } \ @@ -2881,6 +2882,7 @@ clear_##name(unsigned int clear) \ res = read_##name(); \ new = res & ~clear; \ write_##name(new); \ + _ehb(); \ \ return res; \ } \ @@ -2894,6 +2896,7 @@ 
change_##name(unsigned int change, unsigned int val) \ new = res & ~change; \ new |= (val & change); \ write_##name(new); \ + _ehb(); \ \ return res; \ } diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index ed9f2d748f633..c0f8a4ee3fd70 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -232,6 +232,67 @@ drop_mmu_context(struct mm_struct *mm) local_irq_restore(flags); } +#ifdef CONFIG_MIPS_BAIKAL_T1 +/* Workaround for core stuck on TLB load exception */ +#define tlb_prefetch tlb_prefetch +static inline void tlb_prefetch(unsigned long addr) +{ + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + int idx, pid; + + if (addr < MAP_BASE) + return; + + addr &= (PAGE_MASK << 1); + if (cpu_has_mmid) { + write_c0_entryhi(addr); + } else { + pid = read_c0_entryhi() & cpu_asid_mask(¤t_cpu_data); + write_c0_entryhi(addr | pid); + } + pgdp = pgd_offset(&init_mm, addr); + mtc0_tlbw_hazard(); + tlb_probe(); + tlb_probe_hazard(); + p4dp = p4d_offset(pgdp, addr); + pudp = pud_offset(p4dp, addr); + pmdp = pmd_offset(pudp, addr); + idx = read_c0_index(); + + ptep = pte_offset_map(pmdp, addr); + +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) +#ifdef CONFIG_XPA + write_c0_entrylo0(pte_to_entrylo(ptep->pte_high)); + if (cpu_has_xpa) + writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK); + ptep++; + write_c0_entrylo1(pte_to_entrylo(ptep->pte_high)); + if (cpu_has_xpa) + writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK); +#else + write_c0_entrylo0(ptep->pte_high); + ptep++; + write_c0_entrylo1(ptep->pte_high); +#endif +#else + write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++))); + write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep))); +#endif + mtc0_tlbw_hazard(); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + + tlbw_use_hazard(); +} +#endif + #include #endif /* _ASM_MMU_CONTEXT_H */ diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index 864aea8039842..eb58457c7b3ac 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -37,7 +37,7 @@ struct plat_smp_ops { #ifdef CONFIG_KEXEC void (*kexec_nonboot_cpu)(void); #endif -}; +} __no_randomize_layout; extern void register_smp_ops(const struct plat_smp_ops *ops); diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index f1c88f8a1dc51..8e7f0630b70f6 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -323,11 +323,11 @@ static void __init bootmem_init(void) panic("Incorrect memory mapping !!!"); if (max_pfn > PFN_DOWN(HIGHMEM_START)) { + max_low_pfn = PFN_DOWN(HIGHMEM_START); #ifdef CONFIG_HIGHMEM - highstart_pfn = PFN_DOWN(HIGHMEM_START); + highstart_pfn = max_low_pfn; highend_pfn = max_pfn; #else - max_low_pfn = PFN_DOWN(HIGHMEM_START); max_pfn = max_low_pfn; #endif } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 80e05ee98d62f..dc20311e613c5 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -356,6 +356,7 @@ static struct work_registers build_get_work_registers(u32 **p) if (scratch_reg >= 0) { /* Save in CPU local C0_KScratch? 
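+ * (The uasm_i_ehb() added below emits an execution hazard barrier so
+ * the MTC0 to the scratch register has taken effect before the
+ * register is used again; presumably a P5600/Baikal-T1 CP0 hazard
+ * precaution.)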
*/ UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); + uasm_i_ehb(p); r.r1 = K0; r.r2 = K1; r.r3 = 1; diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig index 7ff17b2db6a16..550b2cece9a4e 100644 --- a/drivers/dma/dw-edma/Kconfig +++ b/drivers/dma/dw-edma/Kconfig @@ -12,7 +12,7 @@ config DW_EDMA config DW_EDMA_PCIE tristate "Synopsys DesignWare eDMA PCIe driver" depends on PCI && PCI_MSI - select DW_EDMA + depends on DW_EDMA help Provides a glue-logic between the Synopsys DesignWare eDMA controller and an endpoint PCIe device. This also serves diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index 52bdf04aff511..1906a836f0aab 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -39,6 +39,17 @@ struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) return container_of(vd, struct dw_edma_desc, vd); } +static inline +u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr) +{ + struct dw_edma_chip *chip = chan->dw->chip; + + if (chip->ops->pci_address) + return chip->ops->pci_address(chip->dev, cpu_addr); + + return cpu_addr; +} + static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) { struct dw_edma_burst *burst; @@ -197,6 +208,24 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan) desc->chunks_alloc--; } +static void dw_edma_device_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + if (chan->dir == EDMA_DIR_READ) + caps->directions = BIT(DMA_DEV_TO_MEM); + else + caps->directions = BIT(DMA_MEM_TO_DEV); + } else { + if (chan->dir == EDMA_DIR_WRITE) + caps->directions = BIT(DMA_DEV_TO_MEM); + else + caps->directions = BIT(DMA_MEM_TO_DEV); + } +} + static int dw_edma_device_config(struct dma_chan *dchan, struct dma_slave_config *config) { @@ -327,11 +356,12 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) { struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); enum dma_transfer_direction dir = xfer->direction; - phys_addr_t src_addr, dst_addr; struct scatterlist *sg = NULL; struct dw_edma_chunk *chunk; struct dw_edma_burst *burst; struct dw_edma_desc *desc; + u64 src_addr, dst_addr; + size_t fsz = 0; u32 cnt = 0; int i; @@ -381,9 +411,9 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) if (xfer->xfer.sg.len < 1) return NULL; } else if (xfer->type == EDMA_XFER_INTERLEAVED) { - if (!xfer->xfer.il->numf) + if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1) return NULL; - if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0) + if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc) return NULL; } else { return NULL; @@ -405,16 +435,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) dst_addr = chan->config.dst_addr; } + if (dir == DMA_DEV_TO_MEM) + src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr); + else + dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr); + if (xfer->type == EDMA_XFER_CYCLIC) { cnt = xfer->xfer.cyclic.cnt; } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { cnt = xfer->xfer.sg.len; sg = xfer->xfer.sg.sgl; } else if (xfer->type == EDMA_XFER_INTERLEAVED) { - if (xfer->xfer.il->numf > 0) - cnt = xfer->xfer.il->numf; - else - cnt = xfer->xfer.il->frame_size; + cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size; + fsz = xfer->xfer.il->frame_size; } for (i = 0; i < cnt; i++) { @@ -436,7 +469,7 @@ dw_edma_device_transfer(struct 
dw_edma_transfer *xfer) else if (xfer->type == EDMA_XFER_SCATTER_GATHER) burst->sz = sg_dma_len(sg); else if (xfer->type == EDMA_XFER_INTERLEAVED) - burst->sz = xfer->xfer.il->sgl[i].size; + burst->sz = xfer->xfer.il->sgl[i % fsz].size; chunk->ll_region.sz += burst->sz; desc->alloc_sz += burst->sz; @@ -479,20 +512,17 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) if (xfer->type == EDMA_XFER_SCATTER_GATHER) { sg = sg_next(sg); - } else if (xfer->type == EDMA_XFER_INTERLEAVED && - xfer->xfer.il->frame_size > 0) { + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { struct dma_interleaved_template *il = xfer->xfer.il; - struct data_chunk *dc = &il->sgl[i]; + struct data_chunk *dc = &il->sgl[i % fsz]; - if (il->src_sgl) { - src_addr += burst->sz; + src_addr += burst->sz; + if (il->src_sgl) src_addr += dmaengine_get_src_icg(il, dc); - } - if (il->dst_sgl) { - dst_addr += burst->sz; + dst_addr += burst->sz; + if (il->dst_sgl) dst_addr += dmaengine_get_dst_icg(il, dc); - } } } @@ -705,92 +735,76 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan) } } -static int dw_edma_channel_setup(struct dw_edma *dw, bool write, - u32 wr_alloc, u32 rd_alloc) +static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc) { struct dw_edma_chip *chip = dw->chip; - struct dw_edma_region *dt_region; struct device *dev = chip->dev; struct dw_edma_chan *chan; struct dw_edma_irq *irq; struct dma_device *dma; - u32 alloc, off_alloc; - u32 i, j, cnt; - int err = 0; + u32 i, ch_cnt; u32 pos; - if (write) { - i = 0; - cnt = dw->wr_ch_cnt; - dma = &dw->wr_edma; - alloc = wr_alloc; - off_alloc = 0; - } else { - i = dw->wr_ch_cnt; - cnt = dw->rd_ch_cnt; - dma = &dw->rd_edma; - alloc = rd_alloc; - off_alloc = wr_alloc; - } + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + dma = &dw->dma; INIT_LIST_HEAD(&dma->channels); - for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) { + + for (i = 0; i < ch_cnt; i++) { chan = &dw->chan[i]; - dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL); - if (!dt_region) - return -ENOMEM; + chan->dw = dw; - chan->vc.chan.private = dt_region; + if (i < dw->wr_ch_cnt) { + chan->id = i; + chan->dir = EDMA_DIR_WRITE; + } else { + chan->id = i - dw->wr_ch_cnt; + chan->dir = EDMA_DIR_READ; + } - chan->dw = dw; - chan->id = j; - chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ; chan->configured = false; chan->request = EDMA_REQ_NONE; chan->status = EDMA_ST_IDLE; - if (write) - chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ); + if (chan->dir == EDMA_DIR_WRITE) + chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ); else - chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ); + chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ); chan->ll_max -= 1; dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n", - write ? "write" : "read", j, chan->ll_max); + chan->dir == EDMA_DIR_WRITE ? "write" : "read", + chan->id, chan->ll_max); if (dw->nr_irqs == 1) pos = 0; + else if (chan->dir == EDMA_DIR_WRITE) + pos = chan->id % wr_alloc; else - pos = off_alloc + (j % alloc); + pos = wr_alloc + chan->id % rd_alloc; irq = &dw->irq[pos]; - if (write) - irq->wr_mask |= BIT(j); + if (chan->dir == EDMA_DIR_WRITE) + irq->wr_mask |= BIT(chan->id); else - irq->rd_mask |= BIT(j); + irq->rd_mask |= BIT(chan->id); irq->dw = dw; memcpy(&chan->msi, &irq->msi, sizeof(chan->msi)); dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n", - write ? "write" : "read", j, + chan->dir == EDMA_DIR_WRITE ? 
"write" : "read", chan->id, chan->msi.address_hi, chan->msi.address_lo, chan->msi.data); chan->vc.desc_free = vchan_free_desc; - vchan_init(&chan->vc, dma); + chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ? + &dw->chip->dt_region_wr[chan->id] : + &dw->chip->dt_region_rd[chan->id]; - if (write) { - dt_region->paddr = chip->dt_region_wr[j].paddr; - dt_region->vaddr = chip->dt_region_wr[j].vaddr; - dt_region->sz = chip->dt_region_wr[j].sz; - } else { - dt_region->paddr = chip->dt_region_rd[j].paddr; - dt_region->vaddr = chip->dt_region_rd[j].vaddr; - dt_region->sz = chip->dt_region_rd[j].sz; - } + vchan_init(&chan->vc, dma); dw_edma_v0_core_device_config(chan); } @@ -801,16 +815,16 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write, dma_cap_set(DMA_CYCLIC, dma->cap_mask); dma_cap_set(DMA_PRIVATE, dma->cap_mask); dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); - dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV); + dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; - dma->chancnt = cnt; /* Set DMA channel callbacks */ dma->dev = chip->dev; dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources; dma->device_free_chan_resources = dw_edma_free_chan_resources; + dma->device_caps = dw_edma_device_caps; dma->device_config = dw_edma_device_config; dma->device_pause = dw_edma_device_pause; dma->device_resume = dw_edma_device_resume; @@ -824,9 +838,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write, dma_set_max_seg_size(dma->dev, U32_MAX); /* Register DMA device */ - err = dma_async_device_register(dma); - - return err; + return dma_async_device_register(dma); } static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) @@ -897,10 +909,8 @@ static int dw_edma_irq_request(struct dw_edma *dw, dw_edma_interrupt_read, IRQF_SHARED, dw->name, &dw->irq[i]); - if (err) { - dw->nr_irqs = i; - return err; - } + if (err) + goto err_irq_free; if (irq_get_msi_desc(irq)) get_cached_msi_msg(irq, &dw->irq[i].msi); @@ -909,6 +919,14 @@ static int dw_edma_irq_request(struct dw_edma *dw, dw->nr_irqs = i; } + return 0; + +err_irq_free: + for (i--; i >= 0; i--) { + irq = chip->ops->irq_vector(dev, i); + free_irq(irq, &dw->irq[i]); + } + return err; } @@ -955,7 +973,8 @@ int dw_edma_probe(struct dw_edma_chip *chip) if (!dw->chan) return -ENOMEM; - snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id); + snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s", + dev_name(chip->dev)); /* Disable eDMA, only to establish the ideal initial conditions */ dw_edma_v0_core_off(dw); @@ -965,13 +984,8 @@ int dw_edma_probe(struct dw_edma_chip *chip) if (err) return err; - /* Setup write channels */ - err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc); - if (err) - goto err_irq_free; - - /* Setup read channels */ - err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc); + /* Setup write/read channels */ + err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc); if (err) goto err_irq_free; @@ -997,6 +1011,10 @@ int dw_edma_remove(struct dw_edma_chip *chip) struct dw_edma *dw = chip->dw; int i; + /* Skip removal if no private data found */ + if (!dw) + return -ENODEV; + /* Disable eDMA */ dw_edma_v0_core_off(dw); @@ -1005,23 +1023,13 @@ int dw_edma_remove(struct dw_edma_chip *chip) free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]); /* Deregister eDMA device */ - 
dma_async_device_unregister(&dw->wr_edma); - list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels, + dma_async_device_unregister(&dw->dma); + list_for_each_entry_safe(chan, _chan, &dw->dma.channels, vc.chan.device_node) { tasklet_kill(&chan->vc.task); list_del(&chan->vc.chan.device_node); } - dma_async_device_unregister(&dw->rd_edma); - list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels, - vc.chan.device_node) { - tasklet_kill(&chan->vc.task); - list_del(&chan->vc.chan.device_node); - } - - /* Turn debugfs off */ - dw_edma_v0_core_debugfs_off(dw); - return 0; } EXPORT_SYMBOL_GPL(dw_edma_remove); diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h index 85df2d511907b..0ab2b6dba8804 100644 --- a/drivers/dma/dw-edma/dw-edma-core.h +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -96,12 +96,11 @@ struct dw_edma_irq { }; struct dw_edma { - char name[20]; + char name[32]; - struct dma_device wr_edma; - u16 wr_ch_cnt; + struct dma_device dma; - struct dma_device rd_edma; + u16 wr_ch_cnt; u16 rd_ch_cnt; struct dw_edma_irq *irq; @@ -112,9 +111,6 @@ struct dw_edma { raw_spinlock_t lock; /* Only for legacy */ struct dw_edma_chip *chip; -#ifdef CONFIG_DEBUG_FS - struct dentry *debugfs; -#endif /* CONFIG_DEBUG_FS */ }; struct dw_edma_sg { diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index d6b5e24638847..2b40f2b44f5e1 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -95,8 +95,23 @@ static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr) return pci_irq_vector(to_pci_dev(dev), nr); } +static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_bus_region region; + struct resource res = { + .flags = IORESOURCE_MEM, + .start = cpu_addr, + .end = cpu_addr, + }; + + pcibios_resource_to_bus(pdev->bus, ®ion, &res); + return region.start; +} + static const struct dw_edma_core_ops dw_edma_pcie_core_ops = { .irq_vector = dw_edma_pcie_irq_vector, + .pci_address = dw_edma_pcie_address, }; static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev, @@ -207,7 +222,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, /* Data structure initialization */ chip->dev = dev; - chip->id = pdev->devfn; chip->mf = vsec_data.mf; chip->nr_irqs = nr_irqs; @@ -226,21 +240,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, struct dw_edma_block *ll_block = &vsec_data.ll_wr[i]; struct dw_edma_block *dt_block = &vsec_data.dt_wr[i]; - ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar]; - if (!ll_region->vaddr) + ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; + if (!ll_region->vaddr.io) return -ENOMEM; - ll_region->vaddr += ll_block->off; - ll_region->paddr = pdev->resource[ll_block->bar].start; + ll_region->vaddr.io += ll_block->off; + ll_region->paddr = pci_bus_address(pdev, ll_block->bar); ll_region->paddr += ll_block->off; ll_region->sz = ll_block->sz; - dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar]; - if (!dt_region->vaddr) + dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar]; + if (!dt_region->vaddr.io) return -ENOMEM; - dt_region->vaddr += dt_block->off; - dt_region->paddr = pdev->resource[dt_block->bar].start; + dt_region->vaddr.io += dt_block->off; + dt_region->paddr = pci_bus_address(pdev, dt_block->bar); dt_region->paddr += dt_block->off; dt_region->sz = dt_block->sz; } @@ -251,21 +265,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, struct 
dw_edma_block *ll_block = &vsec_data.ll_rd[i]; struct dw_edma_block *dt_block = &vsec_data.dt_rd[i]; - ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar]; - if (!ll_region->vaddr) + ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; + if (!ll_region->vaddr.io) return -ENOMEM; - ll_region->vaddr += ll_block->off; - ll_region->paddr = pdev->resource[ll_block->bar].start; + ll_region->vaddr.io += ll_block->off; + ll_region->paddr = pci_bus_address(pdev, ll_block->bar); ll_region->paddr += ll_block->off; ll_region->sz = ll_block->sz; - dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar]; - if (!dt_region->vaddr) + dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar]; + if (!dt_region->vaddr.io) return -ENOMEM; - dt_region->vaddr += dt_block->off; - dt_region->paddr = pdev->resource[dt_block->bar].start; + dt_region->vaddr.io += dt_block->off; + dt_region->paddr = pci_bus_address(pdev, dt_block->bar); dt_region->paddr += dt_block->off; dt_region->sz = dt_block->sz; } @@ -289,24 +303,24 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", i, vsec_data.ll_wr[i].bar, vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz, - chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr); + chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr); pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", i, vsec_data.dt_wr[i].bar, vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz, - chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr); + chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr); } for (i = 0; i < chip->ll_rd_cnt; i++) { pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", i, vsec_data.ll_rd[i].bar, vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz, - chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr); + chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr); pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", i, vsec_data.dt_rd[i].bar, vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz, - chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr); + chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr); } pci_dbg(pdev, "Nr. 
IRQs:\t%u\n", chip->nr_irqs); diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c index a3816ba632851..72e79a0c0a4eb 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.c +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -8,6 +8,8 @@ #include +#include + #include "dw-edma-core.h" #include "dw-edma-v0-core.h" #include "dw-edma-v0-regs.h" @@ -53,8 +55,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) SET_32(dw, rd_##name, value); \ } while (0) -#ifdef CONFIG_64BIT - #define SET_64(dw, name, value) \ writeq(value, &(__dw_regs(dw)->name)) @@ -80,8 +80,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) SET_64(dw, rd_##name, value); \ } while (0) -#endif /* CONFIG_64BIT */ - #define SET_COMPAT(dw, name, value) \ writel(value, &(__dw_regs(dw)->type.unroll.name)) @@ -161,11 +159,6 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, #define GET_CH_32(dw, dir, ch, name) \ readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) -#define SET_LL_32(ll, value) \ - writel(value, ll) - -#ifdef CONFIG_64BIT - static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, u64 value, void __iomem *addr) { @@ -222,11 +215,6 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, #define GET_CH_64(dw, dir, ch, name) \ readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) -#define SET_LL_64(ll, value) \ - writeq(value, ll) - -#endif /* CONFIG_64BIT */ - /* eDMA management callbacks */ void dw_edma_v0_core_off(struct dw_edma *dw) { @@ -298,17 +286,53 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) GET_RW_32(dw, dir, int_status)); } +static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i, + u32 control, u32 size, u64 sar, u64 dar) +{ + ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli); + + if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs; + + lli->control = control; + lli->transfer_size = size; + lli->sar.reg = sar; + lli->dar.reg = dar; + } else { + struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs; + + writel(control, &lli->control); + writel(size, &lli->transfer_size); + writeq(sar, &lli->sar.reg); + writeq(dar, &lli->dar.reg); + } +} + +static void dw_edma_v0_write_ll_link(struct dw_edma_chunk *chunk, + int i, u32 control, u64 pointer) +{ + ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli); + + if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { + struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs; + + llp->control = control; + llp->llp.reg = pointer; + } else { + struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs; + + writel(control, &llp->control); + writeq(pointer, &llp->llp.reg); + } +} + static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) { struct dw_edma_burst *child; struct dw_edma_chan *chan = chunk->chan; - struct dw_edma_v0_lli __iomem *lli; - struct dw_edma_v0_llp __iomem *llp; u32 control = 0, i = 0; int j; - lli = chunk->ll_region.vaddr; - if (chunk->cb) control = DW_EDMA_V0_CB; @@ -320,41 +344,16 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) control |= DW_EDMA_V0_RIE; } - /* Channel control */ - SET_LL_32(&lli[i].control, control); - /* Transfer size */ - SET_LL_32(&lli[i].transfer_size, child->sz); - /* SAR */ - #ifdef CONFIG_64BIT - SET_LL_64(&lli[i].sar.reg, 
child->sar); - #else /* CONFIG_64BIT */ - SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar)); - SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar)); - #endif /* CONFIG_64BIT */ - /* DAR */ - #ifdef CONFIG_64BIT - SET_LL_64(&lli[i].dar.reg, child->dar); - #else /* CONFIG_64BIT */ - SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar)); - SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar)); - #endif /* CONFIG_64BIT */ - i++; + + dw_edma_v0_write_ll_data(chunk, i++, control, child->sz, + child->sar, child->dar); } - llp = (void __iomem *)&lli[i]; control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; if (!chunk->cb) control |= DW_EDMA_V0_CB; - /* Channel control */ - SET_LL_32(&llp->control, control); - /* Linked list */ - #ifdef CONFIG_64BIT - SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr); - #else /* CONFIG_64BIT */ - SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr)); - SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr)); - #endif /* CONFIG_64BIT */ + dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr); } void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) @@ -504,8 +503,3 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma *dw) { dw_edma_v0_debugfs_on(dw); } - -void dw_edma_v0_core_debugfs_off(struct dw_edma *dw) -{ - dw_edma_v0_debugfs_off(dw); -} diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h index 75aec6d31b210..ab96a1f480809 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.h +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h @@ -23,6 +23,5 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first); int dw_edma_v0_core_device_config(struct dw_edma_chan *chan); /* eDMA debug fs callbacks */ void dw_edma_v0_core_debugfs_on(struct dw_edma *dw); -void dw_edma_v0_core_debugfs_off(struct dw_edma *dw); #endif /* _DW_EDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c index 5226c9014703c..d12c607433bf9 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -13,76 +13,79 @@ #include "dw-edma-v0-regs.h" #include "dw-edma-core.h" -#define REGS_ADDR(name) \ - ((void __force *)®s->name) -#define REGISTER(name) \ - { #name, REGS_ADDR(name) } - -#define WR_REGISTER(name) \ - { #name, REGS_ADDR(wr_##name) } -#define RD_REGISTER(name) \ - { #name, REGS_ADDR(rd_##name) } - -#define WR_REGISTER_LEGACY(name) \ - { #name, REGS_ADDR(type.legacy.wr_##name) } +#define REGS_ADDR(dw, name) \ + ({ \ + struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \ + \ + (void __iomem *)&__regs->name; \ + }) + +#define REGS_CH_ADDR(dw, name, _dir, _ch) \ + ({ \ + struct dw_edma_v0_ch_regs __iomem *__ch_regs; \ + \ + if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY) \ + __ch_regs = REGS_ADDR(dw, type.legacy.ch); \ + else if (_dir == EDMA_DIR_READ) \ + __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd); \ + else \ + __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr); \ + \ + (void __iomem *)&__ch_regs->name; \ + }) + +#define REGISTER(dw, name) \ + { dw, #name, REGS_ADDR(dw, name) } + +#define CTX_REGISTER(dw, name, dir, ch) \ + { dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch } + +#define WR_REGISTER(dw, name) \ + { dw, #name, REGS_ADDR(dw, wr_##name) } +#define RD_REGISTER(dw, name) \ + { dw, #name, REGS_ADDR(dw, rd_##name) } + +#define WR_REGISTER_LEGACY(dw, name) \ + { dw, #name, REGS_ADDR(dw, type.legacy.wr_##name) } #define RD_REGISTER_LEGACY(name) \ - { #name, REGS_ADDR(type.legacy.rd_##name) } + { dw, 
#name, REGS_ADDR(dw, type.legacy.rd_##name) } -#define WR_REGISTER_UNROLL(name) \ - { #name, REGS_ADDR(type.unroll.wr_##name) } -#define RD_REGISTER_UNROLL(name) \ - { #name, REGS_ADDR(type.unroll.rd_##name) } +#define WR_REGISTER_UNROLL(dw, name) \ + { dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) } +#define RD_REGISTER_UNROLL(dw, name) \ + { dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) } #define WRITE_STR "write" #define READ_STR "read" #define CHANNEL_STR "channel" #define REGISTERS_STR "registers" -static struct dw_edma *dw; -static struct dw_edma_v0_regs __iomem *regs; - -static struct { - void __iomem *start; - void __iomem *end; -} lim[2][EDMA_V0_MAX_NR_CH]; - -struct debugfs_entries { +struct dw_edma_debugfs_entry { + struct dw_edma *dw; const char *name; - dma_addr_t *reg; + void __iomem *reg; + enum dw_edma_dir dir; + u16 ch; }; static int dw_edma_debugfs_u32_get(void *data, u64 *val) { - void __iomem *reg = (void __force __iomem *)data; + struct dw_edma_debugfs_entry *entry = data; + struct dw_edma *dw = entry->dw; + void __iomem *reg = entry->reg; + if (dw->chip->mf == EDMA_MF_EDMA_LEGACY && - reg >= (void __iomem *)®s->type.legacy.ch) { - void __iomem *ptr = ®s->type.legacy.ch; - u32 viewport_sel = 0; + reg >= REGS_ADDR(dw, type.legacy.ch)) { unsigned long flags; - u16 ch; - - for (ch = 0; ch < dw->wr_ch_cnt; ch++) - if (lim[0][ch].start >= reg && reg < lim[0][ch].end) { - ptr += (reg - lim[0][ch].start); - goto legacy_sel_wr; - } - - for (ch = 0; ch < dw->rd_ch_cnt; ch++) - if (lim[1][ch].start >= reg && reg < lim[1][ch].end) { - ptr += (reg - lim[1][ch].start); - goto legacy_sel_rd; - } - - return 0; -legacy_sel_rd: - viewport_sel = BIT(31); -legacy_sel_wr: - viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + u32 viewport_sel; + + viewport_sel = entry->dir == EDMA_DIR_READ ? 
BIT(31) : 0; + viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch); raw_spin_lock_irqsave(&dw->lock, flags); - writel(viewport_sel, ®s->type.legacy.viewport_sel); - *val = readl(ptr); + writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel)); + *val = readl(reg); raw_spin_unlock_irqrestore(&dw->lock, flags); } else { @@ -93,222 +96,195 @@ legacy_sel_wr: } DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); -static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[], - int nr_entries, struct dentry *dir) +static void dw_edma_debugfs_create_x32(struct dw_edma *dw, + const struct dw_edma_debugfs_entry ini[], + int nr_entries, struct dentry *dent) { + struct dw_edma_debugfs_entry *entries; int i; + entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries), + GFP_KERNEL); + if (!entries) + return; + for (i = 0; i < nr_entries; i++) { - if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir, - entries[i].reg, &fops_x32)) - break; + entries[i] = ini[i]; + + debugfs_create_file_unsafe(entries[i].name, 0444, dent, + &entries[i], &fops_x32); } } -static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs, - struct dentry *dir) +static void dw_edma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir, + u16 ch, struct dentry *dent) { - int nr_entries; - const struct debugfs_entries debugfs_regs[] = { - REGISTER(ch_control1), - REGISTER(ch_control2), - REGISTER(transfer_size), - REGISTER(sar.lsb), - REGISTER(sar.msb), - REGISTER(dar.lsb), - REGISTER(dar.msb), - REGISTER(llp.lsb), - REGISTER(llp.msb), + struct dw_edma_debugfs_entry debugfs_regs[] = { + CTX_REGISTER(dw, ch_control1, dir, ch), + CTX_REGISTER(dw, ch_control2, dir, ch), + CTX_REGISTER(dw, transfer_size, dir, ch), + CTX_REGISTER(dw, sar.lsb, dir, ch), + CTX_REGISTER(dw, sar.msb, dir, ch), + CTX_REGISTER(dw, dar.lsb, dir, ch), + CTX_REGISTER(dw, dar.msb, dir, ch), + CTX_REGISTER(dw, llp.lsb, dir, ch), + CTX_REGISTER(dw, llp.msb, dir, ch), }; + int nr_entries; nr_entries = ARRAY_SIZE(debugfs_regs); - dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir); + dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent); } -static void dw_edma_debugfs_regs_wr(struct dentry *dir) +static void dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent) { - const struct debugfs_entries debugfs_regs[] = { + const struct dw_edma_debugfs_entry debugfs_regs[] = { /* eDMA global registers */ - WR_REGISTER(engine_en), - WR_REGISTER(doorbell), - WR_REGISTER(ch_arb_weight.lsb), - WR_REGISTER(ch_arb_weight.msb), + WR_REGISTER(dw, engine_en), + WR_REGISTER(dw, doorbell), + WR_REGISTER(dw, ch_arb_weight.lsb), + WR_REGISTER(dw, ch_arb_weight.msb), /* eDMA interrupts registers */ - WR_REGISTER(int_status), - WR_REGISTER(int_mask), - WR_REGISTER(int_clear), - WR_REGISTER(err_status), - WR_REGISTER(done_imwr.lsb), - WR_REGISTER(done_imwr.msb), - WR_REGISTER(abort_imwr.lsb), - WR_REGISTER(abort_imwr.msb), - WR_REGISTER(ch01_imwr_data), - WR_REGISTER(ch23_imwr_data), - WR_REGISTER(ch45_imwr_data), - WR_REGISTER(ch67_imwr_data), - WR_REGISTER(linked_list_err_en), + WR_REGISTER(dw, int_status), + WR_REGISTER(dw, int_mask), + WR_REGISTER(dw, int_clear), + WR_REGISTER(dw, err_status), + WR_REGISTER(dw, done_imwr.lsb), + WR_REGISTER(dw, done_imwr.msb), + WR_REGISTER(dw, abort_imwr.lsb), + WR_REGISTER(dw, abort_imwr.msb), + WR_REGISTER(dw, ch01_imwr_data), + WR_REGISTER(dw, ch23_imwr_data), + WR_REGISTER(dw, ch45_imwr_data), + WR_REGISTER(dw, ch67_imwr_data), + 
WR_REGISTER(dw, linked_list_err_en), }; - const struct debugfs_entries debugfs_unroll_regs[] = { + const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = { /* eDMA channel context grouping */ - WR_REGISTER_UNROLL(engine_chgroup), - WR_REGISTER_UNROLL(engine_hshake_cnt.lsb), - WR_REGISTER_UNROLL(engine_hshake_cnt.msb), - WR_REGISTER_UNROLL(ch0_pwr_en), - WR_REGISTER_UNROLL(ch1_pwr_en), - WR_REGISTER_UNROLL(ch2_pwr_en), - WR_REGISTER_UNROLL(ch3_pwr_en), - WR_REGISTER_UNROLL(ch4_pwr_en), - WR_REGISTER_UNROLL(ch5_pwr_en), - WR_REGISTER_UNROLL(ch6_pwr_en), - WR_REGISTER_UNROLL(ch7_pwr_en), + WR_REGISTER_UNROLL(dw, engine_chgroup), + WR_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb), + WR_REGISTER_UNROLL(dw, engine_hshake_cnt.msb), + WR_REGISTER_UNROLL(dw, ch0_pwr_en), + WR_REGISTER_UNROLL(dw, ch1_pwr_en), + WR_REGISTER_UNROLL(dw, ch2_pwr_en), + WR_REGISTER_UNROLL(dw, ch3_pwr_en), + WR_REGISTER_UNROLL(dw, ch4_pwr_en), + WR_REGISTER_UNROLL(dw, ch5_pwr_en), + WR_REGISTER_UNROLL(dw, ch6_pwr_en), + WR_REGISTER_UNROLL(dw, ch7_pwr_en), }; - struct dentry *regs_dir, *ch_dir; + struct dentry *regs_dent, *ch_dent; int nr_entries, i; char name[16]; - regs_dir = debugfs_create_dir(WRITE_STR, dir); - if (!regs_dir) - return; + regs_dent = debugfs_create_dir(WRITE_STR, dent); nr_entries = ARRAY_SIZE(debugfs_regs); - dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { nr_entries = ARRAY_SIZE(debugfs_unroll_regs); - dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, - regs_dir); + dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries, + regs_dent); } for (i = 0; i < dw->wr_ch_cnt; i++) { snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); - ch_dir = debugfs_create_dir(name, regs_dir); - if (!ch_dir) - return; - - dw_edma_debugfs_regs_ch(®s->type.unroll.ch[i].wr, ch_dir); + ch_dent = debugfs_create_dir(name, regs_dent); - lim[0][i].start = ®s->type.unroll.ch[i].wr; - lim[0][i].end = ®s->type.unroll.ch[i].padding_1[0]; + dw_edma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent); } } -static void dw_edma_debugfs_regs_rd(struct dentry *dir) +static void dw_edma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent) { - const struct debugfs_entries debugfs_regs[] = { + const struct dw_edma_debugfs_entry debugfs_regs[] = { /* eDMA global registers */ - RD_REGISTER(engine_en), - RD_REGISTER(doorbell), - RD_REGISTER(ch_arb_weight.lsb), - RD_REGISTER(ch_arb_weight.msb), + RD_REGISTER(dw, engine_en), + RD_REGISTER(dw, doorbell), + RD_REGISTER(dw, ch_arb_weight.lsb), + RD_REGISTER(dw, ch_arb_weight.msb), /* eDMA interrupts registers */ - RD_REGISTER(int_status), - RD_REGISTER(int_mask), - RD_REGISTER(int_clear), - RD_REGISTER(err_status.lsb), - RD_REGISTER(err_status.msb), - RD_REGISTER(linked_list_err_en), - RD_REGISTER(done_imwr.lsb), - RD_REGISTER(done_imwr.msb), - RD_REGISTER(abort_imwr.lsb), - RD_REGISTER(abort_imwr.msb), - RD_REGISTER(ch01_imwr_data), - RD_REGISTER(ch23_imwr_data), - RD_REGISTER(ch45_imwr_data), - RD_REGISTER(ch67_imwr_data), + RD_REGISTER(dw, int_status), + RD_REGISTER(dw, int_mask), + RD_REGISTER(dw, int_clear), + RD_REGISTER(dw, err_status.lsb), + RD_REGISTER(dw, err_status.msb), + RD_REGISTER(dw, linked_list_err_en), + RD_REGISTER(dw, done_imwr.lsb), + RD_REGISTER(dw, done_imwr.msb), + RD_REGISTER(dw, abort_imwr.lsb), + RD_REGISTER(dw, abort_imwr.msb), + RD_REGISTER(dw, ch01_imwr_data), + RD_REGISTER(dw, ch23_imwr_data), + RD_REGISTER(dw, 
ch45_imwr_data), + RD_REGISTER(dw, ch67_imwr_data), }; - const struct debugfs_entries debugfs_unroll_regs[] = { + const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = { /* eDMA channel context grouping */ - RD_REGISTER_UNROLL(engine_chgroup), - RD_REGISTER_UNROLL(engine_hshake_cnt.lsb), - RD_REGISTER_UNROLL(engine_hshake_cnt.msb), - RD_REGISTER_UNROLL(ch0_pwr_en), - RD_REGISTER_UNROLL(ch1_pwr_en), - RD_REGISTER_UNROLL(ch2_pwr_en), - RD_REGISTER_UNROLL(ch3_pwr_en), - RD_REGISTER_UNROLL(ch4_pwr_en), - RD_REGISTER_UNROLL(ch5_pwr_en), - RD_REGISTER_UNROLL(ch6_pwr_en), - RD_REGISTER_UNROLL(ch7_pwr_en), + RD_REGISTER_UNROLL(dw, engine_chgroup), + RD_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb), + RD_REGISTER_UNROLL(dw, engine_hshake_cnt.msb), + RD_REGISTER_UNROLL(dw, ch0_pwr_en), + RD_REGISTER_UNROLL(dw, ch1_pwr_en), + RD_REGISTER_UNROLL(dw, ch2_pwr_en), + RD_REGISTER_UNROLL(dw, ch3_pwr_en), + RD_REGISTER_UNROLL(dw, ch4_pwr_en), + RD_REGISTER_UNROLL(dw, ch5_pwr_en), + RD_REGISTER_UNROLL(dw, ch6_pwr_en), + RD_REGISTER_UNROLL(dw, ch7_pwr_en), }; - struct dentry *regs_dir, *ch_dir; + struct dentry *regs_dent, *ch_dent; int nr_entries, i; char name[16]; - regs_dir = debugfs_create_dir(READ_STR, dir); - if (!regs_dir) - return; + regs_dent = debugfs_create_dir(READ_STR, dent); nr_entries = ARRAY_SIZE(debugfs_regs); - dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { nr_entries = ARRAY_SIZE(debugfs_unroll_regs); - dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, - regs_dir); + dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries, + regs_dent); } for (i = 0; i < dw->rd_ch_cnt; i++) { snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); - ch_dir = debugfs_create_dir(name, regs_dir); - if (!ch_dir) - return; - - dw_edma_debugfs_regs_ch(®s->type.unroll.ch[i].rd, ch_dir); + ch_dent = debugfs_create_dir(name, regs_dent); - lim[1][i].start = ®s->type.unroll.ch[i].rd; - lim[1][i].end = ®s->type.unroll.ch[i].padding_2[0]; + dw_edma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent); } } -static void dw_edma_debugfs_regs(void) +static void dw_edma_debugfs_regs(struct dw_edma *dw) { - const struct debugfs_entries debugfs_regs[] = { - REGISTER(ctrl_data_arb_prior), - REGISTER(ctrl), + const struct dw_edma_debugfs_entry debugfs_regs[] = { + REGISTER(dw, ctrl_data_arb_prior), + REGISTER(dw, ctrl), }; - struct dentry *regs_dir; + struct dentry *regs_dent; int nr_entries; - regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs); - if (!regs_dir) - return; + regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root); nr_entries = ARRAY_SIZE(debugfs_regs); - dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); - dw_edma_debugfs_regs_wr(regs_dir); - dw_edma_debugfs_regs_rd(regs_dir); + dw_edma_debugfs_regs_wr(dw, regs_dent); + dw_edma_debugfs_regs_rd(dw, regs_dent); } -void dw_edma_v0_debugfs_on(struct dw_edma *_dw) +void dw_edma_v0_debugfs_on(struct dw_edma *dw) { - dw = _dw; - if (!dw) - return; - - regs = dw->chip->reg_base; - if (!regs) - return; - - dw->debugfs = debugfs_create_dir(dw->name, NULL); - if (!dw->debugfs) + if (!debugfs_initialized()) return; - debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf); - debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt); - debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt); - - 
dw_edma_debugfs_regs(); -} - -void dw_edma_v0_debugfs_off(struct dw_edma *_dw) -{ - dw = _dw; - if (!dw) - return; + debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf); + debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt); + debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt); - debugfs_remove_recursive(dw->debugfs); - dw->debugfs = NULL; + dw_edma_debugfs_regs(dw); } diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h index 3391b86edf5ab..fb3342d97d6de 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h @@ -13,15 +13,10 @@ #ifdef CONFIG_DEBUG_FS void dw_edma_v0_debugfs_on(struct dw_edma *dw); -void dw_edma_v0_debugfs_off(struct dw_edma *dw); #else static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw) { } - -static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw) -{ -} #endif /* CONFIG_DEBUG_FS */ #endif /* _DW_EDMA_V0_DEBUG_FS_H */ diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 456602d373b7b..6bc34ac13a752 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -486,7 +486,6 @@ config EDAC_ARMADA_XP config EDAC_SYNOPSYS tristate "Synopsys DDR Memory Controller" - depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || ARCH_MXC help Support for error detection and correction on the Synopsys DDR memory controller. @@ -541,4 +540,11 @@ config EDAC_DMC520 Support for error detection and correction on the SoCs with ARM DMC-520 DRAM controller. +config EDAC_ZYNQ + tristate "Xilinx Zynq A05 DDR Memory Controller" + depends on ARCH_ZYNQ || COMPILE_TEST + help + Support for error detection and correction on the Xilinx Zynq A05 + DDR memory controller. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 2d1641a27a28f..83e063f53b22f 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -84,3 +84,4 @@ obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o +obj-$(CONFIG_EDAC_ZYNQ) += zynq_edac.o diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 6faeb2ab39601..e353e98e01e2b 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -29,6 +29,9 @@ #include #include #include +#include +#include + #include #include "edac_mc.h" #include "edac_module.h" @@ -46,6 +49,7 @@ EXPORT_SYMBOL_GPL(edac_op_state); /* lock to memory controller's control array */ static DEFINE_MUTEX(mem_ctls_mutex); static LIST_HEAD(mc_devices); +static DEFINE_IDR(mc_idr); /* * Used to lock EDAC MC to just one module, avoiding two drivers e. g. 
@@ -147,10 +151,12 @@ const char * const edac_mem_types[] = { [MEM_RDR] = "Registered-SDR", [MEM_DDR] = "Unbuffered-DDR", [MEM_RDDR] = "Registered-DDR", + [MEM_LPDDR] = "Low-Power-(m)DDR-RAM", [MEM_RMBS] = "RMBS", [MEM_DDR2] = "Unbuffered-DDR2", [MEM_FB_DDR2] = "FullyBuffered-DDR2", [MEM_RDDR2] = "Registered-DDR2", + [MEM_LPDDR2] = "Low-Power-DDR2-RAM", [MEM_XDR] = "XDR", [MEM_DDR3] = "Unbuffered-DDR3", [MEM_RDDR3] = "Registered-DDR3", @@ -256,7 +262,6 @@ static int edac_mc_alloc_dimms(struct mem_ctl_info *mci) unsigned int pos[EDAC_MAX_LAYERS]; unsigned int row, chn, idx; int layer; - void *p; /* * Allocate and fill the dimm structs @@ -271,7 +276,6 @@ static int edac_mc_alloc_dimms(struct mem_ctl_info *mci) for (idx = 0; idx < mci->tot_dimms; idx++) { struct dimm_info *dimm; struct rank_info *chan; - int n, len; chan = mci->csrows[row]->channels[chn]; @@ -282,22 +286,9 @@ static int edac_mc_alloc_dimms(struct mem_ctl_info *mci) dimm->mci = mci; dimm->idx = idx; - /* - * Copy DIMM location and initialize it. - */ - len = sizeof(dimm->label); - p = dimm->label; - n = scnprintf(p, len, "mc#%u", mci->mc_idx); - p += n; - len -= n; - for (layer = 0; layer < mci->n_layers; layer++) { - n = scnprintf(p, len, "%s#%u", - edac_layer_name[mci->layers[layer].type], - pos[layer]); - p += n; - len -= n; + /* Copy DIMM location */ + for (layer = 0; layer < mci->n_layers; layer++) dimm->location[layer] = pos[layer]; - } /* Link it to the csrows old API data */ chan->dimm = dimm; @@ -508,7 +499,91 @@ void edac_mc_reset_delay_period(unsigned long value) mutex_unlock(&mem_ctls_mutex); } +/** + * edac_mc_alloc_id() - Allocate unique Memory Controller identifier + * + * @mci: pointer to the mci structure to allocate ID for + * + * Use edac_mc_free_id() to coherently free the MC identifier. + * + * .. note:: + * locking model: must be called with the mem_ctls_mutex lock held + * + * Returns: + * 0 on Success, or an error code on failure + */ +static int edac_mc_alloc_id(struct mem_ctl_info *mci) +{ + struct device_node *np = dev_of_node(mci->pdev); + int ret, min, max; + + if (mci->mc_idx == EDAC_AUTO_MC_NUM) { + ret = of_alias_get_id(np, "mc"); + if (ret >= 0) { + min = ret; + max = ret + 1; + } else { + min = of_alias_get_highest_id("mc"); + if (min >= 0) + min++; + else + min = 0; + + max = 0; + } + } else { + min = mci->mc_idx; + max = mci->mc_idx + 1; + } + + ret = idr_alloc(&mc_idr, mci, min, max, GFP_KERNEL); + if (ret < 0) + return ret == -ENOSPC ? -EBUSY : ret; + + mci->mc_idx = ret; + + return 0; +} + +/** + * edac_mc_free_id() - Free Memory Controller identifier + * + * @mci: pointer to the mci structure to free ID from + * + * .. note:: + * locking model: must be called with the mem_ctls_mutex lock held + */ +static void edac_mc_free_id(struct mem_ctl_info *mci) +{ + idr_remove(&mc_idr, mci->mc_idx); +} + +/** + * edac_mc_init_labels() - Initialize DIMM labels + * + * @mci: pointer to the mci structure which DIMM labels need to be initialized + * + * .. 
note:: + * locking model: must be called with the mem_ctls_mutex lock held + */ +static void edac_mc_init_labels(struct mem_ctl_info *mci) +{ + int n, len, layer; + unsigned int idx; + char *p; + for (idx = 0; idx < mci->tot_dimms; idx++) { + len = sizeof(mci->dimms[idx]->label); + p = mci->dimms[idx]->label; + + n = scnprintf(p, len, "mc#%u", mci->mc_idx); + for (layer = 0; layer < mci->n_layers; layer++) { + n += scnprintf(p + n, len - n, "%s#%u", + edac_layer_name[mci->layers[layer].type], + mci->dimms[idx]->location[layer]); + } + } +} /* Return 0 on success, 1 on failure. * Before calling this function, caller must @@ -600,7 +675,8 @@ EXPORT_SYMBOL_GPL(edac_get_owner); int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, const struct attribute_group **groups) { - int ret = -EINVAL; + int ret; + edac_dbg(0, "\n"); #ifdef CONFIG_EDAC_DEBUG @@ -637,18 +713,30 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, goto fail0; } - if (add_mc_to_global_list(mci)) + ret = edac_mc_alloc_id(mci); + if (ret) { + edac_printk(KERN_ERR, EDAC_MC, "failed to allocate MC idx %u\n", + mci->mc_idx); goto fail0; + } + + edac_mc_init_labels(mci); + + if (add_mc_to_global_list(mci)) { + ret = -EINVAL; + goto fail1; + } /* set load time so that error rate can be tracked */ mci->start_time = jiffies; mci->bus = edac_get_sysfs_subsys(); - if (edac_create_sysfs_mci_device(mci, groups)) { + ret = edac_create_sysfs_mci_device(mci, groups); + if (ret) { edac_mc_printk(mci, KERN_WARNING, "failed to create sysfs device\n"); - goto fail1; + goto fail2; } if (mci->edac_check) { @@ -672,9 +760,12 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, mutex_unlock(&mem_ctls_mutex); return 0; -fail1: +fail2: del_mc_from_global_list(mci); +fail1: + edac_mc_free_id(mci); + fail0: mutex_unlock(&mem_ctls_mutex); return ret; @@ -702,6 +793,8 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) if (del_mc_from_global_list(mci)) edac_mc_owner = NULL; + edac_mc_free_id(mci); + mutex_unlock(&mem_ctls_mutex); if (mci->edac_check) diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h index 881b00eadf7a5..4b6676235b1b4 100644 --- a/drivers/edac/edac_mc.h +++ b/drivers/edac/edac_mc.h @@ -23,6 +23,7 @@ #define _EDAC_MC_H_ #include +#include #include #include #include @@ -37,6 +38,9 @@ #include #include +/* Generate MC identifier automatically */ +#define EDAC_AUTO_MC_NUM UINT_MAX + #if PAGE_SHIFT < 20 #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT)) #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index f7d37c2828199..24cd8f23242a6 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -1,96 +1,48 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Synopsys DDR ECC Driver + * Synopsys DW uMCTL2 DDR ECC Driver * This driver is based on ppc4xx_edac.c drivers * * Copyright (C) 2012 - 2014 Xilinx, Inc. 
*/ +#include +#include +#include #include +#include +#include #include +#include #include +#include +#include +#include +#include #include #include #include #include "edac_module.h" -/* Number of cs_rows needed per memory controller */ -#define SYNPS_EDAC_NR_CSROWS 1 - /* Number of channels per memory controller */ -#define SYNPS_EDAC_NR_CHANS 1 - -/* Granularity of reported error in bytes */ -#define SYNPS_EDAC_ERR_GRAIN 1 - -#define SYNPS_EDAC_MSG_SIZE 256 - -#define SYNPS_EDAC_MOD_STRING "synps_edac" -#define SYNPS_EDAC_MOD_VER "1" - -/* Synopsys DDR memory controller registers that are relevant to ECC */ -#define CTRL_OFST 0x0 -#define T_ZQ_OFST 0xA4 - -/* ECC control register */ -#define ECC_CTRL_OFST 0xC4 -/* ECC log register */ -#define CE_LOG_OFST 0xC8 -/* ECC address register */ -#define CE_ADDR_OFST 0xCC -/* ECC data[31:0] register */ -#define CE_DATA_31_0_OFST 0xD0 - -/* Uncorrectable error info registers */ -#define UE_LOG_OFST 0xDC -#define UE_ADDR_OFST 0xE0 -#define UE_DATA_31_0_OFST 0xE4 - -#define STAT_OFST 0xF0 -#define SCRUB_OFST 0xF4 - -/* Control register bit field definitions */ -#define CTRL_BW_MASK 0xC -#define CTRL_BW_SHIFT 2 - -#define DDRCTL_WDTH_16 1 -#define DDRCTL_WDTH_32 0 - -/* ZQ register bit field definitions */ -#define T_ZQ_DDRMODE_MASK 0x2 - -/* ECC control register bit field definitions */ -#define ECC_CTRL_CLR_CE_ERR 0x2 -#define ECC_CTRL_CLR_UE_ERR 0x1 - -/* ECC correctable/uncorrectable error log register definitions */ -#define LOG_VALID 0x1 -#define CE_LOG_BITPOS_MASK 0xFE -#define CE_LOG_BITPOS_SHIFT 1 - -/* ECC correctable/uncorrectable error address register definitions */ -#define ADDR_COL_MASK 0xFFF -#define ADDR_ROW_MASK 0xFFFF000 -#define ADDR_ROW_SHIFT 12 -#define ADDR_BANK_MASK 0x70000000 -#define ADDR_BANK_SHIFT 28 - -/* ECC statistic register definitions */ -#define STAT_UECNT_MASK 0xFF -#define STAT_CECNT_MASK 0xFF00 -#define STAT_CECNT_SHIFT 8 - -/* ECC scrub register definitions */ -#define SCRUB_MODE_MASK 0x7 -#define SCRUB_MODE_SECDED 0x4 - -/* DDR ECC Quirks */ -#define DDR_ECC_INTR_SUPPORT BIT(0) -#define DDR_ECC_DATA_POISON_SUPPORT BIT(1) -#define DDR_ECC_INTR_SELF_CLEAR BIT(2) - -/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */ +#define SNPS_EDAC_NR_CHANS 1 + +#define SNPS_EDAC_MSG_SIZE 256 + +#define SNPS_EDAC_MOD_STRING "snps_edac" +#define SNPS_EDAC_MOD_VER "1" + +/* DDR capabilities */ +#define SNPS_CAP_ECC_SCRUB BIT(0) +#define SNPS_CAP_ECC_SCRUBBER BIT(1) +#define SNPS_CAP_ZYNQMP BIT(31) + +/* Synopsys uMCTL2 DDR controller registers that are relevant to ECC */ + +/* DDRC master0 Register */ +#define DDR_MSTR_OFST 0x0 + /* ECC Configuration Registers */ #define ECC_CFG0_OFST 0x70 #define ECC_CFG1_OFST 0x74 @@ -131,96 +83,128 @@ #define ECC_POISON0_OFST 0xB8 #define ECC_POISON1_OFST 0xBC -#define ECC_ADDRMAP0_OFFSET 0x200 +/* DDR CRC/Parity register */ +#define DDR_CRCPARCTL0_OFST 0xC0 +#define DDR_CRCPARCTL1_OFST 0xC4 +#define DDR_CRCPARCTL2_OFST 0xC8 +#define DDR_CRCPARSTAT_OFST 0xCC + +/* DDR Address map0 Registers */ +#define DDR_ADDRMAP0_OFST 0x200 + +/* DDR Software control register */ +#define DDR_SWCTL 0x320 + +/* ECC Poison pattern registers */ +#define ECC_POISONPAT0_OFST 0x37C +#define ECC_POISONPAT1_OFST 0x380 +#define ECC_POISONPAT2_OFST 0x384 + +/* DDR SAR0 registers */ +#define DDR_SARBASE0_OFST 0xF04 +#define DDR_SARSIZE0_OFST 0xF08 + +/* ECC Scrubber registers */ +#define ECC_SBRCTL_OFST 0xF24 +#define ECC_SBRSTAT_OFST 0xF28 +#define ECC_SBRWDATA0_OFST 0xF2C +#define 
ECC_SBRWDATA1_OFST 0xF30 + +/* DDR Master Register 0 definitions */ +#define DDR_MSTR_DEV_CFG_MASK GENMASK(31, 30) +#define DDR_MSTR_DEV_X4 0x0 +#define DDR_MSTR_DEV_X8 0x1 +#define DDR_MSTR_DEV_X16 0x2 +#define DDR_MSTR_DEV_X32 0x3 +#define DDR_MSTR_ACT_RANKS_MASK GENMASK(27, 24) +#define DDR_MSTR_FREQ_RATIO11 BIT(22) +#define DDR_MSTR_BURST_RDWR GENMASK(19, 16) +#define DDR_MSTR_BUSWIDTH_MASK GENMASK(13, 12) +#define DDR_MSTR_MEM_MASK GENMASK(5, 0) +#define DDR_MSTR_MEM_DDR2 0 +#define DDR_MSTR_MEM_DDR3 BIT(0) +#define DDR_MSTR_MEM_LPDDR BIT(1) +#define DDR_MSTR_MEM_LPDDR2 BIT(2) +#define DDR_MSTR_MEM_LPDDR3 BIT(3) +#define DDR_MSTR_MEM_DDR4 BIT(4) +#define DDR_MSTR_MEM_LPDDR4 BIT(5) + +/* ECC CFG0 register definitions */ +#define ECC_CFG0_DIS_SCRUB BIT(4) +#define ECC_CFG0_MODE_MASK GENMASK(2, 0) + +/* ECC CFG1 register definitions */ +#define ECC_CFG1_POISON_BIT BIT(1) +#define ECC_CFG1_POISON_EN BIT(0) + +/* ECC status register definitions */ +#define ECC_STAT_UE_MASK GENMASK(23, 16) +#define ECC_STAT_CE_MASK GENMASK(15, 8) +#define ECC_STAT_BITNUM_MASK GENMASK(6, 0) -/* Control register bitfield definitions */ -#define ECC_CTRL_BUSWIDTH_MASK 0x3000 -#define ECC_CTRL_BUSWIDTH_SHIFT 12 +/* ECC control/clear register definitions */ +#define ECC_CTRL_CLR_CE_ERR BIT(0) +#define ECC_CTRL_CLR_UE_ERR BIT(1) #define ECC_CTRL_CLR_CE_ERRCNT BIT(2) #define ECC_CTRL_CLR_UE_ERRCNT BIT(3) - -/* DDR Control Register width definitions */ -#define DDRCTL_EWDTH_16 2 -#define DDRCTL_EWDTH_32 1 -#define DDRCTL_EWDTH_64 0 - -/* ECC status register definitions */ -#define ECC_STAT_UECNT_MASK 0xF0000 -#define ECC_STAT_UECNT_SHIFT 16 -#define ECC_STAT_CECNT_MASK 0xF00 -#define ECC_STAT_CECNT_SHIFT 8 -#define ECC_STAT_BITNUM_MASK 0x7F +#define ECC_CTRL_EN_CE_IRQ BIT(8) +#define ECC_CTRL_EN_UE_IRQ BIT(9) /* ECC error count register definitions */ -#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000 -#define ECC_ERRCNT_UECNT_SHIFT 16 -#define ECC_ERRCNT_CECNT_MASK 0xFFFF +#define ECC_ERRCNT_UECNT_MASK GENMASK(31, 16) +#define ECC_ERRCNT_CECNT_MASK GENMASK(15, 0) /* DDR QOS Interrupt register definitions */ #define DDR_QOS_IRQ_STAT_OFST 0x20200 -#define DDR_QOSUE_MASK 0x4 -#define DDR_QOSCE_MASK 0x2 -#define ECC_CE_UE_INTR_MASK 0x6 +#define DDR_QOSUE_MASK BIT(2) +#define DDR_QOSCE_MASK BIT(1) #define DDR_QOS_IRQ_EN_OFST 0x20208 #define DDR_QOS_IRQ_DB_OFST 0x2020C -/* DDR QOS Interrupt register definitions */ -#define DDR_UE_MASK BIT(9) -#define DDR_CE_MASK BIT(8) - /* ECC Corrected Error Register Mask and Shifts*/ -#define ECC_CEADDR0_RW_MASK 0x3FFFF -#define ECC_CEADDR0_RNK_MASK BIT(24) -#define ECC_CEADDR1_BNKGRP_MASK 0x3000000 -#define ECC_CEADDR1_BNKNR_MASK 0x70000 -#define ECC_CEADDR1_BLKNR_MASK 0xFFF -#define ECC_CEADDR1_BNKGRP_SHIFT 24 -#define ECC_CEADDR1_BNKNR_SHIFT 16 +#define ECC_CEADDR0_RANK_MASK GENMASK(27, 24) +#define ECC_CEADDR0_ROW_MASK GENMASK(17, 0) +#define ECC_CEADDR1_BANKGRP_MASK GENMASK(25, 24) +#define ECC_CEADDR1_BANK_MASK GENMASK(23, 16) +#define ECC_CEADDR1_COL_MASK GENMASK(11, 0) + +/* DDR CRC/Parity register definitions */ +#define DDR_CRCPARCTL0_CLR_ALRT_ERRCNT BIT(2) +#define DDR_CRCPARCTL0_CLR_ALRT_ERR BIT(1) +#define DDR_CRCPARCTL0_EN_ALRT_IRQ BIT(0) +#define DDR_CRCPARSTAT_ALRT_ERR BIT(16) +#define DDR_CRCPARSTAT_ALRT_CNT_MASK GENMASK(15, 0) /* ECC Poison register shifts */ -#define ECC_POISON0_RANK_SHIFT 24 -#define ECC_POISON0_RANK_MASK BIT(24) -#define ECC_POISON0_COLUMN_SHIFT 0 -#define ECC_POISON0_COLUMN_MASK 0xFFF -#define ECC_POISON1_BG_SHIFT 28 -#define ECC_POISON1_BG_MASK 
0x30000000 -#define ECC_POISON1_BANKNR_SHIFT 24 -#define ECC_POISON1_BANKNR_MASK 0x7000000 -#define ECC_POISON1_ROW_SHIFT 0 -#define ECC_POISON1_ROW_MASK 0x3FFFF - -/* DDR Memory type defines */ -#define MEM_TYPE_DDR3 0x1 -#define MEM_TYPE_LPDDR3 0x8 -#define MEM_TYPE_DDR2 0x4 -#define MEM_TYPE_DDR4 0x10 -#define MEM_TYPE_LPDDR4 0x20 - -/* DDRC Software control register */ -#define DDRC_SWCTL 0x320 - -/* DDRC ECC CE & UE poison mask */ -#define ECC_CEPOISON_MASK 0x3 -#define ECC_UEPOISON_MASK 0x1 - -/* DDRC Device config masks */ -#define DDRC_MSTR_CFG_MASK 0xC0000000 -#define DDRC_MSTR_CFG_SHIFT 30 -#define DDRC_MSTR_CFG_X4_MASK 0x0 -#define DDRC_MSTR_CFG_X8_MASK 0x1 -#define DDRC_MSTR_CFG_X16_MASK 0x2 -#define DDRC_MSTR_CFG_X32_MASK 0x3 - -#define DDR_MAX_ROW_SHIFT 18 -#define DDR_MAX_COL_SHIFT 14 -#define DDR_MAX_BANK_SHIFT 3 -#define DDR_MAX_BANKGRP_SHIFT 2 - -#define ROW_MAX_VAL_MASK 0xF -#define COL_MAX_VAL_MASK 0xF -#define BANK_MAX_VAL_MASK 0x1F -#define BANKGRP_MAX_VAL_MASK 0x1F -#define RANK_MAX_VAL_MASK 0x1F +#define ECC_POISON0_RANK_MASK GENMASK(27, 24) +#define ECC_POISON0_COL_MASK GENMASK(11, 0) +#define ECC_POISON1_BANKGRP_MASK GENMASK(29, 28) +#define ECC_POISON1_BANK_MASK GENMASK(26, 24) +#define ECC_POISON1_ROW_MASK GENMASK(17, 0) + +/* DDRC address mapping parameters */ +#define DDR_ADDRMAP_NREGS 12 + +#define DDR_MAX_HIF_WIDTH 60 +#define DDR_MAX_ROW_WIDTH 18 +#define DDR_MAX_COL_WIDTH 14 +#define DDR_MAX_BANK_WIDTH 3 +#define DDR_MAX_BANKGRP_WIDTH 2 +#define DDR_MAX_RANK_WIDTH 2 + +#define DDR_ADDRMAP_B0_M15 GENMASK(3, 0) +#define DDR_ADDRMAP_B8_M15 GENMASK(11, 8) +#define DDR_ADDRMAP_B16_M15 GENMASK(19, 16) +#define DDR_ADDRMAP_B24_M15 GENMASK(27, 24) + +#define DDR_ADDRMAP_B0_M31 GENMASK(4, 0) +#define DDR_ADDRMAP_B8_M31 GENMASK(12, 8) +#define DDR_ADDRMAP_B16_M31 GENMASK(20, 16) +#define DDR_ADDRMAP_B24_M31 GENMASK(28, 24) + +#define DDR_ADDRMAP_UNUSED ((u8)-1) +#define DDR_ADDRMAP_MAX_15 DDR_ADDRMAP_B0_M15 +#define DDR_ADDRMAP_MAX_31 DDR_ADDRMAP_B0_M31 #define ROW_B0_BASE 6 #define ROW_B1_BASE 7 @@ -262,1055 +246,2163 @@ #define BANKGRP_B1_BASE 3 #define RANK_B0_BASE 6 +#define RANK_B1_BASE 7 + +/* DDRC system address parameters */ +#define DDR_MAX_NSAR 4 +#define DDR_MIN_SARSIZE SZ_256M + +/* ECC Scrubber registers definitions */ +#define ECC_SBRCTL_SCRUB_INTERVAL GENMASK(20, 8) +#define ECC_SBRCTL_INTERVAL_STEP 512 +#define ECC_SBRCTL_INTERVAL_MIN 0 +#define ECC_SBRCTL_INTERVAL_SAFE 1 +#define ECC_SBRCTL_INTERVAL_MAX (ECC_SBRCTL_SCRUB_INTERVAL >> 8) +#define ECC_SBRCTL_SCRUB_BURST GENMASK(6, 4) +#define ECC_SBRCTL_SCRUB_MODE_WR BIT(2) +#define ECC_SBRCTL_SCRUB_EN BIT(0) +#define ECC_SBRSTAT_SCRUB_DONE BIT(1) +#define ECC_SBRSTAT_SCRUB_BUSY BIT(0) /** - * struct ecc_error_info - ECC error log information. - * @row: Row number. - * @col: Column number. - * @bank: Bank number. - * @bitpos: Bit position. - * @data: Data causing the error. - * @bankgrpnr: Bank group number. - * @blknr: Block number. + * enum snps_dq_width - SDRAM DQ bus width (ECC capable). + * SNPS_DQ_32: 32-bit memory data width. + * SNPS_DQ_64: 64-bit memory data width. */ -struct ecc_error_info { - u32 row; - u32 col; - u32 bank; - u32 bitpos; - u32 data; - u32 bankgrpnr; - u32 blknr; +enum snps_dq_width { + SNPS_DQ_32 = 2, + SNPS_DQ_64 = 3, }; /** - * struct synps_ecc_status - ECC status information to report. - * @ce_cnt: Correctable error count. - * @ue_cnt: Uncorrectable error count. - * @ceinfo: Correctable error log information. - * @ueinfo: Uncorrectable error log information. 
+ * enum snps_dq_mode - SDRAM DQ bus mode. + * @SNPS_DQ_FULL: Full DQ bus width. + * @SNPS_DQ_HALF: Half DQ bus width. + * @SNPS_DQ_QRTR: Quarter DQ bus width. */ -struct synps_ecc_status { - u32 ce_cnt; - u32 ue_cnt; - struct ecc_error_info ceinfo; - struct ecc_error_info ueinfo; +enum snps_dq_mode { + SNPS_DQ_FULL = 0, + SNPS_DQ_HALF = 1, + SNPS_DQ_QRTR = 2, }; /** - * struct synps_edac_priv - DDR memory controller private instance data. - * @baseaddr: Base address of the DDR controller. - * @message: Buffer for framing the event specific info. - * @stat: ECC status information. - * @p_data: Platform data. - * @ce_cnt: Correctable Error count. - * @ue_cnt: Uncorrectable Error count. - * @poison_addr: Data poison address. - * @row_shift: Bit shifts for row bit. - * @col_shift: Bit shifts for column bit. - * @bank_shift: Bit shifts for bank bit. - * @bankgrp_shift: Bit shifts for bank group bit. - * @rank_shift: Bit shifts for rank bit. + * enum snps_burst_length - HIF/SDRAM burst transactions length. + * @SNPS_DDR_BL2: Burst length 2xSDRAM-words. + * @SNPS_DDR_BL4: Burst length 4xSDRAM-words. + * @SNPS_DDR_BL8: Burst length 8xSDRAM-words. + * @SNPS_DDR_BL16: Burst length 16xSDRAM-words. */ -struct synps_edac_priv { - void __iomem *baseaddr; - char message[SYNPS_EDAC_MSG_SIZE]; - struct synps_ecc_status stat; - const struct synps_platform_data *p_data; - u32 ce_cnt; - u32 ue_cnt; -#ifdef CONFIG_EDAC_DEBUG - ulong poison_addr; - u32 row_shift[18]; - u32 col_shift[14]; - u32 bank_shift[3]; - u32 bankgrp_shift[2]; - u32 rank_shift[1]; -#endif +enum snps_burst_length { + SNPS_DDR_BL2 = 2, + SNPS_DDR_BL4 = 4, + SNPS_DDR_BL8 = 8, + SNPS_DDR_BL16 = 16, }; /** - * struct synps_platform_data - synps platform data structure. - * @get_error_info: Get EDAC error info. - * @get_mtype: Get mtype. - * @get_dtype: Get dtype. - * @get_ecc_state: Get ECC state. - * @quirks: To differentiate IPs. + * enum snps_freq_ratio - HIF:SDRAM frequency ratio mode. + * @SNPS_FREQ_RATIO11: 1:1 frequency mode. + * @SNPS_FREQ_RATIO12: 1:2 frequency mode. */ -struct synps_platform_data { - int (*get_error_info)(struct synps_edac_priv *priv); - enum mem_type (*get_mtype)(const void __iomem *base); - enum dev_type (*get_dtype)(const void __iomem *base); - bool (*get_ecc_state)(void __iomem *base); - int quirks; +enum snps_freq_ratio { + SNPS_FREQ_RATIO11 = 1, + SNPS_FREQ_RATIO12 = 2, }; /** - * zynq_get_error_info - Get the current ECC error info. - * @priv: DDR memory controller private instance data. - * - * Return: one if there is no error, otherwise zero. + * enum snps_ecc_mode - ECC mode. + * @SNPS_ECC_DISABLED: ECC is disabled/unavailable. + * @SNPS_ECC_SECDED: SEC/DED over 1 beat ECC (SideBand/Inline). + * @SNPS_ECC_ADVX4X8: Advanced ECC X4/X8 (SideBand). */ -static int zynq_get_error_info(struct synps_edac_priv *priv) -{ - struct synps_ecc_status *p; - u32 regval, clearval = 0; - void __iomem *base; - - base = priv->baseaddr; - p = &priv->stat; +enum snps_ecc_mode { + SNPS_ECC_DISABLED = 0, + SNPS_ECC_SECDED = 4, + SNPS_ECC_ADVX4X8 = 5, +}; - regval = readl(base + STAT_OFST); - if (!regval) - return 1; +/** + * enum snps_ref_clk - DW uMCTL2 DDR controller clocks. + * @SNPS_CSR_CLK: CSR/APB interface clock. + * @SNPS_AXI_CLK: AXI (AHB) Port reference clock. + * @SNPS_CORE_CLK: DDR controller (including DFI) clock. SDRAM clock + * matches runs with this freq in 1:1 ratio mode and + * with twice of this freq in case of 1:2 ratio mode. + * @SNPS_SBR_CLK: Scrubber port reference clock (synchronous to + * the core clock). 
+ * @SNPS_MAX_NCLK: Total number of clocks. + */ +enum snps_ref_clk { + SNPS_CSR_CLK, + SNPS_AXI_CLK, + SNPS_CORE_CLK, + SNPS_SBR_CLK, + SNPS_MAX_NCLK +}; - p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT; - p->ue_cnt = regval & STAT_UECNT_MASK; - - regval = readl(base + CE_LOG_OFST); - if (!(p->ce_cnt && (regval & LOG_VALID))) - goto ue_err; - - p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT; - regval = readl(base + CE_ADDR_OFST); - p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT; - p->ceinfo.col = regval & ADDR_COL_MASK; - p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT; - p->ceinfo.data = readl(base + CE_DATA_31_0_OFST); - edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos, - p->ceinfo.data); - clearval = ECC_CTRL_CLR_CE_ERR; - -ue_err: - regval = readl(base + UE_LOG_OFST); - if (!(p->ue_cnt && (regval & LOG_VALID))) - goto out; - - regval = readl(base + UE_ADDR_OFST); - p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT; - p->ueinfo.col = regval & ADDR_COL_MASK; - p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT; - p->ueinfo.data = readl(base + UE_DATA_31_0_OFST); - clearval |= ECC_CTRL_CLR_UE_ERR; - -out: - writel(clearval, base + ECC_CTRL_OFST); - writel(0x0, base + ECC_CTRL_OFST); +/** + * struct snps_ddrc_info - DDR controller platform parameters. + * @caps: DDR controller capabilities. + * @sdram_mode: Current SDRAM mode selected. + * @dev_cfg: Current memory device config (if applicable). + * @dq_width: Memory data bus width (width of the DQ signals + * connected to SDRAM chips). + * @dq_mode: Proportion of the DQ bus utilized to access SDRAM. + * @sdram_burst_len: SDRAM burst transaction length. + * @hif_burst_len: HIF burst transaction length (Host Interface). + * @freq_ratio: HIF/SDRAM frequency ratio mode. + * @ecc_mode: ECC mode enabled for the DDR controller (SEC/DED, etc). + * @ranks: Number of ranks enabled to access DIMM (1, 2 or 4). + */ +struct snps_ddrc_info { + unsigned int caps; + enum mem_type sdram_mode; + enum dev_type dev_cfg; + enum snps_dq_width dq_width; + enum snps_dq_mode dq_mode; + enum snps_burst_length sdram_burst_len; + enum snps_burst_length hif_burst_len; + enum snps_freq_ratio freq_ratio; + enum snps_ecc_mode ecc_mode; + unsigned int ranks; +}; - return 0; -} +/** + * struct snps_sys_app_map - System/Application mapping table. + * @nsar: Number of SARs enabled on the controller (max 4). + * @minsize: Minimal block size (from 256MB to 32GB). + * @sar.base: SAR base address aligned to minsize. + * @sar.size: SAR size aligned to minsize. + * @sar.ofst: SAR address offset. + */ +struct snps_sys_app_map { + u8 nsar; + u64 minsize; + struct { + u64 base; + u64 size; + u64 ofst; + } sar[DDR_MAX_NSAR]; +}; /** - * zynqmp_get_error_info - Get the current ECC error info. - * @priv: DDR memory controller private instance data. + * struct snps_hif_sdram_map - HIF/SDRAM mapping table. + * @row: HIF bit offsets used as row address bits. + * @col: HIF bit offsets used as column address bits. + * @bank: HIF bit offsets used as bank address bits. + * @bankgrp: HIF bit offsets used as bank group address bits. + * @rank: HIF bit offsets used as rank address bits. * - * Return: one if there is no error otherwise returns zero. + * For example, row[0] = 6 means row bit #0 is encoded by the HIF + * address bit #6 and vice-versa. 
*/ -static int zynqmp_get_error_info(struct synps_edac_priv *priv) -{ - struct synps_ecc_status *p; - u32 regval, clearval = 0; - void __iomem *base; - - base = priv->baseaddr; - p = &priv->stat; - - regval = readl(base + ECC_ERRCNT_OFST); - p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK; - p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT; - if (!p->ce_cnt) - goto ue_err; +struct snps_hif_sdram_map { + u8 row[DDR_MAX_ROW_WIDTH]; + u8 col[DDR_MAX_COL_WIDTH]; + u8 bank[DDR_MAX_BANK_WIDTH]; + u8 bankgrp[DDR_MAX_BANKGRP_WIDTH]; + u8 rank[DDR_MAX_RANK_WIDTH]; +}; - regval = readl(base + ECC_STAT_OFST); - if (!regval) - return 1; +/** + * struct snps_sdram_addr - SDRAM address. + * @row: Row number. + * @col: Column number. + * @bank: Bank number. + * @bankgrp: Bank group number. + * @rank: Rank number. + */ +struct snps_sdram_addr { + u16 row; + u16 col; + u8 bank; + u8 bankgrp; + u8 rank; +}; - p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK); - - regval = readl(base + ECC_CEADDR0_OFST); - p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK); - regval = readl(base + ECC_CEADDR1_OFST); - p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >> - ECC_CEADDR1_BNKNR_SHIFT; - p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >> - ECC_CEADDR1_BNKGRP_SHIFT; - p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK); - p->ceinfo.data = readl(base + ECC_CSYND0_OFST); - edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n", - readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST), - readl(base + ECC_CSYND2_OFST)); -ue_err: - if (!p->ue_cnt) - goto out; - - regval = readl(base + ECC_UEADDR0_OFST); - p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK); - regval = readl(base + ECC_UEADDR1_OFST); - p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >> - ECC_CEADDR1_BNKGRP_SHIFT; - p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >> - ECC_CEADDR1_BNKNR_SHIFT; - p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK); - p->ueinfo.data = readl(base + ECC_UESYND0_OFST); -out: - clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT; - clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; - writel(clearval, base + ECC_CLR_OFST); - writel(0x0, base + ECC_CLR_OFST); +/** + * struct snps_ecc_error_info - ECC error log information. + * @sdram: SDRAM address. + * @ecnt: Number of detected errors. + * @bitpos: Bit position. + * @data: Data causing the error. + * @syndrome: Erroneous data syndrome. + */ +struct snps_ecc_error_info { + struct snps_sdram_addr sdram; + u16 ecnt; + u32 bitpos; + u64 data; + u32 syndrome; +}; - return 0; -} +/** + * struct snps_edac_priv - DDR memory controller private data. + * @info: DDR controller config info. + * @sys_app_map: Sys/App mapping table. + * @hif_sdram_map: HIF/SDRAM mapping table. + * @pdev: Platform device. + * @baseaddr: Base address of the DDR controller. + * @lock: Concurrent CSRs access lock. + * @clks: Controller reference clocks. + * @message: Buffer for framing the event specific info. + */ +struct snps_edac_priv { + struct snps_ddrc_info info; + struct snps_sys_app_map sys_app_map; + struct snps_hif_sdram_map hif_sdram_map; + struct platform_device *pdev; + void __iomem *baseaddr; + spinlock_t lock; + struct clk_bulk_data clks[SNPS_MAX_NCLK]; + char message[SNPS_EDAC_MSG_SIZE]; +}; /** - * handle_error - Handle Correctable and Uncorrectable errors. - * @mci: EDAC memory controller instance. - * @p: Synopsys ECC status structure. + * snps_map_sys_to_app - Map System address to Application address. 
+ * @priv: DDR memory controller private instance data. + * @sys: System address (source). + * @app: Application address (destination). + * + * System address space is used to define disjoint memory regions + * mapped then to the contiguous application memory space: * - * Handles ECC correctable and uncorrectable errors. + * System Address Space (SAR) <-> Application Address Space + * +------+ +------+ + * | SAR0 |----------------------->| Reg0 | + * +------+ -offset +------+ + * | ... | +----------->| Reg1 | + * +------+ | +------+ + * | SAR1 |-----------+ | ... | + * +------+ + * | ... | + * + * The translation is done by applying the corresponding SAR offset + * to the inbound system address. Note according to the hardware reference + * manual the same mapping is applied to the addresses up to the next + * SAR base address irrespective to the region size. */ -static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) +static void snps_map_sys_to_app(struct snps_edac_priv *priv, + dma_addr_t sys, u64 *app) { - struct synps_edac_priv *priv = mci->pvt_info; - struct ecc_error_info *pinf; - - if (p->ce_cnt) { - pinf = &p->ceinfo; - if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) { - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x", - "CE", pinf->row, pinf->bank, - pinf->bankgrpnr, pinf->blknr, - pinf->bitpos, pinf->data); - } else { - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x", - "CE", pinf->row, pinf->bank, pinf->col, - pinf->bitpos, pinf->data); - } + struct snps_sys_app_map *map = &priv->sys_app_map; + u64 ofst; + int i; - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, - p->ce_cnt, 0, 0, 0, 0, 0, -1, - priv->message, ""); - } - - if (p->ue_cnt) { - pinf = &p->ueinfo; - if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) { - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d", - "UE", pinf->row, pinf->bank, - pinf->bankgrpnr, pinf->blknr); - } else { - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type :%s Row %d Bank %d Col %d ", - "UE", pinf->row, pinf->bank, pinf->col); - } + ofst = 0; + for (i = 0; i < map->nsar; i++) { + if (sys < map->sar[i].base) + break; - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, - p->ue_cnt, 0, 0, 0, 0, 0, -1, - priv->message, ""); + ofst = map->sar[i].ofst; } - memset(p, 0, sizeof(*p)); -} - -static void enable_intr(struct synps_edac_priv *priv) -{ - /* Enable UE/CE Interrupts */ - if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) - writel(DDR_UE_MASK | DDR_CE_MASK, - priv->baseaddr + ECC_CLR_OFST); - else - writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, - priv->baseaddr + DDR_QOS_IRQ_EN_OFST); - -} - -static void disable_intr(struct synps_edac_priv *priv) -{ - /* Disable UE/CE Interrupts */ - if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) - writel(0x0, priv->baseaddr + ECC_CLR_OFST); - else - writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, - priv->baseaddr + DDR_QOS_IRQ_DB_OFST); + *app = sys - ofst; } /** - * intr_handler - Interrupt Handler for ECC interrupts. - * @irq: IRQ number. - * @dev_id: Device ID. + * snps_map_sys_to_app - Map Application address to System address. + * @priv: DDR memory controller private instance data. + * @app: Application address (source). + * @sys: System address (destination). 
* - * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise. + * Backward App-to-sys translation is easier because the application address + * space is contiguous. So we just need to add the offset corresponding + * to the region the passed address belongs to. Note the later offset is applied + * to all the addresses above the last available region. */ -static irqreturn_t intr_handler(int irq, void *dev_id) +static void snps_map_app_to_sys(struct snps_edac_priv *priv, + u64 app, dma_addr_t *sys) { - const struct synps_platform_data *p_data; - struct mem_ctl_info *mci = dev_id; - struct synps_edac_priv *priv; - int status, regval; - - priv = mci->pvt_info; - p_data = priv->p_data; - - /* - * v3.0 of the controller has the ce/ue bits cleared automatically, - * so this condition does not apply. - */ - if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) { - regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); - regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK); - if (!(regval & ECC_CE_UE_INTR_MASK)) - return IRQ_NONE; + struct snps_sys_app_map *map = &priv->sys_app_map; + u64 ofst, size; + int i; + + ofst = 0; + for (i = 0, size = 0; i < map->nsar; i++) { + ofst = map->sar[i].ofst; + size += map->sar[i].size; + if (app < size) + break; } - status = p_data->get_error_info(priv); - if (status) - return IRQ_NONE; - - priv->ce_cnt += priv->stat.ce_cnt; - priv->ue_cnt += priv->stat.ue_cnt; - handle_error(mci, &priv->stat); - - edac_dbg(3, "Total error count CE %d UE %d\n", - priv->ce_cnt, priv->ue_cnt); - /* v3.0 of the controller does not have this register */ - if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) - writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); - else - enable_intr(priv); - - return IRQ_HANDLED; + *sys = app + ofst; } /** - * check_errors - Check controller for ECC errors. - * @mci: EDAC memory controller instance. + * snps_map_app_to_hif - Map Application address to HIF address. + * @priv: DDR memory controller private instance data. + * @app: Application address (source). + * @hif: HIF address (destination). * - * Check and post ECC errors. Called by the polling thread. + * HIF address is used to perform the DQ bus width aligned burst transactions. + * So in order to perform the Application-to-HIF address translation we just + * need to discard the SDRAM-word bits of the Application address. */ -static void check_errors(struct mem_ctl_info *mci) +static void snps_map_app_to_hif(struct snps_edac_priv *priv, + u64 app, u64 *hif) { - const struct synps_platform_data *p_data; - struct synps_edac_priv *priv; - int status; - - priv = mci->pvt_info; - p_data = priv->p_data; - - status = p_data->get_error_info(priv); - if (status) - return; - - priv->ce_cnt += priv->stat.ce_cnt; - priv->ue_cnt += priv->stat.ue_cnt; - handle_error(mci, &priv->stat); - - edac_dbg(3, "Total error count CE %d UE %d\n", - priv->ce_cnt, priv->ue_cnt); + *hif = app >> priv->info.dq_width; } /** - * zynq_get_dtype - Return the controller memory width. - * @base: DDR memory controller base address. - * - * Get the EDAC device type width appropriate for the current controller - * configuration. + * snps_map_hif_to_app - Map HIF address to Application address. + * @priv: DDR memory controller private instance data. + * @hif: HIF address (source). + * @app: Application address (destination). * - * Return: a device type width enumeration. + * Backward HIF-to-App translation is just the opposite DQ-width-based + * shift operation. 
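+ *
+ * For instance, assuming a 64-bit DQ bus (i.e. dq_width = 3, 8-byte SDRAM
+ * words), the application address 0x1000 corresponds to the HIF address
+ * 0x200 and vice-versa.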
*/ -static enum dev_type zynq_get_dtype(const void __iomem *base) +static void snps_map_hif_to_app(struct snps_edac_priv *priv, + u64 hif, u64 *app) { - enum dev_type dt; - u32 width; - - width = readl(base + CTRL_OFST); - width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT; - - switch (width) { - case DDRCTL_WDTH_16: - dt = DEV_X2; - break; - case DDRCTL_WDTH_32: - dt = DEV_X4; - break; - default: - dt = DEV_UNKNOWN; - } - - return dt; + *app = hif << priv->info.dq_width; } /** - * zynqmp_get_dtype - Return the controller memory width. - * @base: DDR memory controller base address. - * - * Get the EDAC device type width appropriate for the current controller - * configuration. + * snps_map_hif_to_sdram - Map HIF address to SDRAM address. + * @priv: DDR memory controller private instance data. + * @hif: HIF address (source). + * @sdram: SDRAM address (destination). * - * Return: a device type width enumeration. + * HIF-SDRAM address mapping is configured with the ADDRMAPx registers, Based + * on the CSRs value the HIF address bits are mapped to the corresponding bits + * in the SDRAM rank/bank/column/row. If an SDRAM address bit is unused (there + * is no any HIF address bit corresponding to it) it will be set to zero. Using + * this fact we can freely set the output SDRAM address with zeros and walk + * over the set HIF address bits only. Similarly the unmapped HIF address bits + * are just ignored. */ -static enum dev_type zynqmp_get_dtype(const void __iomem *base) +static void snps_map_hif_to_sdram(struct snps_edac_priv *priv, + u64 hif, struct snps_sdram_addr *sdram) { - enum dev_type dt; - u32 width; - - width = readl(base + CTRL_OFST); - width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT; - switch (width) { - case DDRCTL_EWDTH_16: - dt = DEV_X2; - break; - case DDRCTL_EWDTH_32: - dt = DEV_X4; - break; - case DDRCTL_EWDTH_64: - dt = DEV_X8; - break; - default: - dt = DEV_UNKNOWN; - } + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + int i; - return dt; -} + sdram->row = 0; + for (i = 0; i < DDR_MAX_ROW_WIDTH; i++) { + if (map->row[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->row[i])) + sdram->row |= BIT(i); + } -/** - * zynq_get_ecc_state - Return the controller ECC enable/disable status. - * @base: DDR memory controller base address. - * - * Get the ECC enable/disable status of the controller. - * - * Return: true if enabled, otherwise false. - */ -static bool zynq_get_ecc_state(void __iomem *base) -{ - enum dev_type dt; - u32 ecctype; + sdram->col = 0; + for (i = 0; i < DDR_MAX_COL_WIDTH; i++) { + if (map->col[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->col[i])) + sdram->col |= BIT(i); + } - dt = zynq_get_dtype(base); - if (dt == DEV_UNKNOWN) - return false; + sdram->bank = 0; + for (i = 0; i < DDR_MAX_BANK_WIDTH; i++) { + if (map->bank[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->bank[i])) + sdram->bank |= BIT(i); + } - ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK; - if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2)) - return true; + sdram->bankgrp = 0; + for (i = 0; i < DDR_MAX_BANKGRP_WIDTH; i++) { + if (map->bankgrp[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->bankgrp[i])) + sdram->bankgrp |= BIT(i); + } - return false; + sdram->rank = 0; + for (i = 0; i < DDR_MAX_RANK_WIDTH; i++) { + if (map->rank[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->rank[i])) + sdram->rank |= BIT(i); + } } /** - * zynqmp_get_ecc_state - Return the controller ECC enable/disable status. - * @base: DDR memory controller base address. 
+ * snps_map_sdram_to_hif - Map SDRAM address to HIF address. + * @priv: DDR memory controller private instance data. + * @sdram: SDRAM address (source). + * @hif: HIF address (destination). * - * Get the ECC enable/disable status for the controller. + * SDRAM-HIF address mapping is similar to the HIF-SDRAM mapping procedure, but + * we'll traverse each SDRAM rank/bank/column/row bit. * - * Return: a ECC status boolean i.e true/false - enabled/disabled. + * Note the unmapped bits of the SDRAM address components will be just + * ignored. So make sure the source address is valid. */ -static bool zynqmp_get_ecc_state(void __iomem *base) +static void snps_map_sdram_to_hif(struct snps_edac_priv *priv, + struct snps_sdram_addr *sdram, u64 *hif) { - enum dev_type dt; - u32 ecctype; + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + unsigned long addr; + int i; - dt = zynqmp_get_dtype(base); - if (dt == DEV_UNKNOWN) - return false; + *hif = 0; - ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK; - if ((ecctype == SCRUB_MODE_SECDED) && - ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8))) - return true; + addr = sdram->row; + for_each_set_bit(i, &addr, DDR_MAX_ROW_WIDTH) { + if (map->row[i] != DDR_ADDRMAP_UNUSED) + *hif |= BIT_ULL(map->row[i]); + } - return false; -} + addr = sdram->col; + for_each_set_bit(i, &addr, DDR_MAX_COL_WIDTH) { + if (map->col[i] != DDR_ADDRMAP_UNUSED) + *hif |= BIT_ULL(map->col[i]); + } -/** - * get_memsize - Read the size of the attached memory device. - * - * Return: the memory size in bytes. - */ -static u32 get_memsize(void) -{ - struct sysinfo inf; + addr = sdram->bank; + for_each_set_bit(i, &addr, DDR_MAX_BANK_WIDTH) { + if (map->bank[i] != DDR_ADDRMAP_UNUSED) + *hif |= BIT_ULL(map->bank[i]); + } - si_meminfo(&inf); + addr = sdram->bankgrp; + for_each_set_bit(i, &addr, DDR_MAX_BANKGRP_WIDTH) { + if (map->bankgrp[i] != DDR_ADDRMAP_UNUSED) + *hif |= BIT_ULL(map->bankgrp[i]); + } - return inf.totalram * inf.mem_unit; + addr = sdram->rank; + for_each_set_bit(i, &addr, DDR_MAX_RANK_WIDTH) { + if (map->rank[i] != DDR_ADDRMAP_UNUSED) + *hif |= BIT_ULL(map->rank[i]); + } } /** - * zynq_get_mtype - Return the controller memory type. - * @base: Synopsys ECC status structure. - * - * Get the EDAC memory type appropriate for the current controller - * configuration. + * snps_map_sys_to_sdram - Map System address to SDRAM address. + * @priv: DDR memory controller private instance data. + * @sys: System address (source). + * @sdram: SDRAM address (destination). * - * Return: a memory type enumeration. + * Perform a full mapping of the system address (detected on the controller + * ports) to the SDRAM address tuple row/column/bank/etc. */ -static enum mem_type zynq_get_mtype(const void __iomem *base) +static void snps_map_sys_to_sdram(struct snps_edac_priv *priv, + dma_addr_t sys, struct snps_sdram_addr *sdram) { - enum mem_type mt; - u32 memtype; + u64 app, hif; - memtype = readl(base + T_ZQ_OFST); + snps_map_sys_to_app(priv, sys, &app); - if (memtype & T_ZQ_DDRMODE_MASK) - mt = MEM_DDR3; - else - mt = MEM_DDR2; + snps_map_app_to_hif(priv, app, &hif); - return mt; + snps_map_hif_to_sdram(priv, hif, sdram); } /** - * zynqmp_get_mtype - Returns controller memory type. - * @base: Synopsys ECC status structure. - * - * Get the EDAC memory type appropriate for the current controller - * configuration. + * snps_map_sdram_to_sys - Map SDRAM address to SDRAM address. + * @priv: DDR memory controller private instance data. + * @sys: System address (source). 
+ * @sdram: SDRAM address (destination). * - * Return: a memory type enumeration. + * Perform a full mapping of the SDRAM address (row/column/bank/etc) to + * the system address specific to the controller system bus ports. */ -static enum mem_type zynqmp_get_mtype(const void __iomem *base) +static void snps_map_sdram_to_sys(struct snps_edac_priv *priv, + struct snps_sdram_addr *sdram, dma_addr_t *sys) { - enum mem_type mt; - u32 memtype; + u64 app, hif; - memtype = readl(base + CTRL_OFST); + snps_map_sdram_to_hif(priv, sdram, &hif); - if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3)) - mt = MEM_DDR3; - else if (memtype & MEM_TYPE_DDR2) - mt = MEM_RDDR2; - else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4)) - mt = MEM_DDR4; - else - mt = MEM_EMPTY; + snps_map_hif_to_app(priv, hif, &app); - return mt; + snps_map_app_to_sys(priv, app, sys); } /** - * init_csrows - Initialize the csrow data. - * @mci: EDAC memory controller instance. + * snps_get_bitpos - Get DQ-bus corrected bit position. + * @bitnum: Bit number retrieved from the ECCSTAT.corrected_bit_num field. + * @dq_width: Controller DQ-bus width. * - * Initialize the chip select rows associated with the EDAC memory - * controller instance. + * Return: actual corrected DQ-bus bit position starting from 0. */ -static void init_csrows(struct mem_ctl_info *mci) +static inline u32 snps_get_bitpos(u32 bitnum, enum snps_dq_width dq_width) { - struct synps_edac_priv *priv = mci->pvt_info; - const struct synps_platform_data *p_data; - struct csrow_info *csi; - struct dimm_info *dimm; - u32 size, row; - int j; - - p_data = priv->p_data; + /* ecc[0] bit */ + if (bitnum == 0) + return BITS_PER_BYTE << dq_width; - for (row = 0; row < mci->nr_csrows; row++) { - csi = mci->csrows[row]; - size = get_memsize(); + /* ecc[1:x] bit */ + if (is_power_of_2(bitnum)) + return (BITS_PER_BYTE << dq_width) + ilog2(bitnum) + 1; - for (j = 0; j < csi->nr_channels; j++) { - dimm = csi->channels[j]->dimm; - dimm->edac_mode = EDAC_SECDED; - dimm->mtype = p_data->get_mtype(priv->baseaddr); - dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels; - dimm->grain = SYNPS_EDAC_ERR_GRAIN; - dimm->dtype = p_data->get_dtype(priv->baseaddr); - } - } + /* data[0:y] bit */ + return bitnum - ilog2(bitnum) - 2; } /** - * mc_init - Initialize one driver instance. - * @mci: EDAC memory controller instance. - * @pdev: platform device. + * snps_ce_irq_handler - Corrected error interrupt handler. + * @irq: IRQ number. + * @dev_id: Device ID. * - * Perform initialization of the EDAC memory controller instance and - * related driver-private data associated with the memory controller the - * instance is bound to. + * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise. 
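+ *
+ * Note the ECCSTAT.corrected_bit_num field follows the Hamming-code bit
+ * ordering, which snps_get_bitpos() converts to a plain DQ-bus position:
+ * e.g. on a 64-bit bus the raw value 9 decodes to data bit 4, while the
+ * raw value 4 decodes to check bit ecc[3] reported as position 67.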
*/ -static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev) +static irqreturn_t snps_ce_irq_handler(int irq, void *dev_id) { - struct synps_edac_priv *priv; - - mci->pdev = &pdev->dev; - priv = mci->pvt_info; - platform_set_drvdata(pdev, mci); + struct mem_ctl_info *mci = dev_id; + struct snps_edac_priv *priv = mci->pvt_info; + struct snps_ecc_error_info einfo; + unsigned long flags; + u32 qosval, regval; + dma_addr_t sys; + + /* Make sure IRQ is caused by a corrected ECC error */ + if (priv->info.caps & SNPS_CAP_ZYNQMP) { + qosval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); + if (!(qosval & DDR_QOSCE_MASK)) + return IRQ_NONE; - /* Initialize controller capabilities and configuration */ - mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2; - mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; - mci->scrub_cap = SCRUB_HW_SRC; - mci->scrub_mode = SCRUB_NONE; - - mci->edac_cap = EDAC_FLAG_SECDED; - mci->ctl_name = "synps_ddr_controller"; - mci->dev_name = SYNPS_EDAC_MOD_STRING; - mci->mod_name = SYNPS_EDAC_MOD_VER; - - if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) { - edac_op_state = EDAC_OPSTATE_INT; - } else { - edac_op_state = EDAC_OPSTATE_POLL; - mci->edac_check = check_errors; + qosval &= DDR_QOSCE_MASK; } - mci->ctl_page_to_phys = NULL; + regval = readl(priv->baseaddr + ECC_STAT_OFST); + if (!FIELD_GET(ECC_STAT_CE_MASK, regval)) + return IRQ_NONE; - init_csrows(mci); -} + /* Read error info like bit position, SDRAM address, data, syndrome */ + einfo.bitpos = FIELD_GET(ECC_STAT_BITNUM_MASK, regval); + einfo.bitpos = snps_get_bitpos(einfo.bitpos, priv->info.dq_width); -static int setup_irq(struct mem_ctl_info *mci, - struct platform_device *pdev) -{ - struct synps_edac_priv *priv = mci->pvt_info; - int ret, irq; + regval = readl(priv->baseaddr + ECC_ERRCNT_OFST); + einfo.ecnt = FIELD_GET(ECC_ERRCNT_CECNT_MASK, regval); - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - edac_printk(KERN_ERR, EDAC_MC, - "No IRQ %d in DT\n", irq); - return irq; - } + regval = readl(priv->baseaddr + ECC_CEADDR0_OFST); + einfo.sdram.rank = FIELD_GET(ECC_CEADDR0_RANK_MASK, regval); + einfo.sdram.row = FIELD_GET(ECC_CEADDR0_ROW_MASK, regval); - ret = devm_request_irq(&pdev->dev, irq, intr_handler, - 0, dev_name(&pdev->dev), mci); - if (ret < 0) { - edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n"); - return ret; - } + regval = readl(priv->baseaddr + ECC_CEADDR1_OFST); + einfo.sdram.bankgrp = FIELD_GET(ECC_CEADDR1_BANKGRP_MASK, regval); + einfo.sdram.bank = FIELD_GET(ECC_CEADDR1_BANK_MASK, regval); + einfo.sdram.col = FIELD_GET(ECC_CEADDR1_COL_MASK, regval); - enable_intr(priv); + einfo.data = readl(priv->baseaddr + ECC_CSYND0_OFST); + if (priv->info.dq_width == SNPS_DQ_64) + einfo.data |= (u64)readl(priv->baseaddr + ECC_CSYND1_OFST) << 32; - return 0; -} + einfo.syndrome = readl(priv->baseaddr + ECC_CSYND2_OFST); -static const struct synps_platform_data zynq_edac_def = { - .get_error_info = zynq_get_error_info, - .get_mtype = zynq_get_mtype, - .get_dtype = zynq_get_dtype, - .get_ecc_state = zynq_get_ecc_state, - .quirks = 0, -}; + /* Report the detected errors with the corresponding sys address */ + snps_map_sdram_to_sys(priv, &einfo.sdram, &sys); -static const struct synps_platform_data zynqmp_edac_def = { - .get_error_info = zynqmp_get_error_info, - .get_mtype = zynqmp_get_mtype, - .get_dtype = zynqmp_get_dtype, - .get_ecc_state = zynqmp_get_ecc_state, - .quirks = (DDR_ECC_INTR_SUPPORT -#ifdef CONFIG_EDAC_DEBUG - | DDR_ECC_DATA_POISON_SUPPORT -#endif - ), -}; + 
snprintf(priv->message, SNPS_EDAC_MSG_SIZE, + "Row %hu Col %hu Bank %hhu Bank Group %hhu Rank %hhu Bit %d Data 0x%08llx", + einfo.sdram.row, einfo.sdram.col, einfo.sdram.bank, + einfo.sdram.bankgrp, einfo.sdram.rank, + einfo.bitpos, einfo.data); -static const struct synps_platform_data synopsys_edac_def = { - .get_error_info = zynqmp_get_error_info, - .get_mtype = zynqmp_get_mtype, - .get_dtype = zynqmp_get_dtype, - .get_ecc_state = zynqmp_get_ecc_state, - .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR -#ifdef CONFIG_EDAC_DEBUG - | DDR_ECC_DATA_POISON_SUPPORT -#endif - ), -}; + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, einfo.ecnt, + PHYS_PFN(sys), offset_in_page(sys), + einfo.syndrome, einfo.sdram.rank, 0, -1, + priv->message, ""); + /* Make sure the CE IRQ status is cleared */ + spin_lock_irqsave(&priv->lock, flags); -static const struct of_device_id synps_edac_match[] = { - { - .compatible = "xlnx,zynq-ddrc-a05", - .data = (void *)&zynq_edac_def - }, - { - .compatible = "xlnx,zynqmp-ddrc-2.40a", - .data = (void *)&zynqmp_edac_def - }, - { - .compatible = "snps,ddrc-3.80a", - .data = (void *)&synopsys_edac_def - }, - { - /* end of table */ - } -}; + regval = readl(priv->baseaddr + ECC_CLR_OFST) | + ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT; + writel(regval, priv->baseaddr + ECC_CLR_OFST); -MODULE_DEVICE_TABLE(of, synps_edac_match); + spin_unlock_irqrestore(&priv->lock, flags); -#ifdef CONFIG_EDAC_DEBUG -#define to_mci(k) container_of(k, struct mem_ctl_info, dev) + if (priv->info.caps & SNPS_CAP_ZYNQMP) + writel(qosval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); + + return IRQ_HANDLED; +} /** - * ddr_poison_setup - Update poison registers. - * @priv: DDR memory controller private instance data. + * snps_ue_irq_handler - Uncorrected error interrupt handler. + * @irq: IRQ number. + * @dev_id: Device ID. * - * Update poison registers as per DDR mapping. - * Return: none. + * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise. 
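+ *
+ * The logged rank/bank/row/column tuple is translated back to a system
+ * address before being reported, so e.g. (with 4 KiB pages) a fault decoded
+ * to the system address 0x80001040 is passed to the EDAC core as page frame
+ * 0x80001 with the in-page offset 0x40.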
*/ -static void ddr_poison_setup(struct synps_edac_priv *priv) +static irqreturn_t snps_ue_irq_handler(int irq, void *dev_id) { - int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval; - int index; - ulong hif_addr = 0; - - hif_addr = priv->poison_addr >> 3; + struct mem_ctl_info *mci = dev_id; + struct snps_edac_priv *priv = mci->pvt_info; + struct snps_ecc_error_info einfo; + unsigned long flags; + u32 qosval, regval; + dma_addr_t sys; + + /* Make sure IRQ is caused by an uncorrected ECC error */ + if (priv->info.caps & SNPS_CAP_ZYNQMP) { + qosval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); + if (!(regval & DDR_QOSUE_MASK)) + return IRQ_NONE; - for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) { - if (priv->row_shift[index]) - row |= (((hif_addr >> priv->row_shift[index]) & - BIT(0)) << index); - else - break; + qosval &= DDR_QOSUE_MASK; } - for (index = 0; index < DDR_MAX_COL_SHIFT; index++) { - if (priv->col_shift[index] || index < 3) - col |= (((hif_addr >> priv->col_shift[index]) & - BIT(0)) << index); - else - break; - } + regval = readl(priv->baseaddr + ECC_STAT_OFST); + if (!FIELD_GET(ECC_STAT_UE_MASK, regval)) + return IRQ_NONE; - for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) { - if (priv->bank_shift[index]) - bank |= (((hif_addr >> priv->bank_shift[index]) & - BIT(0)) << index); - else - break; - } + /* Read error info like SDRAM address, data and syndrome */ + regval = readl(priv->baseaddr + ECC_ERRCNT_OFST); + einfo.ecnt = FIELD_GET(ECC_ERRCNT_UECNT_MASK, regval); - for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) { - if (priv->bankgrp_shift[index]) - bankgrp |= (((hif_addr >> priv->bankgrp_shift[index]) - & BIT(0)) << index); - else - break; - } + regval = readl(priv->baseaddr + ECC_UEADDR0_OFST); + einfo.sdram.rank = FIELD_GET(ECC_CEADDR0_RANK_MASK, regval); + einfo.sdram.row = FIELD_GET(ECC_CEADDR0_ROW_MASK, regval); - if (priv->rank_shift[0]) - rank = (hif_addr >> priv->rank_shift[0]) & BIT(0); + regval = readl(priv->baseaddr + ECC_UEADDR1_OFST); + einfo.sdram.bankgrp = FIELD_GET(ECC_CEADDR1_BANKGRP_MASK, regval); + einfo.sdram.bank = FIELD_GET(ECC_CEADDR1_BANK_MASK, regval); + einfo.sdram.col = FIELD_GET(ECC_CEADDR1_COL_MASK, regval); - regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK; - regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK; - writel(regval, priv->baseaddr + ECC_POISON0_OFST); + einfo.data = readl(priv->baseaddr + ECC_UESYND0_OFST); + if (priv->info.dq_width == SNPS_DQ_64) + einfo.data |= (u64)readl(priv->baseaddr + ECC_UESYND1_OFST) << 32; - regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK; - regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK; - regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK; - writel(regval, priv->baseaddr + ECC_POISON1_OFST); -} + einfo.syndrome = readl(priv->baseaddr + ECC_UESYND2_OFST); -static ssize_t inject_data_error_show(struct device *dev, - struct device_attribute *mattr, - char *data) -{ - struct mem_ctl_info *mci = to_mci(dev); - struct synps_edac_priv *priv = mci->pvt_info; - - return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r" - "Error injection Address: 0x%lx\n\r", - readl(priv->baseaddr + ECC_POISON0_OFST), - readl(priv->baseaddr + ECC_POISON1_OFST), - priv->poison_addr); -} + /* Report the detected errors with the corresponding sys address */ + snps_map_sdram_to_sys(priv, &einfo.sdram, &sys); -static ssize_t inject_data_error_store(struct device *dev, - struct device_attribute *mattr, - 
const char *data, size_t count) -{ - struct mem_ctl_info *mci = to_mci(dev); - struct synps_edac_priv *priv = mci->pvt_info; + snprintf(priv->message, SNPS_EDAC_MSG_SIZE, + "Row %hu Col %hu Bank %hhu Bank Group %hhu Rank %hhu Data 0x%08llx", + einfo.sdram.row, einfo.sdram.col, einfo.sdram.bank, + einfo.sdram.bankgrp, einfo.sdram.rank, + einfo.data); - if (kstrtoul(data, 0, &priv->poison_addr)) - return -EINVAL; + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, einfo.ecnt, + PHYS_PFN(sys), offset_in_page(sys), + einfo.syndrome, einfo.sdram.rank, 0, -1, + priv->message, ""); - ddr_poison_setup(priv); + /* Make sure the UE IRQ status is cleared */ + spin_lock_irqsave(&priv->lock, flags); - return count; -} + regval = readl(priv->baseaddr + ECC_CLR_OFST) | + ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; + writel(regval, priv->baseaddr + ECC_CLR_OFST); -static ssize_t inject_data_poison_show(struct device *dev, - struct device_attribute *mattr, - char *data) -{ - struct mem_ctl_info *mci = to_mci(dev); - struct synps_edac_priv *priv = mci->pvt_info; + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->info.caps & SNPS_CAP_ZYNQMP) + writel(qosval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); - return sprintf(data, "Data Poisoning: %s\n\r", - (((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3) - ? ("Correctable Error") : ("UnCorrectable Error")); + return IRQ_HANDLED; } -static ssize_t inject_data_poison_store(struct device *dev, - struct device_attribute *mattr, - const char *data, size_t count) +/** + * snps_dfi_irq_handler - DFI CRC/Parity error interrupt handler. + * @irq: IRQ number. + * @dev_id: Device ID. + * + * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise. + */ +static irqreturn_t snps_dfi_irq_handler(int irq, void *dev_id) { - struct mem_ctl_info *mci = to_mci(dev); - struct synps_edac_priv *priv = mci->pvt_info; + struct mem_ctl_info *mci = dev_id; + struct snps_edac_priv *priv = mci->pvt_info; + unsigned long flags; + u32 regval; + u16 ecnt; + + /* Make sure IRQ is caused by an DFI alert error */ + regval = readl(priv->baseaddr + DDR_CRCPARSTAT_OFST); + if (!(regval & DDR_CRCPARSTAT_ALRT_ERR)) + return IRQ_NONE; - writel(0, priv->baseaddr + DDRC_SWCTL); - if (strncmp(data, "CE", 2) == 0) - writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST); - else - writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST); - writel(1, priv->baseaddr + DDRC_SWCTL); + /* Just a number of CRC/Parity errors is available */ + ecnt = FIELD_GET(DDR_CRCPARSTAT_ALRT_CNT_MASK, regval); - return count; -} + /* Report the detected errors with just the custom message */ + snprintf(priv->message, SNPS_EDAC_MSG_SIZE, + "DFI CRC/Parity error detected on dfi_alert_n"); -static DEVICE_ATTR_RW(inject_data_error); -static DEVICE_ATTR_RW(inject_data_poison); + edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, ecnt, + 0, 0, 0, 0, 0, -1, priv->message, ""); -static int edac_create_sysfs_attributes(struct mem_ctl_info *mci) -{ - int rc; + /* Make sure the DFI alert IRQ status is cleared */ + spin_lock_irqsave(&priv->lock, flags); - rc = device_create_file(&mci->dev, &dev_attr_inject_data_error); - if (rc < 0) - return rc; - rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison); - if (rc < 0) - return rc; - return 0; + regval = readl(priv->baseaddr + DDR_CRCPARCTL0_OFST) | + DDR_CRCPARCTL0_CLR_ALRT_ERR | DDR_CRCPARCTL0_CLR_ALRT_ERRCNT; + writel(regval, priv->baseaddr + DDR_CRCPARCTL0_OFST); + + spin_unlock_irqrestore(&priv->lock, flags); + + return IRQ_HANDLED; +} + +/** + * 
snps_sbr_irq_handler - Scrubber Done interrupt handler. + * @irq: IRQ number. + * @dev_id: Device ID. + * + * It just checks whether the IRQ has been caused by the Scrubber Done event + * and disables the back-to-back scrubbing by falling back to the smallest + * delay between the Scrubber read commands. + * + * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise. + */ +static irqreturn_t snps_sbr_irq_handler(int irq, void *dev_id) +{ + struct mem_ctl_info *mci = dev_id; + struct snps_edac_priv *priv = mci->pvt_info; + unsigned long flags; + u32 regval, en; + + /* Make sure IRQ is caused by the Scrubber Done event */ + regval = readl(priv->baseaddr + ECC_SBRSTAT_OFST); + if (!(regval & ECC_SBRSTAT_SCRUB_DONE)) + return IRQ_NONE; + + spin_lock_irqsave(&priv->lock, flags); + + regval = readl(priv->baseaddr + ECC_SBRCTL_OFST); + en = regval & ECC_SBRCTL_SCRUB_EN; + writel(regval & ~en, priv->baseaddr + ECC_SBRCTL_OFST); + + regval = FIELD_PREP(ECC_SBRCTL_SCRUB_INTERVAL, ECC_SBRCTL_INTERVAL_SAFE); + writel(regval, priv->baseaddr + ECC_SBRCTL_OFST); + + writel(regval | en, priv->baseaddr + ECC_SBRCTL_OFST); + + spin_unlock_irqrestore(&priv->lock, flags); + + edac_mc_printk(mci, KERN_WARNING, "Back-to-back scrubbing disabled\n"); + + return IRQ_HANDLED; +} + +/** + * snps_com_irq_handler - Interrupt IRQ signal handler. + * @irq: IRQ number. + * @dev_id: Device ID. + * + * Return: IRQ_NONE, if interrupts not set or IRQ_HANDLED otherwise. + */ +static irqreturn_t snps_com_irq_handler(int irq, void *dev_id) +{ + struct mem_ctl_info *mci = dev_id; + struct snps_edac_priv *priv = mci->pvt_info; + irqreturn_t rc = IRQ_NONE; + + rc |= snps_ce_irq_handler(irq, dev_id); + + rc |= snps_ue_irq_handler(irq, dev_id); + + rc |= snps_dfi_irq_handler(irq, dev_id); + + if (priv->info.caps & SNPS_CAP_ECC_SCRUBBER) + rc |= snps_sbr_irq_handler(irq, dev_id); + + return rc; +} + +static void snps_enable_irq(struct snps_edac_priv *priv) +{ + unsigned long flags; + + /* Enable UE/CE Interrupts */ + if (priv->info.caps & SNPS_CAP_ZYNQMP) { + writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, + priv->baseaddr + DDR_QOS_IRQ_EN_OFST); + + return; + } + + /* + * ECC IRQs Enable/Disable feature has been available since v3.10a, + * while CRC/Parity interrupts control - since v2.10a. + */ + spin_lock_irqsave(&priv->lock, flags); + + writel(ECC_CTRL_EN_CE_IRQ | ECC_CTRL_EN_UE_IRQ, + priv->baseaddr + ECC_CLR_OFST); + writel(DDR_CRCPARCTL0_EN_ALRT_IRQ, + priv->baseaddr + DDR_CRCPARCTL0_OFST); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void snps_disable_irq(struct snps_edac_priv *priv) +{ + unsigned long flags; + + /* Disable UE/CE Interrupts */ + if (priv->info.caps & SNPS_CAP_ZYNQMP) { + writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, + priv->baseaddr + DDR_QOS_IRQ_DB_OFST); + + return; + } + + spin_lock_irqsave(&priv->lock, flags); + + writel(0, priv->baseaddr + ECC_CLR_OFST); + writel(0, priv->baseaddr + DDR_CRCPARCTL0_OFST); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +/** + * snps_get_sdram_bw - Get SDRAM bandwidth. + * @priv: DDR memory controller private instance data. + * + * The SDRAM interface bandwidth is calculated based on the DDRC Core clock rate + * and the DW uMCTL2 IP-core parameters like DQ-bus width and mode and + * Core/SDRAM clocks frequency ratio. Note it returns the theoretical bandwidth + * which in reality is hardly possible to reach. + * + * Return: SDRAM bandwidth or zero if no Core clock specified. 
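+ *
+ * For example, a 64-bit full-DQ interface fed by an 800 MHz Core clock in
+ * the 1:2 frequency-ratio mode runs the SDRAM bus at 1.6 GHz, which gives
+ * 2 * 8B * 1.6e9 = 25.6 GB/s of theoretical bandwidth (the numbers are
+ * purely illustrative).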
+ */ +static u64 snps_get_sdram_bw(struct snps_edac_priv *priv) +{ + unsigned long rate; + + /* + * Depending on the ratio mode the SDRAM clock either matches the Core + * clock or runs with the twice its frequency. + */ + rate = clk_get_rate(priv->clks[SNPS_CORE_CLK].clk); + rate *= priv->info.freq_ratio; + + /* + * Scale up by 2 since it's DDR (Double Data Rate) and subtract the + * DQ-mode since in non-Full mode only a part of the DQ-bus is utilised + * on each SDRAM clock edge. + */ + return (2U << (priv->info.dq_width - priv->info.dq_mode)) * (u64)rate; +} + +/** + * snps_get_scrub_bw - Get Scrubber bandwidth. + * @priv: DDR memory controller private instance data. + * @interval: Scrub interval. + * + * DW uMCTL2 DDRC Scrubber performs periodical progressive burst reads (RMW if + * ECC CE is detected) commands from the whole memory space. The read commands + * can be delayed by means of the SBRCTL.scrub_interval field. The Scrubber + * cycles look as follows: + * + * |---HIF-burst-read---|-------delay-------|-HIF-burst-read-| etc + * + * Tb = Bl*[DQ]/Bw[RAM] Td = 512*interval/Fc - periods of the stages, where + * Bl - HIF burst length, [DQ] - Full DQ-bus width, Bw[RAM] - SDRAM bandwidth, + * Fc - Core clock frequency (Scrubber and Core clocks are synchronous). + * + * After some simple calculations the expressions above can be used to get the + * next Scrubber bandwidth formulae: + * + * Bw[Sbr] = Bw[RAM] / (1 + F * interval), where + * F = 2 * 512 * Fr * Fc * [DQ]e - interval scale factor with + * Fr - HIF/SDRAM clock frequency ratio (1 or 2), [DQ]e - DQ-bus width mode. + * + * Return: Scrubber bandwidth or zero if no Core clock specified. + */ +static u64 snps_get_scrub_bw(struct snps_edac_priv *priv, u32 interval) +{ + unsigned long fac; + u64 bw_ram; + + fac = (2 * ECC_SBRCTL_INTERVAL_STEP * priv->info.freq_ratio) / + (priv->info.hif_burst_len * (1UL << priv->info.dq_mode)); + + bw_ram = snps_get_sdram_bw(priv); + + do_div(bw_ram, 1 + fac * interval); + + return bw_ram; +} + +/** + * snps_get_scrub_interval - Get Scrubber delay interval. + * @priv: DDR memory controller private instance data. + * @bw: Scrubber bandwidth. + * + * Similarly to the Scrubber bandwidth the interval formulae can be inferred + * from the same expressions: + * + * interval = (Bw[RAM] - Bw[Sbr]) / (F * Bw[Sbr]) + * + * Return: Scrubber delay interval or zero if no Core clock specified. + */ +static u32 snps_get_scrub_interval(struct snps_edac_priv *priv, u32 bw) +{ + unsigned long fac; + u64 bw_ram; + + fac = (2 * priv->info.freq_ratio * ECC_SBRCTL_INTERVAL_STEP) / + (priv->info.hif_burst_len * (1UL << priv->info.dq_mode)); + + bw_ram = snps_get_sdram_bw(priv); + + /* Divide twice so not to cause the integer overflow in (fac * bw) */ + bw_ram -= bw; + do_div(bw_ram, bw); + do_div(bw_ram, fac); + + return bw_ram; +} + +/** + * snps_set_sdram_scrub_rate - Set the Scrubber bandwidth. + * @mci: EDAC memory controller instance. + * @bw: Bandwidth. + * + * It calculates the delay between the Scrubber read commands based on the + * specified bandwidth and the Core clock rate. If the Core clock is unavailable + * the passed bandwidth will be directly used as the interval value. + * + * Note the method warns about the back-to-back scrubbing since it may + * significantly degrade the system performance. This mode is supposed to be + * used for a single SDRAM scrubbing pass only. So it will be turned off in the + * Scrubber Done IRQ handler. 
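+ *
+ * As a ballpark (using the bandwidth example above): with Fr = 2, BL8 HIF
+ * bursts and the full DQ-bus mode the scale factor is F = 2 * 512 * 2 / 8 =
+ * 256, so even interval = 1 throttles the Scrubber to about 1/257 of the
+ * SDRAM bandwidth (~100 MB/s out of 25.6 GB/s), while interval = 0 means
+ * back-to-back scrubbing.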
+ * + * Return: Actually set bandwidth (interval-based approximated bandwidth if the + * Core clock is unavailable) or zero if the Scrubber was disabled. + */ +static int snps_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw) +{ + struct snps_edac_priv *priv = mci->pvt_info; + u32 regval, interval; + unsigned long flags; + u64 bw_min, bw_max; + + /* Don't bother with the calculations just disable and return. */ + if (!bw) { + spin_lock_irqsave(&priv->lock, flags); + + regval = readl(priv->baseaddr + ECC_SBRCTL_OFST); + regval &= ~ECC_SBRCTL_SCRUB_EN; + writel(regval, priv->baseaddr + ECC_SBRCTL_OFST); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; + } + + /* If no Core clock specified fallback to the direct interval setup. */ + bw_max = snps_get_scrub_bw(priv, ECC_SBRCTL_INTERVAL_MIN); + if (bw_max) { + bw_min = snps_get_scrub_bw(priv, ECC_SBRCTL_INTERVAL_MAX); + bw = clamp_t(u64, bw, bw_min, bw_max); + + interval = snps_get_scrub_interval(priv, bw); + } else { + bw = clamp_val(bw, ECC_SBRCTL_INTERVAL_MIN, ECC_SBRCTL_INTERVAL_MAX); + + interval = ECC_SBRCTL_INTERVAL_MAX - bw; + } + + /* + * SBRCTL.scrub_en bitfield must be accessed separately from the other + * CSR bitfields. It means the flag must be set/clear with no updates + * to the rest of the fields. + */ + spin_lock_irqsave(&priv->lock, flags); + + regval = FIELD_PREP(ECC_SBRCTL_SCRUB_INTERVAL, interval); + writel(regval, priv->baseaddr + ECC_SBRCTL_OFST); + + writel(regval | ECC_SBRCTL_SCRUB_EN, priv->baseaddr + ECC_SBRCTL_OFST); + + spin_unlock_irqrestore(&priv->lock, flags); + + if (!interval) + edac_mc_printk(mci, KERN_WARNING, "Back-to-back scrubbing enabled\n"); + + if (!bw_max) + return interval ? bw : INT_MAX; + + return snps_get_scrub_bw(priv, interval); +} + +/** + * snps_get_sdram_scrub_rate - Get the Scrubber bandwidth. + * @mci: EDAC memory controller instance. + * + * Return: Scrubber bandwidth (interval-based approximated bandwidth if the + * Core clock is unavailable) or zero if the Scrubber was disabled. + */ +static int snps_get_sdram_scrub_rate(struct mem_ctl_info *mci) +{ + struct snps_edac_priv *priv = mci->pvt_info; + u32 regval; + u64 bw; + + regval = readl(priv->baseaddr + ECC_SBRCTL_OFST); + if (!(regval & ECC_SBRCTL_SCRUB_EN)) + return 0; + + regval = FIELD_GET(ECC_SBRCTL_SCRUB_INTERVAL, regval); + + bw = snps_get_scrub_bw(priv, regval); + if (!bw) + return regval ? ECC_SBRCTL_INTERVAL_MAX - regval : INT_MAX; + + return bw; +} + +/** + * snps_create_data - Create private data. + * @pdev: platform device. + * + * Return: Private data instance or negative errno. + */ +static struct snps_edac_priv *snps_create_data(struct platform_device *pdev) +{ + struct snps_edac_priv *priv; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + + priv->baseaddr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->baseaddr)) + return ERR_CAST(priv->baseaddr); + + priv->pdev = pdev; + spin_lock_init(&priv->lock); + + return priv; +} + +/** + * snps_get_res - Get platform device resources. + * @priv: DDR memory controller private instance data. + * + * It's supposed to request all the controller resources available for the + * particular platform and enable all the required for the driver normal + * work. Note only the CSR and Scrubber clocks are supposed to be switched + * on/off by the driver. + * + * Return: negative errno if failed to get the resources, otherwise - zero. 
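+ *
+ * The reference clocks are looked up by the "pclk", "aclk", "core" and "sbr"
+ * connection IDs, so a device tree node would typically carry something like
+ * (an illustrative fragment, clock specifiers omitted):
+ *
+ *	clock-names = "pclk", "aclk", "core", "sbr";
+ *
+ * All of them are optional, which is why devm_clk_bulk_get_optional() is
+ * used and missing clocks are tolerated.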
+ */ +static int snps_get_res(struct snps_edac_priv *priv) +{ + const char * const ids[] = { + [SNPS_CSR_CLK] = "pclk", + [SNPS_AXI_CLK] = "aclk", + [SNPS_CORE_CLK] = "core", + [SNPS_SBR_CLK] = "sbr", + }; + int i, rc; + + for (i = 0; i < SNPS_MAX_NCLK; i++) + priv->clks[i].id = ids[i]; + + rc = devm_clk_bulk_get_optional(&priv->pdev->dev, SNPS_MAX_NCLK, + priv->clks); + if (rc) { + edac_printk(KERN_INFO, EDAC_MC, "Failed to get ref clocks\n"); + return rc; + } + + /* + * Don't touch the Core and AXI clocks since they are critical for the + * stable system functioning and are supposed to have been enabled + * anyway. + */ + rc = clk_prepare_enable(priv->clks[SNPS_CSR_CLK].clk); + if (rc) { + edac_printk(KERN_INFO, EDAC_MC, "Couldn't enable CSR clock\n"); + return rc; + } + + rc = clk_prepare_enable(priv->clks[SNPS_SBR_CLK].clk); + if (rc) { + edac_printk(KERN_INFO, EDAC_MC, "Couldn't enable Scrubber clock\n"); + goto err_disable_pclk; + } + + return 0; + +err_disable_pclk: + clk_disable_unprepare(priv->clks[SNPS_CSR_CLK].clk); + + return rc; +} + +/** + * snps_put_res - Put platform device resources. + * @priv: DDR memory controller private instance data. + */ +static void snps_put_res(struct snps_edac_priv *priv) +{ + clk_disable_unprepare(priv->clks[SNPS_SBR_CLK].clk); + + clk_disable_unprepare(priv->clks[SNPS_CSR_CLK].clk); +} + +/* + * zynqmp_init_plat - ZynqMP-specific platform initialization. + * @priv: DDR memory controller private data. + * + * Return: always zero. + */ +static int zynqmp_init_plat(struct snps_edac_priv *priv) +{ + priv->info.caps |= SNPS_CAP_ZYNQMP; + priv->info.dq_width = SNPS_DQ_64; + + return 0; +} + +/* + * bt1_init_plat - Baikal-T1-specific platform initialization. + * @priv: DDR memory controller private data. + * + * Return: always zero. + */ +static int bt1_init_plat(struct snps_edac_priv *priv) +{ + priv->info.hif_burst_len = SNPS_DDR_BL8; + priv->sys_app_map.minsize = DDR_MIN_SARSIZE; + + return 0; +} + +/** + * snps_get_dtype - Return the controller memory width. + * @mstr: Master CSR value. + * + * Get the EDAC device type width appropriate for the current controller + * configuration. + * + * Return: a device type width enumeration. + */ +static inline enum dev_type snps_get_dtype(u32 mstr) +{ + if (!(mstr & DDR_MSTR_MEM_DDR4)) + return DEV_UNKNOWN; + + switch (FIELD_GET(DDR_MSTR_DEV_CFG_MASK, mstr)) { + case DDR_MSTR_DEV_X4: + return DEV_X4; + case DDR_MSTR_DEV_X8: + return DEV_X8; + case DDR_MSTR_DEV_X16: + return DEV_X16; + case DDR_MSTR_DEV_X32: + return DEV_X32; + } + + return DEV_UNKNOWN; +} + +/** + * snps_get_mtype - Returns controller memory type. + * @mstr: Master CSR value. + * + * Get the EDAC memory type appropriate for the current controller + * configuration. + * + * Return: a memory type enumeration. + */ +static inline enum mem_type snps_get_mtype(u32 mstr) +{ + switch (FIELD_GET(DDR_MSTR_MEM_MASK, mstr)) { + case DDR_MSTR_MEM_DDR2: + return MEM_DDR2; + case DDR_MSTR_MEM_DDR3: + return MEM_DDR3; + case DDR_MSTR_MEM_LPDDR: + return MEM_LPDDR; + case DDR_MSTR_MEM_LPDDR2: + return MEM_LPDDR2; + case DDR_MSTR_MEM_LPDDR3: + return MEM_LPDDR3; + case DDR_MSTR_MEM_DDR4: + return MEM_DDR4; + case DDR_MSTR_MEM_LPDDR4: + return MEM_LPDDR4; + } + + return MEM_RESERVED; +} + +/** + * snps_get_ddrc_info - Get the DDR controller config data. + * @priv: DDR memory controller private data. + * + * Return: negative errno if no ECC detected, otherwise - zero. 
+ */ +static int snps_get_ddrc_info(struct snps_edac_priv *priv) +{ + int (*init_plat)(struct snps_edac_priv *priv); + u32 regval; + + /* Before getting the DDRC parameters make sure ECC is enabled */ + regval = readl(priv->baseaddr + ECC_CFG0_OFST); + + priv->info.ecc_mode = FIELD_GET(ECC_CFG0_MODE_MASK, regval); + if (priv->info.ecc_mode != SNPS_ECC_SECDED) { + edac_printk(KERN_INFO, EDAC_MC, "SEC/DED ECC not enabled\n"); + return -ENXIO; + } + + /* Assume HW-src scrub is always available if it isn't disabled */ + if (!(regval & ECC_CFG0_DIS_SCRUB)) + priv->info.caps |= SNPS_CAP_ECC_SCRUB; + + /* Auto-detect the scrubber by writing to the SBRWDATA0 CSR */ + regval = readl(priv->baseaddr + ECC_SBRWDATA0_OFST); + writel(~regval, priv->baseaddr + ECC_SBRWDATA0_OFST); + if (regval != readl(priv->baseaddr + ECC_SBRWDATA0_OFST)) { + priv->info.caps |= SNPS_CAP_ECC_SCRUBBER; + writel(regval, priv->baseaddr + ECC_SBRWDATA0_OFST); + } + + /* Auto-detect the basic HIF/SDRAM bus parameters */ + regval = readl(priv->baseaddr + DDR_MSTR_OFST); + + priv->info.sdram_mode = snps_get_mtype(regval); + priv->info.dev_cfg = snps_get_dtype(regval); + + priv->info.dq_mode = FIELD_GET(DDR_MSTR_BUSWIDTH_MASK, regval); + + /* + * Assume HIF burst length matches the SDRAM burst length since it's + * not auto-detectable + */ + priv->info.sdram_burst_len = FIELD_GET(DDR_MSTR_BURST_RDWR, regval) << 1; + priv->info.hif_burst_len = priv->info.sdram_burst_len; + + /* Retrieve the current HIF/SDRAM frequency ratio: 1:1 vs 1:2 */ + priv->info.freq_ratio = !(regval & DDR_MSTR_FREQ_RATIO11) + 1; + + /* Activated ranks field: set bit corresponds to populated rank */ + priv->info.ranks = FIELD_GET(DDR_MSTR_ACT_RANKS_MASK, regval); + priv->info.ranks = hweight_long(priv->info.ranks); + + /* Auto-detect the DQ bus width by using the ECC-poison pattern CSR */ + writel(0, priv->baseaddr + DDR_SWCTL); + + /* + * If poison pattern [32:64] is changeable then DQ is 64-bit wide. + * Note the feature has been available since IP-core v2.51a. + */ + regval = readl(priv->baseaddr + ECC_POISONPAT1_OFST); + writel(~regval, priv->baseaddr + ECC_POISONPAT1_OFST); + if (regval != readl(priv->baseaddr + ECC_POISONPAT1_OFST)) { + priv->info.dq_width = SNPS_DQ_64; + writel(regval, priv->baseaddr + ECC_POISONPAT1_OFST); + } else { + priv->info.dq_width = SNPS_DQ_32; + } + + writel(1, priv->baseaddr + DDR_SWCTL); + + /* Apply platform setups after all the configs auto-detection */ + init_plat = device_get_match_data(&priv->pdev->dev); + + return init_plat ? init_plat(priv) : 0; +} + +/** + * snps_get_sys_app_map - Get System/Application address map. + * @priv: DDR memory controller private instance data. + * @sarregs: Array with SAR registers value. + * + * System address regions are defined by the SARBASEn and SARSIZEn registers. + * Controller reference manual requires the base addresses and sizes creating + * a set of ascending non-overlapped regions in order to have a linear + * application address space. Doing otherwise causes unpredictable results. + */ +static void snps_get_sys_app_map(struct snps_edac_priv *priv, u32 *sarregs) +{ + struct snps_sys_app_map *map = &priv->sys_app_map; + int i, ofst; + + /* + * SARs are supposed to be initialized in the ascending non-overlapped + * order: base[i - 1] < base[i] < etc. If that rule is broken for a SAR + * it's considered as no more SARs have been enabled, so the detection + * procedure will halt. Having the very first SAR with zero base + * address only makes sense if there is a consequent SAR. 
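+ * For example (a hypothetical layout with minsize = 256MB): SAR0
+ * {base 0x0, size 2GB} followed by SAR1 {base 0x800000000, size 2GB}
+ * produce the offsets 0 and 0x780000000 respectively, so the system
+ * address 0x800000000 lands at the application address 0x80000000,
+ * right after the first region.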
+ */ + for (i = 0, ofst = 0; i < DDR_MAX_NSAR; i++) { + map->sar[i].base = sarregs[2 * i] * map->minsize; + if (map->sar[i].base) + map->nsar = i + 1; + else if (i && map->sar[i].base <= map->sar[i - 1].base) + break; + + map->sar[i].size = (sarregs[2 * i + 1] + 1) * map->minsize; + map->sar[i].ofst = map->sar[i].base - ofst; + ofst += map->sar[i].size; + } + + /* + * SAR block size isn't auto-detectable. If one isn't specified for the + * platform there is a good chance to have invalid mapping of the + * detected SARs. So proceed with 1:1 mapping then. + */ + if (!map->minsize && map->nsar) { + edac_printk(KERN_WARNING, EDAC_MC, + "No block size specified. Discard SARs mapping\n"); + map->nsar = 0; + } +} + +/** + * snps_get_hif_row_map - Get HIF/SDRAM-row address map. + * @priv: DDR memory controller private instance data. + * @addrmap: Array with ADDRMAP registers value. + * + * SDRAM-row address is defined by the fields in the ADDRMAP[5-7,9-11] + * registers. Those fields value indicate the HIF address bits used to encode + * the DDR row address. + */ +static void snps_get_hif_row_map(struct snps_edac_priv *priv, u32 *addrmap) +{ + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + u8 map_row_b2_10; + int i; + + for (i = 0; i < DDR_MAX_ROW_WIDTH; i++) + map->row[i] = DDR_ADDRMAP_UNUSED; + + map->row[0] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[5]) + ROW_B0_BASE; + map->row[1] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[5]) + ROW_B1_BASE; + + map_row_b2_10 = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[5]); + if (map_row_b2_10 != DDR_ADDRMAP_MAX_15) { + for (i = 2; i < 11; i++) + map->row[i] = map_row_b2_10 + i + ROW_B0_BASE; + } else { + map->row[2] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[9]) + ROW_B2_BASE; + map->row[3] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[9]) + ROW_B3_BASE; + map->row[4] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[9]) + ROW_B4_BASE; + map->row[5] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[9]) + ROW_B5_BASE; + map->row[6] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[10]) + ROW_B6_BASE; + map->row[7] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[10]) + ROW_B7_BASE; + map->row[8] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[10]) + ROW_B8_BASE; + map->row[9] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[10]) + ROW_B9_BASE; + map->row[10] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[11]) + ROW_B10_BASE; + } + + map->row[11] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[5]); + map->row[11] = map->row[11] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->row[11] + ROW_B11_BASE; + + map->row[12] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[6]); + map->row[12] = map->row[12] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->row[12] + ROW_B12_BASE; + + map->row[13] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[6]); + map->row[13] = map->row[13] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->row[13] + ROW_B13_BASE; + + map->row[14] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[6]); + map->row[14] = map->row[14] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->row[14] + ROW_B14_BASE; + + map->row[15] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[6]); + map->row[15] = map->row[15] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->row[15] + ROW_B15_BASE; + + if (priv->info.sdram_mode == MEM_DDR4 || priv->info.sdram_mode == MEM_LPDDR4) { + map->row[16] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[7]); + map->row[16] = map->row[16] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->row[16] + ROW_B16_BASE; + + map->row[17] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[7]); + map->row[17] = map->row[17] == DDR_ADDRMAP_MAX_15 ? 
+ DDR_ADDRMAP_UNUSED : map->row[17] + ROW_B17_BASE; + } +} + +/** + * snps_get_hif_col_map - Get HIF/SDRAM-column address map. + * @priv: DDR memory controller private instance data. + * @addrmap: Array with ADDRMAP registers value. + * + * SDRAM-column address is defined by the fields in the ADDRMAP[2-4] + * registers. Those fields value indicate the HIF address bits used to encode + * the DDR row address. + */ +static void snps_get_hif_col_map(struct snps_edac_priv *priv, u32 *addrmap) +{ + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + int i; + + for (i = 0; i < DDR_MAX_COL_WIDTH; i++) + map->col[i] = DDR_ADDRMAP_UNUSED; + + map->col[0] = 0; + map->col[1] = 1; + map->col[2] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[2]) + COL_B2_BASE; + map->col[3] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[2]) + COL_B3_BASE; + + map->col[4] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[2]); + map->col[4] = map->col[4] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->col[4] + COL_B4_BASE; + + map->col[5] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[2]); + map->col[5] = map->col[5] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->col[5] + COL_B5_BASE; + + map->col[6] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[3]); + map->col[6] = map->col[6] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->col[6] + COL_B6_BASE; + + map->col[7] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[3]); + map->col[7] = map->col[7] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->col[7] + COL_B7_BASE; + + map->col[8] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[3]); + map->col[8] = map->col[8] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->col[8] + COL_B8_BASE; + + map->col[9] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[3]); + map->col[9] = map->col[9] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->col[9] + COL_B9_BASE; + + map->col[10] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[4]); + map->col[10] = map->col[10] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->col[10] + COL_B10_BASE; + + map->col[11] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[4]); + map->col[11] = map->col[11] == DDR_ADDRMAP_MAX_15 ? + DDR_ADDRMAP_UNUSED : map->col[11] + COL_B11_BASE; + + /* + * In case of the non-Full DQ bus mode the lowest columns are + * unmapped and used by the controller to read the full DQ word + * in multiple cycles (col[0] for the Half bus mode, col[0:1] for + * the Quarter bus mode). + */ + if (priv->info.dq_mode) { + for (i = 11 + priv->info.dq_mode; i >= priv->info.dq_mode; i--) { + map->col[i] = map->col[i - priv->info.dq_mode]; + map->col[i - priv->info.dq_mode] = DDR_ADDRMAP_UNUSED; + } + } + + /* + * Per JEDEC DDR2/3/4/mDDR specification, column address bit 10 is + * reserved for indicating auto-precharge, and hence no source + * address bit can be mapped to col[10]. + */ + if (priv->info.sdram_mode == MEM_LPDDR || priv->info.sdram_mode == MEM_DDR2 || + priv->info.sdram_mode == MEM_DDR3 || priv->info.sdram_mode == MEM_DDR4) { + for (i = 12 + priv->info.dq_mode; i > 10; i--) { + map->col[i] = map->col[i - 1]; + map->col[i - 1] = DDR_ADDRMAP_UNUSED; + } + } + + /* + * Per JEDEC specification, column address bit 12 is reserved + * for the Burst-chop status, so no source address bit mapping + * for col[12] either. + */ + map->col[13] = map->col[12]; + map->col[12] = DDR_ADDRMAP_UNUSED; +} + +/** + * snps_get_hif_bank_map - Get HIF/SDRAM-bank address map. + * @priv: DDR memory controller private instance data. + * @addrmap: Array with ADDRMAP registers value. 
+ * + * SDRAM-bank address is defined by the fields in the ADDRMAP[1] + * register. Those fields value indicate the HIF address bits used to encode + * the DDR bank address. + */ +static void snps_get_hif_bank_map(struct snps_edac_priv *priv, u32 *addrmap) +{ + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + int i; + + for (i = 0; i < DDR_MAX_BANK_WIDTH; i++) + map->bank[i] = DDR_ADDRMAP_UNUSED; + + map->bank[0] = FIELD_GET(DDR_ADDRMAP_B0_M31, addrmap[1]) + BANK_B0_BASE; + map->bank[1] = FIELD_GET(DDR_ADDRMAP_B8_M31, addrmap[1]) + BANK_B1_BASE; + + map->bank[2] = FIELD_GET(DDR_ADDRMAP_B16_M31, addrmap[1]); + map->bank[2] = map->bank[2] == DDR_ADDRMAP_MAX_31 ? + DDR_ADDRMAP_UNUSED : map->bank[2] + BANK_B2_BASE; +} + +/** + * snps_get_hif_bankgrp_map - Get HIF/SDRAM-bank group address map. + * @priv: DDR memory controller private instance data. + * @addrmap: Array with ADDRMAP registers value. + * + * SDRAM-bank group address is defined by the fields in the ADDRMAP[8] + * register. Those fields value indicate the HIF address bits used to encode + * the DDR bank group address. + */ +static void snps_get_hif_bankgrp_map(struct snps_edac_priv *priv, u32 *addrmap) +{ + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + int i; + + for (i = 0; i < DDR_MAX_BANKGRP_WIDTH; i++) + map->bankgrp[i] = DDR_ADDRMAP_UNUSED; + + /* Bank group signals are available on the DDR4 memory only */ + if (priv->info.sdram_mode != MEM_DDR4) + return; + + map->bankgrp[0] = FIELD_GET(DDR_ADDRMAP_B0_M31, addrmap[8]) + BANKGRP_B0_BASE; + + map->bankgrp[1] = FIELD_GET(DDR_ADDRMAP_B8_M31, addrmap[8]); + map->bankgrp[1] = map->bankgrp[1] == DDR_ADDRMAP_MAX_31 ? + DDR_ADDRMAP_UNUSED : map->bankgrp[1] + BANKGRP_B1_BASE; +} + +/** + * snps_get_hif_rank_map - Get HIF/SDRAM-rank address map. + * @priv: DDR memory controller private instance data. + * @addrmap: Array with ADDRMAP registers value. + * + * SDRAM-rank address is defined by the fields in the ADDRMAP[0] + * register. Those fields value indicate the HIF address bits used to encode + * the DDR rank address. + */ +static void snps_get_hif_rank_map(struct snps_edac_priv *priv, u32 *addrmap) +{ + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + int i; + + for (i = 0; i < DDR_MAX_RANK_WIDTH; i++) + map->rank[i] = DDR_ADDRMAP_UNUSED; + + if (priv->info.ranks > 1) { + map->rank[0] = FIELD_GET(DDR_ADDRMAP_B0_M31, addrmap[0]); + map->rank[0] = map->rank[0] == DDR_ADDRMAP_MAX_31 ? + DDR_ADDRMAP_UNUSED : map->rank[0] + RANK_B0_BASE; + } + + if (priv->info.ranks > 2) { + map->rank[1] = FIELD_GET(DDR_ADDRMAP_B8_M31, addrmap[0]); + map->rank[1] = map->rank[1] == DDR_ADDRMAP_MAX_31 ? + DDR_ADDRMAP_UNUSED : map->rank[1] + RANK_B1_BASE; + } +} + +/** + * snps_get_addr_map - Get HIF/SDRAM/etc address map from CSRs. + * @priv: DDR memory controller private instance data. + * + * Parse the controller registers content creating the addresses mapping tables. + * They will be used for the erroneous and poison addresses encode/decode. 
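+ *
+ * The resulting decode chain for a logged error address is:
+ *
+ *   sys --(SAR offset)--> app --(>> dq_width)--> hif --(ADDRMAP)--> rank/bankgrp/bank/row/col
+ *
+ * and the same tables are walked in the opposite direction when an SDRAM
+ * address needs to be turned back into a system address.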
+ */ +static void snps_get_addr_map(struct snps_edac_priv *priv) +{ + u32 regval[max(DDR_ADDRMAP_NREGS, 2 * DDR_MAX_NSAR)]; + int i; + + for (i = 0; i < 2 * DDR_MAX_NSAR; i++) + regval[i] = readl(priv->baseaddr + DDR_SARBASE0_OFST + i * 4); + + snps_get_sys_app_map(priv, regval); + + for (i = 0; i < DDR_ADDRMAP_NREGS; i++) + regval[i] = readl(priv->baseaddr + DDR_ADDRMAP0_OFST + i * 4); + + snps_get_hif_row_map(priv, regval); + + snps_get_hif_col_map(priv, regval); + + snps_get_hif_bank_map(priv, regval); + + snps_get_hif_bankgrp_map(priv, regval); + + snps_get_hif_rank_map(priv, regval); +} + +/** + * snps_get_sdram_size - Calculate SDRAM size. + * @priv: DDR memory controller private data. + * + * The total size of the attached memory is calculated based on the HIF/SDRAM + * mapping table. It can be done since the hardware reference manual demands + * that none two SDRAM bits should be mapped to the same HIF bit and that the + * unused SDRAM address bits mapping must be disabled. + * + * Return: the memory size in bytes. + */ +static u64 snps_get_sdram_size(struct snps_edac_priv *priv) +{ + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + u64 size = 0; + int i; + + for (i = 0; i < DDR_MAX_ROW_WIDTH; i++) { + if (map->row[i] != DDR_ADDRMAP_UNUSED) + size++; + } + + for (i = 0; i < DDR_MAX_COL_WIDTH; i++) { + if (map->col[i] != DDR_ADDRMAP_UNUSED) + size++; + } + + for (i = 0; i < DDR_MAX_BANK_WIDTH; i++) { + if (map->bank[i] != DDR_ADDRMAP_UNUSED) + size++; + } + + for (i = 0; i < DDR_MAX_BANKGRP_WIDTH; i++) { + if (map->bankgrp[i] != DDR_ADDRMAP_UNUSED) + size++; + } + + /* Skip the ranks since the multi-rankness is determined by layer[0] */ + + return 1ULL << (size + priv->info.dq_width); } -static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci) +/** + * snps_init_csrows - Initialize the csrow data. + * @mci: EDAC memory controller instance. + * + * Initialize the chip select rows associated with the EDAC memory + * controller instance. + */ +static void snps_init_csrows(struct mem_ctl_info *mci) { - device_remove_file(&mci->dev, &dev_attr_inject_data_error); - device_remove_file(&mci->dev, &dev_attr_inject_data_poison); + struct snps_edac_priv *priv = mci->pvt_info; + struct csrow_info *csi; + struct dimm_info *dimm; + u32 row, width; + u64 size; + int j; + + /* Actual SDRAM-word width for which ECC is calculated */ + width = 1U << (priv->info.dq_width - priv->info.dq_mode); + + for (row = 0; row < mci->nr_csrows; row++) { + csi = mci->csrows[row]; + size = snps_get_sdram_size(priv); + + for (j = 0; j < csi->nr_channels; j++) { + dimm = csi->channels[j]->dimm; + dimm->edac_mode = EDAC_SECDED; + dimm->mtype = priv->info.sdram_mode; + dimm->nr_pages = PHYS_PFN(size) / csi->nr_channels; + dimm->grain = width; + dimm->dtype = priv->info.dev_cfg; + } + } } -static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap) +/** + * snps_mc_create - Create and initialize MC instance. + * @priv: DDR memory controller private data. + * + * Allocate the EDAC memory controller descriptor and initialize it + * using the private data info. + * + * Return: MC data instance or negative errno. 
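+ *
+ * A short usage sketch matching the probe path implemented below:
+ *
+ *	mci = snps_mc_create(priv);
+ *	if (IS_ERR(mci))
+ *		return PTR_ERR(mci);
+ *
+ *	rc = edac_mc_add_mc(mci);
+ *	if (rc)
+ *		snps_mc_free(mci);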
+ */ +static struct mem_ctl_info *snps_mc_create(struct snps_edac_priv *priv) { - u32 addrmap_row_b2_10; - int index; + struct edac_mc_layer layers[2]; + struct mem_ctl_info *mci; + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = priv->info.ranks; + layers[0].is_virt_csrow = true; + layers[1].type = EDAC_MC_LAYER_CHANNEL; + layers[1].size = SNPS_EDAC_NR_CHANS; + layers[1].is_virt_csrow = false; - priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE; - priv->row_shift[1] = ((addrmap[5] >> 8) & - ROW_MAX_VAL_MASK) + ROW_B1_BASE; + mci = edac_mc_alloc(EDAC_AUTO_MC_NUM, ARRAY_SIZE(layers), layers, 0); + if (!mci) { + edac_printk(KERN_ERR, EDAC_MC, + "Failed memory allocation for mc instance\n"); + return ERR_PTR(-ENOMEM); + } - addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK; - if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) { - for (index = 2; index < 11; index++) - priv->row_shift[index] = addrmap_row_b2_10 + - index + ROW_B0_BASE; + mci->pvt_info = priv; + mci->pdev = &priv->pdev->dev; + platform_set_drvdata(priv->pdev, mci); + /* Initialize controller capabilities and configuration */ + mci->mtype_cap = MEM_FLAG_LPDDR | MEM_FLAG_DDR2 | MEM_FLAG_LPDDR2 | + MEM_FLAG_DDR3 | MEM_FLAG_LPDDR3 | + MEM_FLAG_DDR4 | MEM_FLAG_LPDDR4; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_PARITY; + mci->edac_cap = mci->edac_ctl_cap; + + if (priv->info.caps & SNPS_CAP_ECC_SCRUB) { + mci->scrub_mode = SCRUB_HW_SRC; + mci->scrub_cap = SCRUB_FLAG_HW_SRC; } else { - priv->row_shift[2] = (addrmap[9] & - ROW_MAX_VAL_MASK) + ROW_B2_BASE; - priv->row_shift[3] = ((addrmap[9] >> 8) & - ROW_MAX_VAL_MASK) + ROW_B3_BASE; - priv->row_shift[4] = ((addrmap[9] >> 16) & - ROW_MAX_VAL_MASK) + ROW_B4_BASE; - priv->row_shift[5] = ((addrmap[9] >> 24) & - ROW_MAX_VAL_MASK) + ROW_B5_BASE; - priv->row_shift[6] = (addrmap[10] & - ROW_MAX_VAL_MASK) + ROW_B6_BASE; - priv->row_shift[7] = ((addrmap[10] >> 8) & - ROW_MAX_VAL_MASK) + ROW_B7_BASE; - priv->row_shift[8] = ((addrmap[10] >> 16) & - ROW_MAX_VAL_MASK) + ROW_B8_BASE; - priv->row_shift[9] = ((addrmap[10] >> 24) & - ROW_MAX_VAL_MASK) + ROW_B9_BASE; - priv->row_shift[10] = (addrmap[11] & - ROW_MAX_VAL_MASK) + ROW_B10_BASE; + mci->scrub_mode = SCRUB_SW_SRC; + mci->scrub_cap = SCRUB_FLAG_SW_SRC; } - priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) == - ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) & - ROW_MAX_VAL_MASK) + ROW_B11_BASE); - priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) == - ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] & - ROW_MAX_VAL_MASK) + ROW_B12_BASE); - priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) == - ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) & - ROW_MAX_VAL_MASK) + ROW_B13_BASE); - priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) == - ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) & - ROW_MAX_VAL_MASK) + ROW_B14_BASE); - priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) == - ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) & - ROW_MAX_VAL_MASK) + ROW_B15_BASE); - priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) == - ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] & - ROW_MAX_VAL_MASK) + ROW_B16_BASE); - priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) == - ROW_MAX_VAL_MASK) ? 
0 : (((addrmap[7] >> 8) & - ROW_MAX_VAL_MASK) + ROW_B17_BASE); + if (priv->info.caps & SNPS_CAP_ECC_SCRUBBER) { + mci->scrub_cap |= SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_TUN; + mci->set_sdram_scrub_rate = snps_set_sdram_scrub_rate; + mci->get_sdram_scrub_rate = snps_get_sdram_scrub_rate; + } + + mci->ctl_name = "snps_umctl2_ddrc"; + mci->dev_name = SNPS_EDAC_MOD_STRING; + mci->mod_name = SNPS_EDAC_MOD_VER; + + edac_op_state = EDAC_OPSTATE_INT; + + mci->ctl_page_to_phys = NULL; + + snps_init_csrows(mci); + + return mci; +} + +/** + * snps_mc_free - Free MC instance. + * @mci: EDAC memory controller instance. + * + * Just revert what was done in the framework of the snps_mc_create(). + * + * Return: MC data instance or negative errno. + */ +static void snps_mc_free(struct mem_ctl_info *mci) +{ + struct snps_edac_priv *priv = mci->pvt_info; + + platform_set_drvdata(priv->pdev, NULL); + + edac_mc_free(mci); } -static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap) +/** + * snps_request_ind_irq - Request individual DDRC IRQs. + * @mci: EDAC memory controller instance. + * + * Return: 0 if the IRQs were successfully requested, 1 if the individual IRQs + * are unavailable, otherwise negative errno. + */ +static int snps_request_ind_irq(struct mem_ctl_info *mci) { - u32 width, memtype; - int index; - - memtype = readl(priv->baseaddr + CTRL_OFST); - width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT; - - priv->col_shift[0] = 0; - priv->col_shift[1] = 1; - priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE; - priv->col_shift[3] = ((addrmap[2] >> 8) & - COL_MAX_VAL_MASK) + COL_B3_BASE; - priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) == - COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) & - COL_MAX_VAL_MASK) + COL_B4_BASE); - priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) == - COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) & - COL_MAX_VAL_MASK) + COL_B5_BASE); - priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) == - COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] & - COL_MAX_VAL_MASK) + COL_B6_BASE); - priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) == - COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) & - COL_MAX_VAL_MASK) + COL_B7_BASE); - priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) == - COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) & - COL_MAX_VAL_MASK) + COL_B8_BASE); - priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) == - COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) & - COL_MAX_VAL_MASK) + COL_B9_BASE); - if (width == DDRCTL_EWDTH_64) { - if (memtype & MEM_TYPE_LPDDR3) { - priv->col_shift[10] = ((addrmap[4] & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - ((addrmap[4] & COL_MAX_VAL_MASK) + - COL_B10_BASE); - priv->col_shift[11] = (((addrmap[4] >> 8) & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - (((addrmap[4] >> 8) & COL_MAX_VAL_MASK) + - COL_B11_BASE); - } else { - priv->col_shift[11] = ((addrmap[4] & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - ((addrmap[4] & COL_MAX_VAL_MASK) + - COL_B10_BASE); - priv->col_shift[13] = (((addrmap[4] >> 8) & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 
0 : - (((addrmap[4] >> 8) & COL_MAX_VAL_MASK) + - COL_B11_BASE); + struct snps_edac_priv *priv = mci->pvt_info; + struct device *dev = &priv->pdev->dev; + int rc, irq; + + irq = platform_get_irq_byname_optional(priv->pdev, "ecc_ce"); + if (irq == -ENXIO) + return 1; + if (irq < 0) + return irq; + + rc = devm_request_irq(dev, irq, snps_ce_irq_handler, 0, "ecc_ce", mci); + if (rc) { + edac_printk(KERN_ERR, EDAC_MC, "Failed to request ECC CE IRQ\n"); + return rc; + } + + irq = platform_get_irq_byname(priv->pdev, "ecc_ue"); + if (irq < 0) + return irq; + + rc = devm_request_irq(dev, irq, snps_ue_irq_handler, 0, "ecc_ue", mci); + if (rc) { + edac_printk(KERN_ERR, EDAC_MC, "Failed to request ECC UE IRQ\n"); + return rc; + } + + irq = platform_get_irq_byname_optional(priv->pdev, "dfi_e"); + if (irq > 0) { + rc = devm_request_irq(dev, irq, snps_dfi_irq_handler, 0, "dfi_e", mci); + if (rc) { + edac_printk(KERN_ERR, EDAC_MC, "Failed to request DFI IRQ\n"); + return rc; } - } else if (width == DDRCTL_EWDTH_32) { - if (memtype & MEM_TYPE_LPDDR3) { - priv->col_shift[10] = (((addrmap[3] >> 24) & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) + - COL_B9_BASE); - priv->col_shift[11] = ((addrmap[4] & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - ((addrmap[4] & COL_MAX_VAL_MASK) + - COL_B10_BASE); - } else { - priv->col_shift[11] = (((addrmap[3] >> 24) & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) + - COL_B9_BASE); - priv->col_shift[13] = ((addrmap[4] & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - ((addrmap[4] & COL_MAX_VAL_MASK) + - COL_B10_BASE); + } + + irq = platform_get_irq_byname_optional(priv->pdev, "ecc_sbr"); + if (irq > 0) { + rc = devm_request_irq(dev, irq, snps_sbr_irq_handler, 0, "ecc_sbr", mci); + if (rc) { + edac_printk(KERN_ERR, EDAC_MC, "Failed to request Sbr IRQ\n"); + return rc; } + } + + + return 0; +} + +/** + * snps_request_com_irq - Request common DDRC IRQ. + * @mci: EDAC memory controller instance. + * + * It first attempts to get the named IRQ. If failed the method fallbacks + * to first available one. + * + * Return: 0 if the IRQ was successfully requested otherwise negative errno. + */ +static int snps_request_com_irq(struct mem_ctl_info *mci) +{ + struct snps_edac_priv *priv = mci->pvt_info; + struct device *dev = &priv->pdev->dev; + int rc, irq; + + irq = platform_get_irq_byname_optional(priv->pdev, "ecc"); + if (irq < 0) { + irq = platform_get_irq(priv->pdev, 0); + if (irq < 0) + return irq; + } + + rc = devm_request_irq(dev, irq, snps_com_irq_handler, 0, "ecc", mci); + if (rc) { + edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n"); + return rc; + } + + return 0; +} + +/** + * snps_setup_irq - Request and enable DDRC IRQs. + * @mci: EDAC memory controller instance. + * + * It first tries to get and request individual IRQs. If failed the method + * fallbacks to the common IRQ line case. The IRQs will be enabled only if + * some of these requests have been successful. + * + * Return: 0 if IRQs were successfully setup otherwise negative errno. 
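+ *
+ * For reference, an illustrative summary of the individual IRQ names probed
+ * by snps_request_ind_irq() before falling back to the common "ecc" line
+ * (the array itself is not defined by this driver):
+ *
+ *	static const char * const snps_ind_irq_names[] = {
+ *		"ecc_ce", "ecc_ue", "dfi_e", "ecc_sbr",
+ *	};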
+ */ +static int snps_setup_irq(struct mem_ctl_info *mci) +{ + struct snps_edac_priv *priv = mci->pvt_info; + int rc; + + rc = snps_request_ind_irq(mci); + if (rc > 0) + rc = snps_request_com_irq(mci); + if (rc) + return rc; + + snps_enable_irq(priv); + + return 0; +} + +#ifdef CONFIG_EDAC_DEBUG + +#define SNPS_DEBUGFS_FOPS(__name, __read, __write) \ + static const struct file_operations __name = { \ + .owner = THIS_MODULE, \ + .open = simple_open, \ + .read = __read, \ + .write = __write, \ + } + +#define SNPS_DBGFS_BUF_LEN 128 + +static int snps_ddrc_info_show(struct seq_file *s, void *data) +{ + struct mem_ctl_info *mci = s->private; + struct snps_edac_priv *priv = mci->pvt_info; + unsigned long rate; + + seq_printf(s, "SDRAM: %s\n", edac_mem_types[priv->info.sdram_mode]); + + rate = clk_get_rate(priv->clks[SNPS_CORE_CLK].clk); + if (rate) { + rate = rate / HZ_PER_MHZ; + seq_printf(s, "Clock: Core %luMHz SDRAM %luMHz\n", + rate, priv->info.freq_ratio * rate); + } + + seq_printf(s, "DQ bus: %u/%s\n", (BITS_PER_BYTE << priv->info.dq_width), + priv->info.dq_mode == SNPS_DQ_FULL ? "Full" : + priv->info.dq_mode == SNPS_DQ_HALF ? "Half" : + priv->info.dq_mode == SNPS_DQ_QRTR ? "Quarter" : + "Unknown"); + seq_printf(s, "Burst: SDRAM %u HIF %u\n", priv->info.sdram_burst_len, + priv->info.hif_burst_len); + + seq_printf(s, "Ranks: %u\n", priv->info.ranks); + + seq_printf(s, "ECC: %s\n", + priv->info.ecc_mode == SNPS_ECC_SECDED ? "SEC/DED" : + priv->info.ecc_mode == SNPS_ECC_ADVX4X8 ? "Advanced X4/X8" : + "Unknown"); + + seq_puts(s, "Caps:"); + if (priv->info.caps) { + if (priv->info.caps & SNPS_CAP_ECC_SCRUB) + seq_puts(s, " +Scrub"); + if (priv->info.caps & SNPS_CAP_ECC_SCRUBBER) + seq_puts(s, " +Scrubber"); + if (priv->info.caps & SNPS_CAP_ZYNQMP) + seq_puts(s, " +ZynqMP"); } else { - if (memtype & MEM_TYPE_LPDDR3) { - priv->col_shift[10] = (((addrmap[3] >> 16) & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) + - COL_B8_BASE); - priv->col_shift[11] = (((addrmap[3] >> 24) & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) + - COL_B9_BASE); - priv->col_shift[13] = ((addrmap[4] & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - ((addrmap[4] & COL_MAX_VAL_MASK) + - COL_B10_BASE); - } else { - priv->col_shift[11] = (((addrmap[3] >> 16) & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 : - (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) + - COL_B8_BASE); - priv->col_shift[13] = (((addrmap[3] >> 24) & - COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 
0 : - (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) + - COL_B9_BASE); + seq_puts(s, " -"); + } + seq_putc(s, '\n'); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(snps_ddrc_info); + +static int snps_sys_app_map_show(struct seq_file *s, void *data) +{ + struct mem_ctl_info *mci = s->private; + struct snps_edac_priv *priv = mci->pvt_info; + struct snps_sys_app_map *map = &priv->sys_app_map; + u64 size; + int i; + + if (!map->nsar) { + seq_puts(s, "No SARs detected\n"); + return 0; + } + + seq_printf(s, "%9s %-37s %-18s %-37s\n", + "", "System address", "Offset", "App address"); + + for (i = 0, size = 0; i < map->nsar; i++) { + seq_printf(s, "Region %d: ", i); + seq_printf(s, "0x%016llx-0x%016llx ", map->sar[i].base, + map->sar[i].base + map->sar[i].size - 1); + seq_printf(s, "0x%016llx ", map->sar[i].ofst); + seq_printf(s, "0x%016llx-0x%016llx\n", size, + size + map->sar[i].size - 1); + size += map->sar[i].size; + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(snps_sys_app_map); + +static u8 snps_find_sdram_dim(struct snps_edac_priv *priv, u8 hif, char *dim) +{ + struct snps_hif_sdram_map *map = &priv->hif_sdram_map; + int i; + + for (i = 0; i < DDR_MAX_ROW_WIDTH; i++) { + if (map->row[i] == hif) { + *dim = 'r'; + return i; + } + } + + for (i = 0; i < DDR_MAX_COL_WIDTH; i++) { + if (map->col[i] == hif) { + *dim = 'c'; + return i; + } + } + + for (i = 0; i < DDR_MAX_BANK_WIDTH; i++) { + if (map->bank[i] == hif) { + *dim = 'b'; + return i; + } + } + + for (i = 0; i < DDR_MAX_BANKGRP_WIDTH; i++) { + if (map->bankgrp[i] == hif) { + *dim = 'g'; + return i; } } - if (width) { - for (index = 9; index > width; index--) { - priv->col_shift[index] = priv->col_shift[index - width]; - priv->col_shift[index - width] = 0; + for (i = 0; i < DDR_MAX_RANK_WIDTH; i++) { + if (map->rank[i] == hif) { + *dim = 'a'; + return i; } } + return DDR_ADDRMAP_UNUSED; +} + +static int snps_hif_sdram_map_show(struct seq_file *s, void *data) +{ + struct mem_ctl_info *mci = s->private; + struct snps_edac_priv *priv = mci->pvt_info; + char dim, buf[SNPS_DBGFS_BUF_LEN]; + const int line_len = 10; + u8 bit; + int i; + + seq_printf(s, "%3s", ""); + for (i = 0; i < line_len; i++) + seq_printf(s, " %02d ", i); + + for (i = 0; i < DDR_MAX_HIF_WIDTH; i++) { + if (i % line_len == 0) + seq_printf(s, "\n%02d ", i); + + bit = snps_find_sdram_dim(priv, i, &dim); + + if (bit != DDR_ADDRMAP_UNUSED) + scnprintf(buf, SNPS_DBGFS_BUF_LEN, "%c%hhu", dim, bit); + else + scnprintf(buf, SNPS_DBGFS_BUF_LEN, "--"); + + seq_printf(s, "%3s ", buf); + } + seq_putc(s, '\n'); + + seq_puts(s, "r - row, c - column, b - bank, g - bank group, a - rank\n"); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(snps_hif_sdram_map); + +static ssize_t snps_inject_data_error_read(struct file *filep, char __user *ubuf, + size_t size, loff_t *offp) +{ + struct mem_ctl_info *mci = filep->private_data; + struct snps_edac_priv *priv = mci->pvt_info; + struct snps_sdram_addr sdram; + char buf[SNPS_DBGFS_BUF_LEN]; + dma_addr_t sys; + u32 regval; + int pos; + + regval = readl(priv->baseaddr + ECC_POISON0_OFST); + sdram.rank = FIELD_GET(ECC_POISON0_RANK_MASK, regval); + sdram.col = FIELD_GET(ECC_POISON0_COL_MASK, regval); + + regval = readl(priv->baseaddr + ECC_POISON1_OFST); + sdram.bankgrp = FIELD_PREP(ECC_POISON1_BANKGRP_MASK, regval); + sdram.bank = FIELD_PREP(ECC_POISON1_BANK_MASK, regval); + sdram.row = FIELD_PREP(ECC_POISON1_ROW_MASK, regval); + + snps_map_sdram_to_sys(priv, &sdram, &sys); + + pos = scnprintf(buf, sizeof(buf), + "%pad: Row %hu Rank %hu Bank %hhu Bank Group %hhu Rank 
%hhu\n", + &sys, sdram.row, sdram.col, sdram.bank, sdram.bankgrp, + sdram.rank); + + return simple_read_from_buffer(ubuf, size, offp, buf, pos); } -static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap) +static ssize_t snps_inject_data_error_write(struct file *filep, const char __user *ubuf, + size_t size, loff_t *offp) { - priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE; - priv->bank_shift[1] = ((addrmap[1] >> 8) & - BANK_MAX_VAL_MASK) + BANK_B1_BASE; - priv->bank_shift[2] = (((addrmap[1] >> 16) & - BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 : - (((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) + - BANK_B2_BASE); + struct mem_ctl_info *mci = filep->private_data; + struct snps_edac_priv *priv = mci->pvt_info; + struct snps_sdram_addr sdram; + u32 regval; + u64 sys; + int rc; + + rc = kstrtou64_from_user(ubuf, size, 0, &sys); + if (rc) + return rc; + + snps_map_sys_to_sdram(priv, sys, &sdram); + + regval = FIELD_PREP(ECC_POISON0_RANK_MASK, sdram.rank) | + FIELD_PREP(ECC_POISON0_COL_MASK, sdram.col); + writel(regval, priv->baseaddr + ECC_POISON0_OFST); + + regval = FIELD_PREP(ECC_POISON1_BANKGRP_MASK, sdram.bankgrp) | + FIELD_PREP(ECC_POISON1_BANK_MASK, sdram.bank) | + FIELD_PREP(ECC_POISON1_ROW_MASK, sdram.row); + writel(regval, priv->baseaddr + ECC_POISON1_OFST); + return size; } -static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap) +SNPS_DEBUGFS_FOPS(snps_inject_data_error, snps_inject_data_error_read, + snps_inject_data_error_write); + +static ssize_t snps_inject_data_poison_read(struct file *filep, char __user *ubuf, + size_t size, loff_t *offp) { - priv->bankgrp_shift[0] = (addrmap[8] & - BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE; - priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) == - BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8) - & BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE); + struct mem_ctl_info *mci = filep->private_data; + struct snps_edac_priv *priv = mci->pvt_info; + char buf[SNPS_DBGFS_BUF_LEN]; + const char *errstr; + u32 regval; + int pos; + + regval = readl(priv->baseaddr + ECC_CFG1_OFST); + if (!(regval & ECC_CFG1_POISON_EN)) + errstr = "Off"; + else if (regval & ECC_CFG1_POISON_BIT) + errstr = "CE"; + else + errstr = "UE"; + + pos = scnprintf(buf, sizeof(buf), "%s\n", errstr); + return simple_read_from_buffer(ubuf, size, offp, buf, pos); } -static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap) +static ssize_t snps_inject_data_poison_write(struct file *filep, const char __user *ubuf, + size_t size, loff_t *offp) { - priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) == - RANK_MAX_VAL_MASK) ? 
0 : ((addrmap[0] & - RANK_MAX_VAL_MASK) + RANK_B0_BASE); + struct mem_ctl_info *mci = filep->private_data; + struct snps_edac_priv *priv = mci->pvt_info; + char buf[SNPS_DBGFS_BUF_LEN]; + u32 regval; + int rc; + + rc = simple_write_to_buffer(buf, sizeof(buf), offp, ubuf, size); + if (rc < 0) + return rc; + + writel(0, priv->baseaddr + DDR_SWCTL); + + regval = readl(priv->baseaddr + ECC_CFG1_OFST); + if (strncmp(buf, "CE", 2) == 0) + regval |= ECC_CFG1_POISON_BIT | ECC_CFG1_POISON_EN; + else if (strncmp(buf, "UE", 2) == 0) + regval = (regval & ~ECC_CFG1_POISON_BIT) | ECC_CFG1_POISON_EN; + else + regval &= ~ECC_CFG1_POISON_EN; + writel(regval, priv->baseaddr + ECC_CFG1_OFST); + + writel(1, priv->baseaddr + DDR_SWCTL); + + return size; } +SNPS_DEBUGFS_FOPS(snps_inject_data_poison, snps_inject_data_poison_read, + snps_inject_data_poison_write); + /** - * setup_address_map - Set Address Map by querying ADDRMAP registers. - * @priv: DDR memory controller private instance data. + * snps_create_debugfs_nodes - Create DebugFS nodes. + * @mci: EDAC memory controller instance. * - * Set Address Map by querying ADDRMAP registers. + * Create DW uMCTL2 EDAC driver DebugFS nodes in the device private + * DebugFS directory. * * Return: none. */ -static void setup_address_map(struct synps_edac_priv *priv) +static void snps_create_debugfs_nodes(struct mem_ctl_info *mci) { - u32 addrmap[12]; - int index; + edac_debugfs_create_file("ddrc_info", 0400, mci->debugfs, mci, + &snps_ddrc_info_fops); - for (index = 0; index < 12; index++) { - u32 addrmap_offset; + edac_debugfs_create_file("sys_app_map", 0400, mci->debugfs, mci, + &snps_sys_app_map_fops); - addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4); - addrmap[index] = readl(priv->baseaddr + addrmap_offset); - } + edac_debugfs_create_file("hif_sdram_map", 0400, mci->debugfs, mci, + &snps_hif_sdram_map_fops); - setup_row_address_map(priv, addrmap); + edac_debugfs_create_file("inject_data_error", 0600, mci->debugfs, mci, + &snps_inject_data_error); - setup_column_address_map(priv, addrmap); + edac_debugfs_create_file("inject_data_poison", 0600, mci->debugfs, mci, + &snps_inject_data_poison); +} - setup_bank_address_map(priv, addrmap); +#else /* !CONFIG_EDAC_DEBUG */ - setup_bg_address_map(priv, addrmap); +static inline void snps_create_debugfs_nodes(struct mem_ctl_info *mci) {} - setup_rank_address_map(priv, addrmap); -} -#endif /* CONFIG_EDAC_DEBUG */ +#endif /* !CONFIG_EDAC_DEBUG */ /** - * mc_probe - Check controller and bind driver. + * snps_mc_probe - Check controller and bind driver. * @pdev: platform device. * * Probe a specific controller instance for binding with the driver. @@ -1318,56 +2410,35 @@ static void setup_address_map(struct synps_edac_priv *priv) * Return: 0 if the controller instance was successfully bound to the * driver; otherwise, < 0 on error. 
*/ -static int mc_probe(struct platform_device *pdev) +static int snps_mc_probe(struct platform_device *pdev) { - const struct synps_platform_data *p_data; - struct edac_mc_layer layers[2]; - struct synps_edac_priv *priv; + struct snps_edac_priv *priv; struct mem_ctl_info *mci; - void __iomem *baseaddr; - struct resource *res; int rc; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - baseaddr = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(baseaddr)) - return PTR_ERR(baseaddr); + priv = snps_create_data(pdev); + if (IS_ERR(priv)) + return PTR_ERR(priv); - p_data = of_device_get_match_data(&pdev->dev); - if (!p_data) - return -ENODEV; + rc = snps_get_res(priv); + if (rc) + return rc; - if (!p_data->get_ecc_state(baseaddr)) { - edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); - return -ENXIO; - } + rc = snps_get_ddrc_info(priv); + if (rc) + goto put_res; - layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; - layers[0].size = SYNPS_EDAC_NR_CSROWS; - layers[0].is_virt_csrow = true; - layers[1].type = EDAC_MC_LAYER_CHANNEL; - layers[1].size = SYNPS_EDAC_NR_CHANS; - layers[1].is_virt_csrow = false; + snps_get_addr_map(priv); - mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, - sizeof(struct synps_edac_priv)); - if (!mci) { - edac_printk(KERN_ERR, EDAC_MC, - "Failed memory allocation for mc instance\n"); - return -ENOMEM; + mci = snps_mc_create(priv); + if (IS_ERR(mci)) { + rc = PTR_ERR(mci); + goto put_res; } - priv = mci->pvt_info; - priv->baseaddr = baseaddr; - priv->p_data = p_data; - - mc_init(mci, pdev); - - if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) { - rc = setup_irq(mci, pdev); - if (rc) - goto free_edac_mc; - } + rc = snps_setup_irq(mci); + if (rc) + goto free_edac_mc; rc = edac_mc_add_mc(mci); if (rc) { @@ -1376,71 +2447,59 @@ static int mc_probe(struct platform_device *pdev) goto free_edac_mc; } -#ifdef CONFIG_EDAC_DEBUG - if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) { - rc = edac_create_sysfs_attributes(mci); - if (rc) { - edac_printk(KERN_ERR, EDAC_MC, - "Failed to create sysfs entries\n"); - goto free_edac_mc; - } - } - - if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) - setup_address_map(priv); -#endif - - /* - * Start capturing the correctable and uncorrectable errors. A write of - * 0 starts the counters. - */ - if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)) - writel(0x0, baseaddr + ECC_CTRL_OFST); + snps_create_debugfs_nodes(mci); - return rc; + return 0; free_edac_mc: - edac_mc_free(mci); + snps_mc_free(mci); + +put_res: + snps_put_res(priv); return rc; } /** - * mc_remove - Unbind driver from controller. + * snps_mc_remove - Unbind driver from device. * @pdev: Platform device. 
* * Return: Unconditionally 0 */ -static int mc_remove(struct platform_device *pdev) +static int snps_mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); - struct synps_edac_priv *priv = mci->pvt_info; + struct snps_edac_priv *priv = mci->pvt_info; - if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) - disable_intr(priv); - -#ifdef CONFIG_EDAC_DEBUG - if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) - edac_remove_sysfs_attributes(mci); -#endif + snps_disable_irq(priv); edac_mc_del_mc(&pdev->dev); - edac_mc_free(mci); + + snps_mc_free(mci); + + snps_put_res(priv); return 0; } -static struct platform_driver synps_edac_mc_driver = { +static const struct of_device_id snps_edac_match[] = { + { .compatible = "xlnx,zynqmp-ddrc-2.40a", .data = zynqmp_init_plat }, + { .compatible = "baikal,bt1-ddrc", .data = bt1_init_plat }, + { .compatible = "snps,ddrc-3.80a" }, + { } +}; +MODULE_DEVICE_TABLE(of, snps_edac_match); + +static struct platform_driver snps_edac_mc_driver = { .driver = { - .name = "synopsys-edac", - .of_match_table = synps_edac_match, + .name = "snps-edac", + .of_match_table = snps_edac_match, }, - .probe = mc_probe, - .remove = mc_remove, + .probe = snps_mc_probe, + .remove = snps_mc_remove, }; - -module_platform_driver(synps_edac_mc_driver); +module_platform_driver(snps_edac_mc_driver); MODULE_AUTHOR("Xilinx Inc"); -MODULE_DESCRIPTION("Synopsys DDR ECC driver"); +MODULE_DESCRIPTION("Synopsys uMCTL2 DDR ECC driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/edac/zynq_edac.c b/drivers/edac/zynq_edac.c new file mode 100644 index 0000000000000..0781e69e019cd --- /dev/null +++ b/drivers/edac/zynq_edac.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Zynq DDR ECC Driver + * This driver is based on ppc4xx_edac.c drivers + * + * Copyright (C) 2012 - 2014 Xilinx, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "edac_module.h" + +/* Number of cs_rows needed per memory controller */ +#define ZYNQ_EDAC_NR_CSROWS 1 + +/* Number of channels per memory controller */ +#define ZYNQ_EDAC_NR_CHANS 1 + +/* Granularity of reported error in bytes */ +#define ZYNQ_EDAC_ERR_GRAIN 1 + +#define ZYNQ_EDAC_MSG_SIZE 256 + +#define ZYNQ_EDAC_MOD_STRING "zynq_edac" +#define ZYNQ_EDAC_MOD_VER "1" + +/* Zynq DDR memory controller ECC registers */ +#define ZYNQ_CTRL_OFST 0x0 +#define ZYNQ_T_ZQ_OFST 0xA4 + +/* ECC control register */ +#define ZYNQ_ECC_CTRL_OFST 0xC4 +/* ECC log register */ +#define ZYNQ_CE_LOG_OFST 0xC8 +/* ECC address register */ +#define ZYNQ_CE_ADDR_OFST 0xCC +/* ECC data[31:0] register */ +#define ZYNQ_CE_DATA_31_0_OFST 0xD0 + +/* Uncorrectable error info registers */ +#define ZYNQ_UE_LOG_OFST 0xDC +#define ZYNQ_UE_ADDR_OFST 0xE0 +#define ZYNQ_UE_DATA_31_0_OFST 0xE4 + +#define ZYNQ_STAT_OFST 0xF0 +#define ZYNQ_SCRUB_OFST 0xF4 + +/* Control register bit field definitions */ +#define ZYNQ_CTRL_BW_MASK 0xC +#define ZYNQ_CTRL_BW_SHIFT 2 + +#define ZYNQ_DDRCTL_WDTH_16 1 +#define ZYNQ_DDRCTL_WDTH_32 0 + +/* ZQ register bit field definitions */ +#define ZYNQ_T_ZQ_DDRMODE_MASK 0x2 + +/* ECC control register bit field definitions */ +#define ZYNQ_ECC_CTRL_CLR_CE_ERR 0x2 +#define ZYNQ_ECC_CTRL_CLR_UE_ERR 0x1 + +/* ECC correctable/uncorrectable error log register definitions */ +#define ZYNQ_LOG_VALID 0x1 +#define ZYNQ_CE_LOG_BITPOS_MASK 0xFE +#define ZYNQ_CE_LOG_BITPOS_SHIFT 1 + +/* ECC correctable/uncorrectable error address register definitions */ +#define ZYNQ_ADDR_COL_MASK 0xFFF +#define ZYNQ_ADDR_ROW_MASK 0xFFFF000 +#define ZYNQ_ADDR_ROW_SHIFT 12 +#define ZYNQ_ADDR_BANK_MASK 0x70000000 +#define ZYNQ_ADDR_BANK_SHIFT 28 + +/* ECC statistic register definitions */ +#define ZYNQ_STAT_UECNT_MASK 0xFF +#define ZYNQ_STAT_CECNT_MASK 0xFF00 +#define ZYNQ_STAT_CECNT_SHIFT 8 + +/* ECC scrub register definitions */ +#define ZYNQ_SCRUB_MODE_MASK 0x7 +#define ZYNQ_SCRUB_MODE_SECDED 0x4 + +/** + * struct zynq_ecc_error_info - ECC error log information. + * @row: Row number. + * @col: Column number. + * @bank: Bank number. + * @bitpos: Bit position. + * @data: Data causing the error. + */ +struct zynq_ecc_error_info { + u32 row; + u32 col; + u32 bank; + u32 bitpos; + u32 data; +}; + +/** + * struct zynq_ecc_status - ECC status information to report. + * @ce_cnt: Correctable error count. + * @ue_cnt: Uncorrectable error count. + * @ceinfo: Correctable error log information. + * @ueinfo: Uncorrectable error log information. + */ +struct zynq_ecc_status { + u32 ce_cnt; + u32 ue_cnt; + struct zynq_ecc_error_info ceinfo; + struct zynq_ecc_error_info ueinfo; +}; + +/** + * struct zynq_edac_priv - DDR memory controller private instance data. + * @baseaddr: Base address of the DDR controller. + * @message: Buffer for framing the event specific info. + * @stat: ECC status information. + */ +struct zynq_edac_priv { + void __iomem *baseaddr; + char message[ZYNQ_EDAC_MSG_SIZE]; + struct zynq_ecc_status stat; +}; + +/** + * zynq_get_error_info - Get the current ECC error info. + * @priv: DDR memory controller private instance data. + * + * Return: one if there is no error, otherwise zero. 
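+ *
+ * A minimal sketch of the intended calling pattern (see zynq_check_errors()
+ * below for the actual poll callback):
+ *
+ *	if (zynq_get_error_info(priv))
+ *		return;
+ *
+ *	zynq_handle_error(mci, &priv->stat);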
+ */ +static int zynq_get_error_info(struct zynq_edac_priv *priv) +{ + struct zynq_ecc_status *p; + u32 regval, clearval = 0; + void __iomem *base; + + base = priv->baseaddr; + p = &priv->stat; + + regval = readl(base + ZYNQ_STAT_OFST); + if (!regval) + return 1; + + p->ce_cnt = (regval & ZYNQ_STAT_CECNT_MASK) >> ZYNQ_STAT_CECNT_SHIFT; + p->ue_cnt = regval & ZYNQ_STAT_UECNT_MASK; + + regval = readl(base + ZYNQ_CE_LOG_OFST); + if (!(p->ce_cnt && (regval & ZYNQ_LOG_VALID))) + goto ue_err; + + p->ceinfo.bitpos = (regval & ZYNQ_CE_LOG_BITPOS_MASK) >> ZYNQ_CE_LOG_BITPOS_SHIFT; + regval = readl(base + ZYNQ_CE_ADDR_OFST); + p->ceinfo.row = (regval & ZYNQ_ADDR_ROW_MASK) >> ZYNQ_ADDR_ROW_SHIFT; + p->ceinfo.col = regval & ZYNQ_ADDR_COL_MASK; + p->ceinfo.bank = (regval & ZYNQ_ADDR_BANK_MASK) >> ZYNQ_ADDR_BANK_SHIFT; + p->ceinfo.data = readl(base + ZYNQ_CE_DATA_31_0_OFST); + edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos, + p->ceinfo.data); + clearval = ZYNQ_ECC_CTRL_CLR_CE_ERR; + +ue_err: + regval = readl(base + ZYNQ_UE_LOG_OFST); + if (!(p->ue_cnt && (regval & ZYNQ_LOG_VALID))) + goto out; + + regval = readl(base + ZYNQ_UE_ADDR_OFST); + p->ueinfo.row = (regval & ZYNQ_ADDR_ROW_MASK) >> ZYNQ_ADDR_ROW_SHIFT; + p->ueinfo.col = regval & ZYNQ_ADDR_COL_MASK; + p->ueinfo.bank = (regval & ZYNQ_ADDR_BANK_MASK) >> ZYNQ_ADDR_BANK_SHIFT; + p->ueinfo.data = readl(base + ZYNQ_UE_DATA_31_0_OFST); + clearval |= ZYNQ_ECC_CTRL_CLR_UE_ERR; + +out: + writel(clearval, base + ZYNQ_ECC_CTRL_OFST); + writel(0x0, base + ZYNQ_ECC_CTRL_OFST); + + return 0; +} + +/** + * handle_error - Handle Correctable and Uncorrectable errors. + * @mci: EDAC memory controller instance. + * @p: Zynq ECC status structure. + * + * Handles ECC correctable and uncorrectable errors. + */ +static void zynq_handle_error(struct mem_ctl_info *mci, struct zynq_ecc_status *p) +{ + struct zynq_edac_priv *priv = mci->pvt_info; + struct zynq_ecc_error_info *pinf; + + if (p->ce_cnt) { + pinf = &p->ceinfo; + + snprintf(priv->message, ZYNQ_EDAC_MSG_SIZE, + "Row %d Bank %d Col %d Bit %d Data 0x%08x", + pinf->row, pinf->bank, pinf->col, + pinf->bitpos, pinf->data); + + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + p->ce_cnt, 0, 0, 0, 0, 0, -1, + priv->message, ""); + } + + if (p->ue_cnt) { + pinf = &p->ueinfo; + + snprintf(priv->message, ZYNQ_EDAC_MSG_SIZE, + "Row %d Bank %d Col %d", + pinf->row, pinf->bank, pinf->col); + + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + p->ue_cnt, 0, 0, 0, 0, 0, -1, + priv->message, ""); + } + + memset(p, 0, sizeof(*p)); +} + +/** + * check_errors - Check controller for ECC errors. + * @mci: EDAC memory controller instance. + * + * Check and post ECC errors. Called by the polling thread. + */ +static void zynq_check_errors(struct mem_ctl_info *mci) +{ + struct zynq_edac_priv *priv = mci->pvt_info; + int status; + + status = zynq_get_error_info(priv); + if (status) + return; + + zynq_handle_error(mci, &priv->stat); +} + +/** + * zynq_get_dtype - Return the controller memory width. + * @base: DDR memory controller base address. + * + * Get the EDAC device type width appropriate for the current controller + * configuration. + * + * Return: a device type width enumeration. 
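+ *
+ * For illustration, zynq_get_ecc_state() below combines this width check
+ * with the scrub mode, roughly:
+ *
+ *	bool ecc_on = zynq_get_dtype(base) == DEV_X2 &&
+ *		      (readl(base + ZYNQ_SCRUB_OFST) &
+ *		       ZYNQ_SCRUB_MODE_MASK) == ZYNQ_SCRUB_MODE_SECDED;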
+ */ +static enum dev_type zynq_get_dtype(const void __iomem *base) +{ + enum dev_type dt; + u32 width; + + width = readl(base + ZYNQ_CTRL_OFST); + width = (width & ZYNQ_CTRL_BW_MASK) >> ZYNQ_CTRL_BW_SHIFT; + + switch (width) { + case ZYNQ_DDRCTL_WDTH_16: + dt = DEV_X2; + break; + case ZYNQ_DDRCTL_WDTH_32: + dt = DEV_X4; + break; + default: + dt = DEV_UNKNOWN; + } + + return dt; +} + +/** + * zynq_get_ecc_state - Return the controller ECC enable/disable status. + * @base: DDR memory controller base address. + * + * Get the ECC enable/disable status of the controller. + * + * Return: true if enabled, otherwise false. + */ +static bool zynq_get_ecc_state(void __iomem *base) +{ + enum dev_type dt; + u32 ecctype; + + dt = zynq_get_dtype(base); + if (dt == DEV_UNKNOWN) + return false; + + ecctype = readl(base + ZYNQ_SCRUB_OFST) & ZYNQ_SCRUB_MODE_MASK; + if ((ecctype == ZYNQ_SCRUB_MODE_SECDED) && (dt == DEV_X2)) + return true; + + return false; +} + +/** + * zynq_get_memsize - Read the size of the attached memory device. + * + * Return: the memory size in bytes. + */ +static u32 zynq_get_memsize(void) +{ + struct sysinfo inf; + + si_meminfo(&inf); + + return inf.totalram * inf.mem_unit; +} + +/** + * zynq_get_mtype - Return the controller memory type. + * @base: Zynq ECC status structure. + * + * Get the EDAC memory type appropriate for the current controller + * configuration. + * + * Return: a memory type enumeration. + */ +static enum mem_type zynq_get_mtype(const void __iomem *base) +{ + enum mem_type mt; + u32 memtype; + + memtype = readl(base + ZYNQ_T_ZQ_OFST); + + if (memtype & ZYNQ_T_ZQ_DDRMODE_MASK) + mt = MEM_DDR3; + else + mt = MEM_DDR2; + + return mt; +} + +/** + * zynq_init_csrows - Initialize the csrow data. + * @mci: EDAC memory controller instance. + * + * Initialize the chip select rows associated with the EDAC memory + * controller instance. + */ +static void zynq_init_csrows(struct mem_ctl_info *mci) +{ + struct zynq_edac_priv *priv = mci->pvt_info; + struct csrow_info *csi; + struct dimm_info *dimm; + u32 size, row; + int j; + + for (row = 0; row < mci->nr_csrows; row++) { + csi = mci->csrows[row]; + size = zynq_get_memsize(); + + for (j = 0; j < csi->nr_channels; j++) { + dimm = csi->channels[j]->dimm; + dimm->edac_mode = EDAC_SECDED; + dimm->mtype = zynq_get_mtype(priv->baseaddr); + dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels; + dimm->grain = ZYNQ_EDAC_ERR_GRAIN; + dimm->dtype = zynq_get_dtype(priv->baseaddr); + } + } +} + +/** + * zynq_mc_init - Initialize one driver instance. + * @mci: EDAC memory controller instance. + * @pdev: platform device. + * + * Perform initialization of the EDAC memory controller instance and + * related driver-private data associated with the memory controller the + * instance is bound to. + */ +static void zynq_mc_init(struct mem_ctl_info *mci, struct platform_device *pdev) +{ + mci->pdev = &pdev->dev; + platform_set_drvdata(pdev, mci); + + /* Initialize controller capabilities and configuration */ + mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->scrub_cap = SCRUB_FLAG_HW_SRC; + mci->scrub_mode = SCRUB_NONE; + + mci->edac_cap = EDAC_FLAG_SECDED; + mci->ctl_name = "zynq_ddr_controller"; + mci->dev_name = ZYNQ_EDAC_MOD_STRING; + mci->mod_name = ZYNQ_EDAC_MOD_VER; + + edac_op_state = EDAC_OPSTATE_POLL; + mci->edac_check = zynq_check_errors; + + mci->ctl_page_to_phys = NULL; + + zynq_init_csrows(mci); +} + +/** + * zynq_mc_probe - Check controller and bind driver. 
+ * @pdev: platform device. + * + * Probe a specific controller instance for binding with the driver. + * + * Return: 0 if the controller instance was successfully bound to the + * driver; otherwise, < 0 on error. + */ +static int zynq_mc_probe(struct platform_device *pdev) +{ + struct edac_mc_layer layers[2]; + struct zynq_edac_priv *priv; + struct mem_ctl_info *mci; + void __iomem *baseaddr; + int rc; + + baseaddr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(baseaddr)) + return PTR_ERR(baseaddr); + + if (!zynq_get_ecc_state(baseaddr)) { + edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); + return -ENXIO; + } + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = ZYNQ_EDAC_NR_CSROWS; + layers[0].is_virt_csrow = true; + layers[1].type = EDAC_MC_LAYER_CHANNEL; + layers[1].size = ZYNQ_EDAC_NR_CHANS; + layers[1].is_virt_csrow = false; + + mci = edac_mc_alloc(EDAC_AUTO_MC_NUM, ARRAY_SIZE(layers), layers, + sizeof(struct zynq_edac_priv)); + if (!mci) { + edac_printk(KERN_ERR, EDAC_MC, + "Failed memory allocation for mc instance\n"); + return -ENOMEM; + } + + priv = mci->pvt_info; + priv->baseaddr = baseaddr; + + zynq_mc_init(mci, pdev); + + rc = edac_mc_add_mc(mci); + if (rc) { + edac_printk(KERN_ERR, EDAC_MC, + "Failed to register with EDAC core\n"); + goto free_edac_mc; + } + + /* + * Start capturing the correctable and uncorrectable errors. A write of + * 0 starts the counters. + */ + writel(0x0, baseaddr + ZYNQ_ECC_CTRL_OFST); + + return 0; + +free_edac_mc: + edac_mc_free(mci); + + return rc; +} + +/** + * zynq_mc_remove - Unbind driver from controller. + * @pdev: Platform device. + * + * Return: Unconditionally 0 + */ +static int zynq_mc_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci = platform_get_drvdata(pdev); + + edac_mc_del_mc(&pdev->dev); + edac_mc_free(mci); + + return 0; +} + +static const struct of_device_id zynq_edac_match[] = { + { .compatible = "xlnx,zynq-ddrc-a05" }, + {} +}; +MODULE_DEVICE_TABLE(of, zynq_edac_match); + +static struct platform_driver zynq_edac_mc_driver = { + .driver = { + .name = "zynq-edac", + .of_match_table = zynq_edac_match, + }, + .probe = zynq_mc_probe, + .remove = zynq_mc_remove, +}; +module_platform_driver(zynq_edac_mc_driver); + +MODULE_AUTHOR("Xilinx Inc"); +MODULE_DESCRIPTION("Zynq DDR ECC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index ab42f75b94139..a2adb873114da 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -159,7 +159,7 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM - depends on X86 || ARM64 || COMPILE_TEST + depends on X86 || ARM64 || MIPS || COMPILE_TEST depends on PTP_1588_CLOCK_OPTIONAL select BITREVERSE select CRC32 diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile index 620785ffbd519..78d57625dfe68 100644 --- a/drivers/net/ethernet/amd/xgbe/Makefile +++ b/drivers/net/ethernet/amd/xgbe/Makefile @@ -4,7 +4,8 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \ xgbe-ptp.o \ - xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \ + xgbe-i2c.o \ + xgbe-phy-v1.o xgbe-phy-v2.o xgbe-phy-v3.o \ xgbe-platform.o amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 0e8698928e4d7..15f3c7d1f9527 100644 --- 
a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -295,6 +295,16 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) return ret; } + /* Set platform-specific DMA bus settings */ + if (pdata->vdata->blen) + pdata->blen = pdata->vdata->blen; + if (pdata->vdata->pbl) + pdata->pbl = pdata->vdata->pbl; + if (pdata->vdata->rd_osr_limit) + pdata->rd_osr_limit = pdata->vdata->rd_osr_limit; + if (pdata->vdata->wr_osr_limit) + pdata->wr_osr_limit = pdata->vdata->wr_osr_limit; + /* Set default max values if not provided */ if (!pdata->tx_max_fifo_size) pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v3.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v3.c new file mode 100644 index 0000000000000..90089f4772b6d --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v3.c @@ -0,0 +1,693 @@ +/* + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + +#define VR_XS_PMA_MII_Gen5_MPLL_CTRL 0x807A +#define VR_XS_PMA_MII_Gen5_MPLL_CTRL_REF_CLK_SEL_bit (1 << 13) +#define VR_XS_PCS_DIG_CTRL1 0x8000 +#define VR_XS_PCS_DIG_CTRL1_VR_RST_Bit MDIO_CTRL1_RESET +#define SR_XC_or_PCS_MMD_Control1 MDIO_CTRL1 +#define SR_XC_or_PCS_MMD_Control1_RST_Bit MDIO_CTRL1_RESET +#define DWC_GLBL_PLL_MONITOR 0x8010 +#define SDS_PCS_CLOCK_READY_mask 0x1C +#define SDS_PCS_CLOCK_READY_bit 0x10 +#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL 0x809C +#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_KX4 (4 << 0) +#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_MASK 0x0007 +#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_4 (2 << 8) +#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_MASK 0x0700 +#define VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST (1 << 15) + +#define DELAY_COUNT 50 + +/* PHY related configuration information */ +struct xgbe_phy_data { + struct phy_device *phydev; +}; + +static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata); + +static int xgbe_an_restart_kr_training(struct xgbe_prv_data *pdata) +{ + int reg = 0; + + DBGPR("%s\n", __FUNCTION__); + + /* Restart training */ + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, 0x0096, 3); + msleep(500); + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, 0x0096, 1); + + /* The worse case when training continue till 500ms */ + msleep(500); + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, 0x0097); + /* Check training failure */ + if (reg & (1 << 3)) + return -1; + + /* Success */ + return 0; +} + +static int xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata) +{ + DBGPR("%s\n", __FUNCTION__); + + /* Enable training */ + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, 0x0096, 2); + + return 0; +} + +static int xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata) +{ + int ret; + DBGPR("%s\n", __FUNCTION__); + + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + + ret |= MDIO_CTRL1_LPOWER; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret); + + usleep_range(75, 100); + + ret &= ~MDIO_CTRL1_LPOWER; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret); + + return 0; +} + +static int xgbe_phy_xgmii_mode_kx4(struct xgbe_prv_data *pdata) +{ + int ret, count; + + DBGPR_MDIO("%s\n", __FUNCTION__); + + /* Write 2'b01 to Bits[1:0] of SR PCS Control2 to set the xpcx_kr_0 + * output to 0. + */ + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); + + ret &= ~MDIO_PCS_CTRL2_TYPE; + ret |= MDIO_PCS_CTRL2_10GBX; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, ret); + + /* Set Bit 13 SR PMA MMD Control1 Register (for back plane) to 1. */ + ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_CTRL1); + + ret |= 0x2000; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_CTRL1, ret); + + /* Set LANE_MODE TO KX4 (4). 
*/ + ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL); + + ret &= ~VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_MASK; + ret |= VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_KX4; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL, ret); + + /* Set LANE_WIDTH (2) 4 lanes per link. */ + ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL); + + ret &= ~VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_MASK; + ret |= VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_4; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL, ret); + + /* Initiate Software Reset. */ + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1); + + ret |= VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1, ret); + + /* Wait until reset done. */ + count = DELAY_COUNT; + do { + msleep(20); + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1); + } while (!!(ret & VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST) && --count); + + if (ret & VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST) + return -ETIMEDOUT; + + return 0; +} + +static int xgbe_phy_xgmii_mode_kr(struct xgbe_prv_data *pdata) +{ + int ret; + DBGPR("%s\n", __FUNCTION__); + + /* Enable KR training */ + ret = xgbe_an_enable_kr_training(pdata); + if (ret < 0) + return ret; + + /* Set PCS to KR/10G speed */ + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); + + ret &= ~MDIO_PCS_CTRL2_TYPE; + ret |= MDIO_PCS_CTRL2_10GBR; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, ret); + + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + + ret &= ~MDIO_CTRL1_SPEEDSEL; + ret |= MDIO_CTRL1_SPEED10G; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret); + + ret = xgbe_phy_pcs_power_cycle(pdata); + if (ret < 0) + return ret; + + return 0; +} + +static int xgbe_phy_xgmii_mode(struct xgbe_prv_data *pdata) +{ + const char *pm; + + if(!device_property_read_string(pdata->phy_dev, "baikal,line-mode", &pm)) { + if(strcasecmp(pm, "KX4") == 0){ + DBGPR("xgbe: mode KX4: %s\n", __FUNCTION__); + return xgbe_phy_xgmii_mode_kx4(pdata); + } + } + + DBGPR("xgbe: mode KR: %s\n", __FUNCTION__); + return xgbe_phy_xgmii_mode_kr(pdata); +} + +static int xgbe_phy_probe(struct xgbe_prv_data *pdata) +{ + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + struct xgbe_phy_data *phy_data = pdata->phy_data; + struct phy_device *phydev; + int ret; + + phydev = device_phy_find_device(pdata->phy_dev); + if (!phydev) + return -ENODEV; + + ret = phy_init_hw(phydev); + if (ret) + return ret; + + if ((phydev->speed != SPEED_10000) && (phydev->duplex != DUPLEX_FULL)) + return -ENODEV; + + /* Initialize supported features */ + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported, 1); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported, 1); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported, 1); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Backplane_BIT, phydev->supported, 1); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, phydev->supported, 1); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, phydev->supported, 1); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, phydev->supported, 1); + linkmode_copy(phydev->advertising, phydev->supported); + + XGBE_ZERO_SUP(lks); + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, Backplane); + XGBE_SET_SUP(lks, 10000baseKR_Full); + /*XGBE_SET_SUP(lks, 10000baseKX4_Full); + XGBE_SET_SUP(lks, 10000baseT_Full);*/ + + pdata->phy.pause_autoneg = AUTONEG_DISABLE; + 
pdata->phy.speed = phydev->speed; // SPEED_10000 + pdata->phy.duplex = phydev->duplex; // DUPLEX_FULL + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; + + phy_data->phydev = phydev; + + return 0; +} + +int xgbe_phy_config_init(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data; + int count = DELAY_COUNT; + int ret; + + DBGPR("%s\n", __FUNCTION__); + + phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL); + if (!phy_data) + return -ENOMEM; + + pdata->phy_data = phy_data; + + ret = xgbe_phy_probe(pdata); + if (ret) { + dev_info(pdata->dev, "Failed to probe external PHY\n"); + return ret; + } + + /* Switch XGMAC PHY PLL to use external ref clock from pad */ + ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_Gen5_MPLL_CTRL); + ret &= ~(VR_XS_PMA_MII_Gen5_MPLL_CTRL_REF_CLK_SEL_bit); + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_Gen5_MPLL_CTRL, ret); + wmb(); + + /* Make vendor specific soft reset */ + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1); + ret |= VR_XS_PCS_DIG_CTRL1_VR_RST_Bit; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1, ret); + wmb(); + + /* Wait reset finish */ + count = DELAY_COUNT; + do { + usleep_range(500, 600); + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1); + } while(((ret & VR_XS_PCS_DIG_CTRL1_VR_RST_Bit) != 0) && count--); + + + DBGPR("%s %x\n", __FUNCTION__, ret); + /* + * Wait for the RST (bit 15) of the “SR XS or PCS MMD Control1” Register is 0. + * This bit is self-cleared when Bits[4:2] in VR XS or PCS MMD Digital + * Status Register are equal to 3’b100, that is, Tx/Rx clocks are stable + * and in Power_Good state. + */ + count = DELAY_COUNT; + do { + usleep_range(500, 600); + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, SR_XC_or_PCS_MMD_Control1); + } while(((ret & SR_XC_or_PCS_MMD_Control1_RST_Bit) != 0) && count--); + + /* + * This bit is self-cleared when Bits[4:2] in VR XS or PCS MMD Digital + * Status Register are equal to 3’b100, that is, Tx/Rx clocks are stable + * and in Power_Good state. + */ + count = DELAY_COUNT; + do { + usleep_range(500, 600); + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, DWC_GLBL_PLL_MONITOR); + } while(((ret & SDS_PCS_CLOCK_READY_mask) != SDS_PCS_CLOCK_READY_bit) && count-- ); + + /* Turn off and clear interrupts */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); + wmb(); + + xgbe_phy_config_aneg(pdata); + + ret = xgbe_phy_xgmii_mode(pdata); + + count = DELAY_COUNT; + do + { + msleep(10); + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, 0x0001); + } while(((ret & 0x0004) != 0x0004) && count--); + + return 0; +} + +/** + * xgbe_phy_exit() - dummy + */ +static void xgbe_phy_exit(struct xgbe_prv_data *pdata) +{ + return; +} + +static int xgbe_phy_soft_reset(struct xgbe_prv_data *pdata) +{ + /* No real soft-reset for now. Sigh... 
*/ + DBGPR("%s\n", __FUNCTION__); +#if 0 + int count, ret; + + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + + ret |= MDIO_CTRL1_RESET; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret); + + count = DELAY_COUNT; + do { + msleep(20); + ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + if (ret < 0) + return ret; + } while ((ret & MDIO_CTRL1_RESET) && --count); + + if (ret & MDIO_CTRL1_RESET) + return -ETIMEDOUT; +#endif + + return 0; +} + +static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata) +{ + if (pdata->tx_pause && pdata->rx_pause) + return "rx/tx"; + else if (pdata->rx_pause) + return "rx"; + else if (pdata->tx_pause) + return "tx"; + else + return "off"; +} + +static const char *xgbe_phy_speed_string(int speed) +{ + switch (speed) { + case SPEED_10000: + return "10Gbps"; + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported"; + } +} + +static void xgbe_phy_print_status(struct xgbe_prv_data *pdata) +{ + if (pdata->phy.link) + netdev_info(pdata->netdev, + "Link is Up - %s/%s - flow control %s\n", + xgbe_phy_speed_string(pdata->phy.speed), + pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half", + xgbe_phy_fc_string(pdata)); + else + netdev_info(pdata->netdev, "Link is Down\n"); +} + +static void xgbe_phy_update_link(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + struct phy_device *phydev = phy_data->phydev; + int new_state = 0; + + /* Dummy read? Why? */ + phy_read_mmd(phydev, MDIO_MMD_PHYXS, 0x1001); + + if (pdata->phy.link) { + /* Flow control support */ + pdata->pause_autoneg = pdata->phy.pause_autoneg; + + if (pdata->tx_pause != pdata->phy.tx_pause) { + new_state = 1; + pdata->hw_if.config_tx_flow_control(pdata); + pdata->tx_pause = pdata->phy.tx_pause; + } + + if (pdata->rx_pause != pdata->phy.rx_pause) { + new_state = 1; + pdata->hw_if.config_rx_flow_control(pdata); + pdata->rx_pause = pdata->phy.rx_pause; + } + + /* Speed support */ + if (pdata->phy_speed != pdata->phy.speed) { + new_state = 1; + pdata->phy_speed = pdata->phy.speed; + } + + if (pdata->phy_link != pdata->phy.link) { + new_state = 1; + pdata->phy_link = pdata->phy.link; + } + } else if (pdata->phy_link) { + new_state = 1; + pdata->phy_link = 0; + pdata->phy_speed = SPEED_UNKNOWN; + } + + if (new_state && netif_msg_link(pdata)) + xgbe_phy_print_status(pdata); +} + +/** + * xgbe_phy_start() - dummy + */ +static int xgbe_phy_start(struct xgbe_prv_data *pdata) +{ + return 0; +} + +static void xgbe_phy_stop(struct xgbe_prv_data *pdata) +{ + netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n"); + + /* Disable auto-negotiation interrupts */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + + pdata->phy.link = 0; + netif_carrier_off(pdata->netdev); + + xgbe_phy_update_link(pdata); +} + +static int xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) +{ + int reg; + + DBGPR("%s\n", __FUNCTION__); + + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1); + + return (reg & MDIO_AN_STAT1_COMPLETE) ? 
1 : 0; +} + +static void xgbe_phy_read_status(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + struct phy_device *phydev = phy_data->phydev; + int reg, link_aneg; + + pdata->phy.link = 1; + + if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) { + netif_carrier_off(pdata->netdev); + + pdata->phy.link = 0; + goto update_link; + } + + link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE); + + phydev->drv->read_status(phydev); + /* Pop out old values */ + phydev->drv->read_status(phydev); + if (!phydev->link) + pdata->phy.link &= phydev->link; + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + pdata->phy.link &= (reg & MDIO_STAT1_LSTATUS) ? 1 : 0; + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_STAT1); + pdata->phy.link &= (reg & MDIO_STAT1_LSTATUS) ? 1 : 0; + + if (pdata->phy.link) { + if (link_aneg && !xgbe_phy_aneg_done(pdata)) { + return; + } + + if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) + clear_bit(XGBE_LINK_INIT, &pdata->dev_state); + + netif_carrier_on(pdata->netdev); + } else { + if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) { + if (link_aneg) + return; + } + + netif_carrier_off(pdata->netdev); + + /* If KX4 mode is enabled training doesn't affect behavior */ + xgbe_an_restart_kr_training(pdata); + /* Pop out old values */ + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_STAT1); + } + +update_link: + xgbe_phy_update_link(pdata); +} + +static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) +{ + int reg; + + DBGPR("%s\n", __FUNCTION__); + + pdata->link_check = jiffies; + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); + + /* Disable auto negotiation in any case! */ + reg &= ~MDIO_AN_CTRL1_ENABLE; + pdata->phy.autoneg = AUTONEG_DISABLE; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); + + return 0; +} + +static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) +{ + if (speed == SPEED_10000) + return true; + + return false; +} + +/** + * xgbe_an_isr() - dummy + */ +static irqreturn_t xgbe_an_isr(struct xgbe_prv_data *pdata) +{ + DBGPR("Unhandled AN IRQ\n"); + + return IRQ_HANDLED; +} + +void xgbe_init_function_ptrs_phy_v3(struct xgbe_phy_if *phy_if) +{ + phy_if->phy_init = xgbe_phy_config_init; + phy_if->phy_exit = xgbe_phy_exit; + + phy_if->phy_reset = xgbe_phy_soft_reset; + phy_if->phy_start = xgbe_phy_start; + phy_if->phy_stop = xgbe_phy_stop; + + phy_if->phy_status = xgbe_phy_read_status; + phy_if->phy_config_aneg = xgbe_phy_config_aneg; + + phy_if->phy_valid_speed = xgbe_phy_valid_speed; + + phy_if->an_isr = xgbe_an_isr; +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 4d790a89fe771..8ecda969c2c6d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -126,6 +126,7 @@ #include #include #include +#include #include #include #include @@ -296,38 +297,18 @@ static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata) : xgbe_of_vdata(pdata); } -static int xgbe_platform_probe(struct platform_device *pdev) +static int xgbe_init_function_plat_amd(struct xgbe_prv_data *pdata) { - struct xgbe_prv_data *pdata; - struct device *dev = &pdev->dev; + unsigned int phy_memnum, phy_irqnum, dma_irqnum, dma_irqend; + struct platform_device *pdev = pdata->platdev; struct platform_device *phy_pdev; - const char *phy_mode; - unsigned int phy_memnum, phy_irqnum; - unsigned int dma_irqnum, dma_irqend; - enum dev_dma_attr attr; + struct device *dev = 
pdata->dev; int ret; - pdata = xgbe_alloc_pdata(dev); - if (IS_ERR(pdata)) { - ret = PTR_ERR(pdata); - goto err_alloc; - } - - pdata->platdev = pdev; - pdata->adev = ACPI_COMPANION(dev); - platform_set_drvdata(pdev, pdata); - - /* Check if we should use ACPI or DT */ - pdata->use_acpi = dev->of_node ? 0 : 1; - - /* Get the version data */ - pdata->vdata = xgbe_get_vdata(pdata); - phy_pdev = xgbe_get_phy_pdev(pdata); if (!phy_pdev) { dev_err(dev, "unable to obtain phy device\n"); - ret = -EINVAL; - goto err_phydev; + return -EINVAL; } pdata->phy_platdev = phy_pdev; pdata->phy_dev = &phy_pdev->dev; @@ -400,28 +381,6 @@ static int xgbe_platform_probe(struct platform_device *pdev) if (netif_msg_probe(pdata)) dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs); - /* Retrieve the MAC address */ - ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY, - pdata->mac_addr, - sizeof(pdata->mac_addr)); - if (ret || !is_valid_ether_addr(pdata->mac_addr)) { - dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY); - if (!ret) - ret = -EINVAL; - goto err_io; - } - - /* Retrieve the PHY mode - it must be "xgmii" */ - ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY, - &phy_mode); - if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) { - dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY); - if (!ret) - ret = -EINVAL; - goto err_io; - } - pdata->phy_mode = PHY_INTERFACE_MODE_XGMII; - /* Check for per channel interrupt support */ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) { pdata->per_channel_irq = 1; @@ -436,29 +395,6 @@ static int xgbe_platform_probe(struct platform_device *pdev) if (ret) goto err_io; - /* Set the DMA coherency values */ - attr = device_get_dma_attr(dev); - if (attr == DEV_DMA_NOT_SUPPORTED) { - dev_err(dev, "DMA is not supported"); - ret = -ENODEV; - goto err_io; - } - pdata->coherent = (attr == DEV_DMA_COHERENT); - if (pdata->coherent) { - pdata->arcr = XGBE_DMA_OS_ARCR; - pdata->awcr = XGBE_DMA_OS_AWCR; - } else { - pdata->arcr = XGBE_DMA_SYS_ARCR; - pdata->awcr = XGBE_DMA_SYS_AWCR; - } - - /* Set the maximum fifo amounts */ - pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size; - pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size; - - /* Set the hardware channel and queue counts */ - xgbe_set_counts(pdata); - /* Always have XGMAC and XPCS (auto-negotiation) interrupts */ pdata->irq_count = 2; @@ -491,6 +427,219 @@ static int xgbe_platform_probe(struct platform_device *pdev) goto err_io; pdata->an_irq = ret; + return 0; + +err_io: + platform_device_put(phy_pdev); + + return ret; +} + +static void xgbe_init_function_disclk_baikal(void *data) +{ + struct xgbe_prv_data *pdata = data; + + clk_disable_unprepare(pdata->sysclk); +} + +static int xgbe_init_function_plat_baikal(struct xgbe_prv_data *pdata) +{ + struct platform_device *pdev = pdata->platdev; + struct device *dev = pdata->dev; + struct device_node *phy_node; + struct mdio_device *mdio_dev; + int ret; + + phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0); + if (!phy_node) { + dev_err(dev, "unable to obtain phy node\n"); + return -ENODEV; + } + + /* Nothing more sophisticated available at the moment... 
*/ + mdio_dev = of_mdio_find_device(phy_node); + of_node_put(phy_node); + if (!mdio_dev) { + dev_err_probe(dev, -EPROBE_DEFER, "unable to obtain mdio device\n"); + return -EPROBE_DEFER; + } + + pdata->phy_platdev = NULL; + pdata->phy_dev = &mdio_dev->dev; + + /* Obtain the CSR regions of the device */ + pdata->xgmac_regs = devm_platform_ioremap_resource_byname(pdev, "stmmaceth"); + if (IS_ERR(pdata->xgmac_regs)) { + dev_err(dev, "xgmac ioremap failed\n"); + ret = PTR_ERR(pdata->xgmac_regs); + goto err_io; + } + if (netif_msg_probe(pdata)) + dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs); + + pdata->xpcs_regs = devm_platform_ioremap_resource_byname(pdev, "xpcs"); + if (IS_ERR(pdata->xpcs_regs)) { + dev_err(dev, "xpcs ioremap failed\n"); + ret = PTR_ERR(pdata->xpcs_regs); + goto err_io; + } + if (netif_msg_probe(pdata)) + dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); + + /* Obtain the platform clocks setting */ + pdata->apbclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(pdata->apbclk)) { + dev_err(dev, "apb devm_clk_get failed\n"); + ret = PTR_ERR(pdata->apbclk); + goto err_io; + } + + pdata->sysclk = devm_clk_get(dev, "stmmaceth"); + if (IS_ERR(pdata->sysclk)) { + dev_err(dev, "dma devm_clk_get failed\n"); + ret = PTR_ERR(pdata->sysclk); + goto err_io; + } + pdata->sysclk_rate = clk_get_rate(pdata->sysclk); + + pdata->ptpclk = devm_clk_get(dev, "ptp_ref"); + if (IS_ERR(pdata->ptpclk)) { + dev_err(dev, "ptp devm_clk_get failed\n"); + ret = PTR_ERR(pdata->ptpclk); + goto err_io; + } + pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk); + + pdata->refclk = devm_clk_get(dev, "tx"); + if (IS_ERR(pdata->refclk)) { + dev_err(dev, "ref devm_clk_get failed\n"); + ret = PTR_ERR(pdata->refclk); + goto err_io; + } + + /* Even though it's claimed that the CSR clock source is different from + * the application clock the CSRs are still unavailable until the DMA + * clock signal is enabled. + */ + ret = clk_prepare_enable(pdata->sysclk); + if (ret) { + dev_err(dev, "sys clock enable failed\n"); + goto err_io; + } + + ret = devm_add_action_or_reset(dev, xgbe_init_function_disclk_baikal, pdata); + if (ret) { + dev_err(dev, "sys clock undo registration failed\n"); + goto err_io; + } + + /* Forget about the per-channel IRQs for now... */ + pdata->per_channel_irq = 0; // 1 + pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE; // XGBE_IRQ_MODE_LEVEL; + + pdata->irq_count = 1; + + ret = platform_get_irq_byname(pdev, "macirq"); + if (ret < 0) + goto err_io; + pdata->dev_irq = ret; + pdata->an_irq = pdata->dev_irq; + + return 0; + +err_io: + put_device(pdata->phy_dev); + + return ret; +} + +static int xgbe_platform_probe(struct platform_device *pdev) +{ + struct xgbe_prv_data *pdata; + struct device *dev = &pdev->dev; + const char *phy_mode; + enum dev_dma_attr attr; + int ret; + + pdata = xgbe_alloc_pdata(dev); + if (IS_ERR(pdata)) { + ret = PTR_ERR(pdata); + goto err_alloc; + } + + pdata->platdev = pdev; + pdata->adev = ACPI_COMPANION(dev); + platform_set_drvdata(pdev, pdata); + + /* Check if we should use ACPI or DT */ + pdata->use_acpi = dev->of_node ? 
0 : 1; + + /* Get the version data */ + pdata->vdata = xgbe_get_vdata(pdata); + + /* Platform-specific resources setup */ + ret = pdata->vdata->init_function_plat_impl(pdata); + if (ret) + goto err_plat; + + /* Activate basic clocks */ + ret = clk_prepare_enable(pdata->apbclk); + if (ret) { + dev_err(dev, "apb clock enable failed\n"); + goto err_apb; + } + + ret = clk_prepare_enable(pdata->refclk); + if (ret) { + dev_err(dev, "ref clock enable failed\n"); + goto err_ref; + } + + /* Retrieve the MAC address */ + ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY, + pdata->mac_addr, + sizeof(pdata->mac_addr)); + if (ret || !is_valid_ether_addr(pdata->mac_addr)) { + dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY); + if (!ret) + ret = -EINVAL; + goto err_io; + } + + /* Retrieve the PHY mode - it must be "xgmii" */ + ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY, + &phy_mode); + if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) { + dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY); + if (!ret) + ret = -EINVAL; + goto err_io; + } + pdata->phy_mode = PHY_INTERFACE_MODE_XGMII; + + /* Set the DMA coherency values */ + attr = device_get_dma_attr(dev); + if (attr == DEV_DMA_NOT_SUPPORTED) { + dev_err(dev, "DMA is not supported"); + ret = -ENODEV; + goto err_io; + } + pdata->coherent = (attr == DEV_DMA_COHERENT); + if (pdata->coherent) { + pdata->arcr = XGBE_DMA_OS_ARCR; + pdata->awcr = XGBE_DMA_OS_AWCR; + } else { + pdata->arcr = XGBE_DMA_SYS_ARCR; + pdata->awcr = XGBE_DMA_SYS_AWCR; + } + + /* Set the maximum fifo amounts */ + pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size; + pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size; + + /* Set the hardware channel and queue counts */ + xgbe_set_counts(pdata); + /* Configure the netdev resource */ ret = xgbe_config_netdev(pdata); if (ret) @@ -501,9 +650,15 @@ static int xgbe_platform_probe(struct platform_device *pdev) return 0; err_io: - platform_device_put(phy_pdev); + clk_disable_unprepare(pdata->refclk); + +err_ref: + clk_disable_unprepare(pdata->apbclk); -err_phydev: +err_apb: + put_device(pdata->phy_dev); + +err_plat: xgbe_free_pdata(pdata); err_alloc: @@ -518,7 +673,11 @@ static int xgbe_platform_remove(struct platform_device *pdev) xgbe_deconfig_netdev(pdata); - platform_device_put(pdata->phy_platdev); + clk_disable_unprepare(pdata->refclk); + + clk_disable_unprepare(pdata->apbclk); + + put_device(pdata->phy_dev); xgbe_free_pdata(pdata); @@ -573,6 +732,7 @@ static int xgbe_platform_resume(struct device *dev) #endif /* CONFIG_PM_SLEEP */ static const struct xgbe_version_data xgbe_v1 = { + .init_function_plat_impl = xgbe_init_function_plat_amd, .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1, .xpcs_access = XGBE_XPCS_ACCESS_V1, .tx_max_fifo_size = 81920, @@ -580,6 +740,19 @@ static const struct xgbe_version_data xgbe_v1 = { .tx_tstamp_workaround = 1, }; +static const struct xgbe_version_data xgbe_v3 = { + .init_function_plat_impl = xgbe_init_function_plat_baikal, + .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v3, + .xpcs_access = XGBE_XPCS_ACCESS_V1, + .tx_max_fifo_size = 32768, + .rx_max_fifo_size = 32768, + .blen = DMA_SBMR_BLEN_16, + .pbl = DMA_PBL_256, + .rd_osr_limit = 8, + .wr_osr_limit = 8, + .tx_tstamp_workaround = 1, +}; + #ifdef CONFIG_ACPI static const struct acpi_device_id xgbe_acpi_match[] = { { .id = "AMDI8001", @@ -594,6 +767,8 @@ MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match); static const struct of_device_id xgbe_of_match[] = 
{ { .compatible = "amd,xgbe-seattle-v1a", .data = &xgbe_v1 }, + { .compatible = "baikal,bt1-xgmac", + .data = &xgbe_v3 }, {}, }; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 7a41367c437dd..dc963f90d8905 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1027,11 +1027,16 @@ struct xgbe_hw_features { }; struct xgbe_version_data { + int (*init_function_plat_impl)(struct xgbe_prv_data *); void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *); enum xgbe_xpcs_access xpcs_access; unsigned int mmc_64bit; unsigned int tx_max_fifo_size; unsigned int rx_max_fifo_size; + unsigned int blen; + unsigned int pbl; + unsigned int rd_osr_limit; + unsigned int wr_osr_limit; unsigned int tx_tstamp_workaround; unsigned int ecc_support; unsigned int i2c_support; @@ -1207,6 +1212,8 @@ struct xgbe_prv_data { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; /* Device clocks */ + struct clk *apbclk; + struct clk *refclk; struct clk *sysclk; unsigned long sysclk_rate; struct clk *ptpclk; @@ -1334,6 +1341,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *); void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *); void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *); void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *); +void xgbe_init_function_ptrs_phy_v3(struct xgbe_phy_if *); void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *); void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *); const struct net_device_ops *xgbe_get_netdev_ops(void); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 31ff351740342..abf75814f0ed5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -66,6 +66,15 @@ config DWMAC_ANARION This selects the Anarion SoC glue layer support for the stmmac driver. +config DWMAC_BT1 + tristate "Baikal-T1 GMAC support" + depends on OF && (MIPS_BAIKAL_T1 || COMPILE_TEST) + help + Support for Baikal-T1 GMAC Ethernet controller. + + This selects the Baikal-T1 platform specific glue layer of the + STMMAC driver. + config DWMAC_INGENIC tristate "Ingenic MAC support" default MACH_INGENIC diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index d4e12e9ace4ff..222240510748d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -14,6 +14,7 @@ stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o # Ordering matters. Generic driver must be last. 
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o +obj-$(CONFIG_DWMAC_BT1) += dwmac-bt1.o obj-$(CONFIG_DWMAC_INGENIC) += dwmac-ingenic.o obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-bt1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-bt1.c new file mode 100644 index 0000000000000..bf3ec1e722aa5 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-bt1.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Baikal-T1 GMAC driver + * + * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dwmac1000.h" +#include "dwmac_dma.h" +#include "stmmac.h" +#include "stmmac_platform.h" + +/* General Purpose IO */ +#define GMAC_GPIO 0x000000e0 +#define GMAC_GPIO_GPIS BIT(0) +#define GMAC_GPIO_GPO BIT(8) + +struct bt1_gmac { + struct device *dev; + struct clk *tx_clk; +}; + +static int bt1_gmac_clks_config(void *bsp_priv, bool enable) +{ + struct bt1_gmac *btg = bsp_priv; + int ret = 0; + + if (enable) { + ret = clk_prepare_enable(btg->tx_clk); + if (ret) + dev_err(btg->dev, "Failed to enable Tx clock\n"); + } else { + clk_disable_unprepare(btg->tx_clk); + } + + return ret; +} + +static int bt1_gmac_bus_reset(void *bsp_priv) +{ + struct bt1_gmac *btg = bsp_priv; + struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(btg->dev)); + + writel(0, priv->ioaddr + GMAC_GPIO); + fsleep(priv->mii->reset_delay_us); + writel(GMAC_GPIO_GPO, priv->ioaddr + GMAC_GPIO); + if (priv->mii->reset_post_delay_us > 0) + fsleep(priv->mii->reset_post_delay_us); + + return 0; +} + +/* Clean the basic MAC registers up. Note the MAC interrupts are enabled by + * default after reset. Let's mask them out so not to have any spurious + * MAC-related IRQ generated during the cleanup procedure. 
+ */ +static void bt1_gmac_core_clean(struct stmmac_priv *priv) +{ + int i; + + writel(0x7FF, priv->ioaddr + GMAC_INT_MASK); + writel(0, priv->ioaddr + GMAC_CONTROL); + writel(0, priv->ioaddr + GMAC_FRAME_FILTER); + writel(0, priv->ioaddr + GMAC_HASH_HIGH); + writel(0, priv->ioaddr + GMAC_HASH_LOW); + writel(0, priv->ioaddr + GMAC_FLOW_CTRL); + writel(0, priv->ioaddr + GMAC_VLAN_TAG); + writel(0, priv->ioaddr + GMAC_DEBUG); + writel(0x80000000, priv->ioaddr + GMAC_PMT); + writel(0, priv->ioaddr + LPI_CTRL_STATUS); + writel(0x03e80000, priv->ioaddr + LPI_TIMER_CTRL); + for (i = 0; i < 15; ++i) { + writel(0x0000ffff, priv->ioaddr + GMAC_ADDR_HIGH(i)); + writel(0xffffffff, priv->ioaddr + GMAC_ADDR_LOW(i)); + } + writel(0, priv->ioaddr + GMAC_PCS_BASE); + writel(0, priv->ioaddr + GMAC_RGSMIIIS); + writel(0x1, priv->ioaddr + GMAC_MMC_CTRL); + readl(priv->ioaddr + GMAC_INT_STATUS); + readl(priv->ioaddr + GMAC_PMT); + readl(priv->ioaddr + LPI_CTRL_STATUS); +} + +/* Clean the basic DMA registers up */ +static void bt1_gmac_dma_clean(struct stmmac_priv *priv) +{ + writel(0, priv->ioaddr + DMA_INTR_ENA); + writel(0x00020100, priv->ioaddr + DMA_BUS_MODE); + writel(0, priv->ioaddr + DMA_RCV_BASE_ADDR); + writel(0, priv->ioaddr + DMA_TX_BASE_ADDR); + writel(0x00100000, priv->ioaddr + DMA_CONTROL); + writel(0x00110001, priv->ioaddr + DMA_AXI_BUS_MODE); + writel(0x0001FFFF, priv->ioaddr + DMA_STATUS); +} + +static int bt1_gmac_swr_reset(void *bsp_priv) +{ + struct bt1_gmac *btg = bsp_priv; + struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(btg->dev)); + + bt1_gmac_core_clean(priv); + + bt1_gmac_dma_clean(priv); + + return 0; +} + +static void bt1_gmac_fix_mac_speed(void *bsp_priv, unsigned int speed) +{ + struct bt1_gmac *btg = bsp_priv; + unsigned long rate; + int ret; + + switch (speed) { + case SPEED_1000: + rate = 250000000; + break; + case SPEED_100: + rate = 50000000; + break; + case SPEED_10: + rate = 5000000; + break; + default: + dev_err(btg->dev, "Unsupported speed %u\n", speed); + return; + } + + /* The clock must be gated to successfully update the rate */ + clk_disable_unprepare(btg->tx_clk); + + ret = clk_set_rate(btg->tx_clk, rate); + if (ret) + dev_err(btg->dev, "Failed to update Tx clock rate %lu\n", rate); + + ret = clk_prepare_enable(btg->tx_clk); + if (ret) + dev_err(btg->dev, "Failed to re-enable Tx clock\n"); + +} + +static int bt1_gmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct bt1_gmac *btg; + int ret; + + btg = devm_kzalloc(&pdev->dev, sizeof(*btg), GFP_KERNEL); + if (!btg) + return -ENOMEM; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "DT configuration failed\n"); + return PTR_ERR(plat_dat); + } + + btg->dev = &pdev->dev; + + btg->tx_clk = devm_clk_get(&pdev->dev, "tx"); + if (IS_ERR(btg->tx_clk)) { + ret = dev_err_probe(&pdev->dev, PTR_ERR(btg->tx_clk), + "Failed to get Tx clock\n"); + goto err_remove_config_dt; + } + + ret = clk_prepare_enable(btg->tx_clk); + if (ret) { + dev_err(btg->dev, "Failed to pre-enable Tx clock\n"); + goto err_remove_config_dt; + } + + plat_dat->addr64 = 32; + plat_dat->has_gmac = 1; + plat_dat->enh_desc = 1; + plat_dat->tx_coe = 1; + plat_dat->rx_coe = 1; + plat_dat->pmt = 1; + plat_dat->unicast_filter_entries = 8; + plat_dat->multicast_filter_bins = 0; + plat_dat->clks_config = bt1_gmac_clks_config; + 
plat_dat->bus_reset = bt1_gmac_bus_reset; + plat_dat->swr_reset = bt1_gmac_swr_reset; + plat_dat->fix_mac_speed = bt1_gmac_fix_mac_speed; + plat_dat->bsp_priv = btg; + plat_dat->mdio_bus_data->needs_reset = true; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_disable_tx_clk; + + return 0; + +err_disable_tx_clk: + clk_disable_unprepare(btg->tx_clk); + +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; +} + +static const struct of_device_id bt1_gmac_match[] = { + { .compatible = "baikal,bt1-gmac"}, + { } +}; +MODULE_DEVICE_TABLE(of, bt1_gmac_match); + +static struct platform_driver bt1_gmac_driver = { + .probe = bt1_gmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "bt1-gmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = of_match_ptr(bt1_gmac_match), + }, +}; +module_platform_driver(bt1_gmac_driver); + +MODULE_AUTHOR("Serge Semin "); +MODULE_DESCRIPTION("Baikal-T1 GMAC glue driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3c1d4b27668fe..55e1b8c6f599d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -301,7 +301,12 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) { u32 clk_rate; - clk_rate = clk_get_rate(priv->plat->stmmac_clk); + /* If APB clock has been specified then it is supposed to be used + * to select the CSR mode. Otherwise the application clock is the + * source of the periodic signal for the CSR interface. + */ + clk_rate = clk_get_rate(priv->plat->pclk) ?: + clk_get_rate(priv->plat->stmmac_clk); /* Platform provided default clk_csr would be assumed valid * for all other cases except for the below mentioned ones. 
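For reference, a consumer node for the "baikal,bt1-gmac" compatible registered by the glue driver above might look roughly like the sketch below. It is illustrative only and not taken from this patch: the unit address, register window, interrupt specifier, CCU clock indices and the RGMII phy-mode are placeholders, and the authoritative description is the DT binding and soc.dtsi added elsewhere in this series. The clock-names mirror what the code requests: "stmmaceth" and "pclk" are taken by the stmmac core (the latter now also feeding the CSR divider selection shown in the hunk above), while "tx" is the clock reparented by bt1_gmac_fix_mac_speed().

	gmac0: ethernet@1f05e000 {			/* placeholder unit address */
		compatible = "baikal,bt1-gmac";
		reg = <0x1f05e000 0x2000>;		/* placeholder */
		interrupts = <GIC_SHARED 72 IRQ_TYPE_LEVEL_HIGH>;	/* placeholder */
		interrupt-names = "macirq";
		clocks = <&ccu_sys 1>, <&ccu_sys 2>, <&ccu_sys 3>;	/* placeholder specifiers */
		clock-names = "stmmaceth", "pclk", "tx";
		phy-mode = "rgmii-id";			/* example only */
		phy-handle = <&gmac0_phy>;
	};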
@@ -2916,7 +2921,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) atds = 1; - ret = stmmac_reset(priv, priv->ioaddr); + if (priv->plat->swr_reset) + ret = priv->plat->swr_reset(priv->plat->bsp_priv); + else + ret = stmmac_reset(priv, priv->ioaddr); if (ret) { dev_err(priv->device, "Failed to reset the dma\n"); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 5f177ea807258..8d207e28182ad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -349,6 +349,13 @@ int stmmac_mdio_reset(struct mii_bus *bus) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; + int ret; + + if (priv->plat->bus_reset) { + ret = priv->plat->bus_reset(priv->plat->bsp_priv); + if (ret) + return ret; + } #ifdef CONFIG_OF if (priv->device->of_node) { @@ -444,7 +451,7 @@ int stmmac_mdio_register(struct net_device *ndev) if (!mdio_bus_data) return 0; - new_bus = mdiobus_alloc(); + priv->mii = new_bus = mdiobus_alloc(); if (!new_bus) return -ENOMEM; @@ -500,12 +507,12 @@ int stmmac_mdio_register(struct net_device *ndev) fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link"); if (fixed_node) { fwnode_handle_put(fixed_node); - goto bus_register_done; + return 0; } } if (priv->plat->phy_node || mdio_node) - goto bus_register_done; + return 0; found = 0; for (addr = 0; addr < max_addr; addr++) { @@ -542,9 +549,6 @@ int stmmac_mdio_register(struct net_device *ndev) goto no_phy_found; } -bus_register_done: - priv->mii = new_bus; - return 0; no_phy_found: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 0046a4ee6e641..a3429e51ed00a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -40,6 +40,7 @@ static int dwmac1000_validate_mcast_bins(struct device *dev, int mcast_bins) int x = mcast_bins; switch (x) { + case 0: case HASH_TABLE_SIZE: case 128: case 256: @@ -96,7 +97,8 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) struct device_node *np; struct stmmac_axi *axi; - np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0); + np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0) ?: + of_get_child_by_name(pdev->dev.of_node, "axi-bus-config"); if (!np) return NULL; @@ -151,11 +153,13 @@ static int stmmac_mtl_setup(struct platform_device *pdev, plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; - rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); + rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0) ?: + of_get_child_by_name(pdev->dev.of_node, "rx-queues-config"); if (!rx_node) return ret; - tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0); + tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0) ?: + of_get_child_by_name(pdev->dev.of_node, "tx-queues-config"); if (!tx_node) { of_node_put(rx_node); return ret; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c57a0262fb64f..60118aebaf27f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -219,6 +219,12 @@ config MARVELL_88X2222_PHY Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet Transceiver. 
+config MARVELL_88X2222_KR_PHY + tristate "Marvell 88X2222 KR/KX4 PHY" + help + Support for the Marvell 88X2222 KR/KX4 Dual-port Multi-speed + Ethernet Transceiver. + config MAXLINEAR_GPHY tristate "Maxlinear Ethernet PHYs" select POLYNOMIAL if HWMON diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index f7138d3c896b3..45b6eed420752 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o obj-$(CONFIG_MARVELL_PHY) += marvell.o obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o +obj-$(CONFIG_MARVELL_88X2222_KR_PHY) += marvell-88x2222-kr.o obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o diff --git a/drivers/net/phy/marvell-88x2222-kr.c b/drivers/net/phy/marvell-88x2222-kr.c new file mode 100644 index 0000000000000..b963a722a4d1e --- /dev/null +++ b/drivers/net/phy/marvell-88x2222-kr.c @@ -0,0 +1,442 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Marvell Integrated Dual-port + * Multi-speed Ethernet Transceiver 88x2222 KR/KX4 mode + * + * Copyright (c) 2015, 2016, 2020 Baikal Electronics JSC. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* 31.F002 Line side mode (ch.3.1.2, pg.46) */ +#define MV_MODE_LINE_SHF 8 +#define MV_MODE_LINE_10GBR (0x71UL << 8) +#define MV_MODE_LINE_10GBW (0x74UL << 8) +#define MV_MODE_LINE_2GBX_AN_OFF (0x76UL << 8) +#define MV_MODE_LINE_1GBR_AN_OFF (0x72UL << 8) +#define MV_MODE_LINE_1GBR_AN_ON (0x73UL << 8) +#define MV_MODE_LINE_SGMII_SYS_AN_OFF (0x7CUL << 8) +#define MV_MODE_LINE_SGMII_SYS_AN_ON (0x7DUL << 8) +#define MV_MODE_LINE_SGMII_NET_AN_OFF (0x7EUL << 8) +#define MV_MODE_LINE_SGMII_NET_AN_ON (0x7FUL << 8) +#define MV_MODE_LINE_DEFAULT MV_MODE_LINE_10GBR +#define MV_MODE_LINE_OF_NAME "baikal,line-mode" + +/* 31.F002 Host side mode (ch.3.1.2, pg.46) */ +#define MV_MODE_HOST_SHF 0 +#define MV_MODE_HOST_10GBR (0x71UL << 0) +#define MV_MODE_HOST_10GBX2 (0x72UL << 0) +#define MV_MODE_HOST_10GBX4 (0x73UL << 0) +#define MV_MODE_HOST_2GBX_AN_OFF (0x76UL << 0) +#define MV_MODE_HOST_1GBR_AN_OFF (0x7AUL << 0) +#define MV_MODE_HOST_1GBR_AN_ON (0x7BUL << 0) +#define MV_MODE_HOST_SGMII_SYS_AN_OFF (0x7CUL << 0) +#define MV_MODE_HOST_SGMII_SYS_AN_ON (0x7DUL << 0) +#define MV_MODE_HOST_SGMII_NET_AN_OFF (0x7EUL << 0) +#define MV_MODE_HOST_SGMII_NET_AN_ON (0x7FUL << 0) +#define MV_MODE_HOST_DEFAULT MV_MODE_HOST_10GBR +#define MV_MODE_HOST_OF_NAME "baikal,host-mode" + +/* 31.F402 Host side line muxing (ch.3.1.5, pg.48) */ +#define MV_ATT_10GBX2_SHF 11 +#define MV_ATT_10GBX2_LANE_0145 (0UL << 11) +#define MV_ATT_10GBX2_LANE_0123 (1UL << 11) +#define MV_ATT_10GBR_SHF 9 +#define MV_ATT_10GBR_LANE_0246 (0UL << 9) +#define MV_ATT_10GBR_LANE_0123 (1UL << 9) +#define MV_ATT_2GBR_SHF 8 +#define MV_ATT_2GBR_LANE_0246 (0UL << 8) +#define MV_ATT_2GBR_LANE_0123 (1UL << 8) +#define MV_ATT_1GBR_SHF 8 +#define MV_ATT_1GBR_LANE_0246 (0UL << 8) +#define MV_ATT_1GBR_LANE_0123 (1UL << 8) +#define MV_ATT_DEFAULT 0 +#define MV_ATT_OF_NAME "baikal,mux" + +/* 31.F003 Software reset (ch.3.2 pg.50) */ +#define MV_SW_RST_HOST_SHF 7 +#define MV_SW_RST_HOST (1UL << 7) +#define MV_SW_RST_LINE_SHF 15 +#define MV_SW_RST_LINE (1UL << 15) +#define MV_SW_RST_ALL (MV_SW_RST_HOST | MV_SW_RST_LINE) + +/* 31.F012 GPIO data */ +#define MV_GPIO_TXDISABLE_DATA_SHF 8 + +/* 31.F013 Tristate Control */ +#define MV_GPIO_TXDISABLE_OUTP_EN_SHF 8 + +/* 
31.F016 Interrupt type 3 */
+#define MV_GPIO_TXDISABLE_FN_SHF 3
+#define MV_GPIO_TXDISABLE_FN_GPIO 0x1
+
+/* Devices in package and registers */
+#define MV_DEV_10GBW_IRQ_ENABLE 0x8000
+#define MV_DEV_10GBW_IRQ_STATUS 0x8001
+#define MV_DEV_10GBW_IRQ_REALTIME 0x8002
+
+#define MV_DEV_10GBR_ANEG 0x2000
+#define MV_DEV_10GBR_IRQ_ENABLE 0x8000
+#define MV_DEV_10GBR_IRQ_STATUS 0x8001
+#define MV_DEV_10GBR_IRQ_REALTIME 0x8002
+
+#define MV_DEV_GBX_IRQ_ENABLE 0xA000
+#define MV_DEV_GBX_IRQ_STATUS 0xA001
+#define MV_DEV_GBX_IRQ_REALTIME 0xA002
+
+#define MV_DEV_MISC_IRQ_ENABLE 0xF00A
+#define MV_DEV_MISC_IRQ_STATUS 0xF00B
+
+#define MV_DEV_GPIO_DATA 0xF012
+#define MV_DEV_GPIO_TRISTATE_CTL 0xF013
+#define MV_DEV_GPIO_INTERRUPT_TYPE_3 0xF016
+
+#define MV_DEV_CHIP_HOST_LINE 0xF002
+#define MV_DEV_CHIP_RESET 0xF003
+#define MV_DEV_CHIP_MUX 0xF402
+#define MV_DEV_CHIP_IRQ_STATUS 0xF420
+#define MV_DEV_CHIP_IRQ_CONTROL 0xF421
+
+#define MV_RESET_DELAY_US 500
+
+struct mode {
+ unsigned int mode_num;
+ char mode_name[16];
+};
+
+static struct mode line_modes[] = {
+ {MV_MODE_LINE_10GBR, "KR"},
+ {MV_MODE_LINE_10GBW, "10GBW"},
+ {MV_MODE_LINE_2GBX_AN_OFF, "2GBX_AN_OFF"},
+ {MV_MODE_LINE_1GBR_AN_OFF, "1GBR_AN_OFF"},
+ {MV_MODE_LINE_1GBR_AN_ON, "1GBR_AN_ON"},
+ {MV_MODE_LINE_SGMII_SYS_AN_OFF, "SGMII_SYS_AN_OFF"},
+ {MV_MODE_LINE_SGMII_SYS_AN_ON, "SGMII_SYS_AN_ON"},
+ {MV_MODE_LINE_SGMII_NET_AN_OFF, "SGMII_NET_AN_OFF"},
+ {MV_MODE_LINE_SGMII_NET_AN_ON, "SGMII_NET_AN_ON"}
+};
+
+static struct mode host_modes[] = {
+ {MV_MODE_HOST_10GBR, "KR"},
+ {MV_MODE_HOST_10GBX2, "10GBX2"},
+ {MV_MODE_HOST_10GBX4, "KX4"},
+ {MV_MODE_HOST_2GBX_AN_OFF, "2GBX_AN_OFF"},
+ {MV_MODE_HOST_1GBR_AN_OFF, "1GBR_AN_OFF"},
+ {MV_MODE_HOST_1GBR_AN_ON, "1GBR_AN_ON"},
+ {MV_MODE_HOST_SGMII_SYS_AN_OFF, "SGMII_SYS_AN_OFF"},
+ {MV_MODE_HOST_SGMII_SYS_AN_ON, "SGMII_SYS_AN_ON"},
+ {MV_MODE_HOST_SGMII_NET_AN_OFF, "SGMII_NET_AN_OFF"},
+ {MV_MODE_HOST_SGMII_NET_AN_ON, "SGMII_NET_AN_ON"}
+};
+
+struct mv88x2222_data {
+ int line_mode;
+ int host_mode;
+ int mux;
+};
+
+static void *marvell_of_get_data(struct phy_device *phydev)
+{
+ struct device_node *np = phydev->mdio.dev.of_node;
+ struct mv88x2222_data *pdata;
+ const char *pm;
+ char mode[32];
+ int ret, i;
+
+ pdata = devm_kzalloc(&phydev->mdio.dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pm = mode;
+ pdata->line_mode = MV_MODE_LINE_DEFAULT;
+ ret = of_property_read_string(np, MV_MODE_LINE_OF_NAME, &pm);
+ if (!ret) {
+ for (i = 0; i < sizeof(line_modes) / sizeof(struct mode); ++i) {
+ if (strcasecmp(line_modes[i].mode_name, pm) == 0) {
+ pdata->line_mode = line_modes[i].mode_num;
+ break;
+ }
+ }
+ }
+
+ pdata->host_mode = MV_MODE_HOST_DEFAULT;
+ ret = of_property_read_string(np, MV_MODE_HOST_OF_NAME, &pm);
+ if (!ret) {
+ for (i = 0; i < sizeof(host_modes) / sizeof(struct mode); ++i) {
+ if (strcasecmp(host_modes[i].mode_name, pm) == 0) {
+ pdata->host_mode = host_modes[i].mode_num;
+ break;
+ }
+ }
+ }
+
+ /* Default value for now */
+ pdata->mux = MV_ATT_DEFAULT;
+
+ return pdata;
+}
+
+static int marvell_soft_reset(struct phy_device *phydev)
+{
+ int ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_RESET,
+ MV_SW_RST_ALL);
+ int count = 50;
+
+ if (ret) {
+ dev_warn(&phydev->mdio.dev, "software reset failed\n");
+ return ret;
+ }
+
+ do {
+ usleep_range(MV_RESET_DELAY_US, MV_RESET_DELAY_US + 100);
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_RESET);
+ } while ((ret & MV_SW_RST_ALL) && count--);
+
+ return 0;
+}
+
+static int marvell_config_init(struct phy_device *phydev)
+{
+ struct mv88x2222_data *pdata = phydev->priv;
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE,
+ pdata->line_mode | pdata->host_mode);
+ if (ret)
+ return ret;
+
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
+ /*
+ * This must be done after the mode has been set.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE);
+ ret |= 0x8000;
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE, ret);
+
+ marvell_soft_reset(phydev);
+
+ dev_info(&phydev->mdio.dev, "phy(%d, %x)=%x\n", MDIO_MMD_VEND2,
+ MV_DEV_CHIP_HOST_LINE,
+ phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE));
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported, 1);
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->interface = PHY_INTERFACE_MODE_XGMII;
+ phydev->duplex = DUPLEX_FULL;
+
+ switch (pdata->line_mode) {
+ case MV_MODE_LINE_10GBR:
+ case MV_MODE_LINE_10GBW:
+ phydev->speed = SPEED_10000;
+ break;
+ case MV_MODE_LINE_2GBX_AN_OFF:
+ phydev->speed = SPEED_2500;
+ break;
+ default:
+ phydev->speed = SPEED_1000;
+ break;
+ }
+
+ return 0;
+}
+
+static int marvell_adjust_tx(struct phy_device *phydev)
+{
+ int reg, line_link = 1;
+
+ /* Switch tristate to "write to pin/read from register" */
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_TRISTATE_CTL);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_TRISTATE_CTL,
+ reg | (1 << MV_GPIO_TXDISABLE_OUTP_EN_SHF));
+
+ /* Switch off TX_DISABLE */
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA, reg &
+ ~(1 << MV_GPIO_TXDISABLE_DATA_SHF));
+
+ /* Check if the opto-cable is plugged */
+ reg = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if ((reg < 0) || !(reg & MDIO_STAT1_LSTATUS))
+ line_link = 0;
+
+ if (line_link) {
+ /* It's fine */
+ return 0;
+
+ } else {
+ /* Switch on TX_DISABLE */
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA, reg |
+ (1 << MV_GPIO_TXDISABLE_DATA_SHF));
+ }
+
+ return 1;
+}
+
+static int marvell_update_link(struct phy_device *phydev)
+{
+ int reg, host_mode, line_mode;
+
+ /* Default link status */
+ phydev->link = 1;
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE);
+ if (reg < 0) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ host_mode = reg & 0x007F;
+ line_mode = reg & 0x7F00;
+
+ /* Read host link status */
+ if (host_mode == MV_MODE_HOST_10GBX4)
+ reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, 0x1001);
+ else
+ reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_STAT1);
+
+ if ((reg < 0) || !(reg & MDIO_STAT1_LSTATUS))
+ phydev->link = 0;
+
+ /* Read line link status */
+ if (line_mode == MV_MODE_LINE_10GBR)
+ reg = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ else
+ reg = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x2001);
+
+ if ((reg < 0) || !(reg & MDIO_STAT1_LSTATUS))
+ phydev->link = 0;
+
+ /*
+ * PMAPMD link status is always broken
+ * later we need to
update this driver; + */ + reg = marvell_adjust_tx(phydev); + if (reg < 0) + phydev->link = 0; + + return 0; +} + +static int marvell_read_status(struct phy_device *phydev) +{ + int reg; + + /* Update the link, but return if there was an error */ + reg = marvell_update_link(phydev); + if (reg < 0) + return reg; + + /* Read line control reg */ + reg = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); + if (reg < 0) + return reg; + + return 0; +} + +static int marvell_config_aneg(struct phy_device *phydev) +{ + linkmode_copy(phydev->advertising, phydev->supported); + + return 0; +} + +static int marvell_probe(struct phy_device *phydev) +{ + struct mv88x2222_data *pdata = NULL; + + if (phydev->mdio.dev.of_node) + pdata = marvell_of_get_data(phydev); + + if (!pdata) { + dev_err(&phydev->mdio.dev, "No PHY platform data\n"); + return -ENODEV; + } + + phydev->priv = pdata; + + dev_info(&phydev->mdio.dev, "PHY detected at 0x%02x\n", + phydev->mdio.addr); + + return 0; +} + +static int marvell_suspend(struct phy_device *phydev) +{ + int reg; + mutex_lock(&phydev->lock); + + /* Switch tristate to "write to pin/read from register" */ + reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_TRISTATE_CTL); + phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_TRISTATE_CTL,\ + reg | (1 << MV_GPIO_TXDISABLE_OUTP_EN_SHF)); + + /* Switch on TX_DISABLE */ + reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA); + phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA, reg | \ + (1 << MV_GPIO_TXDISABLE_DATA_SHF)); + /* TBD Probably switch to lowpower mode */ + + mutex_unlock(&phydev->lock); + + return 0; +} + +/*static int marvell_match_phy_device(struct phy_device *phydev) +{ + unsigned int phy_id = phydev->c45_ids.device_ids[MDIO_MMD_PCS] & MARVELL_PHY_ID_MASK; + + return (phy_id == MARVELL_PHY_ID_88X2222) || (phy_id == MARVELL_PHY_ID_88X2222R); +}*/ + +static struct phy_driver marvell_drivers[] = { + { + .phy_id = MARVELL_PHY_ID_88X2222, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88X2222 KR/KX4", + .features = 0, + .config_init = marvell_config_init, + .config_aneg = marvell_config_aneg, + .probe = marvell_probe, + //.match_phy_device = marvell_match_phy_device, + .read_status = marvell_read_status, + .soft_reset = marvell_soft_reset, + .resume = genphy_resume, + .suspend = marvell_suspend, + }, +}; +module_phy_driver(marvell_drivers); + +static struct mdio_device_id __maybe_unused marvell_tbl[] = { + { MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88X2222R, MARVELL_PHY_ID_MASK }, + { } +}; +MODULE_DEVICE_TABLE(mdio, marvell_tbl); + +MODULE_DESCRIPTION("Marvell 88x2222 KR/KX4 ethernet transceiver driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c index fd9ad48201924..f2ebf04b368af 100644 --- a/drivers/net/phy/marvell-88x2222.c +++ b/drivers/net/phy/marvell-88x2222.c @@ -616,6 +616,7 @@ module_phy_driver(mv2222_drivers); static struct mdio_device_id __maybe_unused mv2222_tbl[] = { { MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88X2222R, MARVELL_PHY_ID_MASK }, { } }; MODULE_DEVICE_TABLE(mdio, mv2222_tbl); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 3d99fd6664d7a..72ca06bcd53ef 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -637,6 +637,42 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, return ret; } +static int rtl8211e_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + 
int ret; + + /* Write to the MMD registers by using the standard control/data pair. + * The only difference is that we need to perform a dummy read after + * the PC1R.CLKSTOP_EN bit is set. It's required to workaround an issue + * of a partial core freeze so LED2 stops blinking in EEE mode, PHY + * stops detecting the link change and raising IRQs until any read from + * its registers performed. That happens only if and right after the PHY + * is enabled to stop RXC in LPI mode. + */ + ret = __phy_write(phydev, MII_MMD_CTRL, devnum); + if (ret) + return ret; + + ret = __phy_write(phydev, MII_MMD_DATA, regnum); + if (ret) + return ret; + + ret = __phy_write(phydev, MII_MMD_CTRL, devnum | MII_MMD_CTRL_NOINCR); + if (ret) + return ret; + + ret = __phy_write(phydev, MII_MMD_DATA, val); + if (ret) + return ret; + + if (devnum == MDIO_MMD_PCS && regnum == MDIO_CTRL1 && + val & MDIO_PCS_CTRL1_CLKSTOP_EN) + ret = __phy_read(phydev, MII_MMD_DATA); + + return ret < 0 ? ret : 0; +} + static int rtl822x_get_features(struct phy_device *phydev) { int val; @@ -919,6 +955,7 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + .write_mmd = rtl8211e_write_mmd, }, { PHY_ID_MATCH_EXACT(0x001cc916), .name = "RTL8211F Gigabit Ethernet", diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index feafa378bf8ea..02516d73a1607 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -156,11 +156,30 @@ static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL, */ static void pci_clip_resource_to_region(struct pci_bus *bus, struct resource *res, - struct pci_bus_region *region) + struct pci_bus_region *region, + resource_size_t *align) { struct pci_bus_region r; + resource_size_t new_align, offset; pcibios_resource_to_bus(bus, &r, res); + + offset = res->start - r.start; + if (offset & (*align - 1) && (r.start & (*align - 1)) == 0) { + /* + * a) CPU address (resource) differs from PCI bus address + * (pci_bus_region), i.e. address translation is in effect; + * b) PCI bus address is aligned as required; + * c) CPU address is not aligned. + * So, we can relax alignment requirement for CPU address. + */ + new_align = 1 << __ffs(offset); + dev_info(&bus->dev, + "pci_clip_resource_to_region: relaxing alignment from %pa to %pa\n", + align, &new_align); + *align = new_align; + } + if (r.start < region->start) r.start = region->start; if (r.end > region->end) @@ -190,6 +209,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, pci_bus_for_each_resource(bus, r, i) { resource_size_t min_used = min; + resource_size_t res_align = align; if (!r) continue; @@ -205,7 +225,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, continue; avail = *r; - pci_clip_resource_to_region(bus, &avail, region); + pci_clip_resource_to_region(bus, &avail, region, &res_align); /* * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to @@ -220,7 +240,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, /* Ok, try it out.. 
*/ ret = allocate_resource(r, res, size, min_used, max, - align, alignf, alignf_data); + res_align, alignf, alignf_data); if (ret == 0) return 0; } diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 62ce3abf0f196..771b8b146623f 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -222,6 +222,15 @@ config PCIE_ARTPEC6_EP Enables support for the PCIe controller in the ARTPEC-6 SoC to work in endpoint mode. This uses the DesignWare core. +config PCIE_BT1 + tristate "Baikal-T1 PCIe controller" + depends on MIPS_BAIKAL_T1 || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Enables support for the PCIe controller in the Baikal-T1 SoC to work + in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core. + config PCIE_ROCKCHIP_DW_HOST bool "Rockchip DesignWare PCIe controller" select PCIE_DW diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index 8ba7b67f5e50a..bf5c311875a1e 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o +obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c new file mode 100644 index 0000000000000..5c9728699119d --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-bt1.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Vadim Vlasov + * Serge Semin + * + * Baikal-T1 PCIe controller driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +/* Baikal-T1 System CCU control registers */ +#define BT1_CCU_PCIE_CLKC 0x140 +#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16) +#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17) +#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18) + +#define BT1_CCU_PCIE_RSTC 0x144 +#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13) +#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14) +#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16) +#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24) +#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26) +#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27) + +#define BT1_CCU_PCIE_PMSC 0x148 +#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0) +#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00 +#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01 +#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02 +#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03 +#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04 +#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05 +#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06 +#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07 +#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08 +#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09 +#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a +#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b +#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c +#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d +#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e +#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f +#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10 +#define BT1_CCU_PCIE_LTSSM_L0 0x11 +#define BT1_CCU_PCIE_LTSSM_L0S 0x12 +#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13 +#define 
BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14 +#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15 +#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16 +#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17 +#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18 +#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19 +#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a +#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b +#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c +#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d +#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e +#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20 +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21 +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22 +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23 +#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6) +#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7) +#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8) +#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9) +#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10) +#define BT1_CCU_PCIE_L1_PENDING BIT(12) +#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14) +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15) +#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16) +#define BT1_CCU_PCIE_PM_PME_EN BIT(20) +#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21) +#define BT1_CCU_PCIE_AUX_PM_EN BIT(22) +#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23) +#define BT1_CCU_PCIE_WAKE_DET BIT(24) +#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30) +#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31) + +#define BT1_CCU_PCIE_GENC 0x14c +#define BT1_CCU_PCIE_LTSSM_EN BIT(1) +#define BT1_CCU_PCIE_DBI2_MODE BIT(2) +#define BT1_CCU_PCIE_MGMT_EN BIT(3) +#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16) +#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17) +#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24) +#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25) +#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26) +#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27) + +#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \ +({ \ + int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \ + __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \ +}) + +/* Baikal-T1 PCIe specific control registers */ +#define BT1_PCIE_AXI2MGM_LANENUM 0xd04 +#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0) + +#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08 +#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0) +#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29) +#define BT1_PCIE_AXI2MGM_DONE BIT(30) +#define BT1_PCIE_AXI2MGM_BUSY BIT(31) + +#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c +#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0) + +#define BT1_PCIE_AXI2MGM_READDATA 0xd10 +#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0) + +/* Generic Baikal-T1 PCIe interface resources */ +#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks) +#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks) +#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts) +#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts) + +/* PCIe bus setup delays and timeouts */ +#define BT1_PCIE_RST_DELAY_MS 100 +#define BT1_PCIE_RUN_DELAY_US 100 +#define BT1_PCIE_REQ_DELAY_US 1 +#define BT1_PCIE_REQ_TIMEOUT_US 1000 +#define BT1_PCIE_LNK_DELAY_US 1000 +#define BT1_PCIE_LNK_TIMEOUT_US 1000000 + +static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = { + DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK, +}; + +static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = { + DW_PCIE_REF_CLK, +}; + +static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = { + DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST, +}; + +static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = { + DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST, + DW_PCIE_PIPE_RST, 
DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST, +}; + +struct bt1_pcie { + struct dw_pcie dw; + struct platform_device *pdev; + struct regmap *sys_regs; +}; +#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw) + +/* + * Baikal-T1 MMIO space must be read/written by the dword-aligned + * instructions. Note the methods are optimized to have the dword operations + * performed with minimum overhead as the most frequently used ones. + */ +static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val) +{ + unsigned int ofs = (uintptr_t)addr & 0x3; + + if (!IS_ALIGNED((uintptr_t)addr, size)) + return -EINVAL; + + *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE; + if (size == 4) { + return 0; + } else if (size == 2) { + *val &= 0xffff; + return 0; + } else if (size == 1) { + *val &= 0xff; + return 0; + } + + return -EINVAL; +} + +static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val) +{ + unsigned int ofs = (uintptr_t)addr & 0x3; + u32 tmp, mask; + + if (!IS_ALIGNED((uintptr_t)addr, size)) + return -EINVAL; + + if (size == 4) { + writel(val, addr); + return 0; + } else if (size == 2 || size == 1) { + mask = GENMASK(size * BITS_PER_BYTE - 1, 0); + tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE); + tmp |= (val & mask) << ofs * BITS_PER_BYTE; + writel(tmp, addr - ofs); + return 0; + } + + return -EINVAL; +} + +static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size) +{ + int ret; + u32 val; + + ret = bt1_pcie_read_mmio(base + reg, size, &val); + if (ret) { + dev_err(pci->dev, "Read DBI address failed\n"); + return ~0U; + } + + return val; +} + +static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val) +{ + int ret; + + ret = bt1_pcie_write_mmio(base + reg, size, val); + if (ret) + dev_err(pci->dev, "Write DBI address failed\n"); +} + +static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val) +{ + struct bt1_pcie *btpci = to_bt1_pcie(pci); + int ret; + + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE); + + ret = bt1_pcie_write_mmio(base + reg, size, val); + if (ret) + dev_err(pci->dev, "Write DBI2 address failed\n"); + + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_DBI2_MODE, 0); +} + +static int bt1_pcie_start_link(struct dw_pcie *pci) +{ + struct bt1_pcie *btpci = to_bt1_pcie(pci); + u32 val; + int ret; + + /* + * Enable LTSSM and make sure it was able to establish both PHY and + * data links. This procedure shall work fine to reach 2.5 GT/s speed. + */ + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN); + + ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, + (val & BT1_CCU_PCIE_SMLH_LINKUP), + BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); + if (ret) { + dev_err(pci->dev, "LTSSM failed to set PHY link up\n"); + return ret; + } + + ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, + (val & BT1_CCU_PCIE_RDLH_LINKUP), + BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); + if (ret) { + dev_err(pci->dev, "LTSSM failed to set data link up\n"); + return ret; + } + + /* + * Activate direct speed change after the link is established in an + * attempt to reach a higher bus performance (up to Gen.3 - 8.0 GT/s). + * This is required at least to get 8.0 GT/s speed. 
+ */ + val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + val |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); + + ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, + BT1_CCU_PCIE_LTSSM_LINKUP(val), + BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); + if (ret) + dev_err(pci->dev, "LTSSM failed to get into L0 state\n"); + + return ret; +} + +static void bt1_pcie_stop_link(struct dw_pcie *pci) +{ + struct bt1_pcie *btpci = to_bt1_pcie(pci); + + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_LTSSM_EN, 0); +} + +static const struct dw_pcie_ops bt1_pcie_ops = { + .read_dbi = bt1_pcie_read_dbi, + .write_dbi = bt1_pcie_write_dbi, + .write_dbi2 = bt1_pcie_write_dbi2, + .start_link = bt1_pcie_start_link, + .stop_link = bt1_pcie_stop_link, +}; + +static struct pci_ops bt1_pci_ops = { + .map_bus = dw_pcie_own_conf_map_bus, + .read = pci_generic_config_read32, + .write = pci_generic_config_write32, +}; + +static int bt1_pcie_get_resources(struct bt1_pcie *btpci) +{ + struct device *dev = btpci->dw.dev; + int i; + + /* DBI access is supposed to be performed by the dword-aligned IOs */ + btpci->dw.pp.bridge->ops = &bt1_pci_ops; + + /* These CSRs are in MMIO so we won't check the regmap-methods status */ + btpci->sys_regs = + syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon"); + if (IS_ERR(btpci->sys_regs)) + return dev_err_probe(dev, PTR_ERR(btpci->sys_regs), + "Failed to get syscon\n"); + + /* Make sure all the required resources have been specified */ + for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) { + if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) { + dev_err(dev, "App clocks set is incomplete\n"); + return -ENOENT; + } + } + + for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) { + if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) { + dev_err(dev, "Core clocks set is incomplete\n"); + return -ENOENT; + } + } + + for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) { + if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) { + dev_err(dev, "App resets set is incomplete\n"); + return -ENOENT; + } + } + + for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) { + if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) { + dev_err(dev, "Core resets set is incomplete\n"); + return -ENOENT; + } + } + + return 0; +} + +static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init) +{ + struct device *dev = btpci->dw.dev; + struct dw_pcie *pci = &btpci->dw; + int ret; + + /* Disable LTSSM for sure */ + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_LTSSM_EN, 0); + + /* + * Application reset controls are trigger-based so assert the core + * resets only. + */ + ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts); + if (ret) + dev_err(dev, "Failed to assert core resets\n"); + + /* + * Clocks are disabled by default at least in accordance with the clk + * enable counter value on init stage. + */ + if (!init) { + clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks); + + clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks); + } + + /* The peripheral devices are unavailable anyway so reset them too */ + gpiod_set_value_cansleep(pci->pe_rst, 1); + + /* Make sure all the resets are settled */ + msleep(BT1_PCIE_RST_DELAY_MS); +} + +/* + * Implements the cold reset procedure in accordance with the reference manual + * and available PM signals. 
+ */
+static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci)
+{
+	struct device *dev = btpci->dw.dev;
+	struct dw_pcie *pci = &btpci->dw;
+	u32 val;
+	int ret;
+
+	/* First get out of the Power/Hot reset state */
+	ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+	if (ret) {
+		dev_err(dev, "Failed to deassert power reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+	if (ret) {
+		dev_err(dev, "Failed to deassert hot reset\n");
+		goto err_assert_pwr_rst;
+	}
+
+	/* Wait for the PM-core to stop requesting the PHY reset */
+	ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+				       !(val & BT1_CCU_PCIE_REQ_PHY_RST),
+				       BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+	if (ret) {
+		dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n");
+		goto err_assert_hot_rst;
+	}
+
+	ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+	if (ret) {
+		dev_err(dev, "Failed to deassert PHY reset\n");
+		goto err_assert_hot_rst;
+	}
+
+	/* Clocks can now be enabled, but the ref one is crucial at this stage */
+	ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+	if (ret) {
+		dev_err(dev, "Failed to enable app clocks\n");
+		goto err_assert_phy_rst;
+	}
+
+	ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+	if (ret) {
+		dev_err(dev, "Failed to enable ref clocks\n");
+		goto err_disable_app_clk;
+	}
+
+	/* Wait for the PM to stop requesting the controller core reset */
+	ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+				       !(val & BT1_CCU_PCIE_REQ_CORE_RST),
+				       BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+	if (ret) {
+		dev_err(dev, "Timed out waiting for PM to stop core resetting\n");
+		goto err_disable_core_clk;
+	}
+
+	/* PCS-PIPE interface and controller core can now be activated */
+	ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+	if (ret) {
+		dev_err(dev, "Failed to deassert PIPE reset\n");
+		goto err_disable_core_clk;
+	}
+
+	ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+	if (ret) {
+		dev_err(dev, "Failed to deassert core reset\n");
+		goto err_assert_pipe_rst;
+	}
+
+	/* It's recommended to reset the core and application logic together */
+	ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
+	if (ret) {
+		dev_err(dev, "Failed to reset app domain\n");
+		goto err_assert_core_rst;
+	}
+
+	/* Sticky/Non-sticky CSR flags can now be unreset too */
+	ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+	if (ret) {
+		dev_err(dev, "Failed to deassert sticky reset\n");
+		goto err_assert_core_rst;
+	}
+
+	ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc);
+	if (ret) {
+		dev_err(dev, "Failed to deassert non-sticky reset\n");
+		goto err_assert_sticky_rst;
+	}
+
+	/* Activate the PCIe bus peripheral devices */
+	gpiod_set_value_cansleep(pci->pe_rst, 0);
+
+	/* Make sure the state is settled (LTSSM is still disabled though) */
+	usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100);
+
+	return 0;
+
+err_assert_sticky_rst:
+	reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+
+err_assert_core_rst:
+	reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+
+err_assert_pipe_rst:
+	reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+
+err_disable_core_clk:
+	clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+err_disable_app_clk:
+	clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS,
pci->app_clks); + +err_assert_phy_rst: + reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc); + +err_assert_hot_rst: + reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc); + +err_assert_pwr_rst: + reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc); + + return ret; +} + +static int bt1_pcie_host_init(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct bt1_pcie *btpci = to_bt1_pcie(pci); + int ret; + + ret = bt1_pcie_get_resources(btpci); + if (ret) + return ret; + + bt1_pcie_full_stop_bus(btpci, true); + + return bt1_pcie_cold_start_bus(btpci); +} + +static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct bt1_pcie *btpci = to_bt1_pcie(pci); + + bt1_pcie_full_stop_bus(btpci, false); +} + +static const struct dw_pcie_host_ops bt1_pcie_host_ops = { + .host_init = bt1_pcie_host_init, + .host_deinit = bt1_pcie_host_deinit, +}; + +static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev) +{ + struct bt1_pcie *btpci; + + btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL); + if (!btpci) + return ERR_PTR(-ENOMEM); + + btpci->pdev = pdev; + + platform_set_drvdata(pdev, btpci); + + return btpci; +} + +static int bt1_pcie_add_port(struct bt1_pcie *btpci) +{ + struct device *dev = &btpci->pdev->dev; + int ret; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + btpci->dw.version = DW_PCIE_VER_460A; + btpci->dw.dev = dev; + btpci->dw.ops = &bt1_pcie_ops; + + btpci->dw.pp.num_vectors = MAX_MSI_IRQS; + btpci->dw.pp.ops = &bt1_pcie_host_ops; + + dw_pcie_cap_set(&btpci->dw, REQ_RES); + + ret = dw_pcie_host_init(&btpci->dw.pp); + if (ret) + dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n"); + + return ret; +} + +static void bt1_pcie_del_port(struct bt1_pcie *btpci) +{ + dw_pcie_host_deinit(&btpci->dw.pp); +} + +static int bt1_pcie_probe(struct platform_device *pdev) +{ + struct bt1_pcie *btpci; + + btpci = bt1_pcie_create_data(pdev); + if (IS_ERR(btpci)) + return PTR_ERR(btpci); + + return bt1_pcie_add_port(btpci); +} + +static int bt1_pcie_remove(struct platform_device *pdev) +{ + struct bt1_pcie *btpci = platform_get_drvdata(pdev); + + bt1_pcie_del_port(btpci); + + return 0; +} + +static const struct of_device_id bt1_pcie_of_match[] = { + { .compatible = "baikal,bt1-pcie" }, + {}, +}; +MODULE_DEVICE_TABLE(of, bt1_pcie_of_match); + +static struct platform_driver bt1_pcie_driver = { + .probe = bt1_pcie_probe, + .remove = bt1_pcie_remove, + .driver = { + .name = "bt1-pcie", + .of_match_table = bt1_pcie_of_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(bt1_pcie_driver); + +MODULE_AUTHOR("Serge Semin "); +MODULE_DESCRIPTION("Baikal-T1 PCIe driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 83ddb190292e4..1a9956692a973 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -13,8 +13,6 @@ #include #include -#include "../../pci.h" - void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) { struct pci_epc *epc = ep->epc; @@ -171,8 +169,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, return -EINVAL; } - ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type, - cpu_addr, bar); + ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type, + cpu_addr, bar); if (ret < 0) { dev_err(pci->dev, "Failed to 
program IB window\n"); return ret; @@ -614,8 +612,11 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, void dw_pcie_ep_exit(struct dw_pcie_ep *ep) { + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct pci_epc *epc = ep->epc; + dw_pcie_edma_remove(pci); + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, epc->mem->window.page_size); @@ -694,23 +695,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) INIT_LIST_HEAD(&ep->func_list); - if (!pci->dbi_base) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - } - - if (!pci->dbi_base2) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - if (!res) { - pci->dbi_base2 = pci->dbi_base + SZ_4K; - } else { - pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base2)) - return PTR_ERR(pci->dbi_base2); - } - } + ret = dw_pcie_get_resources(pci); + if (ret) + return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); if (!res) @@ -739,9 +726,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) return -ENOMEM; ep->outbound_addr = addr; - if (pci->link_gen < 1) - pci->link_gen = of_pci_get_max_link_speed(np); - epc = devm_pci_epc_create(dev, &epc_ops); if (IS_ERR(epc)) { dev_err(dev, "Failed to create epc device\n"); @@ -787,6 +771,10 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) goto err_exit_epc_mem; } + ret = dw_pcie_edma_detect(pci); + if (ret) + goto err_free_epc_mem; + if (ep->ops->get_features) { epc_features = ep->ops->get_features(ep); if (epc_features->core_init_notifier) @@ -795,10 +783,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ret = dw_pcie_ep_init_complete(ep); if (ret) - goto err_free_epc_mem; + goto err_remove_edma; return 0; +err_remove_edma: + dw_pcie_edma_remove(pci); + err_free_epc_mem: pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, epc->mem->window.page_size); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 39f3b37d4033c..36eaa8a1d63c3 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -16,7 +16,6 @@ #include #include -#include "../../pci.h" #include "pcie-designware.h" static struct pci_ops dw_pcie_ops; @@ -367,7 +366,16 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) dw_chained_msi_isr, pp); } - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + /* + * Even though the iMSI-RX Module supports 64-bit addresses some + * peripheral PCIe devices may lack the 64-bit messages support. In + * order not to miss MSI TLPs from those devices the MSI target address + * has to be reserved within the lowest 4GB. + * Note until there is a better alternative found the reservation is + * done by allocating from the artificially limited DMA-coherent + * memory. + */ + ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); if (ret) dev_warn(dev, "Failed to set DMA mask to 32-bit. 
Devices with only 32-bit MSI support may not work properly\n"); @@ -395,6 +403,10 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) raw_spin_lock_init(&pp->lock); + ret = dw_pcie_get_resources(pci); + if (ret) + return ret; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); if (res) { pp->cfg0_size = resource_size(res); @@ -408,13 +420,6 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) return -ENODEV; } - if (!pci->dbi_base) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - } - bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; @@ -429,9 +434,6 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) pp->io_base = pci_pio_to_address(win->res->start); } - if (pci->link_gen < 1) - pci->link_gen = of_pci_get_max_link_speed(np); - /* Set default bus ops */ bridge->ops = &dw_pcie_ops; bridge->child_ops = &dw_child_pcie_ops; @@ -474,14 +476,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) dw_pcie_iatu_detect(pci); - ret = dw_pcie_setup_rc(pp); + ret = dw_pcie_edma_detect(pci); if (ret) goto err_free_msi; + ret = dw_pcie_setup_rc(pp); + if (ret) + goto err_remove_edma; + if (!dw_pcie_link_up(pci)) { ret = dw_pcie_start_link(pci); if (ret) - goto err_free_msi; + goto err_remove_edma; } /* Ignore errors, the link may come up later */ @@ -498,6 +504,9 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) err_stop_link: dw_pcie_stop_link(pci); +err_remove_edma: + dw_pcie_edma_remove(pci); + err_free_msi: if (pp->has_msi_ctrl) dw_pcie_free_msi(pp); @@ -519,6 +528,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp) dw_pcie_stop_link(pci); + dw_pcie_edma_remove(pci); + if (pp->has_msi_ctrl) dw_pcie_free_msi(pp); @@ -643,12 +654,15 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) } /* - * Ensure all outbound windows are disabled before proceeding with - * the MEM/IO ranges setups. + * Ensure all out/inbound windows are disabled before proceeding with + * the MEM/IO (dma-)ranges setups. 
*/ for (i = 0; i < pci->num_ob_windows; i++) dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i); + for (i = 0; i < pci->num_ib_windows; i++) + dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i); + i = 0; resource_list_for_each_entry(entry, &pp->bridge->windows) { if (resource_type(entry->res) != IORESOURCE_MEM) @@ -685,9 +699,32 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) } if (pci->num_ob_windows <= i) - dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n", + dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n", pci->num_ob_windows); + i = 0; + resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) { + if (resource_type(entry->res) != IORESOURCE_MEM) + continue; + + if (pci->num_ib_windows <= i) + break; + + ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM, + entry->res->start, + entry->res->start - entry->offset, + resource_size(entry->res)); + if (ret) { + dev_err(pci->dev, "Failed to set DMA range %pr\n", + entry->res); + return ret; + } + } + + if (pci->num_ib_windows <= i) + dev_warn(pci->dev, "DMA-ranges exceed inbound iATU size (%u)\n", + pci->num_ib_windows); + return 0; } diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 9e4d96e5a3f5a..7ff3b69422198 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -10,7 +10,11 @@ #include #include +#include #include +#include +#include +#include #include #include #include @@ -19,6 +23,160 @@ #include "../../pci.h" #include "pcie-designware.h" +static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = { + [DW_PCIE_DBI_CLK] = "dbi", + [DW_PCIE_MSTR_CLK] = "mstr", + [DW_PCIE_SLV_CLK] = "slv", +}; + +static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = { + [DW_PCIE_PIPE_CLK] = "pipe", + [DW_PCIE_CORE_CLK] = "core", + [DW_PCIE_AUX_CLK] = "aux", + [DW_PCIE_REF_CLK] = "ref", +}; + +static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = { + [DW_PCIE_DBI_RST] = "dbi", + [DW_PCIE_MSTR_RST] = "mstr", + [DW_PCIE_SLV_RST] = "slv", +}; + +static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = { + [DW_PCIE_NON_STICKY_RST] = "non-sticky", + [DW_PCIE_STICKY_RST] = "sticky", + [DW_PCIE_CORE_RST] = "core", + [DW_PCIE_PIPE_RST] = "pipe", + [DW_PCIE_PHY_RST] = "phy", + [DW_PCIE_HOT_RST] = "hot", + [DW_PCIE_PWR_RST] = "pwr", +}; + +static int dw_pcie_get_clocks(struct dw_pcie *pci) +{ + int i, ret; + + for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++) + pci->app_clks[i].id = dw_pcie_app_clks[i]; + + for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++) + pci->core_clks[i].id = dw_pcie_core_clks[i]; + + ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS, + pci->app_clks); + if (ret) + return ret; + + return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS, + pci->core_clks); +} + +static int dw_pcie_get_resets(struct dw_pcie *pci) +{ + int i, ret; + + for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++) + pci->app_rsts[i].id = dw_pcie_app_rsts[i]; + + for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++) + pci->core_rsts[i].id = dw_pcie_core_rsts[i]; + + ret = devm_reset_control_bulk_get_optional_shared(pci->dev, + DW_PCIE_NUM_APP_RSTS, + pci->app_rsts); + if (ret) + return ret; + + ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev, + DW_PCIE_NUM_CORE_RSTS, + pci->core_rsts); + if (ret) + return ret; + + pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(pci->pe_rst)) + return PTR_ERR(pci->pe_rst); + + return 0; 
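+
+	/*
+	 * Note the optional PE-reset ("reset") GPIO above is requested in the
+	 * asserted state (GPIOD_OUT_HIGH): the PCIe peripheral devices are
+	 * held in reset until a glue driver releases the line, as
+	 * bt1_pcie_cold_start_bus() does once the reference clocks are up.
+	 */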
+} + +int dw_pcie_get_resources(struct dw_pcie *pci) +{ + struct platform_device *pdev = to_platform_device(pci->dev); + struct device_node *np = dev_of_node(pci->dev); + struct resource *res; + int ret; + + if (!pci->dbi_base) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + } + + /* DBI2 is mainly useful for the endpoint controller */ + if (!pci->dbi_base2) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); + if (res) { + pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + } else { + pci->dbi_base2 = pci->dbi_base + SZ_4K; + } + } + + /* For non-unrolled iATU/eDMA platforms this range will be ignored */ + if (!pci->atu_base) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); + if (res) { + pci->atu_size = resource_size(res); + pci->atu_base = devm_ioremap_resource(pci->dev, res); + if (IS_ERR(pci->atu_base)) + return PTR_ERR(pci->atu_base); + } else { + pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; + } + } + + /* Set a default value suitable for at most 8 in and 8 out windows */ + if (!pci->atu_size) + pci->atu_size = SZ_4K; + + /* eDMA region can be mapped to a custom base address */ + if (!pci->edma.reg_base) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma"); + if (res) { + pci->edma.reg_base = devm_ioremap_resource(pci->dev, res); + if (IS_ERR(pci->edma.reg_base)) + return PTR_ERR(pci->edma.reg_base); + } else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) { + pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET; + } + } + + /* LLDD is supposed to manually switch the clocks and resets state */ + if (dw_pcie_cap_is(pci, REQ_RES)) { + ret = dw_pcie_get_clocks(pci); + if (ret) + return ret; + + ret = dw_pcie_get_resets(pci); + if (ret) + return ret; + } + + if (pci->link_gen < 1) + pci->link_gen = of_pci_get_max_link_speed(np); + + of_property_read_u32(np, "num-lanes", &pci->num_lanes); + + if (of_property_read_bool(np, "snps,enable-cdm-check")) + dw_pcie_cap_set(pci, CDM_CHECK); + + return 0; +} + void dw_pcie_version_detect(struct dw_pcie *pci) { u32 ver; @@ -211,7 +369,7 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val) static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir, u32 index) { - if (pci->iatu_unroll_enabled) + if (dw_pcie_cap_is(pci, IATU_UNROLL)) return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index); dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index); @@ -393,8 +551,60 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val); } -int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u8 bar) +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, + u64 cpu_addr, u64 pci_addr, u64 size) +{ + u64 limit_addr = pci_addr + size - 1; + u32 retries, val; + + if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) || + !IS_ALIGNED(cpu_addr, pci->region_align) || + !IS_ALIGNED(pci_addr, pci->region_align) || !size) { + return -EINVAL; + } + + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE, + lower_32_bits(pci_addr)); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE, + upper_32_bits(pci_addr)); + + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT, + 
lower_32_bits(limit_addr)); + if (dw_pcie_ver_is_ge(pci, 460A)) + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT, + upper_32_bits(limit_addr)); + + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, + lower_32_bits(cpu_addr)); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, + upper_32_bits(cpu_addr)); + + val = type; + if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) && + dw_pcie_ver_is_ge(pci, 460A)) + val |= PCIE_ATU_INCREASE_REGION_SIZE; + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. + */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return 0; + + mdelay(LINK_WAIT_IATU); + } + + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); + + return -ETIMEDOUT; +} + +int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, + int type, u64 cpu_addr, u8 bar) { u32 retries, val; @@ -519,29 +729,23 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen) cap &= ~((u32)PCI_EXP_LNKCAP_SLS); dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed); - } -static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) -{ - u32 val; - - val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); - if (val == 0xffffffff) - return true; - - return false; -} - -static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci) +void dw_pcie_iatu_detect(struct dw_pcie *pci) { int max_region, ob, ib; u32 val, min, dir; u64 max; - if (pci->iatu_unroll_enabled) { + val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); + if (val == 0xFFFFFFFF) { + dw_pcie_cap_set(pci, IATU_UNROLL); + max_region = min((int)pci->atu_size / 512, 256); } else { + pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE; + pci->atu_size = PCIE_ATU_VIEWPORT_SIZE; + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF); max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1; } @@ -583,46 +787,197 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci) pci->num_ib_windows = ib; pci->region_align = 1 << fls(min); pci->region_limit = (max << 32) | (SZ_4G - 1); + + dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n", + dw_pcie_cap_is(pci, IATU_UNROLL) ? 
"T" : "F", + pci->num_ob_windows, pci->num_ib_windows, + pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G); } -void dw_pcie_iatu_detect(struct dw_pcie *pci) +static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg) { - struct platform_device *pdev = to_platform_device(pci->dev); + u32 val = 0; + int ret; - pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); - if (pci->iatu_unroll_enabled) { - if (!pci->atu_base) { - struct resource *res = - platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); - if (res) { - pci->atu_size = resource_size(res); - pci->atu_base = devm_ioremap_resource(pci->dev, res); - } - if (!pci->atu_base || IS_ERR(pci->atu_base)) - pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; - } + if (pci->ops && pci->ops->read_dbi) + return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4); + + ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val); + if (ret) + dev_err(pci->dev, "Read DMA address failed\n"); + + return val; +} + +static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr) +{ + struct platform_device *pdev = to_platform_device(dev); + char name[6]; + int ret; + + if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH) + return -EINVAL; + + ret = platform_get_irq_byname_optional(pdev, "dma"); + if (ret > 0) + return ret; - if (!pci->atu_size) - /* Pick a minimal default, enough for 8 in and 8 out windows */ - pci->atu_size = SZ_4K; + snprintf(name, sizeof(name), "dma%u", nr); + + return platform_get_irq_byname_optional(pdev, name); +} + +static struct dw_edma_core_ops dw_pcie_edma_ops = { + .irq_vector = dw_pcie_edma_irq_vector, +}; + +static int dw_pcie_edma_find_chip(struct dw_pcie *pci) +{ + u32 val; + + /* + * Indirect eDMA CSRs access has been completely removed since v5.40a + * thus no space is now reserved for the eDMA channels viewport and + * former DMA CTRL register is no longer fixed to FFs. + */ + if (dw_pcie_ver_is_ge(pci, 540A)) + val = 0xFFFFFFFF; + else + val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL); + + if (val == 0xFFFFFFFF && pci->edma.reg_base) { + pci->edma.mf = EDMA_MF_EDMA_UNROLL; + + val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL); + } else if (val != 0xFFFFFFFF) { + pci->edma.mf = EDMA_MF_EDMA_LEGACY; + + pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE; } else { - pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE; - pci->atu_size = PCIE_ATU_VIEWPORT_SIZE; + return -ENODEV; } - dw_pcie_iatu_detect_regions(pci); + pci->edma.dev = pci->dev; - dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ? 
- "enabled" : "disabled"); + if (!pci->edma.ops) + pci->edma.ops = &dw_pcie_edma_ops; - dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n", - pci->num_ob_windows, pci->num_ib_windows, - pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G); + pci->edma.flags |= DW_EDMA_CHIP_LOCAL; + + pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val); + pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val); + + /* Sanity check the channels count if the mapping was incorrect */ + if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH || + !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH) + return -EINVAL; + + return 0; +} + +static int dw_pcie_edma_irq_verify(struct dw_pcie *pci) +{ + struct platform_device *pdev = to_platform_device(pci->dev); + u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt; + char name[6]; + int ret; + + if (pci->edma.nr_irqs == 1) + return 0; + else if (pci->edma.nr_irqs > 1) + return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0; + + ret = platform_get_irq_byname_optional(pdev, "dma"); + if (ret > 0) { + pci->edma.nr_irqs = 1; + return 0; + } + + for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) { + snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs); + + ret = platform_get_irq_byname_optional(pdev, name); + if (ret <= 0) + return -EINVAL; + } + + return 0; +} + +static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci) +{ + struct dw_edma_region *ll; + dma_addr_t paddr; + int i; + + for (i = 0; i < pci->edma.ll_wr_cnt; i++) { + ll = &pci->edma.ll_region_wr[i]; + ll->sz = DMA_LLP_MEM_SIZE; + ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz, + &paddr, GFP_KERNEL); + if (!ll->vaddr.mem) + return -ENOMEM; + + ll->paddr = paddr; + } + + for (i = 0; i < pci->edma.ll_rd_cnt; i++) { + ll = &pci->edma.ll_region_rd[i]; + ll->sz = DMA_LLP_MEM_SIZE; + ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz, + &paddr, GFP_KERNEL); + if (!ll->vaddr.mem) + return -ENOMEM; + + ll->paddr = paddr; + } + + return 0; +} + +int dw_pcie_edma_detect(struct dw_pcie *pci) +{ + int ret; + + /* Don't fail if no eDMA was found (for the backward compatibility) */ + ret = dw_pcie_edma_find_chip(pci); + if (ret) + return 0; + + /* Don't fail on the IRQs verification (for the backward compatibility) */ + ret = dw_pcie_edma_irq_verify(pci); + if (ret) { + dev_err(pci->dev, "Invalid eDMA IRQs found\n"); + return 0; + } + + ret = dw_pcie_edma_ll_alloc(pci); + if (ret) { + dev_err(pci->dev, "Couldn't allocate LLP memory\n"); + return ret; + } + + /* Don't fail if the DW eDMA driver can't find the device */ + ret = dw_edma_probe(&pci->edma); + if (ret && ret != -ENODEV) { + dev_err(pci->dev, "Couldn't register eDMA device\n"); + return ret; + } + + dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n", + pci->edma.mf == EDMA_MF_EDMA_UNROLL ? 
"T" : "F", + pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt); + + return 0; +} + +void dw_pcie_edma_remove(struct dw_pcie *pci) +{ + dw_edma_remove(&pci->edma); } void dw_pcie_setup(struct dw_pcie *pci) { - struct device_node *np = pci->dev->of_node; u32 val; if (pci->link_gen > 0) @@ -650,21 +1005,20 @@ void dw_pcie_setup(struct dw_pcie *pci) val |= PORT_LINK_DLL_LINK_EN; dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); - if (of_property_read_bool(np, "snps,enable-cdm-check")) { + if (dw_pcie_cap_is(pci, CDM_CHECK)) { val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | PCIE_PL_CHK_REG_CHK_REG_START; dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); } - of_property_read_u32(np, "num-lanes", &pci->num_lanes); if (!pci->num_lanes) { dev_dbg(pci->dev, "Using h/w default number of lanes\n"); return; } /* Set the number of lanes */ - val &= ~PORT_LINK_FAST_LINK_MODE; + val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); val &= ~PORT_LINK_MODE_MASK; switch (pci->num_lanes) { case 1: diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index a871ae7eb59ec..adad0ea617999 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -12,10 +12,15 @@ #define _PCIE_DESIGNWARE_H #include +#include +#include #include +#include +#include #include #include #include +#include #include #include @@ -27,21 +32,36 @@ #define DW_PCIE_VER_480A 0x3438302a #define DW_PCIE_VER_490A 0x3439302a #define DW_PCIE_VER_520A 0x3532302a +#define DW_PCIE_VER_540A 0x3534302a #define __dw_pcie_ver_cmp(_pci, _ver, _op) \ ((_pci)->version _op DW_PCIE_VER_ ## _ver) +#define __dw_pcie_ver_type_cmp(_pci, _type, _op) \ + ((_pci)->type _op DW_PCIE_VER_TYPE_ ## _type) + #define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==) #define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=) #define dw_pcie_ver_type_is(_pci, _ver, _type) \ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \ - __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==)) + __dw_pcie_ver_type_cmp(_pci, _type, ==)) #define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \ - __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=)) + __dw_pcie_ver_type_cmp(_pci, _type, >=)) + +/* DWC PCIe controller capabilities */ +#define DW_PCIE_CAP_REQ_RES 0 +#define DW_PCIE_CAP_IATU_UNROLL 1 +#define DW_PCIE_CAP_CDM_CHECK 2 + +#define dw_pcie_cap_is(_pci, _cap) \ + test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps) + +#define dw_pcie_cap_set(_pci, _cap) \ + set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps) /* Parameters for the waiting for link up routine */ #define LINK_WAIT_MAX_RETRIES 10 @@ -152,6 +172,18 @@ #define PCIE_MSIX_DOORBELL 0x948 #define PCIE_MSIX_DOORBELL_PF_SHIFT 24 +/* + * eDMA CSRs. DW PCIe IP-core v4.70a and older had the eDMA registers accessible + * over the Port Logic registers space. Afterwards the unrolled mapping was + * introduced so eDMA and iATU could be accessed via a dedicated registers + * space. + */ +#define PCIE_DMA_VIEWPORT_BASE 0x970 +#define PCIE_DMA_UNROLL_BASE 0x80000 +#define PCIE_DMA_CTRL 0x008 +#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0) +#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16) + #define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20 #define PCIE_PL_CHK_REG_CHK_REG_START BIT(0) #define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1) @@ -200,6 +232,7 @@ * this offset, if atu_base not set. 
*/ #define DEFAULT_DBI_ATU_OFFSET (0x3 << 20) +#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE #define MAX_MSI_IRQS 256 #define MAX_MSI_IRQS_PER_CTRL 32 @@ -211,6 +244,9 @@ #define MAX_IATU_IN 256 #define MAX_IATU_OUT 256 +/* Default eDMA LLP memory size */ +#define DMA_LLP_MEM_SIZE PAGE_SIZE + struct dw_pcie; struct dw_pcie_rp; struct dw_pcie_ep; @@ -222,6 +258,39 @@ enum dw_pcie_device_mode { DW_PCIE_RC_TYPE, }; +enum dw_pcie_app_clk { + DW_PCIE_DBI_CLK, + DW_PCIE_MSTR_CLK, + DW_PCIE_SLV_CLK, + DW_PCIE_NUM_APP_CLKS +}; + +enum dw_pcie_core_clk { + DW_PCIE_PIPE_CLK, + DW_PCIE_CORE_CLK, + DW_PCIE_AUX_CLK, + DW_PCIE_REF_CLK, + DW_PCIE_NUM_CORE_CLKS +}; + +enum dw_pcie_app_rst { + DW_PCIE_DBI_RST, + DW_PCIE_MSTR_RST, + DW_PCIE_SLV_RST, + DW_PCIE_NUM_APP_RSTS +}; + +enum dw_pcie_core_rst { + DW_PCIE_NON_STICKY_RST, + DW_PCIE_STICKY_RST, + DW_PCIE_CORE_RST, + DW_PCIE_PIPE_RST, + DW_PCIE_PHY_RST, + DW_PCIE_HOT_RST, + DW_PCIE_PWR_RST, + DW_PCIE_NUM_CORE_RSTS +}; + struct dw_pcie_host_ops { int (*host_init)(struct dw_pcie_rp *pp); void (*host_deinit)(struct dw_pcie_rp *pp); @@ -317,10 +386,16 @@ struct dw_pcie { const struct dw_pcie_ops *ops; u32 version; u32 type; + unsigned long caps; int num_lanes; int link_gen; u8 n_fts[2]; - bool iatu_unroll_enabled: 1; + struct dw_edma_chip edma; + struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS]; + struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS]; + struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS]; + struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS]; + struct gpio_desc *pe_rst; }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -328,6 +403,8 @@ struct dw_pcie { #define to_dw_pcie_from_ep(endpoint) \ container_of((endpoint), struct dw_pcie, ep) +int dw_pcie_get_resources(struct dw_pcie *pci); + void dw_pcie_version_detect(struct dw_pcie *pci); u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap); @@ -346,11 +423,15 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, u64 cpu_addr, u64 pci_addr, u64 size); int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, int type, u64 cpu_addr, u64 pci_addr, u64 size); -int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u8 bar); +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, + u64 cpu_addr, u64 pci_addr, u64 size); +int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, + int type, u64 cpu_addr, u8 bar); void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index); void dw_pcie_setup(struct dw_pcie *pci); void dw_pcie_iatu_detect(struct dw_pcie *pci); +int dw_pcie_edma_detect(struct dw_pcie *pci); +void dw_pcie_edma_remove(struct dw_pcie *pci); static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) { diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c index 71026fefa3668..ae1517b52c580 100644 --- a/drivers/pci/controller/dwc/pcie-visconti.c +++ b/drivers/pci/controller/dwc/pcie-visconti.c @@ -29,9 +29,6 @@ struct visconti_pcie { void __iomem *ulreg_base; void __iomem *smu_base; void __iomem *mpu_base; - struct clk *refclk; - struct clk *coreclk; - struct clk *auxclk; }; #define PCIE_UL_REG_S_PCIE_MODE 0x00F4 @@ -198,6 +195,21 @@ static int visconti_pcie_host_init(struct dw_pcie_rp *pp) int err; u32 val; + if (!pcie->pci.core_clks[DW_PCIE_REF_CLK].clk) { + dev_err(pci->dev, "Missing ref clock source\n"); + return -ENOENT; + } + + if 
(!pcie->pci.core_clks[DW_PCIE_CORE_CLK].clk) { + dev_err(pci->dev, "Missing core clock source\n"); + return -ENOENT; + } + + if (!pcie->pci.core_clks[DW_PCIE_AUX_CLK].clk) { + dev_err(pci->dev, "Missing aux clock source\n"); + return -ENOENT; + } + visconti_smu_writel(pcie, PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK, PISMU_CKON_PCIE); @@ -242,8 +254,6 @@ static const struct dw_pcie_host_ops visconti_pcie_host_ops = { static int visconti_get_resources(struct platform_device *pdev, struct visconti_pcie *pcie) { - struct device *dev = &pdev->dev; - pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg"); if (IS_ERR(pcie->ulreg_base)) return PTR_ERR(pcie->ulreg_base); @@ -256,21 +266,6 @@ static int visconti_get_resources(struct platform_device *pdev, if (IS_ERR(pcie->mpu_base)) return PTR_ERR(pcie->mpu_base); - pcie->refclk = devm_clk_get(dev, "ref"); - if (IS_ERR(pcie->refclk)) - return dev_err_probe(dev, PTR_ERR(pcie->refclk), - "Failed to get ref clock\n"); - - pcie->coreclk = devm_clk_get(dev, "core"); - if (IS_ERR(pcie->coreclk)) - return dev_err_probe(dev, PTR_ERR(pcie->coreclk), - "Failed to get core clock\n"); - - pcie->auxclk = devm_clk_get(dev, "aux"); - if (IS_ERR(pcie->auxclk)) - return dev_err_probe(dev, PTR_ERR(pcie->auxclk), - "Failed to get aux clock\n"); - return 0; } @@ -304,6 +299,8 @@ static int visconti_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + dw_pcie_cap_set(pci, REQ_RES); + ret = visconti_get_resources(pdev, pcie); if (ret) return ret; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index c690572b10ce7..9681c67d61f9b 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -952,12 +952,15 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns, resource_size_t min_align = 0; int order; - for (order = 0; order <= max_order; order++) { + for (order = 0; order < max_order; order++) { resource_size_t align1 = 1; + if (!aligns[order]) + continue; + align1 <<= (order + 20); - if (!align) + if (!min_align) min_align = align1; else if (ALIGN(align + min_align, min_align) < align1) min_align = align1 >> 1; @@ -993,7 +996,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, struct list_head *realloc_head) { struct pci_dev *dev; - resource_size_t min_align, align, size, size0, size1; + resource_size_t min_align, align, size, size0, size1, max_align; resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */ int order, max_order; struct resource *b_res = find_bus_resource_of_type(bus, @@ -1073,6 +1076,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, min_align = calculate_mem_align(aligns, max_order); min_align = max(min_align, window_alignment(bus, b_res->flags)); + max_align = 1 << (max_order + 20); + if (min_align >= max_align/2) + max_align = min_align; size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align); add_align = max(min_align, add_align); size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? 
size0 : @@ -1085,8 +1091,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, b_res->flags = 0; return 0; } - b_res->start = min_align; - b_res->end = size0 + min_align - 1; + b_res->start = max_align; + b_res->end = size0 + max_align - 1; b_res->flags |= IORESOURCE_STARTALIGN; if (bus->self && size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 4619d5e89d5be..8fdd6f9d1f7dc 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1711,6 +1711,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd) bus_state = &rhub->bus_state; wake_enabled = hcd->self.root_hub->do_remote_wakeup; + /* FIXME Workaround CSRTimeout error on the PORTSC access */ + if (time_before(jiffies, bus_state->next_statechange)) + msleep(5); + spin_lock_irqsave(&xhci->lock, flags); if (wake_enabled) { diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h index 91727065bacbf..c72393bd8d3aa 100644 --- a/include/asm-generic/mmu_context.h +++ b/include/asm-generic/mmu_context.h @@ -73,4 +73,14 @@ static inline void deactivate_mm(struct task_struct *tsk, } #endif +/** + * tlb_prefetch - called if by design TLB-prefetching is required + * @addr: Virtual address + */ +#ifndef tlb_prefetch +static inline void tlb_prefetch(unsigned long addr) +{ +} +#endif + #endif /* __ASM_GENERIC_MMU_CONTEXT_H */ diff --git a/include/dt-bindings/soc/bt1-boot-mode.h b/include/dt-bindings/soc/bt1-boot-mode.h new file mode 100644 index 0000000000000..0e425805bc1d4 --- /dev/null +++ b/include/dt-bindings/soc/bt1-boot-mode.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 Boot Modes + */ +#ifndef __DT_BINDINGS_SOC_BT1_BOOT_MODE_H +#define __DT_BINDINGS_SOC_BT1_BOOT_MODE_H + +#define RCR_BOOT_NORMAL 0x1 +#define RCR_BOOT_LOADER 0x2 +#define RCR_BOOT_RECOVERY 0x3 + +#endif /* __DT_BINDINGS_SOC_BT1_BOOT_MODE_H */ diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h index 7d8062e9c544c..c062c8db472c9 100644 --- a/include/linux/dma/edma.h +++ b/include/linux/dma/edma.h @@ -18,13 +18,31 @@ struct dw_edma; struct dw_edma_region { - phys_addr_t paddr; - void __iomem *vaddr; + u64 paddr; + union { + void *mem; + void __iomem *io; + } vaddr; size_t sz; }; +/** + * struct dw_edma_core_ops - platform-specific eDMA methods + * @irq_vector: Get IRQ number of the passed eDMA channel. Note the + * method accepts the channel id in the end-to-end + * numbering with the eDMA write channels being placed + * first in the row. + * @pci_address: Get PCIe bus address corresponding to the passed CPU + * address. Note there is no need in specifying this + * function if the address translation is performed by + * the DW PCIe RP/EP controller with the DW eDMA device in + * subject and DMA_BYPASS isn't set for all the outbound + * iATU windows. That will be done by the controller + * automatically. 
+ */ struct dw_edma_core_ops { int (*irq_vector)(struct device *dev, unsigned int nr); + u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr); }; enum dw_edma_map_format { @@ -61,7 +79,6 @@ enum dw_edma_chip_flags { */ struct dw_edma_chip { struct device *dev; - int id; int nr_irqs; const struct dw_edma_core_ops *ops; u32 flags; @@ -84,7 +101,7 @@ struct dw_edma_chip { }; /* Export to the platform drivers */ -#if IS_ENABLED(CONFIG_DW_EDMA) +#if IS_REACHABLE(CONFIG_DW_EDMA) int dw_edma_probe(struct dw_edma_chip *chip); int dw_edma_remove(struct dw_edma_chip *chip); #else diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c923f4e60f240..0c020682d8941 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -394,7 +394,7 @@ enum dma_slave_buswidth { * should be read (RX), if the source is memory this argument is * ignored. * @dst_addr: this is the physical address where DMA slave data - * should be written (TX), if the source is memory this argument + * should be written (TX), if the destination is memory this argument * is ignored. * @src_addr_width: this is the width in bytes of the source (RX) * register where DMA data shall be read. If the source diff --git a/include/linux/edac.h b/include/linux/edac.h index fa4bda2a70f6c..89167a4459d54 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -157,6 +157,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * This is a variant of the DDR memories. * A registered memory has a buffer inside it, hiding * part of the memory details to the memory controller. + * @MEM_LPDDR: Low-Power DDR memory (mDDR). * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. * Those memories are labeled as "PC2-" instead of "PC" to @@ -167,6 +168,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * a chip select signal. * @MEM_RDDR2: Registered DDR2 RAM * This is a variant of the DDR2 memories. + * @MEM_LPDDR2: Low-Power DDR2 memory. * @MEM_XDR: Rambus XDR * It is an evolution of the original RAMBUS memories, * created to compete with DDR2. 
Weren't used on any @@ -199,10 +201,12 @@ enum mem_type { MEM_RDR, MEM_DDR, MEM_RDDR, + MEM_LPDDR, MEM_RMBS, MEM_DDR2, MEM_FB_DDR2, MEM_RDDR2, + MEM_LPDDR2, MEM_XDR, MEM_DDR3, MEM_RDDR3, @@ -230,10 +234,12 @@ enum mem_type { #define MEM_FLAG_RDR BIT(MEM_RDR) #define MEM_FLAG_DDR BIT(MEM_DDR) #define MEM_FLAG_RDDR BIT(MEM_RDDR) +#define MEM_FLAG_LPDDR BIT(MEM_LPDDR) #define MEM_FLAG_RMBS BIT(MEM_RMBS) #define MEM_FLAG_DDR2 BIT(MEM_DDR2) #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) +#define MEM_FLAG_LPDDR2 BIT(MEM_LPDDR2) #define MEM_FLAG_XDR BIT(MEM_XDR) #define MEM_FLAG_DDR3 BIT(MEM_DDR3) #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 0f06c2287b527..38a558d37fdd9 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -25,6 +25,7 @@ #define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 #define MARVELL_PHY_ID_88X2222 0x01410f10 +#define MARVELL_PHY_ID_88X2222R 0x014131b0 /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 313edd19bf545..30d2b238a287c 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -231,6 +231,8 @@ struct plat_stmmacenet_data { void (*exit)(struct platform_device *pdev, void *priv); struct mac_device_info *(*setup)(void *priv); int (*clks_config)(void *priv, bool enabled); + int (*bus_reset)(void *priv); + int (*swr_reset)(void *priv); int (*crosststamp)(ktime_t *device, struct system_counterval_t *system, void *ctx); void (*dump_debug_regs)(void *priv); diff --git a/mm/slab.c b/mm/slab.c index 59c8e28f7b6ab..ec8bd83f26e28 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3416,6 +3416,8 @@ void ___cache_free(struct kmem_cache *cachep, void *objp, if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) return; + tlb_prefetch((unsigned long)ac); + if (ac->avail < ac->limit) { STATS_INC_FREEHIT(cachep); } else { -- 2.39.5
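
Side note on the Baikal-T1 dword-only MMIO helpers added above (bt1_pcie_read_mmio()/bt1_pcie_write_mmio() in pcie-bt1.c): the snippet below is a minimal, self-contained sketch of the same read-modify-write technique, with a plain uint32_t array standing in for the register window. The demo_* names are illustrative stand-ins, not driver symbols, and the driver's alignment checks are omitted for brevity.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a register window that only tolerates dword accesses */
static uint32_t mmio[4];

static uint32_t demo_read(unsigned int byte_off, unsigned int size)
{
	unsigned int ofs = byte_off & 0x3;
	uint32_t mask = (size == 4) ? 0xffffffffu : ((1u << (size * 8)) - 1);

	/* Fetch the containing dword, then shift/mask out the requested field */
	return (mmio[byte_off / 4] >> (ofs * 8)) & mask;
}

static void demo_write(unsigned int byte_off, unsigned int size, uint32_t val)
{
	unsigned int ofs = byte_off & 0x3;
	uint32_t mask = (size == 4) ? 0xffffffffu : ((1u << (size * 8)) - 1);
	uint32_t tmp = mmio[byte_off / 4];

	/* Clear the target byte/word lanes, then merge the new value in */
	tmp &= ~(mask << (ofs * 8));
	tmp |= (val & mask) << (ofs * 8);
	mmio[byte_off / 4] = tmp;
}

int main(void)
{
	demo_write(0, 4, 0x11223344);	/* plain dword write */
	demo_write(2, 2, 0xbeef);	/* 16-bit write emulated via a dword RMW */
	printf("dword0 = 0x%08x, byte1 = 0x%02x\n",
	       demo_read(0, 4), demo_read(1, 1));
	return 0;
}

Running the sketch prints "dword0 = 0xbeef3344, byte1 = 0x33", i.e. the partial write lands in the upper half-word of the dword while the lower bytes are preserved, which is exactly the behaviour the driver relies on for sub-dword DBI accesses.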