- enum:
- renesas,r9a06g032-dma
- const: renesas,rzn1-dma
-
+ - const: baikal,bt1-dmac
"#dma-cells":
minimum: 3
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/baikal,bt1-ddrc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Baikal-T1 DDR Controller
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description:
+ Baikal-T1 DDRC is based on the DW uMCTL2 DDRC IP-core v2.51a with DDR2
+ and DDR3 protocol capability, a 32-bit data bus + 8-bit ECC + up to 2
+ SDRAM ranks. There are individual IRQs for each ECC and DFI event.
+ There is no dedicated scrubber clock source since the scrubber is fully
+ synchronous to the core clock.
+
+allOf:
+ - $ref: /schemas/memory-controllers/snps,dw-umctl2-common.yaml#
+
+properties:
+ compatible:
+ const: baikal,bt1-ddrc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 4
+
+ interrupt-names:
+ items:
+ - const: dfi_e
+ - const: ecc_ce
+ - const: ecc_ue
+ - const: ecc_sbr
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: aclk
+ - const: core
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: arst
+ - const: core
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/mips-gic.h>
+ #include <dt-bindings/clock/bt1-ccu.h>
+ #include <dt-bindings/reset/bt1-ccu.h>
+
+ memory-controller@1f042000 {
+ compatible = "baikal,bt1-ddrc";
+ reg = <0x1f042000 0x1000>;
+
+ interrupts = <GIC_SHARED 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 99 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dfi_e", "ecc_ce", "ecc_ue", "ecc_sbr";
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_DDR_CLK>,
+ <&ccu_pll CCU_DDR_PLL>;
+ clock-names = "pclk", "aclk", "core";
+
+ resets = <&ccu_axi CCU_AXI_DDR_RST>,
+ <&ccu_sys CCU_SYS_DDR_INIT_RST>;
+ reset-names = "arst", "core";
+ };
+...
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/snps,dw-umctl2-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare Universal Multi-Protocol Memory Controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Manish Narani <manish.narani@xilinx.com>
+ - Michal Simek <michal.simek@xilinx.com>
+
+description:
+ Synopsys DesignWare Enhanced uMCTL2 DDR Memory Controller is capable of
+ working with memory devices supporting protocols up to (LP)DDR4. It can
+ be equipped with the SEC/DEC ECC feature if the DRAM data bus is 16, 32
+ or 64 bits wide.
+
+select: false
+
+properties:
+ interrupts:
+ description:
+ DW uMCTL2 DDRC IP-core provides individual IRQ signal for each event":"
+ ECC Corrected Error, ECC Uncorrected Error, ECC Address Protection,
+ Scrubber-Done signal, DFI Parity/CRC Error. Some platforms may have the
+ signals merged before they reach the IRQ controller or have some of them
+ absent if the corresponding feature is unavailable/disabled.
+ minItems: 1
+ maxItems: 5
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 5
+ oneOf:
+ - description: Common ECC CE/UE/Scrubber/DFI Errors IRQ
+ items:
+ - const: ecc
+ - description: Individual ECC CE/UE/Scrubber/DFI Errors IRQs
+ items:
+ enum: [ ecc_ce, ecc_ue, ecc_ap, ecc_sbr, dfi_e ]
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description:
+ A standard set of the clock sources contains CSRs bus clock, AXI-ports
+ reference clock, DDRC core clock, Scrubber standalone clock
+ (synchronous to the DDRC clock).
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ minItems: 1
+ maxItems: 4
+ items:
+ enum: [ pclk, aclk, core, sbr ]
+
+ resets:
+ description:
+ Each clock domain can have separate reset signal.
+ minItems: 1
+ maxItems: 4
+
+ reset-names:
+ minItems: 1
+ maxItems: 4
+ items:
+ enum: [ prst, arst, core, sbr ]
+
+additionalProperties: true
+
+...
controller. It has an optional SEC/DEC ECC support in 64- and 32-bits
bus width configurations.
+allOf:
+ - $ref: /schemas/memory-controllers/snps,dw-umctl2-common.yaml#
+
+# Please create a separate DT-schema for your DW uMCTL2 DDR controller
+# with more detailed properties definition.
properties:
compatible:
oneOf:
- description: Xilinx ZynqMP DDR controller v2.40a
const: xlnx,zynqmp-ddrc-2.40a
- interrupts:
- description:
- DW uMCTL2 DDRC IP-core provides individual IRQ signal for each event":"
- ECC Corrected Error, ECC Uncorrected Error, ECC Address Protection,
- Scrubber-Done signal, DFI Parity/CRC Error. Some platforms may have the
- signals merged before they reach the IRQ controller or have some of them
- absent in case if the corresponding feature is unavailable/disabled.
- minItems: 1
- maxItems: 5
-
- interrupt-names:
- minItems: 1
- maxItems: 5
- oneOf:
- - description: Common ECC CE/UE/Scrubber/DFI Errors IRQ
- items:
- - const: ecc
- - description: Individual ECC CE/UE/Scrubber/DFI Errors IRQs
- items:
- enum: [ ecc_ce, ecc_ue, ecc_ap, ecc_sbr, dfi_e ]
-
- reg:
- maxItems: 1
-
- clocks:
- description:
- A standard set of the clock sources contains CSRs bus clock, AXI-ports
- reference clock, DDRC core clock, Scrubber standalone clock
- (synchronous to the DDRC clock).
- minItems: 1
- maxItems: 4
-
- clock-names:
- minItems: 1
- maxItems: 4
- items:
- enum: [ pclk, aclk, core, sbr ]
-
- resets:
- description:
- Each clock domain can have separate reset signal.
- minItems: 1
- maxItems: 4
-
- reset-names:
- minItems: 1
- maxItems: 4
- items:
- enum: [ prst, arst, core, sbr ]
-
required:
- compatible
- reg
- interrupts
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mfd/baikal,bt1-boot-con.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Baikal-T1 SoC Boot Controller
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description:
+ Baikal-T1 SoC is equipped with a Boot Controller which is responsible for
+ the proper SoC boot up procedure. Depending on the external pin state the
+ system can boot up from the internal ROM, from the externally attached
+ SPI flash (of at least 16MB) or from the internal SRAM (64KB of executable
+ code pre-loaded from the external SPI flash).
+
+allOf:
+ - $ref: /schemas/mfd/syscon.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: baikal,bt1-boot-con
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ items:
+ - description:
+ Baikal-T1 Boot Controller CSR space. It includes just a few
+ settings':' current boot mode, SPI controller access mux, SRAM
+ access mux and device ID.
+ - description: Mirrored first 4MB of the boot SPI flash memory
+
+ reg-names:
+ items:
+ - const: boot
+ - const: mirror
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+ little-endian: true
+
+ mux-controller:
+ $ref: /schemas/mux/reg-mux.yaml#
+
+ rom@1bfc0000:
+ $ref: /schemas/mtd/mtd-physmap.yaml#
+
+ spi@1f040100:
+ $ref: /schemas/spi/snps,dw-apb-ssi.yaml#
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ syscon@1f040000 {
+ compatible = "baikal,bt1-boot-con", "syscon", "simple-mfd";
+ reg = <0x1f040000 0x1000>,
+ <0x1fc00000 0x400000>;
+ reg-names = "boot", "mirror";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ little-endian;
+ reg-io-width = <4>;
+
+ boot_mux: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+
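+ /* Presumably mux 0 (offset 0x0, mask 0x100) selects the boot SPI
+  * access and mux 1 (offset 0x4, mask 0x1) selects the SRAM access,
+  * matching the muxes named in the reg description above.
+  */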
+ mux-reg-masks = <0x0 0x100>, <0x4 0x1>;
+ idle-states = <0x1>, <0x0>;
+ };
+
+ rom@1bfc0000 {
+ compatible = "baikal,bt1-int-rom", "mtd-rom";
+ reg = <0x1bfc0000 0x10000>;
+
+ no-unaligned-direct-access;
+ bank-width = <4>;
+ };
+
+ spi@1f040100 {
+ compatible = "baikal,bt1-sys-ssi";
+ reg = <0x1f040100 0x900>,
+ <0x1c000000 0x1000000>;
+ reg-names = "config", "map";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mux-controls = <&boot_mux 0>;
+
+ clocks = <&ccu_sys 1>;
+ clock-names = "ssi_clk";
+ };
+ };
+...
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mfd/baikal,bt1-sys-con.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Baikal-T1 SoC System Controller
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description:
+ Baikal-T1 SoC is equipped with a System Controller which is responsible for
+ setting up the SoC components and consists of the following sub-blocks':'
+ PLL/AXI-bus/System devices Clocks Control Units, P5600 CM2 L2-RAM controller,
+ CPU cores reboot flag, a persistent across reboots register, an indirectly
+ accessible DW APB I2C controller, a Boot Controller with a pre-installed memory
+ mapped firmware and a resource limited DW APB SSI, which can also be used
+ to transparently access an external SPI flash by means of a dedicated IO
+ memory region.
+
+allOf:
+ - $ref: /schemas/mfd/syscon.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: baikal,bt1-sys-con
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ description:
+ Baikal-T1 System Controller CSR space. It includes CCU (Clock Control
+ Unit), L2 settings, Reboot flag and Reboot tolerant register, System I2C
+ controller CSRs.
+ maxItems: 1
+
+ reg-names:
+ const: sys
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+ little-endian: true
+
+ clock-controller@1f04d000:
+ $ref: /schemas/clock/baikal,bt1-ccu-pll.yaml#
+
+ clock-controller@1f04d030:
+ $ref: /schemas/clock/baikal,bt1-ccu-div.yaml#
+
+ clock-controller@1f04d060:
+ $ref: /schemas/clock/baikal,bt1-ccu-div.yaml#
+
+ l2@1f04d028:
+ $ref: /schemas/cache/baikal,bt1-l2-ctl.yaml#
+
+ reboot:
+ $ref: /schemas/power/reset/syscon-reboot.yaml#
+
+ reboot-mode:
+ $ref: /schemas/power/reset/syscon-reboot-mode.yaml#
+
+ i2c@1f04d100:
+ $ref: /schemas/i2c/snps,designware-i2c.yaml#
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ syscon@1f04d000 {
+ compatible = "baikal,bt1-sys-con", "syscon", "simple-mfd";
+ reg = <0x1f04d000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ little-endian;
+ reg-io-width = <4>;
+
+ ccu_pll: clock-controller@1f04d000 {
+ compatible = "baikal,bt1-ccu-pll";
+ reg = <0x1f04d000 0x028>;
+ #clock-cells = <1>;
+
+ clocks = <&clk25m>;
+ clock-names = "ref_clk";
+ };
+
+ clock-controller@1f04d030 {
+ compatible = "baikal,bt1-ccu-axi";
+ reg = <0x1f04d030 0x030>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+
+ clocks = <&ccu_pll 1>,
+ <&ccu_pll 2>,
+ <&ccu_pll 3>;
+ clock-names = "sata_clk", "pcie_clk", "eth_clk";
+ };
+
+ l2@1f04d028 {
+ compatible = "baikal,bt1-l2-ctl";
+ reg = <0x1f04d028 0x004>;
+
+ baikal,l2-ws-latency = <0>;
+ baikal,l2-tag-latency = <0>;
+ baikal,l2-data-latency = <1>;
+ };
+
+ i2c@1f04d100 {
+ compatible = "baikal,bt1-sys-i2c";
+ reg = <0x1f04d100 0x010>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
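+ /* Equivalent to <GIC_SHARED 32 IRQ_TYPE_LEVEL_HIGH> in terms of
+  * the mips-gic.h and irq.h binding macros.
+  */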
+ interrupts = <0 32 4>;
+
+ clocks = <&ccu_sys 1>;
+ };
+ };
+...
minItems: 2
maxItems: 5 # Should be enough
- reg:
- maxItems: 1
+ reg: true
reg-io-width:
description: |
- cypress,cy7c1019dv33-10zsxi
- arm,vexpress-psram
- const: mtd-ram
+ - items:
+ - enum:
+ - baikal,bt1-int-rom
+ - const: mtd-rom
- enum:
- cfi-flash
- jedec-flash
properties:
$nodename:
- pattern: "^flash(@.*)?$"
+ pattern: "^(flash|rom|sram-controller|.*sram)(@.*)?$"
label:
description:
- trgmii
- 1000base-x
- 2500base-x
+ - 10gbase-x
- 5gbase-r
- rxaui
- xaui
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/marvell,88x2222.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell 88x2222 PHY
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Heiner Kallweit <hkallweit1@gmail.com>
+ - Russell King <linux@armlinux.org.uk>
+
+description: |
+ The Marvell 88X2222 transceiver is a fully integrated single chip solution
+ providing end-to-end data transmission over fiber-optic networks as well as
+ Twinax® copper links. It is a 2-port device that performs all physical layer
+ functions associated with 10GBASE-R and 1000BASE-X.
+
+ The line-side interface supports 2 ports of 10GBASE-R and 1000BASE-X. The
+ line side also supports Clause 73 AP Auto-Negotiation. The host-side
+ interface supports 4 ports of 10GBASE-R, RXAUI, 1000BASE-X, and 2 ports of
+ XAUI. Any port from the host side can be attached to any port on the line
+ side as long as the speeds match.
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ethernet-phy-id0141.0f10
+ - ethernet-phy-id0141.31b0
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ ngpios:
+ default: 2
+ minimum: 1
+ maximum: 12
+
+ gpio-line-names:
+ minItems: 1
+ maxItems: 12
+
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 12
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/net/mv-phy-88x2222.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@c {
+ /* Only needed to make the DT lint tools work. Do not copy/paste
+ * it into real DTS files.
+ */
+ compatible = "ethernet-phy-id0141.0f10";
+ reg = <0x0c>;
+
+ interrupt-parent = <&pic>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-reserved-ranges = <MV_88X2222_LED0 1>,
+ <MV_88X2222_LED1 1>,
+ <MV_88X2222_SDA 1>,
+ <MV_88X2222_SCL 1>;
+ };
+ };
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/pcs/snps,dw-xpcs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare Ethernet PCS
+
+maintainers:
+ - Jose Abreu <Jose.Abreu@synopsys.com>
+
+description:
+ Synopsys DesignWare Ethernet Physical Coding Sublayer provides an interface
+ between Media Access Control and Physical Medium Attachment Sublayer through
+ the Media Independent Interface (XGMII, USXGMII, XLGMII, GMII, etc.)
+ controlled by means of the IEEE std. Clause 45 register set. The PCS can be
+ optionally synthesized with a vendor-specific interface connected to
+ Synopsys PMA (also called DesignWare Consumer/Enterprise PHY) although in
+ general it can be used to communicate with any compatible PHY.
+
+properties:
+ compatible:
+ oneOf:
+ - description: Synopsys DesignWare XPCS with none or unknown PMA
+ const: snps,dw-xpcs
+ - description: Synopsys DesignWare XPCS with Consumer Gen1 3G PMA
+ const: snps,dw-xpcs-gen1-3g
+ - description: Synopsys DesignWare XPCS with Consumer Gen2 3G PMA
+ const: snps,dw-xpcs-gen2-3g
+ - description: Synopsys DesignWare XPCS with Consumer Gen2 6G PMA
+ const: snps,dw-xpcs-gen2-6g
+ - description: Synopsys DesignWare XPCS with Consumer Gen4 3G PMA
+ const: snps,dw-xpcs-gen4-3g
+ - description: Synopsys DesignWare XPCS with Consumer Gen4 6G PMA
+ const: snps,dw-xpcs-gen4-6g
+ - description: Synopsys DesignWare XPCS with Consumer Gen5 10G PMA
+ const: snps,dw-xpcs-gen5-10g
+ - description: Synopsys DesignWare XPCS with Consumer Gen5 12G PMA
+ const: snps,dw-xpcs-gen5-12g
+ - description: Baikal-T1 XPCS (DW XPCS with Consumer Gen5 10G PMA)
+ const: baikal,bt1-xpcs
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ System interface interrupt output (sbd_intr_o) indicating Clause 73/37
+ auto-negotiation events like':' Page received, AN is completed or
+ incompatible link partner.
+ maxItems: 1
+
+ clocks:
+ description:
+ The PCS/PMA interface can be clocked either by an internal reference
+ clock source or by an externally connected (via a pad) clock generator.
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ enum: [ core, pad ]
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ xgmac_pcs: ethernet-pcs@0 {
+ compatible = "snps,dw-xpcs";
+ reg = <0>;
+
+ interrupts = <79 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_core>, <&ccu_pad>;
+ clock-names = "core", "pad";
+ };
+ };
+...
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/snps,dw-xpcs-mi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare Ethernet PCS Management Interface
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description:
+ Synopsys DesignWare Ethernet PCS provides an interface between MAC and PMA
+ through the Media Independent Interface. The PCS CSRs are accessible over
+ the Ethernet MDIO bus or directly by means of the APB3/MCI interfaces. In the
+ latter case the XPCS can be mapped right into the system IO memory space.
+
+allOf:
+ - $ref: mdio.yaml#
+
+properties:
+ compatible:
+ const: snps,dw-xpcs-mi
+
+ reg:
+ items:
+ - description:
+ The DW XPCS CSR space can be either 'directly' or 'indirectly'
+ accessible. In the former case all Clause 45 registers are
+ contiguously mapped within the address space MMD '[20:16]',
+ Reg '[15:0]'. In the latter case the space is divided into
+ multiple 256-register sets. There is a special viewport CSR
+ which is responsible for the set selection. The upper part of
+ the CSR address is supposed to be written there, thus the
+ corresponding subset is mapped over the lowest 255 CSRs.
+
+ reg-names:
+ items:
+ - enum: [ direct, indirect ]
+
+ reg-io-width:
+ description:
+ The way the CSRs are mapped to memory is platform-dependent. Since
+ each Clause 45 CSR is 16 bits wide, the accesses must be at least
+ two-byte aligned.
+ default: 2
+ enum: [ 2, 4 ]
+
+ clocks:
+ items:
+ - description: Peripheral MCI/APB3 bus clock source
+
+ clock-names:
+ items:
+ - const: pclk
+
+patternProperties:
+ 'ethernet-pcs@[0-9a-f]+$':
+ type: object
+
+ $ref: pcs/snps,dw-xpcs.yaml#
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio@1f05d000 {
+ compatible = "snps,dw-xpcs-mi";
+ reg = <0x1f05d000 0x1000>;
+ reg-names = "indirect";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clocks = <&ccu_pclk>;
+ clock-names = "pclk";
+
+ reg-io-width = <4>;
+
+ ethernet-pcs@0 {
+ compatible = "snps,dw-xpcs";
+ reg = <0>;
+ };
+ };
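+ # A minimal sketch of the alternative 'direct' mapping variant described
+ # above. The base address is hypothetical; with the default 16-bit IO
+ # width the Clause 45 space MMD '[20:16]', Reg '[15:0]' spans 0x400000
+ # bytes.
+ - |
+ mdio@1a000000 {
+ compatible = "snps,dw-xpcs-mi";
+ reg = <0x1a000000 0x400000>;
+ reg-names = "direct";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-pcs@0 {
+ compatible = "snps,dw-xpcs";
+ reg = <0>;
+ };
+ };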
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/baikal,bt1-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Baikal-T1 PCIe Root Port Controller
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description:
+ Root Complex controller embedded into the Baikal-T1 SoC with a single port
+ activated. It's based on the DWC RC PCIe v4.60a IP-core, which is configured
+ to have just a single Root Port function and is capable of establishing a
+ link of up to Gen.3 speed on x4 lanes. It doesn't have an embedded clock and
+ reset control module, so the proper interface initialization is supposed to
+ be performed by software. There are four in- and four outbound iATU regions
+ which can be used to emit all required TLP types on the PCIe bus.
+
+allOf:
+ - $ref: /schemas/pci/snps,dw-pcie.yaml#
+
+properties:
+ compatible:
+ const: baikal,bt1-pcie
+
+ reg:
+ description:
+ DBI, DBI2 and at least 4KB outbound iATU-capable region for the
+ peripheral devices CFG-space access.
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: dbi2
+ - const: config
+
+ interrupts:
+ description:
+ MSI, AER, PME, Hot-plug, Link Bandwidth Management, Link Equalization
+ request and eight Read/Write eDMA IRQ lines are available.
+ maxItems: 14
+
+ interrupt-names:
+ items:
+ - const: dma0
+ - const: dma1
+ - const: dma2
+ - const: dma3
+ - const: dma4
+ - const: dma5
+ - const: dma6
+ - const: dma7
+ - const: msi
+ - const: aer
+ - const: pme
+ - const: hp
+ - const: bw_mg
+ - const: l_eq
+
+ clocks:
+ description:
+ DBI (attached to the APB bus), AXI-bus master and slave interfaces
+ are fed by dedicated application clocks. A common reference
+ clock signal is supposed to be attached to the corresponding Ref-pad
+ of the SoC. It will be redistributed amongst the controller core
+ sub-modules (pipe, core, aux, etc).
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: dbi
+ - const: mstr
+ - const: slv
+ - const: ref
+
+ resets:
+ description:
+ A comprehensive controller reset logic is supposed to be implemented
+ by software, so almost all the possible application and core reset
+ signals are exposed via the system CCU module.
+ maxItems: 9
+
+ reset-names:
+ items:
+ - const: mstr
+ - const: slv
+ - const: pwr
+ - const: hot
+ - const: phy
+ - const: core
+ - const: pipe
+ - const: sticky
+ - const: non-sticky
+
+ baikal,bt1-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the Baikal-T1 System Controller DT node. It's required to
+ access some additional PM, Reset-related and LTSSM signals.
+
+ num-lanes:
+ maximum: 4
+
+ max-link-speed:
+ maximum: 3
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/mips-gic.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ pcie@1f052000 {
+ compatible = "baikal,bt1-pcie";
+ device_type = "pci";
+ reg = <0x1f052000 0x1000>, <0x1f053000 0x1000>, <0x1bdbf000 0x1000>;
+ reg-names = "dbi", "dbi2", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x81000000 0 0x00000000 0x1bdb0000 0 0x00008000>,
+ <0x82000000 0 0x20000000 0x08000000 0 0x13db0000>;
+ bus-range = <0x0 0xff>;
+
+ interrupts = <GIC_SHARED 80 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 81 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 82 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 83 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 84 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 89 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 90 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 91 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 92 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 93 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma0", "dma1", "dma2", "dma3",
+ "dma4", "dma5", "dma6", "dma7",
+ "msi", "aer", "pme", "hp", "bw_mg",
+ "l_eq";
+
+ clocks = <&ccu_sys 1>, <&ccu_axi 6>, <&ccu_axi 7>, <&clk_pcie>;
+ clock-names = "dbi", "mstr", "slv", "ref";
+
+ resets = <&ccu_axi 6>, <&ccu_axi 7>, <&ccu_sys 7>, <&ccu_sys 10>,
+ <&ccu_sys 4>, <&ccu_sys 6>, <&ccu_sys 5>, <&ccu_sys 8>,
+ <&ccu_sys 9>;
+ reset-names = "mstr", "slv", "pwr", "hot", "phy", "core", "pipe",
+ "sticky", "non-sticky";
+
+ reset-gpios = <&port0 0 GPIO_ACTIVE_LOW>;
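+
+ /* Optional phandle to the system controller ('syscon' label assumed) */
+ baikal,bt1-syscon = <&syscon>;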
+
+ num-lanes = <4>;
+ max-link-speed = <3>;
+ };
+...
description: |+
RK3568 SoC PCIe host controller is based on the Synopsys DesignWare
PCIe IP and thus inherits all the common properties defined in
- designware-pcie.txt.
+ snps,dw-pcie.yaml.
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/snps,dw-pcie.yaml#
properties:
compatible:
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/snps,dw-pcie-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DWC PCIe RP/EP controller
+
+maintainers:
+ - Jingoo Han <jingoohan1@gmail.com>
+ - Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+
+description:
+ Generic Synopsys DesignWare PCIe Root Port and Endpoint controller
+ properties.
+
+select: false
+
+properties:
+ reg:
+ description:
+ DWC PCIe CSR space is normally accessed over the dedicated Data Bus
+ Interface - DBI. In accordance with the reference manual the register
+ configuration space belongs to the Configuration-Dependent Module (CDM)
+ and is split up into several sub-parts Standard PCIe configuration
+ space, Port Logic Registers (PL), Shadow Config-space Registers,
+ iATU/eDMA registers. The particular sub-space is selected by the
+ CDM/ELBI (dbi_cs) and CS2 (dbi_cs2) signals (selector bits). Such
+ configuration provides a flexible interface for the system engineers to
+ either map the particular space at a desired MMIO address or just leave
+ them in a contiguous memory space if pure Native or AXI Bridge DBI access
+ is selected. Note the PCIe CFG-space, PL and Shadow registers are
+ specific for each activated function, while the rest of the sub-spaces
+ are common for all of them (if there are more than one).
+ minItems: 2
+ maxItems: 6
+
+ reg-names:
+ minItems: 2
+ maxItems: 6
+
+ interrupts:
+ description:
+ There are two main sub-blocks which are normally capable of
+ generating interrupts':' the System Information interface and the MSI
+ interface. While the former has some IRQ signals common for the Host
+ and Endpoint controllers, the latter interface is obviously
+ Root Complex specific since it's responsible for signalling the
+ incoming MSI messages. The System Information IRQ signals are mainly
+ responsible for reporting the generic PCIe hierarchy and Root
+ Complex events like VPD IO request, general AER, PME, Hot-plug, link
+ bandwidth change, link equalization request, INTx asserted/deasserted
+ Message detection, embedded DMA Tx/Rx/Error.
+ minItems: 1
+ maxItems: 26
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 26
+
+ clocks:
+ description:
+ The DWC PCIe reference manual explicitly defines a set of clocks required
+ to get the controller working correctly. In general all of them can
+ be divided into two groups':' application and core clocks. Note the
+ platforms may have some of the clock sources unspecified if the
+ corresponding domains are fed from a common clock source.
+ minItems: 1
+ maxItems: 7
+
+ clock-names:
+ minItems: 1
+ maxItems: 7
+ items:
+ oneOf:
+ - description:
+ Data Bus Interface (DBI) clock. Clock signal for the AXI-bus
+ interface of the Configuration-Dependent Module, which is
+ basically the set of the controller CSRs.
+ const: dbi
+ - description:
+ Application AXI-bus Master interface clock. Basically this is
+ a clock for the controller DMA interface (PCI-to-CPU).
+ const: mstr
+ - description:
+ Application AXI-bus Slave interface clock. This is a clock for
+ the CPU-to-PCI memory IO interface.
+ const: slv
+ - description:
+ Controller Core-PCS PIPE interface clock. It's normally
+ supplied by an external PCS-PHY.
+ const: pipe
+ - description:
+ Controller Primary clock. It's assumed that all controller input
+ signals (except resets) are synchronous to this clock.
+ const: core
+ - description:
+ Auxiliary clock for the controller PMC domain. The controller
+ partitioning implies having some parts to operate with this
+ clock in some power management states.
+ const: aux
+ - description:
+ Generic reference clock. If there are several interfaces
+ fed from a common clock source it's advisable to
+ define it with this name (for instance pipe, core and aux can
+ be connected to a single source of the periodic signal).
+ const: ref
+ - description:
+ Clock for the PHY registers interface. Originally this is
+ a PHY-viewport-based interface, but some platform may have
+ specifically designed one.
+ const: phy_reg
+ - description:
+ Vendor-specific clock names. Consider using the generic names
+ above for new bindings.
+ oneOf:
+ - description: See native 'dbi' clock for details
+ enum: [ pcie, pcie_apb_sys, aclk_dbi ]
+ - description: See native 'mstr/slv' clock for details
+ enum: [ pcie_bus, pcie_inbound_axi, pcie_aclk, aclk_mst, aclk_slv ]
+ - description: See native 'pipe' clock for details
+ enum: [ pcie_phy, pcie_phy_ref, link ]
+ - description: See native 'aux' clock for details
+ enum: [ pcie_aux ]
+ - description: See native 'ref' clock for details.
+ enum: [ gio ]
+ - description: See native 'phy_reg' clock for details
+ enum: [ pcie_apb_phy, pclk ]
+
+ resets:
+ description:
+ The DWC PCIe reference manual explicitly defines a set of reset
+ signals required to be de-asserted to properly activate the controller
+ sub-parts. All of these signals can be divided into two sub-groups':'
+ application and core resets with respect to the main sub-domains they
+ are supposed to reset. Note the platforms may have some of these signals
+ unspecified if they are automatically handled or aggregated into
+ a comprehensive control module.
+ minItems: 1
+ maxItems: 10
+
+ reset-names:
+ minItems: 1
+ maxItems: 10
+ items:
+ oneOf:
+ - description: Data Bus Interface (DBI) domain reset
+ const: dbi
+ - description: AXI-bus Master interface reset
+ const: mstr
+ - description: AXI-bus Slave interface reset
+ const: slv
+ - description: Application-dependent interface reset
+ const: app
+ - description: Controller Non-sticky CSR flags reset
+ const: non-sticky
+ - description: Controller sticky CSR flags reset
+ const: sticky
+ - description: PIPE-interface (Core-PCS) logic reset
+ const: pipe
+ - description:
+ Controller primary reset (resets everything except PMC module)
+ const: core
+ - description: PCS/PHY block reset
+ const: phy
+ - description: PMC hot reset signal
+ const: hot
+ - description: Cold reset signal
+ const: pwr
+ - description:
+ Vendor-specific reset names. Consider using the generic names
+ above for new bindings.
+ oneOf:
+ - description: See native 'app' reset for details
+ enum: [ apps, gio, apb ]
+ - description: See native 'phy' reset for details
+ enum: [ pciephy, link ]
+ - description: See native 'pwr' reset for details
+ enum: [ turnoff ]
+
+ phys:
+ description:
+ There can be up to as many PHYs as there are lanes, specified in
+ the phandle array in the lane-based order. Obviously each of the
+ specified PHYs is supposed to be able to work in the PCIe mode at a
+ speed implied by the DWC PCIe controller it is attached to.
+ minItems: 1
+ maxItems: 16
+
+ phy-names:
+ minItems: 1
+ maxItems: 16
+ oneOf:
+ - description: Generic PHY names
+ items:
+ pattern: '^pcie[0-9]+$'
+ - description:
+ Vendor-specific PHY names. Consider using the generic
+ names above for new bindings.
+ items:
+ oneOf:
+ - pattern: '^pcie(-?phy[0-9]*)?$'
+ - pattern: '^p2u-[0-7]$'
+
+ reset-gpio:
+ deprecated: true
+ description:
+ Reference to the GPIO-controlled PERST# signal. It is used to reset all
+ the peripheral devices available on the PCIe bus.
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ Reference to the GPIO-controlled PERST# signal. It is used to reset all
+ the peripheral devices available on the PCIe bus.
+ maxItems: 1
+
+ max-link-speed:
+ maximum: 5
+
+ num-lanes:
+ description:
+ Number of PCIe link lanes to use. Can be omitted if the link has
+ already been brought up and is supposed to be preserved.
+ maximum: 16
+
+ num-ob-windows:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Number of outbound address translation windows. This parameter can be
+ auto-detected based on the iATU memory writability. So there is no
+ point in having a dedicated DT-property for it.
+ maximum: 256
+
+ num-ib-windows:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Number of inbound address translation windows. In the same way as
+ for the outbound AT windows, this parameter can be auto-detected based
+ on the iATU memory writability. There is no point having a dedicated
+ DT-property for it either.
+ maximum: 256
+
+ num-viewport:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Number of outbound view ports configured in hardware. It's the same as
+ the number of outbound AT windows.
+ maximum: 256
+
+ snps,enable-cdm-check:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable automatic checking of CDM (Configuration Dependent Module)
+ registers for data corruption. CDM registers include standard PCIe
+ configuration space registers, Port Logic registers, DMA and iATU
+ registers. This feature has been available since DWC PCIe v4.80a.
+
+ dma-coherent: true
+
+additionalProperties: true
+
+...
description: |
Synopsys DesignWare PCIe host controller endpoint
+# Please create a separate DT-schema for your DWC PCIe Endpoint controller
+# and make sure it's assigned with the vendor-specific compatible string.
+select:
+ properties:
+ compatible:
+ const: snps,dw-pcie-ep
+ required:
+ - compatible
+
allOf:
- $ref: /schemas/pci/pci-ep.yaml#
+ - $ref: /schemas/pci/snps,dw-pcie-common.yaml#
properties:
- compatible:
- anyOf:
- - {}
- - const: snps,dw-pcie-ep
-
reg:
- description: |
- It should contain Data Bus Interface (dbi) and config registers for all
- versions.
- For designware core version >= 4.80, it may contain ATU address space.
+ description:
+ DBI, DBI2 reg-spaces and outbound memory window are required for the
+ normal controller functioning. iATU memory IO region is also required
+ if the space is unrolled (IP-core version >= 4.80a).
minItems: 2
- maxItems: 4
+ maxItems: 5
reg-names:
minItems: 2
- maxItems: 4
+ maxItems: 5
items:
- enum: [dbi, dbi2, config, atu, addr_space, link, atu_dma, appl]
-
- reset-gpio:
- description: GPIO pin number of PERST# signal
- maxItems: 1
- deprecated: true
-
- reset-gpios:
- description: GPIO controlled connection to PERST# signal
- maxItems: 1
-
- snps,enable-cdm-check:
- type: boolean
- description: |
- This is a boolean property and if present enables
- automatic checking of CDM (Configuration Dependent Module) registers
- for data corruption. CDM registers include standard PCIe configuration
- space registers, Port Logic registers, DMA and iATU (internal Address
- Translation Unit) registers.
-
- num-ib-windows:
- $ref: /schemas/types.yaml#/definitions/uint32
- maximum: 256
- description: number of inbound address translation windows
- deprecated: true
-
- num-ob-windows:
- $ref: /schemas/types.yaml#/definitions/uint32
- maximum: 256
- description: number of outbound address translation windows
- deprecated: true
+ oneOf:
+ - description:
+ Basic DWC PCIe controller configuration-space accessible over
+ the DBI interface. This memory space is either activated with
+ CDM/ELBI = 0 and CS2 = 0 or is a contiguous memory region
+ with all spaces. Note iATU/eDMA CSRs are indirectly accessible
+ via the PL viewports on the DWC PCIe controllers older than
+ v4.80a.
+ const: dbi
+ - description:
+ Shadow DWC PCIe config-space registers. This space is selected
+ by setting CDM/ELBI = 0 and CS2 = 1. This is an intermix of
+ the PCI-SIG PCIe CFG-space with the shadow registers for some
+ PCI Header space, PCI Standard and Extended Structures. It's
+ mainly relevant for the end-point controller configuration,
+ but still there are some shadow registers available for the
+ Root Port mode too.
+ const: dbi2
+ - description:
+ External Local Bus registers. These are application-dependent
+ registers normally defined by the platform engineers. The space
+ can be selected by setting CDM/ELBI = 1 and CS2 = 0 wires or can
+ be accessed over some platform-specific means (for instance
+ as a part of a system controller).
+ enum: [ elbi, app ]
+ - description:
+ iATU/eDMA registers common for all device functions. It's an
+ unrolled memory space with the internal Address Translation
+ Unit and Enhanced DMA, which is selected by setting CDM/ELBI = 1
+ and CS2 = 1. For IP-core releases prior v4.80a, these registers
+ have been programmed via an indirect addressing scheme using a
+ set of viewport CSRs mapped into the PL space. Note iATU is
+ normally mapped to the 0x0 address of this region, while eDMA
+ is available at 0x80000 base address.
+ const: atu
+ - description:
+ Platform-specific eDMA registers. Some platforms may have eDMA
+ CSRs mapped in a non-standard base address. The registers offset
+ can be changed or the MS/LS-bits of the address can be attached
+ in an additional RTL block before the MEM-IO transactions reach
+ the DW PCIe slave interface.
+ const: dma
+ - description:
+ PHY/PCS configuration registers. Some platforms can have the
+ PCS and PHY CSRs accessible over a dedicated memory mapped
+ region, but mainly these registers are indirectly accessible
+ either by means of the embedded PHY viewport schema or by some
+ platform-specific method.
+ const: phy
+ - description:
+ Outbound iATU-capable memory-region which will be used to
+ generate various application-specific traffic on the PCIe bus
+ hierarchy. Its usage scenario depends on the endpoint
+ functionality, for instance it can be used to create MSI(X)
+ messages.
+ const: addr_space
+ - description:
+ Vendor-specific CSR names. Consider using the generic names above
+ for new bindings.
+ oneOf:
+ - description: See native 'elbi/app' CSR region for details.
+ enum: [ link, appl ]
+ - description: See native 'atu' CSR region for details.
+ enum: [ atu_dma ]
+ allOf:
+ - contains:
+ const: dbi
+ - contains:
+ const: addr_space
+
+ interrupts:
+ description:
+ There are no mandatory IRQ signals for normal controller functioning,
+ but in addition to the native set the platforms may have link- or
+ PM-related IRQs specified.
+ minItems: 1
+ maxItems: 20
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 20
+ items:
+ oneOf:
+ - description:
+ Controller request to read or write virtual product data
+ from/to the VPD capability registers.
+ const: vpd
+ - description:
+ Link Equalization Request flag is set in the Link Status 2
+ register (applicable if the corresponding IRQ is enabled in
+ the Link Control 3 register).
+ const: l_eq
+ - description:
+ Indicates that the eDMA Tx/Rx transfer is complete or that an
+ error has occurred on the corresponding channel. eDMA can have
+ eight Tx (Write) and Rx (Read) eDMA channels thus supporting up
+ to 16 IRQ signals altogether. Write eDMA channels shall go
+ first in the ordered row as per default edma_int[*] bus setup.
+ pattern: '^dma([0-9]|1[0-5])?$'
+ - description:
+ PCIe protocol correctable error or a Data Path protection
+ correctable error is detected by the automotive/safety
+ feature.
+ const: sft_ce
+ - description:
+ Indicates that the internal safety mechanism has detected an
+ uncorrectable error.
+ const: sft_ue
+ - description:
+ Application-specific IRQ raised on a vendor-specific events
+ basis.
+ const: app
+ - description:
+ Vendor-specific IRQ names. Consider using the generic names above
+ for new bindings.
+ oneOf:
+ - description: See native "app" IRQ for details
+ enum: [ intr ]
+
+ max-functions:
+ maximum: 32
required:
+ - compatible
- reg
- reg-names
- - compatible
additionalProperties: true
examples:
- |
- bus {
- #address-cells = <1>;
- #size-cells = <1>;
- pcie-ep@dfd00000 {
- compatible = "snps,dw-pcie-ep";
- reg = <0xdfc00000 0x0001000>, /* IP registers 1 */
- <0xdfc01000 0x0001000>, /* IP registers 2 */
- <0xd0000000 0x2000000>; /* Configuration space */
- reg-names = "dbi", "dbi2", "addr_space";
- };
+ pcie-ep@dfd00000 {
+ compatible = "snps,dw-pcie-ep";
+ reg = <0xdfc00000 0x0001000>, /* IP registers 1 */
+ <0xdfc01000 0x0001000>, /* IP registers 2 */
+ <0xd0000000 0x2000000>; /* Configuration space */
+ reg-names = "dbi", "dbi2", "addr_space";
+
+ interrupts = <23>, <24>;
+ interrupt-names = "dma0", "dma1";
+
+ clocks = <&sys_clk 12>, <&sys_clk 24>;
+ clock-names = "dbi", "ref";
+
+ resets = <&sys_rst 12>, <&sys_rst 24>;
+ reset-names = "dbi", "phy";
+
+ phys = <&pcie_phy0>, <&pcie_phy1>, <&pcie_phy2>, <&pcie_phy3>;
+ phy-names = "pcie0", "pcie1", "pcie2", "pcie3";
+
+ max-link-speed = <3>;
+ max-functions = /bits/ 8 <4>;
};
description: |
Synopsys DesignWare PCIe host controller
+# Please create a separate DT-schema for your DWC PCIe Root Port controller
+# and make sure it's assigned with the vendor-specific compatible string.
+select:
+ properties:
+ compatible:
+ const: snps,dw-pcie
+ required:
+ - compatible
+
allOf:
- $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/snps,dw-pcie-common.yaml#
properties:
- compatible:
- anyOf:
- - {}
- - const: snps,dw-pcie
-
reg:
- description: |
- It should contain Data Bus Interface (dbi) and config registers for all
- versions.
- For designware core version >= 4.80, it may contain ATU address space.
+ description:
+ At least DBI reg-space and peripheral devices CFG-space outbound window
+ are required for the normal controller functioning. iATU memory IO region is
+ also required if the space is unrolled (IP-core version >= 4.80a).
minItems: 2
maxItems: 5
minItems: 2
maxItems: 5
items:
- enum: [ dbi, dbi2, config, atu, atu_dma, app, appl, elbi, mgmt, ctrl,
- parf, cfg, link, ulreg, smu, mpu, apb, phy ]
-
- num-lanes:
- description: |
- number of lanes to use (this property should be specified unless
- the link is brought already up in firmware)
- maximum: 16
-
- reset-gpio:
- description: GPIO pin number of PERST# signal
- maxItems: 1
- deprecated: true
-
- reset-gpios:
- description: GPIO controlled connection to PERST# signal
- maxItems: 1
-
- interrupts: true
-
- interrupt-names: true
-
- clocks: true
-
- snps,enable-cdm-check:
- type: boolean
- description: |
- This is a boolean property and if present enables
- automatic checking of CDM (Configuration Dependent Module) registers
- for data corruption. CDM registers include standard PCIe configuration
- space registers, Port Logic registers, DMA and iATU (internal Address
- Translation Unit) registers.
-
- num-viewport:
- $ref: /schemas/types.yaml#/definitions/uint32
- maximum: 256
- description: |
- number of view ports configured in hardware. If a platform
- does not specify it, the driver autodetects it.
- deprecated: true
+ oneOf:
+ - description:
+ Basic DWC PCIe controller configuration-space accessible over
+ the DBI interface. This memory space is either activated with
+ CDM/ELBI = 0 and CS2 = 0 or is a contiguous memory region
+ with all spaces. Note iATU/eDMA CSRs are indirectly accessible
+ via the PL viewports on the DWC PCIe controllers older than
+ v4.80a.
+ const: dbi
+ - description:
+ Shadow DWC PCIe config-space registers. This space is selected
+ by setting CDM/ELBI = 0 and CS2 = 1. This is an intermix of
+ the PCI-SIG PCIe CFG-space with the shadow registers for some
+ PCI Header space, PCI Standard and Extended Structures. It's
+ mainly relevant for the end-point controller configuration,
+ but still there are some shadow registers available for the
+ Root Port mode too.
+ const: dbi2
+ - description:
+ External Local Bus registers. These are application-dependent
+ registers normally defined by the platform engineers. The space
+ can be selected by setting CDM/ELBI = 1 and CS2 = 0 wires or can
+ be accessed over some platform-specific means (for instance
+ as a part of a system controller).
+ enum: [ elbi, app ]
+ - description:
+ iATU/eDMA registers common for all device functions. It's an
+ unrolled memory space with the internal Address Translation
+ Unit and Enhanced DMA, which is selected by setting CDM/ELBI = 1
+ and CS2 = 1. For IP-core releases prior v4.80a, these registers
+ have been programmed via an indirect addressing scheme using a
+ set of viewport CSRs mapped into the PL space. Note iATU is
+ normally mapped to the 0x0 address of this region, while eDMA
+ is available at 0x80000 base address.
+ const: atu
+ - description:
+ Platform-specific eDMA registers. Some platforms may have eDMA
+ CSRs mapped in a non-standard base address. The registers offset
+ can be changed or the MS/LS-bits of the address can be attached
+ in an additional RTL block before the MEM-IO transactions reach
+ the DW PCIe slave interface.
+ const: dma
+ - description:
+ PHY/PCS configuration registers. Some platforms can have the
+ PCS and PHY CSRs accessible over a dedicated memory mapped
+ region, but mainly these registers are indirectly accessible
+ either by means of the embedded PHY viewport schema or by some
+ platform-specific method.
+ const: phy
+ - description:
+ Outbound iATU-capable memory-region which will be used to access
+ the peripheral PCIe devices configuration space.
+ const: config
+ - description:
+ Vendor-specific CSR names. Consider using the generic names above
+ for new bindings.
+ oneOf:
+ - description: See native 'elbi/app' CSR region for details.
+ enum: [ apb, mgmt, link, ulreg, appl ]
+ - description: See native 'atu' CSR region for details.
+ enum: [ atu_dma ]
+ - description: Syscon-related CSR regions.
+ enum: [ smu, mpu ]
+ allOf:
+ - contains:
+ const: dbi
+ - contains:
+ const: config
+
+ interrupts:
+ description:
+ DWC PCIe Root Port/Complex specific IRQ signals. At least the MSI
+ interrupt signal is supposed to be specified for the host controller.
+ minItems: 1
+ maxItems: 26
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 26
+ items:
+ oneOf:
+ - description:
+ Controller request to read or write virtual product data
+ from/to the VPD capability registers.
+ const: vpd
+ - description:
+ Link Equalization Request flag is set in the Link Status 2
+ register (applicable if the corresponding IRQ is enabled in
+ the Link Control 3 register).
+ const: l_eq
+ - description:
+ Indicates that the eDMA Tx/Rx transfer is complete or that an
+ error has occurred on the corresponding channel. eDMA can have
+ eight Tx (Write) and Rx (Read) eDMA channels thus supporting up
+ to 16 IRQ signals altogether. Write eDMA channels shall go
+ first in the ordered row as per default edma_int[*] bus setup.
+ pattern: '^dma([0-9]|1[0-5])?$'
+ - description:
+ PCIe protocol correctable error or a Data Path protection
+ correctable error is detected by the automotive/safety
+ feature.
+ const: sft_ce
+ - description:
+ Indicates that the internal safety mechanism has detected an
+ uncorrectable error.
+ const: sft_ue
+ - description:
+ Application-specific IRQ raised on a vendor-specific events
+ basis.
+ const: app
+ - description:
+ DSP AXI MSI Interrupt detected. It gets de-asserted when there is
+ no more MSI interrupt pending. The interrupt is relevant to the
+ iMSI-RX - Integrated MSI Receiver (AXI bridge).
+ const: msi
+ - description:
+ Legacy A/B/C/D interrupt signal. Basically it's triggered by
+ receiving an Assert_INT{A,B,C,D}/Deassert_INT{A,B,C,D} message
+ from the downstream device.
+ pattern: "^int(a|b|c|d)$"
+ - description:
+ Error condition detected and a flag is set in the Root Error Status
+ register of the AER capability. It's asserted when the RC has
+ internally generated an error or an error message is received by
+ the RC.
+ const: aer
+ - description:
+ PME message is received by the port. That means having the PME
+ status bit set in the Root Status register (the event is
+ supposed to be unmasked in the Root Control register).
+ const: pme
+ - description:
+ Hot-plug event is detected. That is, a bit has been set in the
+ Slot Status register and the corresponding event is enabled in
+ the Slot Control register.
+ const: hp
+ - description:
+ Link Autonomous Bandwidth Status flag has been set in the Link
+ Status register (the event is supposed to be unmasked in the
+ Link Control register).
+ const: bw_au
+ - description:
+ Bandwidth Management Status flag has been set in the Link
+ Status register (the event is supposed to be unmasked in the
+ Link Control register).
+ const: bw_mg
+ - description:
+ Vendor-specific IRQ names. Consider using the generic names above
+ for new bindings.
+ oneOf:
+ - description: See native "app" IRQ for details
+ enum: [ intr ]
+ allOf:
+ - contains:
+ const: msi
additionalProperties: true
required:
+ - compatible
- reg
- reg-names
- - compatible
examples:
- |
- bus {
- #address-cells = <1>;
- #size-cells = <1>;
- pcie@dfc00000 {
- device_type = "pci";
- compatible = "snps,dw-pcie";
- reg = <0xdfc00000 0x0001000>, /* IP registers */
- <0xd0000000 0x0002000>; /* Configuration space */
- reg-names = "dbi", "config";
- #address-cells = <3>;
- #size-cells = <2>;
- ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
- <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
- interrupts = <25>, <24>;
- #interrupt-cells = <1>;
- num-lanes = <1>;
- };
+ pcie@dfc00000 {
+ compatible = "snps,dw-pcie";
+ device_type = "pci";
+ reg = <0xdfc00000 0x0001000>, /* IP registers */
+ <0xd0000000 0x0002000>; /* Configuration space */
+ reg-names = "dbi", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
+ <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
+ bus-range = <0x0 0xff>;
+
+ interrupts = <25>, <24>;
+ interrupt-names = "msi", "hp";
+ #interrupt-cells = <1>;
+
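+ /* The flags cell 1 means GPIO_ACTIVE_LOW in gpio.h terms */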
+ reset-gpios = <&port0 0 1>;
+
+ phys = <&pcie_phy>;
+ phy-names = "pcie";
+
+ num-lanes = <1>;
+ max-link-speed = <3>;
};
properties:
mux-controls:
maxItems: 1
+ reg-names:
+ items:
+ - const: config
+ - const: map
required:
- mux-controls
+ - reg-names
else:
required:
- interrupts
compatible = "baikal,bt1-sys-ssi";
reg = <0x1f040100 0x900>,
<0x1c000000 0x1000000>;
+ reg-names = "config", "map";
#address-cells = <1>;
#size-cells = <0>;
mux-controls = <&boot_mux>;
- nvidia,tegra234-sysram
- qcom,rpm-msg-ram
- rockchip,rk3288-pmu-sram
+ - baikal,bt1-sram
reg:
maxItems: 1
A list of phandle and clock specifier pair that controls the single
SRAM clock.
+ clock-names: true
+
+ resets:
+ description:
+ A list of phandle and reset specifier pair that controls the SRAM
+ state reset.
+
+ reset-names: true
+
"#address-cells":
const: 1
- arm,juno-scp-shmem
- arm,scmi-shmem
- arm,scp-shmem
+ - baikal,bt1-boot-sram
- renesas,smp-sram
- rockchip,rk3066-smp-sram
- samsung,exynos4210-sysram
- compatible
- reg
-if:
- not:
- properties:
- compatible:
- contains:
- enum:
- - qcom,rpm-msg-ram
- - rockchip,rk3288-pmu-sram
-then:
- required:
- - "#address-cells"
- - "#size-cells"
- - ranges
-
-additionalProperties: false
+allOf:
+ - $ref: /schemas/mux/mux-consumer.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: baikal,bt1-sram
+ then:
+ properties:
+ mux-controls:
+ maxItems: 1
+ required:
+ - mux-controls
+ else:
+ properties:
+ mux-controls: false
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ enum:
+ - qcom,rpm-msg-ram
+ - rockchip,rk3288-pmu-sram
+ then:
+ required:
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+unevaluatedProperties: false
examples:
- |
F: drivers/clocksource/timer-cadence-ttc.c
F: drivers/cpuidle/cpuidle-zynq.c
F: drivers/edac/synopsys_edac.c
+F: drivers/edac/zynq_edac.c
F: drivers/i2c/busses/i2c-cadence.c
F: drivers/i2c/busses/i2c-xiic.c
F: drivers/mmc/host/sdhci-of-arasan.c
F: include/linux/backlight.h
F: include/linux/pwm_backlight.h
+BAIKAL-T1 PVT HARDWARE MONITOR DRIVER
+M: Serge Semin <fancer.lancer@gmail.com>
+L: linux-hwmon@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml
+F: drivers/hwmon/bt1-pvt.*
+
BARCO P50 GPIO DRIVER
M: Santosh Kumar Yadav <santoshkumar.yadav@barco.com>
M: Peter Korsgaard <peter.korsgaard@barco.com>
F: drivers/platform/mips/
F: include/dt-bindings/mips/
+MIPS/BAIKAL-T1 PLATFORM
+M: Serge Semin <fancer.lancer@gmail.com>
+L: linux-mips@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/bus/baikal,bt1-*
+F: Documentation/devicetree/bindings/clock/baikal,bt1-*
+F: Documentation/devicetree/bindings/memory-controllers/baikal,bt1-*
+F: Documentation/devicetree/bindings/mfd/baikal,bt1-*
+F: arch/mips/baikal-t1/
+F: arch/mips/boot/dts/baikal-t1/
+F: arch/mips/include/asm/mach-baikal-t1/
+F: drivers/bus/bt1-*
+F: drivers/clk/baikal-t1/
+F: drivers/memory/bt1-l2-ctl.c
+
MIPS BOSTON DEVELOPMENT BOARD
M: Paul Burton <paulburton@kernel.org>
L: linux-mips@vger.kernel.org
NTB IDT DRIVER
M: Serge Semin <fancer.lancer@gmail.com>
L: ntb@lists.linux.dev
-S: Supported
+S: Maintained
F: drivers/ntb/hw/idt/
+F: drivers/misc/eeprom/idt_89hpesx.c
NTB INTEL DRIVER
M: Dave Jiang <dave.jiang@intel.com>
PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
-M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+R: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+R: Serge Semin <fancer.lancer@gmail.com>
L: linux-pci@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
-F: Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
+F: Documentation/devicetree/bindings/pci/snps,dw-pcie*.yaml
F: drivers/pci/controller/dwc/*designware*
PCI DRIVER FOR TI DRA7XX/J721E
F: Documentation/devicetree/bindings/pci/axis,artpec*
F: drivers/pci/controller/dwc/*artpec*
+PCIE DRIVER FOR BAIKAL-T1
+M: Serge Semin <fancer.lancer@gmail.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml
+F: drivers/pci/controller/dwc/pcie-bt1.c
+
PCIE DRIVER FOR CAVIUM THUNDERX
M: Robert Richter <rric@kernel.org>
L: linux-pci@vger.kernel.org
platform-$(CONFIG_AR7) += ar7/
platform-$(CONFIG_ATH25) += ath25/
platform-$(CONFIG_ATH79) += ath79/
+platform-$(CONFIG_MIPS_BAIKAL_T1) += baikal-t1/
platform-$(CONFIG_BCM47XX) += bcm47xx/
platform-$(CONFIG_BCM63XX) += bcm63xx/
platform-$(CONFIG_BMIPS_GENERIC) += bmips/
and Loongson-2F which will be removed), developed by the Institute
of Computing Technology (ICT), Chinese Academy of Sciences (CAS).
+config MIPS_BAIKAL_T1
+ bool "MIPS Baikal-T1 SoC"
+ imply MIPS_CPS
+ select BOOT_ELF32
+ select BOOT_RAW
+ select USE_OF
+ select GENERIC_ISA_DMA
+ select DMA_NONCOHERENT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT
+ select SCHED_HRTICK
+ select HAVE_PCI
+ select PCI_DRIVERS_GENERIC
+ select COMMON_CLK
+ select ARCH_HAS_RESET_CONTROLLER
+ select UHI_BOOT
+ select MIPS_CPU_SCACHE
+ select IRQ_MIPS_CPU
+ select MIPS_GIC
+ select CLKSRC_MIPS_GIC
+ select CEVT_R4K
+ select CSRC_R4K
+ select HARDIRQS_SW_RESEND
+ select DW_APB_TIMER_OF
+ select MIPS_EXTERNAL_TIMER
+ select GENERIC_CLOCKEVENTS_MIN_ADJUST
+ select SMP_UP if SMP
+ select EDAC_SUPPORT
+ select EDAC_ATOMIC_SCRUB
+ select SOC_BUS
+ select STRONG_UC_ORDERING
+ select SYS_SUPPORTS_MIPS_CPS
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_CPU_MIPS32_R3_5
+ select SYS_HAS_CPU_MIPS32_R5
+ select SYS_HAS_CPU_P5600
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_RELOCATABLE
+ select SYS_SUPPORTS_ZBOOT
+ select SYS_SUPPORTS_ZBOOT_UART_PROM
+ select CPU_MIPSR2_IRQ_VI
+ select CPU_MIPSR2_IRQ_EI
+ select MIPS_L1_CACHE_SHIFT_5
+ help
+ This enables support of Baikal Electronics Baikal-T1 SoC platform.
+
config MIPS_MALTA
bool "MIPS Malta board"
select ARCH_MAY_HAVE_PC_FDC
source "arch/mips/alchemy/Kconfig"
source "arch/mips/ath25/Kconfig"
source "arch/mips/ath79/Kconfig"
+source "arch/mips/baikal-t1/Kconfig"
source "arch/mips/bcm47xx/Kconfig"
source "arch/mips/bcm63xx/Kconfig"
source "arch/mips/bmips/Kconfig"
#
config WEAK_REORDERING_BEYOND_LLSC
bool
+
+#
+# CPU may not reorder reads and writes R->R, R->W, W->R, W->W within Uncached
+# Cacheability and Coherency Attribute (CCA=2)
+#
+config STRONG_UC_ORDERING
+ bool
+
endmenu
#
config DMI
bool "Enable DMI scanning"
- depends on MACH_LOONGSON64
+ depends on MACH_LOONGSON64 || MIPS_BAIKAL_T1
select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
- default y
+ default MACH_LOONGSON64
help
Enabled scanning of DMI to identify machine quirks. Say Y
here unless you have verified that your setup is not
config MIPS_CPS_NS16550_BASE
hex "UART Base Address"
default 0x1b0003f8 if MIPS_MALTA
+ default 0x1f04a000 if MIPS_BAIKAL_T1
default 0
help
The base address of the ns16550 compatible UART on which to output
config MIPS_CPS_NS16550_SHIFT
int "UART Register Shift"
+ default 2 if MIPS_BAIKAL_T1
default 0
help
The number of bits to shift ns16550 register indices by in order to
config MIPS_CPS_NS16550_WIDTH
int "UART Register Width"
+ default 4 if MIPS_BAIKAL_T1
default 1
help
ns16550 registers width. UART registers IO access methods will be
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+#
+# Baikal-T1 platform configs
+#
+if MIPS_BAIKAL_T1
+
+config BT1_DEBUG
+ bool "Enable SoC/kernel debug options"
+ select EXPERT
+ select DEBUG_KERNEL
+ select DEBUG_ZBOOT
+ select DEBUG_MEMORY_INIT
+ select DEBUG_HIGHMEM if HIGHMEM
+ select DEBUG_STACKOVERFLOW
+ select RCU_TRACE
+ select EDAC_DEBUG if EDAC
+ select SCACHE_DEBUGFS
+ select GENERIC_IRQ_DEBUGFS
+ select CMA_DEBUGFS if CMA
+ select MIPS_CPS_NS16550_BOOL if MIPS_CPS
+ help
+ Use this option if you are in the process of the kernel drivers or
+ platform code development.
+
+config BT1_EARLY_UART
+ int "Default UART device for early printk and zboot"
+ range 0 1
+ default 0
+ help
+	  There are two DW APB UART-based serial interfaces available on
+	  the Baikal-T1 SoC. By this option you can select one of them to
+	  be used to print early logs and zboot debug symbols. Note that
+	  having both EARLY_PRINTK and SERIAL_EARLYCON configs enabled is
+	  prone to getting duplicated log messages if both of these
+	  sub-systems are using the same console. If you need to have the
+	  logs on both UART devices, make sure that this parameter and the
+	  'stdout-path' DT property point to different serial devices.
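+
+	  As an illustration, with the default of 0 the same early console
+	  can also be matched manually by passing
+	  'earlycon=uart8250,mmio32,0x1f04a000,115200n8' (the UART0 CSR
+	  base, cf. the MIPS_CPS_NS16550_BASE default) on the kernel
+	  command line.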
+
+config BT1_CPU_FEATURE_OVERRIDES
+ bool "Declare CPU features"
+ help
+	  By default nearly all the MIPS IP-core features are detectable at
+	  runtime. The corresponding cpu_has_* flags are constantly checked
+	  in the code to enable/disable the corresponding platform features.
+	  Since we intend to build a Baikal-T1 CPU specific kernel, there is
+	  no need for such flexibility, so we can freely define these flags
+	  with values known at build-time. By doing so we not only decrease
+	  the kernel size, but also speed it up.
+
+ If unsure, say N.
+
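+# As a sketch of what BT1_CPU_FEATURE_OVERRIDES enables, the conventional
+# asm/mach-baikal-t1/cpu-feature-overrides.h header (the overrides below
+# are illustrative, not the platform's actual list) may pin the detectable
+# features at build time:
+#
+#   #define cpu_has_tlb		1
+#   #define cpu_has_mips32r2	1
+#
+# so each cpu_has_* test folds into a compile-time constant.
+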
+config BT1_SWIOTLB_SIZE
+ int "SWIOTLB size in MiB" if SWIOTLB
+ range 4 64
+ default 8
+ help
+	  Due to invalid synthesis parameters of the Baikal-T1 main
+	  interconnect controller, SATA/USB/GMACx aren't able to access
+	  physical memory above 4GiB. So if XPA is enabled and the
+	  bootloader states there is more than 4GiB of physical memory,
+	  we need to have the SWIOTLB declared. Since by default SWIOTLB
+	  consumes too much memory, we create a custom table with a
+	  compile-time configurable buffer size.
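+	  (The value is consumed by plat_swiotlb_setup() in
+	  arch/mips/baikal-t1/init.c via swiotlb_adjust_size().)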
+
+choice
+ prompt "Baikal-T1 SoC based boards devicetree"
+ default BT1_DTB_NONE
+ help
+ Select a devicetree of the board with Baikal-T1 SoC installed.
+
+ config BT1_DTB_NONE
+ bool "None"
+
+ config BT1_DTB_ALL
+ bool "All"
+
+ config BT1_DTB_GENERIC
+ bool "Generic Baikal-T1 Board"
+ help
+	      This option provides a dtb for a generic board. It just
+	      activates all the Baikal-T1 SoC peripherals, so all the
+	      run-time detectable devices will work out of the box, while
+	      undetectable platform devices will be left untouched.
+
+ config BT1_DTB_BFK
+ bool "Baikal Electronics BFK"
+ help
+ This option provides a dtb for the Baikal Electronics BFK boards.
+ It's a Baikal-T1 SoC evaluation board specifically designed for
+ the SoC-based software prototyping.
+
+endchoice
+
+menu "Baikal-T1 Errata"
+
+config BT1_ERRATA_JR_LS_BUG
+ bool "Fix load/store bonding and JR prediction bug"
+ help
+ Early Baikal-T1 chips had problems when load/store bonding and JR
+ prediction were enabled. Switch these features off if you are using
+ the engineering version of the chip.
+
+ If unsure, say N.
+
+config BT1_ERRATA_GMAC_SPEED_INV_BUG
+ bool "Fix DW GMAC 10/100Mbit link speed bug"
+ help
+	  DW GMAC on early Baikal-T1 chip releases has inverted 10/100Mbit
+	  MAC speed settings. So when a 10Mbit link is requested, the
+	  100Mbit MAC link speed must be set up, and vice versa.
+
+ If unsure, say N.
+
+endmenu
+
+endif # MIPS_BAIKAL_T1
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+#
+# Baikal-T1 platform code makefile
+#
+obj-y += init.o irq.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+#
+# Baikal-T1 platform makefile
+#
+platform-$(CONFIG_MIPS_BAIKAL_T1) += baikal-t1/
+cflags-$(CONFIG_MIPS_BAIKAL_T1) += -I$(srctree)/arch/mips/include/asm/mach-baikal-t1
+ifdef CONFIG_KVM_GUEST
+ load-$(CONFIG_MIPS_BAIKAL_T1) += 0x0000000040100000
+ zload-$(CONFIG_MIPS_BAIKAL_T1) += 0xffffffff45100000
+else
+ load-$(CONFIG_MIPS_BAIKAL_T1) += 0xffffffff80100000
+ zload-$(CONFIG_MIPS_BAIKAL_T1) += 0xffffffff85100000
+endif
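+# (0xffffffff80100000 is the usual MIPS32 KSEG0 kernel link address; the
+# KVM guest configuration presumably links the kernel into the low user
+# segment instead, as the trap-and-emulate MIPS KVM flavour expects.)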
+all-$(CONFIG_MIPS_BAIKAL_T1) := $(COMPRESSION_FNAME).bin
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Alexey Malahov <Alexey.Malahov@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 early printk
+ */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/serial_reg.h>
+
+#include <asm/mach-baikal-t1/memory.h>
+
+#define BT1_UART_BASE(_id) \
+ (void *)KSEG1ADDR(CONCATENATE(BT1_UART, CONCATENATE(_id, _BASE)))
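+
+/*
+ * E.g. with CONFIG_BT1_EARLY_UART=0 the macro above expands to
+ * (void *)KSEG1ADDR(BT1_UART0_BASE), i.e. the uncached KSEG1 mapping of
+ * the first DW APB UART CSR space.
+ */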
+
+void prom_putchar(char c)
+{
+ void __iomem *uart_base = BT1_UART_BASE(CONFIG_BT1_EARLY_UART);
+ unsigned int timeout = 50000;
+ int status, bits;
+
+ bits = UART_LSR_TEMT | UART_LSR_THRE;
+
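+	/*
+	 * The UART CSRs are 32-bit wide and mapped with a 4-byte stride,
+	 * hence the register index shift by 2 below (matching the
+	 * reg-shift = <2> and reg-io-width = <4> properties of the serial
+	 * nodes in bt1.dtsi).
+	 */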
+ do {
+ status = __raw_readl(uart_base + (UART_LSR << 2));
+
+ if (--timeout == 0)
+ break;
+ } while ((status & bits) != bits);
+
+ if (timeout)
+ __raw_writel(c, uart_base + (UART_TX << 2));
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Alexey Malahov <Alexey.Malahov@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 platform initialization
+ */
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/limits.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_clk.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/swiotlb.h>
+#include <linux/sys_soc.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cpu-info.h>
+#include <asm/io.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/mipsregs.h>
+#include <asm/pci.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/setup.h>
+#include <asm/smp-ops.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+
+#include <asm/mach-baikal-t1/memory.h>
+
+static __initdata const void *fdt;
+
+/*
+ * The following configuration has been used to synthesize the Baikal-T1
+ * MIPS Warrior P5600 core:
+ * 1) SI_EVAReset = 0 - boot in legacy (not EVA) memory layout mode after
+ *    reset.
+ * 2) SI_UseExceptionBase = 0 - core uses legacy BEV mode, which selects
+ *    0xBFC00000 to be the exception vector by default after reset.
+ * 3) SI_ExceptionBase[31:12] = 0xBFC00000,
+ *    SI_ExceptionBasePA[31:29] = 0x0 - externally set default exception
+ *    base address. It is used when CP0.CONFIG5.K = 1.
+ * 4) SI_EICPresent = 0 - even though the GIC is always attached to the
+ *    cores, this pin is hardwired to the state of the GIC_VX_CTL_EIC bit.
+ */
+
+/*
+ * Redefine the MIPS CDMM phys base method to be used at the earliest boot
+ * stage before DT is parsed.
+ */
+#ifdef CONFIG_MIPS_EJTAG_FDC_EARLYCON
+
+phys_addr_t mips_cdmm_phys_base(void)
+{
+ return BT1_P5600_CDMM_BASE;
+}
+
+#endif /* CONFIG_MIPS_EJTAG_FDC_EARLYCON */
+
+/*
+ * We have to redefine the L2-sync phys base method, since the default
+ * region overlaps the Baikal-T1 boot memory following the CM2 GCRs.
+ */
+phys_addr_t mips_cm_l2sync_phys_base(void)
+{
+ return BT1_P5600_GCR_L2SYNC_BASE;
+}
+
+void __init *plat_get_fdt(void)
+{
+ const char *str;
+
+ /* Return already found fdt. */
+ if (fdt)
+ return (void *)fdt;
+
+	/*
+	 * The generic method will search for appended, UHI and built-in
+	 * DTBs (the UHI boot protocol passes the DTB pointer in fw_arg1 and
+	 * sets fw_arg0 to -2, which the check below relies on). Some older
+	 * versions of the Baikal-T1 bootloader could also pass the DTB via
+	 * the FW arg3 slot, so check that option too.
+	 */
+ fdt = get_fdt();
+ if (fdt) {
+ str = (fw_arg0 == -2) ? "UHI" : "Built-in/Appended";
+ } else if (fw_arg3) {
+ fdt = phys_to_virt(fw_arg3);
+ str = "Legacy position";
+ }
+
+ if (!fdt || fdt_check_header(fdt))
+ panic("No valid dtb found. Can't continue.");
+
+ pr_info("%s DTB found at %p\n", str, fdt);
+
+ return (void *)fdt;
+}
+
+#ifdef CONFIG_RELOCATABLE
+
+void __init plat_fdt_relocated(void *new_location)
+{
+ fdt = NULL;
+
+	/*
+	 * Forget the way the dtb was passed at system startup. Always use
+	 * UHI from now on.
+	 */
+ fw_arg0 = -2;
+ fw_arg1 = (unsigned long)new_location;
+}
+
+#endif /* CONFIG_RELOCATABLE */
+
+void __init prom_init(void)
+{
+ if (IS_ENABLED(CONFIG_EVA) && (read_c0_config5() & MIPS_CONF5_K))
+ pr_info("Enhanced Virtual Addressing (EVA) enabled\n");
+
+ /*
+ * Disable Legacy SYNC transaction performed on the L2/Memory port.
+ * This shall significantly improve the concurrent MMIO access
+ * performance.
+ */
+ change_gcr_control(CM_GCR_CONTROL_SYNCDIS, CM_GCR_CONTROL_SYNCDIS);
+
+ plat_get_fdt();
+}
+
+void __init plat_mem_setup(void)
+{
+ memblock_add(BT1_LOMEM_BASE, BT1_LOMEM_SIZE);
+
+#ifdef CONFIG_HIGHMEM
+ memblock_add(BT1_HIMEM_BASE, BT1_HIMEM_SIZE);
+#endif
+
+#ifdef CONFIG_PCI
+ PCIBIOS_MIN_IO = 0x100;
+#endif
+
+ __dt_setup_arch((void *)fdt);
+}
+
+void __init device_tree_init(void)
+{
+ int err;
+
+ unflatten_and_copy_device_tree();
+
+ mips_cpc_probe();
+
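+	/* Prefer the CPS SMP ops, falling back to the UP ops on failure. */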
+ err = register_cps_smp_ops();
+ if (err)
+ err = register_up_smp_ops();
+}
+
+#ifdef CONFIG_SWIOTLB
+
+void __init plat_swiotlb_setup(void)
+{
+ phys_addr_t top;
+
+	/*
+	 * Skip the SWIOTLB initialization if there isn't enough memory to
+	 * cause invalid peripheral accesses (i.e. nothing above the lowest
+	 * 4GiB the devices are able to reach).
+	 */
+ top = memblock_end_of_DRAM();
+ if (top <= SIZE_MAX)
+ return;
+
+	/*
+	 * Override the default SWIOTLB size with the configuration value.
+	 * Note if a custom size has been passed via the kernel parameter,
+	 * it won't be overwritten.
+	 */
+ swiotlb_adjust_size(CONFIG_BT1_SWIOTLB_SIZE * SZ_1M);
+ swiotlb_init(true, SWIOTLB_VERBOSE);
+}
+
+#endif /* CONFIG_SWIOTLB */
+
+void __init prom_free_prom_memory(void) {}
+
+#define HZ_TO_MHZ(_hz)		((_hz) / 1000000)
+#define HZ_GET_KHZ(_hz)		(((_hz) / 1000) % 1000)
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ unsigned long rate;
+ struct clk *clk;
+
+ of_clk_init(NULL);
+
+ np = of_get_cpu_node(0, NULL);
+ if (!np) {
+		pr_err("Failed to get the CPU OF node\n");
+ goto err_timer_probe;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get CPU clock (%ld)\n", PTR_ERR(clk));
+ goto err_timer_probe;
+ }
+
+ /* CPU count/compare timer runs at half the CPU frequency. */
+ rate = clk_get_rate(clk);
+ mips_hpt_frequency = rate / 2;
+
+ pr_info("MIPS CPU frequency: %lu.%03lu MHz\n",
+ HZ_TO_MHZ(rate), HZ_GET_KHZ(rate));
+ pr_info("MIPS CPU count/compare timer frequency: %u.%03u MHz\n",
+ HZ_TO_MHZ(mips_hpt_frequency), HZ_GET_KHZ(mips_hpt_frequency));
+
+ clk_put(clk);
+
+err_timer_probe:
+ timer_probe();
+}
+
+const char *get_system_type(void)
+{
+ return "Baikal-T1 SoC";
+}
+
+static struct bt1_soc {
+ struct soc_device_attribute dev_attr;
+ char revision[16];
+ char id[16];
+} soc;
+
+static int __init soc_setup(void)
+{
+ unsigned int cpuid = boot_cpu_data.processor_id;
+ struct soc_device *soc_dev;
+ struct device *parent = NULL;
+ int ret = 0;
+
+ soc.dev_attr.machine = mips_get_machine_name();
+ soc.dev_attr.family = get_system_type();
+ soc.dev_attr.revision = soc.revision;
+ soc.dev_attr.soc_id = soc.id;
+
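+	/*
+	 * Assuming the standard MIPS PRId revision-field layout here:
+	 * bits [7:5] - major, [4:2] - minor, [1:0] - patch revision.
+	 */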
+ snprintf(soc.revision, sizeof(soc.revision) - 1, "%u.%u.%u",
+ (cpuid >> 5) & 0x07, (cpuid >> 2) & 0x07, cpuid & 0x03);
+ snprintf(soc.id, sizeof(soc.id) - 1, "0x%08X",
+ readl(phys_to_virt(BT1_BOOT_CTRL_BASE + BT1_BOOT_CTRL_DRID)));
+
+ soc_dev = soc_device_register(&soc.dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err_return;
+ }
+
+ parent = soc_device_to_device(soc_dev);
+
+err_return:
+ return ret;
+}
+arch_initcall(soc_setup);
+
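+/*
+ * Treat O_DSYNC mappings, anything above the kernel-managed RAM and the
+ * SoC MMIO window as candidates for uncached access.
+ */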
+int __uncached_access(struct file *file, unsigned long addr)
+{
+ if (file->f_flags & O_DSYNC)
+ return 1;
+
+ return addr >= __pa(high_memory) ||
+ ((addr >= BT1_MMIO_START) && (addr < BT1_MMIO_END));
+}
+
+#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
+
+static phys_addr_t uca_start, uca_end;
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ phys_addr_t offset = PFN_PHYS(pfn), end = offset + size;
+
+ if (__uncached_access(file, offset)) {
+ if (uca_start && (offset >= uca_start) &&
+ (end <= uca_end))
+ return __pgprot((pgprot_val(vma_prot) &
+ ~_CACHE_MASK) |
+ _CACHE_UNCACHED_ACCELERATED);
+ else
+ return pgprot_noncached(vma_prot);
+ }
+ return vma_prot;
+}
+
+int mips_set_uca_range(phys_addr_t start, phys_addr_t end)
+{
+ if (end <= start || end <= BT1_MMIO_START)
+ return -EINVAL;
+
+ uca_start = start;
+ uca_end = end;
+ return 0;
+}
+
+#endif /* CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 IRQ initialization
+ */
+#include <linux/irqchip.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mips-gic.h>
+#include <asm/irq_cpu.h>
+#include <asm/irq.h>
+
+int get_c0_fdc_int(void)
+{
+ return gic_get_c0_fdc_int();
+}
+
+int get_c0_perfcount_int(void)
+{
+ return gic_get_c0_perfcount_int();
+}
+
+unsigned int get_c0_compare_int(void)
+{
+ return gic_get_c0_compare_int();
+}
+
+/*
+ * If CP0.Cause.IV == 1 and cpu_has_veic == 1, the method below is never
+ * supposed to be called. Otherwise we just handle a vectored interrupt
+ * which was routed to the generic exception vector.
+ */
+#if !defined(CONFIG_IRQ_MIPS_CPU)
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ extern unsigned long vi_handlers[];
+ unsigned int cause = (read_c0_cause() & CAUSEF_IP) >> CAUSEB_IP2;
+ void (*isr)(void) = (void *)vi_handlers[cause];
+
+ if (cause && isr)
+ isr();
+ else if (cause && !isr)
+ panic("Vectored interrupt %u handler is empty\n", cause);
+ else
+ spurious_interrupt();
+}
+
+#endif /* !CONFIG_IRQ_MIPS_CPU */
+
+void __init arch_init_irq(void)
+{
+ if (!cpu_has_veic)
+ mips_cpu_irq_init();
+
+ irqchip_init();
+}
vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o
+vmlinuzobjs-$(CONFIG_MIPS_BAIKAL_T1) += $(obj)/uart-bt1.o
vmlinuzobjs-$(CONFIG_ATH79) += $(obj)/uart-ath79.o
endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../baikal-t1/early_printk.c"
# SPDX-License-Identifier: GPL-2.0
+subdir-$(CONFIG_MIPS_BAIKAL_T1) += baikal
subdir-$(CONFIG_BMIPS_GENERIC) += brcm
subdir-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon
subdir-$(CONFIG_FIT_IMAGE_FDT_MARDUK) += img
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+#
+# Baikal-T1 dtbs makefile
+#
+dtb-$(CONFIG_BT1_DTB_ALL) += bt1-gen.dtb bfk3.dtb
+dtb-$(CONFIG_BT1_DTB_GENERIC) += bt1-gen.dtb
+dtb-$(CONFIG_BT1_DTB_BFK) += bfk3.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal Electronics BFK v3.x evaluation board device tree
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "bt1.dtsi"
+#include "krkx4-mv-sfp.dtsi"
+
+/ {
+ model = "Baikal Electronics BFK v3.x Evaluation Board";
+ compatible = "baikal,bfk3", "baikal,bt1";
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8 earlycon maxcpus=2";
+ stdout-path = "serial0:115200n8";
+
+ /* Bootloader may use these props to pass the initrd image */
+ linux,initrd-start = <0 0>;
+ linux,initrd-end = <0 0>;
+ };
+
+ memory {
+		/*
+		 * Assume at least 512MB of RAM is installed: 128MB of low
+		 * memory and 256MB of high memory are declared here (the
+		 * second 128MB is shadowed by the SoC MMIO region).
+		 */
+ device_type = "memory";
+ reg = <0 0x00000000 0 0x08000000>,
+ <0 0x20000000 0 0x10000000>;
+ };
+
+ clocks {
+		/*
+		 * SATA/PCIe/xGMAC reference clocks are provided by the
+		 * IDT 5P49V5901, which is out of the SoC's reach and is
+		 * initialized by the embedded BMC.
+		 */
+ xgmac_ref_clk: clock-oscillator-vc5p1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <156250000>;
+ clock-output-names = "xgmac156m";
+ };
+
+ pcie_ref_clk: clock-oscillator-vc5p3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "pcie100m";
+ };
+
+ sata_ref_clk: clock-oscillator-vc5p4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "sata100m";
+ };
+
+ usb_phy_clk: clock-oscillator-usb-phy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "usbphy24m";
+ };
+
+ gmac0_phy_clk: clock-oscillator-gmac0-phy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "gmac0phy25m";
+ };
+
+ gmac1_phy_clk: clock-oscillator-gmac1-phy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "gmac1phy25m";
+ };
+ };
+};
+
+&l2 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&int_rom {
+ status = "okay";
+};
+
+&spi0 {
+ num-cs = <1>;
+
+ status = "okay";
+
+ /* Micron N25Q128A11 */
+ boot_flash: flash@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+
+ spi-max-frequency = <25000000>;
+ m25p,fast-read;
+ };
+};
+
+&gpio0 {
+ status = "okay";
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ /* STM32F205VET-based Board Management Controller */
+ bmc: bmc@8 {
+ compatible = "baikal,bt1-bmc";
+ reg = <0x08>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ spd: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+
+ pagesize = <8>;
+ };
+
+ /* Might be absent */
+ fw: eeprom@54 {
+ compatible = "atmel,24cs04";
+ reg = <0x54>;
+
+ pagesize = <8>;
+ };
+
+ rtc: rtc@56 {
+ compatible = "abracon,abeoz9";
+ reg = <0x56>;
+
+ trickle-resistor-ohms = <5000>;
+ };
+};
+
+&timer_dw0 {
+ status = "okay";
+};
+
+&timer_dw1 {
+ status = "okay";
+};
+
+&timer_dw2 {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
+
+&spi1 {
+ num-cs = <4>;
+
+ /*
+ * XP20 port switches between CS0 and port1:0 chip-selects.
+ * XP21 port switches between CS1 and port1:1 chip-selects.
+ */
+ cs-gpios = <0>, <0>,
+ <&port1 0 GPIO_ACTIVE_LOW>, <&port1 1 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+
+ /* Micron N25Q256A13EF */
+ test_flash11: flash@1 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <1>;
+
+ spi-max-frequency = <25000000>;
+ m25p,fast-read;
+ };
+
+ /* Micron N25Q256A13EF */
+ test_flash13: flash@3 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <3>;
+
+ spi-max-frequency = <25000000>;
+ m25p,fast-read;
+ };
+};
+
+&spi2 {
+ /* XP19 port switches between CS0 and port1:2 chip-selects */
+ cs-gpios = <0>, <&port1 2 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+};
+
+&pvt {
+ status = "okay";
+};
+
+&efuse {
+ status = "okay";
+};
+
+&pcie {
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_PCIE_M_CLK>,
+ <&ccu_axi CCU_AXI_PCIE_S_CLK>,
+ <&pcie_ref_clk>;
+ clock-names = "dbi", "mstr", "slv", "ref";
+
+ status = "okay";
+};
+
+&sram {
+ status = "okay";
+};
+
+&dma {
+ status = "okay";
+};
+
+&mc {
+ status = "okay";
+};
+
+&mc_phy {
+ status = "okay";
+};
+
+&sata {
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_SATA_CLK>,
+ <&sata_ref_clk>;
+ clock-names = "pclk", "aclk", "ref";
+
+ status = "okay";
+};
+
+&sata0 {
+ hba-port-cap = <HBA_PORT_FBSCP>;
+
+ status = "okay";
+};
+
+&sata1 {
+ hba-port-cap = <HBA_PORT_FBSCP>;
+
+ status = "okay";
+};
+
+&xgmac_mi {
+ status = "disabled";
+};
+
+&xgmac_pcs {
+ clocks = <&ccu_sys CCU_SYS_XGMAC_REF_CLK>,
+ <&xgmac_ref_clk>;
+ clock-names = "core", "pad";
+};
+
+&xgmac {
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_XGMAC_CLK>,
+ <&ccu_sys CCU_SYS_XGMAC_PTP_CLK>,
+ <&xgmac_ref_clk>;
+ clock-names = "pclk", "stmmaceth", "ptp_ref", "tx";
+
+ mac-address = [ 00 20 13 ba 1c a1 ];
+
+ status = "okay";
+};
+
+&hwa {
+ status = "okay";
+};
+
+&gmac0_mdio {
+ reset-delay-us = <10200>;
+ reset-post-delay-us = <1000>;
+
+ /* Micrel KSZ9031RNX */
+ gmac0_phy: ethernet-phy@3 {
+ compatible = "ethernet-phy-id0022.1620";
+ reg = <0x3>;
+
+ clocks = <&gmac0_phy_clk>;
+ clock-names = "ref";
+ };
+};
+
+&gmac0 {
+ mac-address = [ 00 26 58 80 01 02 ];
+
+ phy-handle = <&gmac0_phy>;
+
+ status = "okay";
+};
+
+&gmac1_mdio {
+ reset-delay-us = <10200>;
+ reset-post-delay-us = <1000>;
+
+ /* Micrel KSZ9031RNX */
+ gmac1_phy: ethernet-phy@3 {
+ compatible = "ethernet-phy-id0022.1620";
+ reg = <0x3>;
+
+ clocks = <&gmac1_phy_clk>;
+ clock-names = "ref";
+ };
+};
+
+&gmac1 {
+ mac-address = [ 00 26 58 80 01 03 ];
+
+ phy-handle = <&gmac1_phy>;
+
+ status = "okay";
+};
+
+&usb {
+ status = "okay";
+
+ ulpi {
+ phy {
+ clocks = <&usb_phy_clk>;
+ clock-names = "ref";
+ };
+ };
+};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 generic platform device tree
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "bt1.dtsi"
+
+/ {
+ model = "Baikal-T1 Generic Platform";
+ compatible = "baikal,bt1";
+
+ chosen {
+		/*
+		 * Beware of having both EARLY_PRINTK and SERIAL_EARLYCON
+		 * activated at the same time: if they both refer to the same
+		 * device, you'll end up with duplicated log messages.
+		 * By passing 'earlycon' to the kernel here we make it parse
+		 * the stdout-path property to find the early console device.
+		 * The system console will then be activated in accordance
+		 * with it unless a 'console=' parameter is passed. Any of the
+		 * following consoles are valid: ttyS{0,1}/uart{0,1} (aliased
+		 * as serial{0,1}), early_fdc (the CDMM-JTAG serial iface).
+		 */
+ bootargs = "console=ttyS0,115200n8 earlycon maxcpus=2";
+ stdout-path = "serial0:115200n8";
+
+ /* It's implied that the bootloader updates the initrd address */
+ linux,initrd-start = <0 0>;
+ linux,initrd-end = <0 0>;
+ };
+
+ memory {
+		/*
+		 * Declare the required low memory and an additional 256MB of
+		 * high memory, which due to the DW uMCTL2 controller specific
+		 * setup nearly always exists as the remapped upper part of
+		 * the first memory chip. Without the low-level remapping that
+		 * segment is hidden behind the MMIO region and unreachable.
+		 * NOTE: since the MMIO region sits right above the very first
+		 * 128MB of the low memory, the second 128MB of the physical
+		 * memory is always unavailable: it is hidden behind MMIO and
+		 * non-remappable by the DW uMCTL2.
+		 */
+ device_type = "memory";
+ reg = <0 0x00000000 0 0x08000000>,
+ <0 0x20000000 0 0x10000000>;
+ };
+
+ /* Standard xGMAC/PCIe/SATA reference clocks setup */
+ clocks {
+ xgmac_ref_clk: clock-oscillator-xgmac {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <156250000>;
+ clock-output-names = "xgmac156m";
+ };
+
+ pcie_ref_clk: clock-oscillator-pcie {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "pcie100m";
+ };
+
+ sata_ref_clk: clock-oscillator-sata {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "sata100m";
+ };
+ };
+};
+
+&l2 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&int_rom {
+ status = "okay";
+};
+
+&spi0 {
+ num-cs = <1>;
+
+ status = "okay";
+
+	/*
+	 * Most likely an SPI-NOR flash will always be installed on each
+	 * device with the Baikal-T1 SoC on board: there is just no better
+	 * alternative to boot a normal system on that CPU.
+	 * Note Baikal-T1 is able to transparently access up to 16MB of
+	 * flash, so the system bootloader size cannot exceed that limit,
+	 * but an attached SPI-flash can, as long as it supports 3-byte
+	 * addressing of the lowest partition.
+	 */
+ boot_flash: flash@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+
+ spi-max-frequency = <25000000>;
+ m25p,fast-read;
+ };
+};
+
+&gpio0 {
+ status = "okay";
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&timer_dw0 {
+ status = "okay";
+};
+
+&timer_dw1 {
+ status = "okay";
+};
+
+&timer_dw2 {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
+
+&spi1 {
+ num-cs = <4>;
+
+ status = "okay";
+};
+
+&spi2 {
+ num-cs = <4>;
+
+ status = "okay";
+};
+
+&pvt {
+ status = "okay";
+};
+
+&efuse {
+ status = "okay";
+};
+
+&pcie {
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_PCIE_M_CLK>,
+ <&ccu_axi CCU_AXI_PCIE_S_CLK>,
+ <&pcie_ref_clk>;
+ clock-names = "dbi", "mstr", "slv", "ref";
+
+ status = "okay";
+};
+
+&sram {
+ status = "okay";
+};
+
+&dma {
+ status = "okay";
+};
+
+&mc {
+ status = "okay";
+};
+
+&mc_phy {
+ status = "okay";
+};
+
+&sata {
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_SATA_CLK>,
+ <&sata_ref_clk>;
+ clock-names = "pclk", "aclk", "ref";
+
+ status = "okay";
+};
+
+&sata0 {
+ hba-port-cap = <HBA_PORT_FBSCP>;
+
+ status = "okay";
+};
+
+&sata1 {
+ hba-port-cap = <HBA_PORT_FBSCP>;
+
+ status = "okay";
+};
+
+&xgmac_mi {
+ status = "okay";
+};
+
+&xgmac_pcs {
+ clocks = <&ccu_sys CCU_SYS_XGMAC_REF_CLK>,
+ <&xgmac_ref_clk>;
+ clock-names = "core", "pad";
+};
+
+&xgmac {
+ mac-address = [ 00 20 13 ba 1c a1 ];
+
+ status = "okay";
+};
+
+&hwa {
+ status = "okay";
+};
+
+&gmac0_mdio {
+ reset-delay-us = <10000>;
+ reset-post-delay-us = <30000>;
+
+	/*
+	 * We don't know the actual PHY address on a generic device. Let the
+	 * driver auto-scan the MDIO bus looking for an IEEE 802.3 Clause 22
+	 * compatible PHY.
+	 */
+ gmac0_phy: ethernet-phy {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+};
+
+&gmac0 {
+ mac-address = [ 7a 72 6c 4a 7a 07 ];
+
+ phy-handle = <&gmac0_phy>;
+
+ status = "okay";
+};
+
+&gmac1_mdio {
+ reset-delay-us = <10000>;
+ reset-post-delay-us = <30000>;
+
+	/*
+	 * We don't know the actual PHY address on a generic device. Let the
+	 * driver auto-scan the MDIO bus looking for an IEEE 802.3 Clause 22
+	 * compatible PHY.
+	 */
+ gmac1_phy: ethernet-phy {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+};
+
+&gmac1 {
+ mac-address = [ 7a 72 6c 4a 7b 07 ];
+
+ phy-handle = <&gmac1_phy>;
+
+ status = "okay";
+};
+
+&usb {
+ status = "okay";
+};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 SoC overclocking device tree
+ */
+
+#include <dt-bindings/clock/bt1-ccu.h>
+
+/*
+ * WARNING! This file provides the Baikal-T1 SoC overclocking settings. Since
+ * the specified clock rates are officially unsupported there is no firm
+ * guarantee the system will stably work if they are applied. So use it at your
+ * own risk.
+ */
+
+&cpu_opp {
+ opp-1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ clock-latency-ns = <20000>;
+ turbo-mode;
+ };
+
+ opp-1400000000 {
+ opp-hz = /bits/ 64 <1400000000>;
+ clock-latency-ns = <20000>;
+ turbo-mode;
+ };
+
+ opp-1500000000 {
+ opp-hz = /bits/ 64 <1500000000>;
+ clock-latency-ns = <20000>;
+ turbo-mode;
+ };
+};
+
+/*
+ * In general the system is working well with the CSRs bus rate above 50MHz
+ * and up to 300MHz, but it hasn't been fully tested yet. For instance, DW DMA
+ * won't work well with APB clock being greater than 200 MHz. So if you mean to
+ * use the DMA-based communications over the I2C/UART/SPI interfaces don't
+ * exceed the 200MHz limit.
+ */
+&apb {
+ assigned-clocks = <&ccu_sys CCU_SYS_APB_CLK>;
+ assigned-clock-rates = <200000000>;
+};
+
+/*
+ * For this to work well the overclocked rates must be set on the fully
+ * disabled PCIe controller.
+ */
+&pcie {
+ assigned-clocks = <&ccu_axi CCU_AXI_PCIE_M_CLK>,
+ <&ccu_axi CCU_AXI_PCIE_S_CLK>;
+ assigned-clock-rates = <600000000>, <600000000>;
+};
+
+&sata {
+ assigned-clocks = <&ccu_axi CCU_AXI_SATA_CLK>;
+ assigned-clock-rates = <300000000>;
+};
+
+&xgmac {
+ assigned-clocks = <&ccu_axi CCU_AXI_XGMAC_CLK>;
+ assigned-clock-rates = <312500000>;
+};
+
+&gmac0 {
+ assigned-clocks = <&ccu_axi CCU_AXI_GMAC0_CLK>;
+ assigned-clock-rates = <250000000>;
+};
+
+&gmac1 {
+ assigned-clocks = <&ccu_axi CCU_AXI_GMAC1_CLK>;
+ assigned-clock-rates = <250000000>;
+};
+
+&usb {
+ assigned-clocks = <&ccu_axi CCU_AXI_USB_CLK>;
+ assigned-clock-rates = <300000000>;
+};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 SoC device tree
+ */
+
+#include <dt-bindings/dma/dw-dmac.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/ata/ahci.h>
+
+#include <dt-bindings/clock/bt1-ccu.h>
+#include <dt-bindings/reset/bt1-ccu.h>
+#include <dt-bindings/soc/bt1-boot-mode.h>
+
+/ {
+ model = "Baikal-T1 SoC";
+ compatible = "baikal,bt1";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ interrupt-parent = <&gic>;
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ spi0 = &spi0;
+ spi1 = &spi1;
+ spi2 = &spi2;
+ mc0 = &mc;
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ ethernet2 = &xgmac;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ compatible = "img,p5600";
+ device_type = "cpu";
+ reg = <0x0>;
+ #cooling-cells = <2>;
+
+ clocks = <&ccu_pll CCU_CPU_PLL>;
+ clock-names = "cpu_clk";
+
+ operating-points-v2 = <&cpu_opp>;
+ };
+
+ cpu1: cpu@1 {
+ compatible = "img,p5600";
+ device_type = "cpu";
+ reg = <0x1>;
+ #cooling-cells = <2>;
+
+ clocks = <&ccu_pll CCU_CPU_PLL>;
+ clock-names = "cpu_clk";
+
+ operating-points-v2 = <&cpu_opp>;
+ };
+ };
+
+ gic: gic@1bdc0000 {
+ compatible = "mti,gic";
+ reg = <0 0x1bdc0000 0 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ mti,reserved-ipi-vectors = <108 4>;
+
+ timer_gic: timer {
+ compatible = "mti,gic-timer";
+
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+
+ clocks = <&ccu_pll CCU_CPU_PLL>;
+ };
+ };
+
+ cpc: cpc@1bde0000 {
+ compatible = "mti,mips-cpc";
+ reg = <0 0x1bde0000 0 0x8000>;
+ };
+
+ cdmm: cdmm@1bde8000 {
+ compatible = "mti,mips-cdmm";
+ reg = <0 0x1bde8000 0 0x8000>;
+ };
+
+ cm2: cm2@1fbf8000 {
+ compatible = "mti,mips-cm";
+ reg = <0 0x1fbf8000 0 0x8000>,
+ <0 0x1fbf0000 0 0x1000>;
+ reg-names = "gcr", "l2sync";
+ };
+
+	/*
+	 * Note setting too low a CPU frequency may cause time-critical
+	 * applications to stop working correctly. For instance, in order to
+	 * have the DW APB SSI memory interface (EEPROM-read and Tx-only)
+	 * working correctly over the whole CPU clock range defined below, we
+	 * had to constrain the SPI bus speed accordingly.
+	 */
+ cpu_opp: opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-900000000 {
+ opp-hz = /bits/ 64 <900000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ clock-latency-ns = <20000>;
+ };
+
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ clock-latency-ns = <20000>;
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&pvt>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <80000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_alert1: trip1 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_warn: trip2 {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_crit: trip3 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map-alert1 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ /* External fixed reference clocks */
+ clocks {
+ ref_clk: clock-oscillator-ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "ref25m";
+ };
+ };
+
+ apb: bus@1f059000 {
+ compatible = "baikal,bt1-apb", "simple-bus";
+ reg = <0 0x1f059000 0 0x1000>,
+ <0 0x1d000000 0 0x2040000>;
+ reg-names = "ehb", "nodev";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x1bfc0000 0 0x1bfc0000 0x03c38000>,
+ <0x1fc00000 0 0x1fc00000 0x00400000>;
+
+ interrupts = <GIC_SHARED 16 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "pclk";
+
+ resets = <&ccu_sys CCU_SYS_APB_RST>;
+ reset-names = "prst";
+
+ syscon: syscon@1f04d000 {
+ compatible = "baikal,bt1-sys-con", "syscon", "simple-mfd";
+ reg = <0x1f04d000 0x1000>;
+ reg-names = "sys";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ little-endian;
+ reg-io-width = <4>;
+
+ ccu_pll: clock-controller@1f04d000 {
+ compatible = "baikal,bt1-ccu-pll";
+ reg = <0x1f04d000 0x028>;
+ #clock-cells = <1>;
+
+ clocks = <&ref_clk>;
+ clock-names = "ref_clk";
+ };
+
+ ccu_axi: clock-controller@1f04d030 {
+ compatible = "baikal,bt1-ccu-axi";
+ reg = <0x1f04d030 0x030>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+
+ clocks = <&ccu_pll CCU_SATA_PLL>,
+ <&ccu_pll CCU_PCIE_PLL>,
+ <&ccu_pll CCU_ETH_PLL>;
+ clock-names = "sata_clk", "pcie_clk", "eth_clk";
+ };
+
+ ccu_sys: clock-controller@1f04d060 {
+ compatible = "baikal,bt1-ccu-sys";
+ reg = <0x1f04d060 0x0a0>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+
+ clocks = <&ref_clk>,
+ <&ccu_pll CCU_SATA_PLL>,
+ <&ccu_pll CCU_PCIE_PLL>,
+ <&ccu_pll CCU_ETH_PLL>;
+ clock-names = "ref_clk", "sata_clk", "pcie_clk",
+ "eth_clk";
+ };
+
+ l2: l2@1f04d028 {
+ compatible = "baikal,bt1-l2-ctl";
+ reg = <0x1f04d028 0x004>;
+
+ baikal,l2-ws-latency = <0>;
+ baikal,l2-tag-latency = <0>;
+ baikal,l2-data-latency = <1>;
+
+ status = "disabled";
+ };
+
+ reboot {
+ compatible = "syscon-reboot";
+ offset = <0x118>;
+
+ mask = <0x1>;
+ value = <0x1>;
+
+ status = "disabled";
+ };
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x154>;
+
+ mode-normal = <RCR_BOOT_NORMAL>;
+ mode-loader = <RCR_BOOT_LOADER>;
+ mode-recovery = <RCR_BOOT_RECOVERY>;
+ };
+
+ i2c0: i2c@1f04d100 {
+ compatible = "baikal,bt1-sys-i2c";
+ reg = <0x1f04d100 0x010>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <GIC_SHARED 32 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ };
+ };
+
+ bootcon: syscon@1f040000 {
+ compatible = "baikal,bt1-boot-con", "syscon", "simple-mfd";
+ reg = <0x1f040000 0x1000>,
+ <0x1fc00000 0x400000>;
+ reg-names = "boot", "mirror";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ little-endian;
+ reg-io-width = <4>;
+
+ boot_mux: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+
+ mux-reg-masks = <0x0 0x100>, <0x4 0x1>;
+ idle-states = <0x1>, <0x0>;
+ };
+
+ int_rom: rom@1bfc0000 {
+ compatible = "baikal,bt1-int-rom", "mtd-rom";
+ reg = <0x1bfc0000 0x10000>;
+
+ no-unaligned-direct-access;
+ bank-width = <4>;
+
+ status = "disabled";
+ };
+
+			/*
+			 * Note that using the dirmap region stalls the APB
+			 * bus until an IO operation is finished. It may cause
+			 * significant lags in concurrent access to the system
+			 * MMIO, since each SPI flash dword read operation
+			 * takes at least 2.56 us to finish (cmd + addr + data
+			 * = 64 SCLK cycles at the 25MHz flash clock rate).
+			 */
+ spi0: spi@1f040100 {
+ compatible = "baikal,bt1-sys-ssi";
+ reg = <0x1f040100 0x900>,
+ <0x1c000000 0x1000000>;
+ reg-names = "config", "map";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mux-controls = <&boot_mux 0>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "ssi_clk";
+
+ status = "disabled";
+ };
+ };
+
+ gpio0: gpio@1f044000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x1f044000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_sys CCU_SYS_GPIO_CLK>;
+ clock-names = "bus", "db";
+
+ status = "disabled";
+
+ port0: gpio-port@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ reg = <0>;
+
+ interrupts = <GIC_SHARED 19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <32>;
+ };
+ };
+
+ gpio1: gpio@1f045000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x1f045000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_sys CCU_SYS_GPIO_CLK>;
+ clock-names = "bus", "db";
+
+ status = "disabled";
+
+ port1: gpio-port@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ reg = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <3>;
+ };
+ };
+
+ i2c1: i2c@1f046000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x1f046000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <GIC_SHARED 33 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_I2C1_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "ref", "pclk";
+ clock-frequency = <400000>;
+
+ dmas = <&dma 4 0 1 0xff>, <&dma 5 0 1 0xff>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ i2c2: i2c@1f047000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x1f047000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <GIC_SHARED 34 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_I2C2_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "ref", "pclk";
+ clock-frequency = <400000>;
+
+ dmas = <&dma 6 0 1 0xff>, <&dma 7 0 1 0xff>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ timer_dw0: timer@1f049000 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x1f049000 0x14>;
+
+ interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_TIMER0_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "timer", "pclk";
+
+ status = "disabled";
+ };
+
+ timer_dw1: timer@1f049014 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x1f049014 0x14>;
+
+ interrupts = <GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_TIMER1_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "timer", "pclk";
+
+ status = "disabled";
+ };
+
+ timer_dw2: timer@1f049028 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x1f049028 0x14>;
+
+ interrupts = <GIC_SHARED 26 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_TIMER2_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "timer", "pclk";
+
+ status = "disabled";
+ };
+
+ uart0: serial@1f04a000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x1f04a000 0x1000>;
+
+ interrupts = <GIC_SHARED 48 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_UART_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "baudclk", "apb_pclk";
+
+ dmas = <&dma 0 0 1 0xff>, <&dma 1 0 1 0xff>;
+ dma-names = "tx", "rx";
+
+ dcd-override;
+ dsr-override;
+ cts-override;
+ ri-override;
+
+ /* earlycon settings. */
+ reg-io-width = <4>;
+ reg-shift = <2>;
+
+ status = "disabled";
+ };
+
+ uart1: serial@1f04b000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x1f04b000 0x1000>;
+
+ interrupts = <GIC_SHARED 49 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_UART_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "baudclk", "apb_pclk";
+
+ dmas = <&dma 2 0 1 0xff>, <&dma 3 0 1 0xff>;
+ dma-names = "tx", "rx";
+
+ /* earlycon settings. */
+ reg-io-width = <4>;
+ reg-shift = <2>;
+
+ status = "disabled";
+ };
+
+ wdt: watchdog@1f04c000 {
+ compatible = "snps,dw-wdt";
+ reg = <0x1f04c000 0x1000>;
+
+ interrupts = <GIC_SHARED 17 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_WDT_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "tclk", "pclk";
+
+ /* Adjust ref-clock rate for better TOPs granularity */
+ assigned-clocks = <&ccu_sys CCU_SYS_WDT_CLK>;
+ assigned-clock-rates = <65534>;
+
+ snps,watchdog-tops = <0x000000ff 0x000001ff 0x000003ff
+ 0x000007ff 0x0000ffff 0x0001ffff
+ 0x0003ffff 0x0007ffff 0x000fffff
+ 0x001fffff 0x003fffff 0x007fffff
+ 0x00ffffff 0x01ffffff 0x03ffffff
+ 0x07ffffff>;
+
+ status = "disabled";
+ };
+
+		/*
+		 * It's highly recommended to use all the DW APB SSI
+		 * controllers with GPIO-based CS, due to the native CS being
+		 * automatically asserted/de-asserted on transmissions. Such a
+		 * HW design isn't that suitable for the kernel SPI subsystem,
+		 * so GPIO-based CS will help prevent very nasty, hard-to-fix
+		 * errors.
+		 */
+ spi1: spi@1f04e000 {
+ compatible = "baikal,bt1-ssi";
+ reg = <0x1f04e000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <GIC_SHARED 40 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "ssi_clk";
+
+			/*
+			 * Make sure the Rx DMA channels have a higher
+			 * priority. Note also that the first two DW DMAC
+			 * channels aren't suitable for well-balanced Tx and
+			 * Rx SPI transfers.
+			 */
+ dmas = <&dma 8 0 1 0xe0>, <&dma 9 0 1 0x1c>;
+ dma-names = "tx", "rx";
+
+ reg-io-width = <4>;
+
+ status = "disabled";
+ };
+
+ spi2: spi@1f04f000 {
+ compatible = "baikal,bt1-ssi";
+ reg = <0x1f04f000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <GIC_SHARED 41 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "ssi_clk";
+
+			/*
+			 * Make sure the Rx DMA channels have a higher
+			 * priority. Note also that the first two DW DMAC
+			 * channels aren't suitable for well-balanced Tx and
+			 * Rx SPI transfers.
+			 */
+ dmas = <&dma 10 0 1 0xe0>, <&dma 11 0 1 0x1c>;
+ dma-names = "tx", "rx";
+
+ reg-io-width = <4>;
+
+ status = "disabled";
+ };
+
+ pvt: temperature-sensor@1f200000 {
+ compatible = "baikal,bt1-pvt";
+ reg = <0x1f200000 0x1000>;
+ #thermal-sensor-cells = <0>;
+
+ interrupts = <GIC_SHARED 31 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_PVT_CLK>,
+ <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "ref", "pclk";
+
+ status = "disabled";
+ };
+
+ efuse: efuse@1f201000 {
+ compatible = "baikal,bt1-efuse";
+ reg = <0x1f201000 0x1000>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "pclk";
+
+ status = "disabled";
+ };
+ };
+
+ axi: bus@1f05a000 {
+ compatible = "baikal,bt1-axi", "simple-bus";
+ reg = <0 0x1f05a000 0 0x1000>,
+ <0 0x1f04d110 0 0x8>;
+ reg-names = "qos", "ehb";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ #interconnect-cells = <1>;
+
+		/*
+		 * The CPU can reach the AXI-accessible devices via the
+		 * following MMIO ranges.
+		 */
+ ranges = <0 0x08000000 0 0x08000000 0 0x13dc0000>,
+ <0 0x1bf80000 0 0x1bf80000 0 0x00040000>,
+ <0 0x1bfc0000 0 0x1bfc0000 0 0x03c38000>;
+
+		/*
+		 * Not all AXI-bus DMA-capable devices can reach any address
+		 * in the physical memory space. SATA/USB/GMACx are limited to
+		 * work with the lowest 4GB of memory. Here we set the normal
+		 * DMA ranges mapping, while device-specific dma-ranges or the
+		 * device driver software must make sure the devices are
+		 * restricted to working within the permitted memory range.
+		 */
+ dma-ranges = <0 0 0 0 0x100 0>;
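+		/* (An identity mapping of the whole 1TiB AXI address space.) */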
+
+ interrupts = <GIC_SHARED 127 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_axi CCU_AXI_MAIN_CLK>;
+ clock-names = "aclk";
+
+ resets = <&ccu_axi CCU_AXI_MAIN_RST>;
+ reset-names = "arst";
+
+ syscon = <&syscon>;
+
+		/*
+		 * Note the (dma-)ranges mapping must be 64K-aligned due to
+		 * the iATU constraints (the lowest 16 bits aren't writable).
+		 * Also note that we have to split the MEM range in two so
+		 * that one of them is 256MB-aligned, as some of the PCIe
+		 * peripherals require. It can be done since the AXI
+		 * interconnect doesn't permit the PCIe master to access the
+		 * MMIO range anyway, so we can freely use the memory range
+		 * above 0x1bfc0000 locally within the PCIe space.
+		 */
+ pcie: pcie@1f052000 {
+ compatible = "baikal,bt1-pcie";
+ device_type = "pci";
+ reg = <0 0x1f052000 0 0x1000>,
+ <0 0x1f053000 0 0x1000>,
+ <0 0x1bdb0000 0 0x10000>;
+ reg-names = "dbi", "dbi2", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x82000000 0 0x08000000 0 0x08000000 0 0x03da0000>, /* mem */
+ <0x82000000 0 0x10000000 0 0x0bda0000 0 0x10000000>, /* mem */
+ <0x81000000 0 0x0bda0000 0 0x1bda0000 0 0x00010000>; /* io */
+ bus-range = <0x0 0xff>;
+
+ interrupts = <GIC_SHARED 80 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 81 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 82 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 83 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 84 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 89 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 90 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 91 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 92 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 93 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma0", "dma1", "dma2", "dma3",
+ "dma4", "dma5", "dma6", "dma7",
+ "msi", "aer", "pme", "hp", "bw_mg",
+ "l_eq";
+
+			/*
+			 * Note 1. An external reference clock source is
+			 * required for the interface to work (name "ref").
+			 * Note 2. The PCIe AXI M and S clocks and resets are
+			 * defined from the system interconnect point of view,
+			 * so the AXI master and slave interfaces are actually
+			 * connected to the DW PCIe RC AXI Slave and Master
+			 * ports respectively.
+			 */
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_PCIE_S_CLK>,
+ <&ccu_axi CCU_AXI_PCIE_M_CLK>;
+ clock-names = "dbi", "mstr", "slv";
+
+ resets = <&ccu_axi CCU_AXI_PCIE_S_RST>,
+ <&ccu_axi CCU_AXI_PCIE_M_RST>,
+ <&ccu_sys CCU_SYS_PCIE_PWR_RST>,
+ <&ccu_sys CCU_SYS_PCIE_HOT_RST>,
+ <&ccu_sys CCU_SYS_PCIE_PCS_PHY_RST>,
+ <&ccu_sys CCU_SYS_PCIE_CORE_RST>,
+ <&ccu_sys CCU_SYS_PCIE_PIPE0_RST>,
+ <&ccu_sys CCU_SYS_PCIE_STICKY_RST>,
+ <&ccu_sys CCU_SYS_PCIE_NSTICKY_RST>;
+ reset-names = "mstr", "slv", "pwr", "hot", "phy",
+ "core", "pipe", "sticky", "non-sticky";
+
+ baikal,bt1-syscon = <&syscon>;
+
+ num-lanes = <4>;
+ max-link-speed = <3>;
+
+ status = "disabled";
+ };
+
+ sram: sram-controller@1bf80000 {
+ compatible = "baikal,bt1-sram", "mmio-sram";
+ reg = <0 0x1bf80000 0 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x1bf80000 0x10000>;
+
+ clocks = <&ccu_axi CCU_AXI_SRAM_CLK>;
+ clock-names = "aclk";
+
+ resets = <&ccu_axi CCU_AXI_SRAM_RST>;
+ reset-names = "arst";
+
+ mux-controls = <&boot_mux 1>;
+
+ status = "disabled";
+
+ boot-sram@0 {
+ compatible = "baikal,bt1-boot-sram";
+ reg = <0 0x10000>;
+				label = "Internal SRAM";
+ export;
+ };
+ };
+
+ dma: dma-controller@1f041000 {
+ compatible = "baikal,bt1-dmac", "snps,dma-spear1340";
+ reg = <0 0x1f041000 0 0x1000>;
+ #dma-cells = <4>;
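+
+			/*
+			 * The four dma-cells encode the request line id, the
+			 * source and destination master interface ids and
+			 * (judging by the consumers above, e.g. the spi1
+			 * node's dmas = <&dma 8 0 1 0xe0>) a candidate
+			 * channel mask.
+			 */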
+
+ interrupts = <GIC_SHARED 56 IRQ_TYPE_LEVEL_HIGH>;
+
+ /* Clock rate up to 200MHz */
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>;
+ clock-names = "hclk";
+
+ dma-channels = <8>;
+ dma-requests = <12>;
+ dma-masters = <2>;
+
+ chan_allocation_order = <0>;
+ chan_priority = <0>;
+ block_size = <4095>;
+ data-width = <16 4>;
+ multi-block = <0 0 0 0 0 0 0 0>;
+ snps,max-burst-len = <16 16 4 4 4 4 4 4>;
+
+ status = "disabled";
+ };
+
+ mc: memory-controller@1f042000 {
+ compatible = "baikal,bt1-ddrc";
+ reg = <0 0x1f042000 0 0x1000>;
+
+ interrupts = <GIC_SHARED 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 99 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dfi_e", "ecc_ce", "ecc_ue", "ecc_sbr";
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_DDR_CLK>,
+ <&ccu_pll CCU_DDR_PLL>;
+ clock-names = "pclk", "aclk", "core";
+
+ resets = <&ccu_axi CCU_AXI_DDR_RST>,
+ <&ccu_sys CCU_SYS_DDR_INIT_RST>;
+ reset-names = "arst", "core";
+
+ status = "disabled";
+ };
+
+ mc_phy: memory-controller-phy@1f043000 {
+ compatible = "baikal,bt1-ddrc-phy";
+ reg = <0 0x1f043000 0 0x1000>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_pll CCU_DDR_PLL>;
+ clock-names = "pclk", "ddr";
+
+ status = "disabled";
+ };
+
+		/*
+		 * The DWC AHCI SATA controller has been configured with a
+		 * 32-bit AMBA master address bus width. Make sure any buffer
+		 * allocated above that limit is bounced down to the permitted
+		 * memory space before being passed to the device.
+		 */
+ sata: sata@1f050000 {
+ compatible = "baikal,bt1-ahci";
+ reg = <0 0x1f050000 0 0x2000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <GIC_SHARED 64 IRQ_TYPE_LEVEL_HIGH>;
+
+ /* Using an external 100MHz clock source is preferable */
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_SATA_CLK>,
+ <&ccu_sys CCU_SYS_SATA_REF_CLK>;
+ clock-names = "pclk", "aclk", "ref";
+
+ resets = <&ccu_axi CCU_AXI_SATA_RST>,
+ <&ccu_sys CCU_SYS_SATA_REF_RST>;
+ reset-names = "arst", "ref";
+
+ ports-implemented = <0x3>;
+
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+
+ snps,tx-ts-max = <16>;
+ snps,rx-ts-max = <16>;
+
+ status = "disabled";
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+
+ snps,tx-ts-max = <16>;
+ snps,rx-ts-max = <16>;
+
+ status = "disabled";
+ };
+ };
+
+		/*
+		 * Replace the compatible string with "baikal,bt1-xgmac", drop
+		 * the XPCS reg space and enable the xgmac_mi node should you
+		 * need to have the XGMAC device handled by the STMMAC driver.
+		 */
+ xgmac: ethernet@1f054000 {
+ compatible = "amd,bt1-xgmac";
+ reg = <0 0x1f054000 0 0x4000>,
+ <0 0x1f05d000 0 0x1000>;
+ reg-names = "stmmaceth", "xpcs";
+
+ interrupts = <GIC_SHARED 74 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 75 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 78 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "dma_tx0", "dma_tx1",
+ "dma_rx0", "dma_rx1";
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_XGMAC_CLK>,
+ <&ccu_sys CCU_SYS_XGMAC_REF_CLK>,
+ <&ccu_sys CCU_SYS_XGMAC_PTP_CLK>;
+ clock-names = "pclk", "stmmaceth", "tx", "ptp_ref";
+
+ resets = <&ccu_axi CCU_AXI_XGMAC_RST>;
+ reset-names = "stmmaceth";
+
+ pcs-handle = <&xgmac_pcs>;
+
+ /*
+ * xgmii - auto-negotiate 10GBASE-KX4/KR protocols
+ * (set the managed property to "in-band-status")
+ * 10gbase-r - select 10GBASE-R protocol
+ * 10gbase-x/xaui - select 10GBASE-X protocol
+ * (set the managed property to "auto")
+ */
+ phy-mode = "xgmii";
+
+ managed = "in-band-status";
+
+ rx-fifo-depth = <32768>;
+ tx-fifo-depth = <32768>;
+
+			/*
+			 * The actual burst length will be (32 * 8 * 16) bytes
+			 * due to the absence of the snps,no-pbl-x8 property
+			 * and the AXI bus data width of 128 bits.
+			 */
+ snps,pbl = <32>;
+ snps,data-width = <16>;
+
+ /* Enable TSO for all DMA channels */
+ snps,tso;
+
+ snps,perfect-filter-entries = <8>;
+ snps,multicast-filter-bins = <64>;
+ local-mac-address = [ 00 20 13 ba 1c a1 ];
+
+ status = "disabled";
+
+ axi-bus-config {
+ snps,wr_osr_lmt = <0x7>;
+ snps,rd_osr_lmt = <0x7>;
+ /* It's AXI3 bus so up to 16 xfers */
+ snps,blen = <0 0 0 0 16 8 4>;
+ };
+ };
+
+ hwa: hwa@1f05b000 {
+ compatible = "baikal,bt1-hwa";
+ reg = <0 0x1f05b000 0 0x1000>,
+ <0 0x1f05c000 0 0x1000>;
+ reg-names = "core", "dma";
+
+ interrupts = <GIC_SHARED 104 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_HWA_CLK>,
+ <&ccu_sys CCU_SYS_HWA_CLK>;
+ clock-names = "pclk", "aclk", "ref";
+
+ resets = <&ccu_axi CCU_AXI_HWA_RST>;
+ reset-names = "arst";
+
+ status = "disabled";
+ };
+
+ xgmac_mi: mdio@1f05d000 {
+ compatible = "snps,dw-xpcs-mi";
+ reg = <0 0x1f05d000 0 0x1000>;
+ reg-names = "indirect";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg-io-width = <4>;
+
+ status = "disabled";
+
+ xgmac_pcs: ethernet-pcs@0 {
+ compatible = "baikal,bt1-xpcs";
+ reg = <0>;
+
+ interrupts = <GIC_SHARED 79 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys CCU_SYS_XGMAC_REF_CLK>;
+ clock-names = "core";
+ };
+ };
+
+ gmac0: ethernet@1f05e000 {
+ compatible = "baikal,bt1-gmac";
+ reg = <0 0x1f05e000 0 0x2000>;
+ #address-cells = <1>;
+ #size-cells = <2>;
+ dma-ranges = <0 0 0 0x1 0>;
+
+ interrupts = <GIC_SHARED 72 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_GMAC0_CLK>,
+ <&ccu_sys CCU_SYS_GMAC0_TX_CLK>,
+ <&ccu_sys CCU_SYS_GMAC0_PTP_CLK>;
+ clock-names = "pclk", "stmmaceth", "tx", "ptp_ref";
+
+ resets = <&ccu_axi CCU_AXI_GMAC0_RST>;
+ reset-names = "stmmaceth";
+
+ /* DW GMAC is configured to export 1xGPI and 1xGPO */
+ ngpios = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+			/*
+			 * The MAC always adds a 2ns delay of TXC with respect
+			 * to TXD, so let the PHY add some RXC delay if it's
+			 * applicable.
+			 */
+ phy-mode = "rgmii-rxid";
+ tx-internal-delay-ps = <2000>;
+
+ rx-fifo-depth = <16384>;
+ tx-fifo-depth = <16384>;
+
+			/*
+			 * The actual burst length will be (32 * 8 * 16) bytes
+			 * due to the absence of the snps,no-pbl-x8 property
+			 * and the AXI bus data width of 128 bits.
+			 */
+ snps,pbl = <32>;
+ snps,data-width = <16>;
+
+ snps,perfect-filter-entries = <8>;
+ snps,multicast-filter-bins = <0>;
+			local-mac-address = [ 7a 72 6c 4a 7a 07 ];
+
+ status = "disabled";
+
+ axi-bus-config {
+ snps,wr_osr_lmt = <0x3>;
+ snps,rd_osr_lmt = <0x3>;
+ snps,blen = <0 0 0 0 16 8 4>;
+ };
+
+ gmac0_mdio: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ gmac1: ethernet@1f060000 {
+ compatible = "baikal,bt1-gmac";
+ reg = <0 0x1f060000 0 0x2000>;
+ #address-cells = <1>;
+ #size-cells = <2>;
+ dma-ranges = <0 0 0 0x1 0>;
+
+ interrupts = <GIC_SHARED 73 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_GMAC1_CLK>,
+ <&ccu_sys CCU_SYS_GMAC1_TX_CLK>,
+ <&ccu_sys CCU_SYS_GMAC1_PTP_CLK>;
+ clock-names = "pclk", "stmmaceth", "tx", "ptp_ref";
+
+ resets = <&ccu_axi CCU_AXI_GMAC1_RST>;
+ reset-names = "stmmaceth";
+
+ /* DW GMAC is configured to export 1xGPI and 1xGPO */
+ ngpios = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+			/*
+			 * The MAC always adds a 2ns delay of TXC with respect
+			 * to TXD, so let the PHY add some RXC delay if it's
+			 * applicable.
+			 */
+ phy-mode = "rgmii-rxid";
+ tx-internal-delay-ps = <2000>;
+
+ rx-fifo-depth = <16384>;
+ tx-fifo-depth = <16384>;
+
+			/*
+			 * The actual burst length will be (32 * 8 * 16) bytes
+			 * due to the absence of the snps,no-pbl-x8 property
+			 * and the AXI bus data width of 128 bits.
+			 */
+ snps,pbl = <32>;
+ snps,data-width = <16>;
+
+ snps,perfect-filter-entries = <8>;
+ snps,multicast-filter-bins = <0>;
+			local-mac-address = [ 7a 72 6c 4a 7b 07 ];
+
+ status = "disabled";
+
+ axi-bus-config {
+ snps,wr_osr_lmt = <0x3>;
+ snps,rd_osr_lmt = <0x3>;
+ snps,blen = <0 0 0 0 16 8 4>;
+ };
+
+ gmac1_mdio: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ usb: usb@1f100000 {
+ compatible = "baikal,bt1-usb3", "snps,dwc3";
+ reg = <0 0x1f100000 0 0x100000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <GIC_SHARED 68 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host";
+
+ clocks = <&ccu_sys CCU_SYS_APB_CLK>,
+ <&ccu_axi CCU_AXI_USB_CLK>,
+ <&ccu_sys CCU_SYS_USB_CLK>;
+ clock-names = "pclk", "bus_early", "ref";
+
+ resets = <&ccu_axi CCU_AXI_USB_RST>;
+ reset-names = "arst";
+
+ dr_mode = "host";
+ phy_type = "ulpi";
+ maximum-speed = "high-speed";
+
+ snps,incr-burst-type-adjustment = <1 4 8 16>;
+
+ status = "disabled";
+ };
+ };
+};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal Electronics KR SFP+ Mezzanine Card device tree
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/*
+ * TODO Convert this file to be a subordinate of a nexus node/connector when the
+ * kernel gets ready for it:
+ * 3: I2C SCL
+ * 5: I2C SDA
+ * 7: SFP Mod ABS
+ * 9: SFP Tx Disable
+ * 11: SFP Tx Fault
+ * 12: SFP Rx Los
+ * 17,18,19,20: TxN3,RxN3,TxP3,RxP3 - Lane 3 10GBASE-X
+ * 23,24,25,26: TxN2,RxN2,TxP2,RxP2 - Lane 2 10GBASE-X
+ * 29,30,31,32: TxN1,RxN1,TxP1,RxP1 - Lane 1 10GBASE-X
+ * 35,36,37,38: TxN0,RxN0,TxP0,RxP0 - Lane 0 10GBASE-X/10GBASE-R
+ */
+
+/ {
+ xgmac_sfp: sfp {
+ compatible = "sff,sfp";
+
+ i2c-bus = <&i2c1>;
+
+ los-gpios = <&port0 27 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&port0 11 GPIO_ACTIVE_LOW>;
+ tx-disable-gpios = <&port0 10 GPIO_ACTIVE_HIGH>;
+ tx-fault-gpios = <&port0 11 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&xgmac_pcs {
+ sfp = <&xgmac_sfp>;
+};
+
+&xgmac {
+ phy-mode = "10gbase-r";
+};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal Electronics KR/KX4 Marvell 88x2222 SFP+ Mezzanine Card device tree
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/net/mv-phy-88x2222.h>
+
+/*
+ * TODO Convert this file to be a subordinate of a nexus node/connector when the
+ * kernel gets ready for it:
+ * 3: I2C SCL
+ * 5: I2C SDA
+ * 7: MDI/MDIO
+ * 9: MDO
+ * 11: MDC
+ * 12: PHY INT
+ * 17,18,19,20: TxN3,RxN3,TxP3,RxP3 - Lane 3 10GBASE-X
+ * 23,24,25,26: TxN2,RxN2,TxP2,RxP2 - Lane 2 10GBASE-X
+ * 29,30,31,32: TxN1,RxN1,TxP1,RxP1 - Lane 1 10GBASE-X
+ * 35,36,37,38: TxN0,RxN0,TxP0,RxP0 - Lane 0 10GBASE-X/10GBASE-R
+ */
+
+/ {
+ aliases {
+ mdio-gpio0 = &xgmac_mdio;
+ };
+
+ xgmac_mdio: mdio {
+ compatible = "virtual,mdio-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* PORT0.9 - MDC, PORT0.10 - MDO, PORT0.11 - MDI/MDIO */
+ gpios = <&port0 9 GPIO_ACTIVE_HIGH>, <&port0 11 GPIO_ACTIVE_HIGH>,
+ <&port0 10 GPIO_ACTIVE_HIGH>;
+
+ reset-gpios = <&xgmac_gpio 0 GPIO_ACTIVE_HIGH>;
+ reset-delay-us = <10000>;
+ reset-post-delay-us = <10000>;
+
+ xgmac_phy: ethernet-phy@c {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0x0c>;
+
+ interrupt-parent = <&port0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-controller; /* 12 */
+ #gpio-cells = <2>;
+ gpio-reserved-ranges = <MV_88X2222_LED0 1>,
+ <MV_88X2222_LED1 1>,
+ <MV_88X2222_SDA 1>,
+ <MV_88X2222_SCL 1>;
+
+ sfp = <&xgmac_sfp>;
+ };
+ };
+
+ xgmac_sfp: sfp {
+ compatible = "sff,sfp";
+
+ i2c-bus = <&xgmac_phy>;
+
+ mod-def0-gpios = <&xgmac_phy MV_88X2222_MOD_ABS GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&xgmac_phy MV_88X2222_TX_FAULT GPIO_ACTIVE_HIGH>;
+ los-gpios = <&xgmac_phy MV_88X2222_RX_LOS GPIO_ACTIVE_HIGH>;
+ tx-disable-gpios = <&xgmac_phy MV_88X2222_TX_DISABLE GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&xgmac {
+ /* Possible protocols: 10gbase-r, 10gbase-x/xaui */
+ phy-mode = "10gbase-r";
+
+ managed = "auto";
+
+ phy-handle = <&xgmac_phy>;
+};
+
+&i2c1 {
+ /* Marvell PHY Reset-controller (NXP PCA9500 8-bit GPIO) */
+ xgmac_gpio: gpio@20 {
+ compatible = "nxp,pcf8574";
+ reg = <0x20>;
+
+ gpio-controller; /* 8 */
+ #gpio-cells = <2>;
+
+ /* nc - not connected */
+ gpio-line-names = "RST_PHY", "nc", "nc", "nc",
+ "nc", "nc", "nc", "nc";
+ };
+
+ /* Mezzanine card firmware (NXP PCA9500 2-kbit EEPROM) */
+ xgmac_fw: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+
+ pagesize = <4>;
+ };
+};
--- /dev/null
+CONFIG_LOCALVERSION="-bt1"
+CONFIG_DEFAULT_HOSTNAME="baikal"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_MIPS_BAIKAL_T1=y
+CONFIG_BT1_CPU_FEATURE_OVERRIDES=y
+CONFIG_BT1_DTB_ALL=y
+CONFIG_CPU_P5600=y
+CONFIG_CPU_MIPS32_R5_FEATURES=y
+CONFIG_CPU_MIPS32_R5_XPA=y
+CONFIG_ZBOOT_LOAD_ADDRESS=0x85100000
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_ARCH_FORCE_MAX_ORDER=11
+CONFIG_CPU_HAS_MSA=y
+CONFIG_DMI=y
+CONFIG_NR_CPUS=2
+CONFIG_IEEE754_DEFAULT_RELAXED=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_SLAB=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_RAW_DIAG=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_LOG=y
+CONFIG_NFT_LIMIT=y
+CONFIG_NFT_MASQ=y
+CONFIG_NFT_REDIR=y
+CONFIG_NFT_NAT=y
+CONFIG_NFT_REJECT=y
+CONFIG_NFT_HASH=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CGROUP=y
+CONFIG_NETFILTER_XT_MATCH_CPU=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_RECENT=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_NFT_DUP_IPV4=y
+CONFIG_NF_TABLES_ARP=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NFT_DUP_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBS=y
+CONFIG_NET_SCH_ETF=y
+CONFIG_NET_SCH_TAPRIO=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOWER=y
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
+# CONFIG_WIRELESS is not set
+CONFIG_PAGE_POOL_STATS=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEAER_INJECT=y
+CONFIG_PCIE_ECRC=y
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_BUS_PERFORMANCE=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_PCIE_BT1=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEBUG_DRIVER=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_BT1_APB=y
+CONFIG_BT1_AXI=y
+CONFIG_MIPS_CDMM=y
+CONFIG_DMI_SYSFS=y
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PHYSMAP_BT1_ROM=y
+CONFIG_MTD_SPI_NOR=y
+# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=4
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_VERBOSE_ERRORS=y
+CONFIG_NVME_HWMON=y
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT24=y
+CONFIG_RAID_ATTRS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_DWC=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_AMD_XGBE=y
+CONFIG_AMD_XGBE_DCB=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_ENGLEDER is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_ADI is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_SELFTESTS=y
+CONFIG_DWMAC_BT1=y
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+CONFIG_TEHUTI=y
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VERTEXCOM is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WANGXUN is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_SFP=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MARVELL_10G_PHY=y
+CONFIG_MARVELL_88X2222_PHY=y
+CONFIG_MARVELL_88X2222_GPIO=y
+CONFIG_MARVELL_88X2222_I2C=y
+CONFIG_MICREL_PHY=y
+CONFIG_MICROCHIP_PHY=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_GPIO=y
+CONFIG_MDIO_DW_XPCS=y
+# CONFIG_USB_NET_DRIVERS is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT_CONSOLE is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_PERICOM is not set
+CONFIG_MIPS_EJTAG_FDC_TTY=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_SLAVE=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_SLAVE_EEPROM=y
+CONFIG_SPI=y
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_DMA=y
+CONFIG_SPI_DW_MMIO=y
+CONFIG_SPI_DW_BT1=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCF857X=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_SENSORS_BT1_PVT=y
+CONFIG_SENSORS_BT1_PVT_ALARMS=y
+CONFIG_SENSORS_TMP102=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
+CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y
+CONFIG_DW_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_PCI is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_MIPS is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_USB_ULPI_BUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_ULPI=y
+CONFIG_USB_HUB_USB251XB=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_EDAC=y
+CONFIG_EDAC_DEBUG=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ABEOZ9=y
+CONFIG_RTC_DRV_PCF85363=y
+CONFIG_RTC_DRV_PCF2127=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_DW_EDMA=y
+CONFIG_SYNC_FILE=y
+# CONFIG_VIRTIO_MENU is not set
+CONFIG_COMMON_CLK_VC5=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_BT1_L2_CTL=y
+CONFIG_NVMEM_U_BOOT_ENV=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_WBUF_VERIFY=y
+CONFIG_UBIFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_UTF8=y
+CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf"
+CONFIG_CRYPTO_USER=y
+CONFIG_CRYPTO_BLOWFISH=y
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FORCE_NR_CPUS=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
--- /dev/null
+CONFIG_LOCALVERSION="-bt1"
+CONFIG_DEFAULT_HOSTNAME="baikal"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_MIPS_BAIKAL_T1=y
+CONFIG_BT1_CPU_FEATURE_OVERRIDES=y
+CONFIG_BT1_DTB_BFK=y
+CONFIG_CPU_P5600=y
+CONFIG_CPU_MIPS32_R5_FEATURES=y
+CONFIG_CPU_MIPS32_R5_XPA=y
+CONFIG_ZBOOT_LOAD_ADDRESS=0x85100000
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_ARCH_FORCE_MAX_ORDER=11
+CONFIG_CPU_HAS_MSA=y
+CONFIG_DMI=y
+CONFIG_NR_CPUS=2
+CONFIG_IEEE754_DEFAULT_RELAXED=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_SLAB=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_RAW_DIAG=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_LOG=y
+CONFIG_NFT_LIMIT=y
+CONFIG_NFT_MASQ=y
+CONFIG_NFT_REDIR=y
+CONFIG_NFT_NAT=y
+CONFIG_NFT_REJECT=y
+CONFIG_NFT_HASH=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CGROUP=y
+CONFIG_NETFILTER_XT_MATCH_CPU=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_RECENT=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_NFT_DUP_IPV4=y
+CONFIG_NF_TABLES_ARP=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NFT_DUP_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBS=y
+CONFIG_NET_SCH_ETF=y
+CONFIG_NET_SCH_TAPRIO=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOWER=y
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
+# CONFIG_WIRELESS is not set
+CONFIG_PAGE_POOL_STATS=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEAER_INJECT=y
+CONFIG_PCIE_ECRC=y
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_BUS_PERFORMANCE=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_PCIE_BT1=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEBUG_DRIVER=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_BT1_APB=y
+CONFIG_BT1_AXI=y
+CONFIG_MIPS_CDMM=y
+CONFIG_DMI_SYSFS=y
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PHYSMAP_BT1_ROM=y
+CONFIG_MTD_SPI_NOR=y
+# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=4
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_VERBOSE_ERRORS=y
+CONFIG_NVME_HWMON=y
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT24=y
+CONFIG_RAID_ATTRS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_DWC=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_AMD_XGBE=y
+CONFIG_AMD_XGBE_DCB=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_ENGLEDER is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_ADI is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_SELFTESTS=y
+CONFIG_DWMAC_BT1=y
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+CONFIG_TEHUTI=y
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VERTEXCOM is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WANGXUN is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_SFP=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MARVELL_10G_PHY=y
+CONFIG_MARVELL_88X2222_PHY=y
+CONFIG_MARVELL_88X2222_GPIO=y
+CONFIG_MARVELL_88X2222_I2C=y
+CONFIG_MICREL_PHY=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_GPIO=y
+CONFIG_MDIO_DW_XPCS=y
+# CONFIG_USB_NET_DRIVERS is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT_CONSOLE is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_PERICOM is not set
+CONFIG_MIPS_EJTAG_FDC_TTY=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_SLAVE=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_SLAVE_EEPROM=y
+CONFIG_SPI=y
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_DMA=y
+CONFIG_SPI_DW_MMIO=y
+CONFIG_SPI_DW_BT1=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCF857X=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_SENSORS_BT1_PVT=y
+CONFIG_SENSORS_BT1_PVT_ALARMS=y
+CONFIG_SENSORS_TMP102=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
+CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y
+CONFIG_DW_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB_ULPI_BUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_ULPI=y
+CONFIG_EDAC=y
+CONFIG_EDAC_DEBUG=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ABEOZ9=y
+CONFIG_RTC_DRV_PCF85363=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_DW_EDMA=y
+CONFIG_SYNC_FILE=y
+# CONFIG_VIRTIO_MENU is not set
+CONFIG_COMMON_CLK_VC5=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_BT1_L2_CTL=y
+CONFIG_NVMEM_U_BOOT_ENV=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_WBUF_VERIFY=y
+CONFIG_UBIFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_UTF8=y
+CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf"
+CONFIG_CRYPTO_USER=y
+CONFIG_CRYPTO_BLOWFISH=y
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FORCE_NR_CPUS=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
*/
u32 loongson3_cpucfg_data[3];
#endif
-} __attribute__((aligned(SMP_CACHE_BYTES)));
+} __aligned(SMP_CACHE_BYTES) __randomize_layout;
extern struct cpuinfo_mips cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#include <linux/io.h>
#include <linux/memblock.h>
-#define dmi_early_remap(x, l) ioremap_cache(x, l)
+#define dmi_early_remap(x, l) ioremap_uc(x, l)
#define dmi_early_unmap(x, l) iounmap(x)
#define dmi_remap(x, l) ioremap_cache(x, l)
#define dmi_unmap(x) iounmap(x)
#if defined(CONFIG_MACH_LOONGSON64)
#define SMBIOS_ENTRY_POINT_SCAN_START 0xFFFE000
+#elif defined(CONFIG_MIPS_BAIKAL_T1)
+#define SMBIOS_ENTRY_POINT_SCAN_START 0x0
#endif
#endif /* _ASM_DMI_H */
#define war_io_reorder_wmb() barrier()
#endif
-#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \
+#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, relax, irq) \
\
static inline void pfx##write##bwlq(type val, \
volatile void __iomem *mem) \
volatile type *__mem; \
type __val; \
\
- if (barrier) \
+ if (!(relax && IS_ENABLED(CONFIG_STRONG_UC_ORDERING))) \
iobarrier_rw(); \
else \
war_io_reorder_wmb(); \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
- if (barrier) \
+ if (!(relax && IS_ENABLED(CONFIG_STRONG_UC_ORDERING))) \
iobarrier_rw(); \
\
if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
return pfx##ioswab##bwlq(__mem, __val); \
}
-#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p) \
+#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, relax, p) \
\
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
volatile type *__addr; \
type __val; \
\
- if (barrier) \
+ if (!(relax && IS_ENABLED(CONFIG_STRONG_UC_ORDERING))) \
iobarrier_rw(); \
else \
war_io_reorder_wmb(); \
\
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
- if (barrier) \
+ if (!(relax && IS_ENABLED(CONFIG_STRONG_UC_ORDERING))) \
iobarrier_rw(); \
\
__val = *__addr; \
#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \
\
-__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)
+__BUILD_MEMORY_SINGLE(bus, bwlq, type, relax, 1)
#define BUILDIO_MEM(bwlq, type) \
\
#endif
#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
- __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \
- __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)
+ __BUILD_IOPORT_SINGLE(bus, bwlq, type, 0,) \
+ __BUILD_IOPORT_SINGLE(bus, bwlq, type, 0, _p)
#define BUILDIO_IOPORT(bwlq, type) \
__BUILD_IOPORT_PFX(, bwlq, type) \
#define __BUILDIO(bwlq, type) \
\
-__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)
+__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0, 0)
__BUILDIO(q, u64)
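A hedged sketch of the net effect of the barrier rework above (accessor
names as generated by the macros; the expansion is abridged): with
CONFIG_STRONG_UC_ORDERING enabled the relaxed MMIO accessors skip the
iobarrier_rw() calls, relying on the hardware to keep uncached accesses
ordered, while the ordinary accessors behave as before:

	u32 v = readl_relaxed(addr);	/* relax = 1: no iobarrier_rw() */
	u32 w = readl(addr);		/* relax = 0: iobarrier_rw() kept */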
int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
int (*vcpu_run)(struct kvm_vcpu *vcpu);
void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
-};
+} __no_randomize_layout;
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 core features override
+ */
+#ifndef __ASM_MACH_BAIKAL_T1_CPU_FEATURE_OVERRIDES_H__
+#define __ASM_MACH_BAIKAL_T1_CPU_FEATURE_OVERRIDES_H__
+
+#ifdef CONFIG_BT1_CPU_FEATURE_OVERRIDES
+
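+/*
+ * Pinning the feature flags below to compile-time constants lets the
+ * compiler drop the corresponding runtime cpu_data[] checks entirely,
+ * e.g. "if (cpu_has_eva)" folds to an unconditional branch.
+ */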
+#define cpu_has_tlb 1
+/* Don't override the FTLB flag, otherwise the 'noftlb' option won't work. */
+/* #define cpu_has_ftlb 1 */
+#define cpu_has_tlbinv 1
+#define cpu_has_segments 1
+#define cpu_has_eva 1
+#define cpu_has_htw 1
+#define cpu_has_ldpte 0
+#define cpu_has_rixiex 1
+#define cpu_has_maar 1
+#define cpu_has_rw_llb 1
+
+#define cpu_has_3kex 0
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_octeon_cache 0
+
+/* Don't override the FPU flags, otherwise the 'nofpu' option won't work. */
+/* #define cpu_has_fpu 1 */
+/* #define raw_cpu_has_fpu 1 */
+#define cpu_has_32fpr 1
+
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+#define cpu_has_bp_ghist 0
+#define cpu_has_guestctl0ext 1 /* ? */
+#define cpu_has_guestctl1 1 /* ? */
+#define cpu_has_guestctl2 1 /* ? */
+#define cpu_has_guestid 1
+#define cpu_has_drg 0
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_rixi 1
+
+#define cpu_has_mmips 0
+#define cpu_has_lpa 1
+#define cpu_has_mvh 1
+#define cpu_has_xpa 1
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
+/* Depends on the MIPS_CM/SMP configs. */
+/* #define cpu_icache_snoops_remote_store 1 */
+
+/*
+ * The MIPS P5600 Warrior core is based on the MIPS32 Release 5
+ * architecture, which makes it backward compatible with all earlier
+ * 32-bit MIPS architecture releases.
+ */
+#define cpu_has_mips_1 1
+#define cpu_has_mips_2 1
+#define cpu_has_mips_3 0
+#define cpu_has_mips_4 0
+#define cpu_has_mips_5 0
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips32r5 1
+#define cpu_has_mips32r6 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_mips64r6 0
+#define cpu_has_mips_r2_exec_hazard 0
+
+#define cpu_has_clo_clz 1
+#define cpu_has_wsbh 1
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_dsp3 0
+#define cpu_has_loongson_mmi 0
+#define cpu_has_loongson_cam 0
+#define cpu_has_loongson_ext 0
+#define cpu_has_loongson_ext2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_vp 0
+#define cpu_has_userlocal 1
+
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+#define cpu_has_64bit_addresses 0
+/*
+ * VINT is hardwired to 1 by the P5600 design, while VEIC (reflecting
+ * SI_EICPresent) should also have been set to 1, since the MIPS GIC is
+ * always available in the chip. Alas, the IP core engineers mistakenly
+ * wired it to the GIC_VX_CTL_EIC bit, so fix it up by manually setting
+ * the flag to 1.
+ */
+#define cpu_has_vint 1
+#define cpu_has_veic 1
+/* Cache line sizes are fixed by the P5600 design. */
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#define cpu_scache_line_size() 32
+#define cpu_tcache_line_size() 0
+#define cpu_hwrena_impl_bits 0
+#define cpu_has_perf_cntr_intr_bit 1
+#define cpu_has_vz 1
+#define cpu_has_msa 1
+#define cpu_has_ufr 1
+#define cpu_has_fre 0
+#define cpu_has_cdmm 1
+#define cpu_has_small_pages 0
+#define cpu_has_nan_legacy 0
+#define cpu_has_nan_2008 1
+#define cpu_has_ebase_wg 1
+#define cpu_has_badinstr 1
+#define cpu_has_badinstrp 1
+#define cpu_has_contextconfig 1
+#define cpu_has_perf 1
+#define cpu_has_mac2008_only 0
+#define cpu_has_mmid 0
+#define cpu_has_mm_sysad 0
+#define cpu_has_mm_full 1
+
+#endif /* CONFIG_BT1_CPU_FEATURE_OVERRIDES */
+
+#endif /* __ASM_MACH_BAIKAL_T1_CPU_FEATURE_OVERRIDES_H__ */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 IRQ numbers declaration
+ */
+#ifndef __ASM_MACH_BAIKAL_T1_IRQ_H__
+#define __ASM_MACH_BAIKAL_T1_IRQ_H__
+
+#define NR_IRQS 255
+#define MIPS_CPU_IRQ_BASE 0
+
+#define BT1_APB_EHB_IRQ 16
+#define BT1_WDT_IRQ 17
+#define BT1_GPIO32_IRQ 19
+#define BT1_TIMER0_IRQ 24
+#define BT1_TIMER1_IRQ 25
+#define BT1_TIMER2_IRQ 26
+#define BT1_PVT_IRQ 31
+#define BT1_I2C1_IRQ 33
+#define BT1_I2C2_IRQ 34
+#define BT1_SPI1_IRQ 40
+#define BT1_SPI2_IRQ 41
+#define BT1_UART0_IRQ 48
+#define BT1_UART1_IRQ 49
+#define BT1_DMAC_IRQ 56
+#define BT1_SATA_IRQ 64
+#define BT1_USB_IRQ 68
+#define BT1_GMAC0_IRQ 72
+#define BT1_GMAC1_IRQ 73
+#define BT1_XGMAC_IRQ 74
+#define BT1_XGMAC_TX0_IRQ 75
+#define BT1_XGMAC_TX1_IRQ 76
+#define BT1_XGMAC_RX0_IRQ 77
+#define BT1_XGMAC_RX1_IRQ 78
+#define BT1_XGMAC_XPCS_IRQ 79
+#define BT1_PCIE_EDMA_TX0_IRQ 80
+#define BT1_PCIE_EDMA_TX1_IRQ 81
+#define BT1_PCIE_EDMA_TX2_IRQ 82
+#define BT1_PCIE_EDMA_TX3_IRQ 83
+#define BT1_PCIE_EDMA_RX0_IRQ 84
+#define BT1_PCIE_EDMA_RX1_IRQ 85
+#define BT1_PCIE_EDMA_RX2_IRQ 86
+#define BT1_PCIE_EDMA_RX3_IRQ 87
+#define BT1_PCIE_MSI_IRQ 88
+#define BT1_PCIE_AER_IRQ 89
+#define BT1_PCIE_PME_IRQ 90
+#define BT1_PCIE_HP_IRQ 91
+#define BT1_PCIE_BW_IRQ 92
+#define BT1_PCIE_L_REQ_IRQ 93
+#define BT1_DDR_DFI_E_IRQ 96
+#define BT1_DDR_ECC_CE_IRQ 97
+#define BT1_DDR_ECC_UE_IRQ 98
+#define BT1_DDR_ECC_SBR_IRQ 99
+#define BT1_HWA_IRQ 104
+#define BT1_AXI_EHB_IRQ 127
+
+#include_next <irq.h>
+
+#endif /* __ASM_MACH_BAIKAL_T1_IRQ_H__ */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 platform low-level initialization
+ */
+#ifndef __ASM_MACH_BAIKAL_T1_KERNEL_ENTRY_INIT_H__
+#define __ASM_MACH_BAIKAL_T1_KERNEL_ENTRY_INIT_H__
+
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+
+ /*
+ * Prepare segments for EVA boot:
+ *
+ * This is in case the processor boots in legacy configuration
+ * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
+ *
+ * =========================== Mappings ===============================
+ * CFG Virtual memory Physical memory CCA Mapping
+ * 5 0x00000000 0x3fffffff 0x80000000 0xBfffffff K0 MUSUK (kuseg)
+ * 4 0x40000000 0x7fffffff 0xC0000000 0xffffffff K0 MUSUK (kuseg)
+ * Flat 2GB physical mem
+ *
+ * 3 0x80000000 0x9fffffff 0x00000000 0x1fffffff K0 MUSUK (kseg0)
+ * 2 0xa0000000 0xbfffffff 0x00000000 0x1fffffff UC MUSUK (kseg1)
+ * 1 0xc0000000 0xdfffffff - K0 MK (kseg2)
+ * 0 0xe0000000 0xffffffff - K0 MK (kseg3)
+ * where UC = 2 Uncached non-coherent,
+ * WB = 3 Cacheable, non-coherent, write-back, write allocate,
+ * CWBE = 4 Cacheable, coherent, write-back, write-allocate, read
+ * misses request Exclusive,
+ * CWB = 5 Cacheable, coherent, write-back, write-allocate, read misses
+ * request Shared,
+ * UCA = 7 Uncached Accelerated, non-coherent.
+ * UK = 0 Kernel-only unmapped region,
+ * MK = 1 Kernel-only mapped region,
+ * MSK = 2 Supervisor and kernel mapped region,
+ * MUSK = 3 User, supervisor and kernel mapped region,
+ * MUSUK = 4 Used to implement a fully-mapped flat address space in
+ * user and supervisor modes, with unmapped regions which
+ * appear in kernel mode,
+ * USK = 5 Supervisor and kernel unmapped region,
+ * UUSK = 7 Unrestricted unmapped region.
+ *
+ * Note K0 = 2 by default on MIPS Warrior P5600.
+ *
+ * Lowmem is expanded to 2GB.
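+ *
+ * For intuition (derived from the table above): a kuseg virtual
+ * address such as 0x10000000 falls into CFG5 and, in kernel mode
+ * where MUSUK appears unmapped, resolves to physical 0x90000000
+ * (0x80000000 + 0x10000000), which is what yields the flat 2GB
+ * low-memory view.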
+ *
+ * The following code uses the t0, t1, t2 and ra registers without
+ * previously preserving them.
+ *
+ */
+ .macro platform_eva_init
+
+ .set push
+ .set reorder
+ /*
+ * Get Config.K0 value and use it to program
+ * the segmentation registers
+ */
+ mfc0 t1, CP0_CONFIG
+ andi t1, 0x7 /* CCA */
+ move t2, t1
+ ins t2, t1, 16, 3
+ /* SegCtl0 */
+ li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or t0, t2
+ mtc0 t0, CP0_SEGCTL0
+
+ /* SegCtl1 */
+ li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (2 << MIPS_SEGCFG_C_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ ins t0, t1, 16, 3
+ mtc0 t0, CP0_SEGCTL1
+
+ /* SegCtl2 */
+ li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (6 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (4 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or t0, t2
+ mtc0 t0, CP0_SEGCTL2
+
+ jal mips_ihb
+ mfc0 t0, CP0_CONFIG5
+ li t2, MIPS_CONF5_K /* K bit */
+ or t0, t0, t2
+ mtc0 t0, CP0_CONFIG5
+ sync
+ jal mips_ihb
+ nop
+
+ .set pop
+ .endm
+
+ /*
+ * Prepare segments for LEGACY boot:
+ *
+ * =========================== Mappings ==============================
+ * CFG Virtual memory Physical memory CCA Mapping
+ * 5 0x00000000 0x3fffffff - CWB MUSK (kuseg)
+ * 4 0x40000000 0x7fffffff - CWB MUSK (kuseg)
+ * 3 0x80000000 0x9fffffff 0x00000000 0x1fffffff CWB UK (kseg0)
+ * 2 0xa0000000 0xbfffffff 0x00000000 0x1fffffff 2 UK (kseg1)
+ * 1 0xc0000000 0xdfffffff - CWB MSK (kseg2)
+ * 0 0xe0000000 0xffffffff - CWB MK (kseg3)
+ *
+ * The following code uses the t0, t1, t2 and ra registers without
+ * previously preserving them.
+ *
+ */
+ .macro platform_legacy_init
+
+ .set push
+ .set reorder
+
+ /*
+ * Directly use cacheable, coherent, write-back, write-allocate, read
+ * misses request shared attribute (CWB).
+ */
+ li t1, 0x5
+ move t2, t1
+ ins t2, t1, 16, 3
+ /* SegCtl0 */
+ li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT)) | \
+ (((MIPS_SEGCFG_MSK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT)) << 16)
+ or t0, t2
+ mtc0 t0, CP0_SEGCTL0
+
+ /* SegCtl1 */
+ li t0, ((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (2 << MIPS_SEGCFG_C_SHIFT)) | \
+ (((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT)) << 16)
+ ins t0, t1, 16, 3
+ mtc0 t0, CP0_SEGCTL1
+
+ /* SegCtl2 */
+ li t0, ((MIPS_SEGCFG_MUSK << MIPS_SEGCFG_AM_SHIFT) | \
+ (6 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSK << MIPS_SEGCFG_AM_SHIFT) | \
+ (4 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or t0, t2
+ mtc0 t0, CP0_SEGCTL2
+
+ jal mips_ihb
+ nop
+
+ mfc0 t0, CP0_CONFIG, 5
+ li t2, MIPS_CONF5_K /* K bit */
+ or t0, t0, t2
+ mtc0 t0, CP0_CONFIG, 5
+ sync
+ jal mips_ihb
+ nop
+
+ .set pop
+ .endm
+
+ /*
+ * The Baikal-T1 engineering chip had problems when the following
+ * features were enabled.
+ */
+ .macro platform_errata_jr_ls_fix
+
+ .set push
+ .set reorder
+
+ jal mips_ihb
+ nop
+
+ /* Disable load/store bonding. */
+ mfc0 t0, CP0_CONFIG, 6
+ lui t1, (MIPS_CONF6_DLSB >> 16)
+ or t0, t0, t1
+ /* Disable all JR prediction except JR $31. */
+ ori t0, t0, MIPS_CONF6_JRCD
+ mtc0 t0, CP0_CONFIG, 6
+ sync
+ jal mips_ihb
+ nop
+
+ /* Disable all JR $31 prediction through return prediction stack. */
+ mfc0 t0, CP0_CONFIG, 7
+ ori t0, t0, MIPS_CONF7_RPS
+ mtc0 t0, CP0_CONFIG, 7
+ sync
+ jal mips_ihb
+ nop
+
+ .set pop
+ .endm
+
+ /*
+ * Set up the Baikal-T1 platform-specific memory segments layout.
+ * If the kernel is built for the engineering version of the chip,
+ * some errata must be worked around as well.
+ */
+ .macro kernel_entry_setup
+
+ sync
+ ehb
+
+#ifdef CONFIG_EVA
+ platform_eva_init
+#else
+ platform_legacy_init
+#endif
+
+#ifdef CONFIG_BT1_ERRATA_JR_LS_BUG
+ platform_errata_jr_ls_fix
+#endif
+
+ .endm
+
+ /*
+ * Do SMP slave processor setup necessary before we can safely execute
+ * C code.
+ */
+ .macro smp_slave_setup
+ sync
+ ehb
+
+#ifdef CONFIG_EVA
+ platform_eva_init
+#else
+ platform_legacy_init
+#endif
+
+#ifdef CONFIG_BT1_ERRATA_JR_LS_BUG
+ platform_errata_jr_ls_fix
+#endif
+
+ .endm
+#endif /* __ASM_MACH_BAIKAL_T1_KERNEL_ENTRY_INIT_H__ */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 SoC Static Memory Mapping
+ */
+#ifndef __ASM_MACH_BAIKAL_T1_MEMORY_H__
+#define __ASM_MACH_BAIKAL_T1_MEMORY_H__
+
+#include <linux/sizes.h>
+#include <asm/addrspace.h>
+
+#define BT1_LOMEM_BASE 0x00000000
+#define BT1_LOMEM_SIZE SZ_128M
+
+#define BT1_MMIO_START BT1_PCIE_MAP_BASE
+
+#define BT1_PCIE_MAP_BASE 0x08000000
+#define BT1_PCIE_MAP_SIZE 0x13DC0000
+
+#define BT1_P5600_GIC_BASE 0x1BDC0000
+#define BT1_P5600_GIC_SIZE SZ_128K
+#define BT1_P5600_CPC_BASE 0x1BDE0000
+#define BT1_P5600_CPC_SIZE SZ_32K
+#define BT1_P5600_CDMM_BASE 0x1BDE8000
+#define BT1_P5600_CDMM_SIZE SZ_32K
+
+#define BT1_SRAM_BASE 0x1BF80000
+#define BT1_SRAM_SIZE SZ_64K
+#define BT1_ROM_BASE 0x1BFC0000
+#define BT1_ROM_SIZE SZ_64K
+#define BT1_FLASH_BASE 0x1C000000
+#define BT1_FLASH_SIZE SZ_16M
+
+#define BT1_BOOT_CTRL_BASE 0x1F040000
+#define BT1_BOOT_CTRL_SIZE SZ_4K
+#define BT1_BOOT_CTRL_CSR 0x00
+#define BT1_BOOT_CTRL_MAR 0x04
+#define BT1_BOOT_CTRL_DRID 0x08
+#define BT1_BOOT_CTRL_VID 0x0C
+
+#define BT1_DMAC_BASE 0x1F041000
+#define BT1_DMAC_SIZE SZ_4K
+#define BT1_DDR_UMCTL2_BASE 0x1F042000
+#define BT1_DDR_UMCTL2_SIZE SZ_4K
+#define BT1_DDR_PHY_BASE 0x1F043000
+#define BT1_DDR_PHY_SIZE SZ_4K
+#define BT1_GPIO32_BASE 0x1F044000
+#define BT1_GPIO32_SIZE SZ_4K
+#define BT1_GPIO3_BASE 0x1F045000
+#define BT1_GPIO3_SIZE SZ_4K
+#define BT1_I2C1_BASE 0x1F046000
+#define BT1_I2C1_SIZE SZ_4K
+#define BT1_I2C2_BASE 0x1F047000
+#define BT1_I2C2_SIZE SZ_4K
+#define BT1_TIMERS_BASE 0x1F049000
+#define BT1_TIMERS_SIZE SZ_4K
+#define BT1_UART0_BASE 0x1F04A000
+#define BT1_UART0_SIZE SZ_4K
+#define BT1_UART1_BASE 0x1F04B000
+#define BT1_UART1_SIZE SZ_4K
+#define BT1_WDT_BASE 0x1F04C000
+#define BT1_WDT_SIZE SZ_4K
+#define BT1_CCU_BASE 0x1F04D000
+#define BT1_CCU_SIZE SZ_4K
+#define BT1_SPI1_BASE 0x1F04E000
+#define BT1_SPI1_SIZE SZ_4K
+#define BT1_SPI2_BASE 0x1F04F000
+#define BT1_SPI2_SIZE SZ_4K
+#define BT1_SATA_BASE 0x1F050000
+#define BT1_SATA_SIZE SZ_4K
+#define BT1_PCIE_BASE 0x1F052000
+#define BT1_PCIE_SIZE SZ_4K
+#define BT1_PCIE_DBI2_BASE 0x1F053000
+#define BT1_PCIE_DBI2_SIZE SZ_4K
+#define BT1_XGMAC_BASE 0x1F054000
+#define BT1_XGMAC_SIZE SZ_16K
+#define BT1_APB_EHB_BASE 0x1F059000
+#define BT1_APB_EHB_SIZE SZ_4K
+#define BT1_MAIN_IC_BASE 0x1F05A000
+#define BT1_MAIN_IC_SIZE SZ_4K
+#define BT1_HWA_BASE 0x1F05B000
+#define BT1_HWA_SIZE SZ_8K
+#define BT1_XGMAC_XPCS_BASE 0x1F05D000
+#define BT1_XGMAC_XPCS_SIZE SZ_4K
+#define BT1_GMAC0_BASE 0x1F05E000
+#define BT1_GMAC0_SIZE SZ_8K
+#define BT1_GMAC1_BASE 0x1F060000
+#define BT1_GMAC1_SIZE SZ_8K
+#define BT1_USB_BASE 0x1F100000
+#define BT1_USB_SIZE SZ_1M
+#define BT1_PVT_BASE 0x1F200000
+#define BT1_PVT_SIZE SZ_4K
+#define BT1_EFUSE_BASE 0x1F201000
+#define BT1_EFUSE_SIZE SZ_4K
+
+#define BT1_P5600_GCR_L2SYNC_BASE 0x1FBF0000
+#define BT1_P5600_GCR_L2SYNC_SIZE SZ_4K
+#define BT1_P5600_GCB_BASE 0x1FBF8000
+#define BT1_P5600_GCB_SIZE SZ_8K
+#define BT1_P5600_CLCB_BASE 0x1FBFA000
+#define BT1_P5600_CLCB_SIZE SZ_8K
+#define BT1_P5600_COCB_BASE 0x1FBFC000
+#define BT1_P5600_COCB_SIZE SZ_8K
+#define BT1_P5600_DBG_BASE 0x1FBFE000
+#define BT1_P5600_DBG_SIZE SZ_8K
+#define BT1_BOOT_MAP_BASE 0x1FC00000
+#define BT1_BOOT_MAP_SIZE SZ_4M
+
+#define BT1_DEFAULT_BEV KSEG1ADDR(BT1_BOOT_MAP_BASE)
+
+#define BT1_MMIO_END BT1_HIMEM_BASE
+
+#define BT1_HIMEM_BASE 0x20000000
+#define BT1_HIMEM_SIZE SZ_256M
+
+#endif /* __ASM_MACH_BAIKAL_T1_MEMORY_H__ */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 platform declarations
+ */
+#ifndef __ASM_MACH_BAIKAL_T1_PLATFORM_H__
+#define __ASM_MACH_BAIKAL_T1_PLATFORM_H__
+
+#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
+
+int mips_set_uca_range(phys_addr_t start, phys_addr_t end);
+
+#else /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */
+
+static inline int mips_set_uca_range(phys_addr_t start, phys_addr_t end)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */
+
+#endif /* __ASM_MACH_BAIKAL_T1_PLATFORM_H__ */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 SoC Memory Spaces
+ */
+#ifndef __ASM_MACH_BAIKAL_T1_SPACES_H__
+#define __ASM_MACH_BAIKAL_T1_SPACES_H__
+
+#define PCI_IOBASE mips_io_port_base
+#define PCI_IOSIZE SZ_64K
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
+#define pci_remap_iospace pci_remap_iospace
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_MACH_BAIKAL_T1_SPACES_H__ */
#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
+/* GCR_CONTROL - Global CM2 Settings */
+GCR_ACCESSOR_RW(64, 0x010, control)
+#define CM_GCR_CONTROL_SYNCCTL BIT(16)
+#define CM_GCR_CONTROL_SYNCDIS BIT(5)
+#define CM_GCR_CONTROL_IVU_EN BIT(4)
+#define CM_GCR_CONTROL_SHST_EN BIT(3)
+#define CM_GCR_CONTROL_PARK_EN BIT(2)
+#define CM_GCR_CONTROL_MMIO_LIMIT_DIS BIT(1)
+#define CM_GCR_CONTROL_SPEC_READ_EN BIT(0)
+
+/* GCR_CONTROL2 - Global CM2 Settings (continue) */
+GCR_ACCESSOR_RW(64, 0x018, control2)
+#define CM_GCR_CONTROL2_L2_CACHEOP_LIMIT GENMASK(19, 16)
+#define CM_GCR_CONTROL2_L1_CACHEOP_LIMIT GENMASK(3, 0)
+
/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x020, access)
#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0)
res = read_##name(); \
new = res | set; \
write_##name(new); \
+ _ehb(); \
\
return res; \
} \
res = read_##name(); \
new = res & ~clear; \
write_##name(new); \
+ _ehb(); \
\
return res; \
} \
new = res & ~change; \
new |= (val & change); \
write_##name(new); \
+ _ehb(); \
\
return res; \
}
local_irq_restore(flags);
}
+#ifdef CONFIG_MIPS_BAIKAL_T1
+/* Workaround for core stuck on TLB load exception */
+#define tlb_prefetch tlb_prefetch
+static inline void tlb_prefetch(unsigned long addr)
+{
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ int idx, pid;
+
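+ /* Only kernel mapped (MAP_BASE and above) addresses are preloaded. */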
+ if (addr < MAP_BASE)
+ return;
+
+ addr &= (PAGE_MASK << 1);
+ if (cpu_has_mmid) {
+ write_c0_entryhi(addr);
+ } else {
+ pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
+ write_c0_entryhi(addr | pid);
+ }
+ pgdp = pgd_offset(&init_mm, addr);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
+ pmdp = pmd_offset(pudp, addr);
+ idx = read_c0_index();
+
+ ptep = pte_offset_map(pmdp, addr);
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+ write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+ if (cpu_has_xpa)
+ writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+ ptep++;
+ write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+ if (cpu_has_xpa)
+ writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
+ write_c0_entrylo0(ptep->pte_high);
+ ptep++;
+ write_c0_entrylo1(ptep->pte_high);
+#endif
+#else
+ write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+ write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
+#endif
+ mtc0_tlbw_hazard();
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+
+ tlbw_use_hazard();
+}
+#endif
+
#include <asm-generic/mmu_context.h>
#endif /* _ASM_MMU_CONTEXT_H */
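For context, a minimal sketch of how this hook might be invoked (the
call site below is hypothetical, not part of this patch): before a
kernel mapped address is dereferenced, the matching TLB entry can be
primed so the P5600 core does not keep re-taking the TLB load
exception:

	if (addr >= MAP_BASE)		/* vmalloc/module range only */
		tlb_prefetch(addr);	/* preload the TLB entry */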
#ifdef CONFIG_KEXEC
void (*kexec_nonboot_cpu)(void);
#endif
-};
+} __no_randomize_layout;
extern void register_smp_ops(const struct plat_smp_ops *ops);
panic("Incorrect memory mapping !!!");
if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+ max_low_pfn = PFN_DOWN(HIGHMEM_START);
#ifdef CONFIG_HIGHMEM
- highstart_pfn = PFN_DOWN(HIGHMEM_START);
+ highstart_pfn = max_low_pfn;
highend_pfn = max_pfn;
#else
- max_low_pfn = PFN_DOWN(HIGHMEM_START);
max_pfn = max_low_pfn;
#endif
}
wmb();
}
- write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+ /*
+ * Use the PWRUP command instead of RESET to keep EJTAG
+ * operational. Otherwise there is no EJTAG chain.
+ */
+ write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
timeout = 100;
while (true) {
if (scratch_reg >= 0) {
/* Save in CPU local C0_KScratch? */
UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
+ uasm_i_ehb(p);
r.r1 = K0;
r.r2 = K1;
r.r3 = 1;
config DW_EDMA_PCIE
tristate "Synopsys DesignWare eDMA PCIe driver"
depends on PCI && PCI_MSI
- select DW_EDMA
+ depends on DW_EDMA
help
Provides a glue-logic between the Synopsys DesignWare
eDMA controller and an endpoint PCIe device. This also serves
return container_of(vd, struct dw_edma_desc, vd);
}
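+/*
+ * Translate a CPU physical address into the address visible on the PCI
+ * bus; fall back to the identity mapping when the glue driver supplies
+ * no pci_address() callback.
+ */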
+static inline
+u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
+{
+ struct dw_edma_chip *chip = chan->dw->chip;
+
+ if (chip->ops->pci_address)
+ return chip->ops->pci_address(chip->dev, cpu_addr);
+
+ return cpu_addr;
+}
+
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *burst;
return 1;
}
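+/*
+ * Report the per-channel direction capabilities: on a locally set up
+ * eDMA (endpoint side) read channels move data DEV_TO_MEM and write
+ * channels MEM_TO_DEV; driven from the remote (host) side the roles
+ * are swapped.
+ */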
+static void dw_edma_device_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+ if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ if (chan->dir == EDMA_DIR_READ)
+ caps->directions = BIT(DMA_DEV_TO_MEM);
+ else
+ caps->directions = BIT(DMA_MEM_TO_DEV);
+ } else {
+ if (chan->dir == EDMA_DIR_WRITE)
+ caps->directions = BIT(DMA_DEV_TO_MEM);
+ else
+ caps->directions = BIT(DMA_MEM_TO_DEV);
+ }
+}
+
static int dw_edma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
{
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
enum dma_transfer_direction dir = xfer->direction;
- phys_addr_t src_addr, dst_addr;
struct scatterlist *sg = NULL;
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
+ u64 src_addr, dst_addr;
+ size_t fsz = 0;
u32 cnt = 0;
int i;
if (xfer->xfer.sg.len < 1)
return NULL;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
- if (!xfer->xfer.il->numf)
+ if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
return NULL;
- if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+ if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
return NULL;
} else {
return NULL;
dst_addr = chan->config.dst_addr;
}
+ if (dir == DMA_DEV_TO_MEM)
+ src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
+ else
+ dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);
+
if (xfer->type == EDMA_XFER_CYCLIC) {
cnt = xfer->xfer.cyclic.cnt;
} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
cnt = xfer->xfer.sg.len;
sg = xfer->xfer.sg.sgl;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
- if (xfer->xfer.il->numf > 0)
- cnt = xfer->xfer.il->numf;
- else
- cnt = xfer->xfer.il->frame_size;
+ cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
+ fsz = xfer->xfer.il->frame_size;
}
for (i = 0; i < cnt; i++) {
else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
burst->sz = sg_dma_len(sg);
else if (xfer->type == EDMA_XFER_INTERLEAVED)
- burst->sz = xfer->xfer.il->sgl[i].size;
+ burst->sz = xfer->xfer.il->sgl[i % fsz].size;
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
sg = sg_next(sg);
- } else if (xfer->type == EDMA_XFER_INTERLEAVED &&
- xfer->xfer.il->frame_size > 0) {
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
struct dma_interleaved_template *il = xfer->xfer.il;
- struct data_chunk *dc = &il->sgl[i];
+ struct data_chunk *dc = &il->sgl[i % fsz];
- if (il->src_sgl) {
- src_addr += burst->sz;
+ src_addr += burst->sz;
+ if (il->src_sgl)
src_addr += dmaengine_get_src_icg(il, dc);
- }
- if (il->dst_sgl) {
- dst_addr += burst->sz;
+ dst_addr += burst->sz;
+ if (il->dst_sgl)
dst_addr += dmaengine_get_dst_icg(il, dc);
- }
}
}
}
}
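A worked example of the interleaved bookkeeping above (hypothetical
numbers): for a template with numf = 2 frames of frame_size = 3 chunks,

	cnt = il->numf * il->frame_size;	/* 2 * 3 = 6 bursts */
	dc = &il->sgl[i % fsz];			/* i = 0..5 -> sgl[0,1,2,0,1,2] */

so every frame repeats the same chunk geometry while the source and
destination addresses keep advancing.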
-static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
- u32 wr_alloc, u32 rd_alloc)
+static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
struct dw_edma_chip *chip = dw->chip;
- struct dw_edma_region *dt_region;
struct device *dev = chip->dev;
struct dw_edma_chan *chan;
struct dw_edma_irq *irq;
struct dma_device *dma;
- u32 alloc, off_alloc;
- u32 i, j, cnt;
- int err = 0;
+ u32 i, ch_cnt;
u32 pos;
- if (write) {
- i = 0;
- cnt = dw->wr_ch_cnt;
- dma = &dw->wr_edma;
- alloc = wr_alloc;
- off_alloc = 0;
- } else {
- i = dw->wr_ch_cnt;
- cnt = dw->rd_ch_cnt;
- dma = &dw->rd_edma;
- alloc = rd_alloc;
- off_alloc = wr_alloc;
- }
+ ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+ dma = &dw->dma;
INIT_LIST_HEAD(&dma->channels);
- for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
+
+ for (i = 0; i < ch_cnt; i++) {
chan = &dw->chan[i];
- dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
- if (!dt_region)
- return -ENOMEM;
+ chan->dw = dw;
- chan->vc.chan.private = dt_region;
+ if (i < dw->wr_ch_cnt) {
+ chan->id = i;
+ chan->dir = EDMA_DIR_WRITE;
+ } else {
+ chan->id = i - dw->wr_ch_cnt;
+ chan->dir = EDMA_DIR_READ;
+ }
- chan->dw = dw;
- chan->id = j;
- chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
chan->configured = false;
chan->request = EDMA_REQ_NONE;
chan->status = EDMA_ST_IDLE;
- if (write)
- chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
+ if (chan->dir == EDMA_DIR_WRITE)
+ chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
else
- chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
- write ? "write" : "read", j, chan->ll_max);
+ chan->dir == EDMA_DIR_WRITE ? "write" : "read",
+ chan->id, chan->ll_max);
if (dw->nr_irqs == 1)
pos = 0;
+ else if (chan->dir == EDMA_DIR_WRITE)
+ pos = chan->id % wr_alloc;
else
- pos = off_alloc + (j % alloc);
+ pos = wr_alloc + chan->id % rd_alloc;
irq = &dw->irq[pos];
- if (write)
- irq->wr_mask |= BIT(j);
+ if (chan->dir == EDMA_DIR_WRITE)
+ irq->wr_mask |= BIT(chan->id);
else
- irq->rd_mask |= BIT(j);
+ irq->rd_mask |= BIT(chan->id);
irq->dw = dw;
memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
- write ? "write" : "read", j,
+ chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
chan->msi.address_hi, chan->msi.address_lo,
chan->msi.data);
chan->vc.desc_free = vchan_free_desc;
- vchan_init(&chan->vc, dma);
+ chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
+ &dw->chip->dt_region_wr[chan->id] :
+ &dw->chip->dt_region_rd[chan->id];
- if (write) {
- dt_region->paddr = chip->dt_region_wr[j].paddr;
- dt_region->vaddr = chip->dt_region_wr[j].vaddr;
- dt_region->sz = chip->dt_region_wr[j].sz;
- } else {
- dt_region->paddr = chip->dt_region_rd[j].paddr;
- dt_region->vaddr = chip->dt_region_rd[j].vaddr;
- dt_region->sz = chip->dt_region_rd[j].sz;
- }
+ vchan_init(&chan->vc, dma);
dw_edma_v0_core_device_config(chan);
}
dma_cap_set(DMA_CYCLIC, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
- dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
+ dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma->chancnt = cnt;
/* Set DMA channel callbacks */
dma->dev = chip->dev;
dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
dma->device_free_chan_resources = dw_edma_free_chan_resources;
+ dma->device_caps = dw_edma_device_caps;
dma->device_config = dw_edma_device_config;
dma->device_pause = dw_edma_device_pause;
dma->device_resume = dw_edma_device_resume;
dma_set_max_seg_size(dma->dev, U32_MAX);
/* Register DMA device */
- err = dma_async_device_register(dma);
-
- return err;
+ return dma_async_device_register(dma);
}
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
dw_edma_interrupt_read,
IRQF_SHARED, dw->name,
&dw->irq[i]);
- if (err) {
- dw->nr_irqs = i;
- return err;
- }
+ if (err)
+ goto err_irq_free;
if (irq_get_msi_desc(irq))
get_cached_msi_msg(irq, &dw->irq[i].msi);
dw->nr_irqs = i;
}
+ return 0;
+
+err_irq_free:
+ for (i--; i >= 0; i--) {
+ irq = chip->ops->irq_vector(dev, i);
+ free_irq(irq, &dw->irq[i]);
+ }
+
return err;
}
if (!dw->chan)
return -ENOMEM;
- snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
+ snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
+ dev_name(chip->dev));
/* Disable eDMA, only to establish the ideal initial conditions */
dw_edma_v0_core_off(dw);
if (err)
return err;
- /* Setup write channels */
- err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
- if (err)
- goto err_irq_free;
-
- /* Setup read channels */
- err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
+ /* Setup write/read channels */
+ err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
struct dw_edma *dw = chip->dw;
int i;
+ /* Skip removal if no private data found */
+ if (!dw)
+ return -ENODEV;
+
/* Disable eDMA */
dw_edma_v0_core_off(dw);
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Deregister eDMA device */
- dma_async_device_unregister(&dw->wr_edma);
- list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
+ dma_async_device_unregister(&dw->dma);
+ list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
- dma_async_device_unregister(&dw->rd_edma);
- list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
- vc.chan.device_node) {
- tasklet_kill(&chan->vc.task);
- list_del(&chan->vc.chan.device_node);
- }
-
- /* Turn debugfs off */
- dw_edma_v0_core_debugfs_off(dw);
-
return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);
};
struct dw_edma {
- char name[20];
+ char name[32];
- struct dma_device wr_edma;
- u16 wr_ch_cnt;
+ struct dma_device dma;
- struct dma_device rd_edma;
+ u16 wr_ch_cnt;
u16 rd_ch_cnt;
struct dw_edma_irq *irq;
raw_spinlock_t lock; /* Only for legacy */
struct dw_edma_chip *chip;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif /* CONFIG_DEBUG_FS */
};
struct dw_edma_sg {
return pci_irq_vector(to_pci_dev(dev), nr);
}
+static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus_region region;
+ struct resource res = {
+ .flags = IORESOURCE_MEM,
+ .start = cpu_addr,
+ .end = cpu_addr,
+ };
+
+ pcibios_resource_to_bus(pdev->bus, &region, &res);
+ return region.start;
+}
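For context, the new pci_address() callback lets the core translate CPU physical addresses into PCI bus addresses before programming the eDMA. A minimal sketch of a core-side consumer, with the fallback behaviour assumed rather than taken from this diff:

	/* Hypothetical core helper: use the platform callback when present,
	 * otherwise assume CPU and PCI bus addresses are identical.
	 */
	static u64 dw_edma_get_pci_address(struct dw_edma_chan *chan,
					   phys_addr_t cpu_addr)
	{
		const struct dw_edma_chip *chip = chan->dw->chip;

		if (chip->ops->pci_address)
			return chip->ops->pci_address(chip->dev, cpu_addr);

		return cpu_addr;
	}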
+
static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
+ .pci_address = dw_edma_pcie_address,
};
static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->id = pdev->devfn;
chip->mf = vsec_data.mf;
chip->nr_irqs = nr_irqs;
struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
- ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
- if (!ll_region->vaddr)
+ ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr.io)
return -ENOMEM;
- ll_region->vaddr += ll_block->off;
- ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->vaddr.io += ll_block->off;
+ ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
- dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
- if (!dt_region->vaddr)
+ dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr.io)
return -ENOMEM;
- dt_region->vaddr += dt_block->off;
- dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->vaddr.io += dt_block->off;
+ dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
- ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
- if (!ll_region->vaddr)
+ ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr.io)
return -ENOMEM;
- ll_region->vaddr += ll_block->off;
- ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->vaddr.io += ll_block->off;
+ ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
- dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
- if (!dt_region->vaddr)
+ dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr.io)
return -ENOMEM;
- dt_region->vaddr += dt_block->off;
- dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->vaddr.io += dt_block->off;
+ dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_wr[i].bar,
vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
- chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
+ chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_wr[i].bar,
vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
- chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
+ chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_rd[i].bar,
vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
- chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
+ chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_rd[i].bar,
vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
- chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
+ chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
#include <linux/bitfield.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
SET_32(dw, rd_##name, value); \
} while (0)
-#ifdef CONFIG_64BIT
-
#define SET_64(dw, name, value) \
writeq(value, &(__dw_regs(dw)->name))
SET_64(dw, rd_##name, value); \
} while (0)
-#endif /* CONFIG_64BIT */
-
#define SET_COMPAT(dw, name, value) \
writel(value, &(__dw_regs(dw)->type.unroll.name))
#define GET_CH_32(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL_32(ll, value) \
- writel(value, ll)
-
-#ifdef CONFIG_64BIT
-
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u64 value, void __iomem *addr)
{
#define GET_CH_64(dw, dir, ch, name) \
readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL_64(ll, value) \
- writeq(value, ll)
-
-#endif /* CONFIG_64BIT */
-
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
GET_RW_32(dw, dir, int_status));
}
+static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
+ u32 control, u32 size, u64 sar, u64 dar)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
+
+ lli->control = control;
+ lli->transfer_size = size;
+ lli->sar.reg = sar;
+ lli->dar.reg = dar;
+ } else {
+ struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &lli->control);
+ writel(size, &lli->transfer_size);
+ writeq(sar, &lli->sar.reg);
+ writeq(dar, &lli->dar.reg);
+ }
+}
+
+static void dw_edma_v0_write_ll_link(struct dw_edma_chunk *chunk,
+ int i, u32 control, u64 pointer)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
+
+ llp->control = control;
+ llp->llp.reg = pointer;
+ } else {
+ struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &llp->control);
+ writeq(pointer, &llp->llp.reg);
+ }
+}
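Both helpers pick the access method from DW_EDMA_CHIP_LOCAL: linked-list memory sitting behind a remote PCI BAR must be written with MMIO accessors (writel/writeq), while locally allocated LL memory can be filled with plain CPU stores.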
+
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
struct dw_edma_chan *chan = chunk->chan;
- struct dw_edma_v0_lli __iomem *lli;
- struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
int j;
- lli = chunk->ll_region.vaddr;
-
if (chunk->cb)
control = DW_EDMA_V0_CB;
if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
control |= DW_EDMA_V0_RIE;
}
- /* Channel control */
- SET_LL_32(&lli[i].control, control);
- /* Transfer size */
- SET_LL_32(&lli[i].transfer_size, child->sz);
- /* SAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].sar.reg, child->sar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
- SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
- #endif /* CONFIG_64BIT */
- /* DAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].dar.reg, child->dar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
- SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
- #endif /* CONFIG_64BIT */
- i++;
+
+ dw_edma_v0_write_ll_data(chunk, i++, control, child->sz,
+ child->sar, child->dar);
}
- llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
- /* Channel control */
- SET_LL_32(&llp->control, control);
- /* Linked list */
- #ifdef CONFIG_64BIT
- SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
- #else /* CONFIG_64BIT */
- SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
- SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
+ dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
dw_edma_v0_debugfs_on(dw);
}
-
-void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
-{
- dw_edma_v0_debugfs_off(dw);
-}
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
-void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"
-#define REGS_ADDR(name) \
- ((void __force *)®s->name)
-#define REGISTER(name) \
- { #name, REGS_ADDR(name) }
-
-#define WR_REGISTER(name) \
- { #name, REGS_ADDR(wr_##name) }
-#define RD_REGISTER(name) \
- { #name, REGS_ADDR(rd_##name) }
-
-#define WR_REGISTER_LEGACY(name) \
- { #name, REGS_ADDR(type.legacy.wr_##name) }
+#define REGS_ADDR(dw, name) \
+ ({ \
+ struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \
+ \
+ (void __iomem *)&__regs->name; \
+ })
+
+#define REGS_CH_ADDR(dw, name, _dir, _ch) \
+ ({ \
+ struct dw_edma_v0_ch_regs __iomem *__ch_regs; \
+ \
+ if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY) \
+ __ch_regs = REGS_ADDR(dw, type.legacy.ch); \
+ else if (_dir == EDMA_DIR_READ) \
+ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd); \
+ else \
+ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr); \
+ \
+ (void __iomem *)&__ch_regs->name; \
+ })
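Note that in EDMA_MF_EDMA_LEGACY mode REGS_CH_ADDR() resolves every (dir, ch) pair to the one shared type.legacy.ch window; the actual channel is selected at read time by programming viewport_sel, as dw_edma_debugfs_u32_get() does below.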
+
+#define REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, name) }
+
+#define CTX_REGISTER(dw, name, dir, ch) \
+ { dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch }
+
+#define WR_REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, wr_##name) }
+#define RD_REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, rd_##name) }
+
+#define WR_REGISTER_LEGACY(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.legacy.wr_##name) }
-#define RD_REGISTER_LEGACY(name) \
- { #name, REGS_ADDR(type.legacy.rd_##name) }
+#define RD_REGISTER_LEGACY(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.legacy.rd_##name) }
-#define WR_REGISTER_UNROLL(name) \
- { #name, REGS_ADDR(type.unroll.wr_##name) }
-#define RD_REGISTER_UNROLL(name) \
- { #name, REGS_ADDR(type.unroll.rd_##name) }
+#define WR_REGISTER_UNROLL(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) }
+#define RD_REGISTER_UNROLL(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) }
#define WRITE_STR "write"
#define READ_STR "read"
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
-static struct dw_edma *dw;
-static struct dw_edma_v0_regs __iomem *regs;
-
-static struct {
- void __iomem *start;
- void __iomem *end;
-} lim[2][EDMA_V0_MAX_NR_CH];
-
-struct debugfs_entries {
+struct dw_edma_debugfs_entry {
+ struct dw_edma *dw;
const char *name;
- dma_addr_t *reg;
+ void __iomem *reg;
+ enum dw_edma_dir dir;
+ u16 ch;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
- void __iomem *reg = (void __force __iomem *)data;
+ struct dw_edma_debugfs_entry *entry = data;
+ struct dw_edma *dw = entry->dw;
+ void __iomem *reg = entry->reg;
+
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
- reg >= (void __iomem *)®s->type.legacy.ch) {
- void __iomem *ptr = ®s->type.legacy.ch;
- u32 viewport_sel = 0;
+ reg >= REGS_ADDR(dw, type.legacy.ch)) {
unsigned long flags;
- u16 ch;
-
- for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
- ptr += (reg - lim[0][ch].start);
- goto legacy_sel_wr;
- }
-
- for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
- ptr += (reg - lim[1][ch].start);
- goto legacy_sel_rd;
- }
-
- return 0;
-legacy_sel_rd:
- viewport_sel = BIT(31);
-legacy_sel_wr:
- viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ u32 viewport_sel;
+
+ viewport_sel = entry->dir == EDMA_DIR_READ ? BIT(31) : 0;
+ viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch);
raw_spin_lock_irqsave(&dw->lock, flags);
- writel(viewport_sel, ®s->type.legacy.viewport_sel);
- *val = readl(ptr);
+ writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel));
+ *val = readl(reg);
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");
-static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
- int nr_entries, struct dentry *dir)
+static void dw_edma_debugfs_create_x32(struct dw_edma *dw,
+ const struct dw_edma_debugfs_entry ini[],
+ int nr_entries, struct dentry *dent)
{
+ struct dw_edma_debugfs_entry *entries;
int i;
+ entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries),
+ GFP_KERNEL);
+ if (!entries)
+ return;
+
for (i = 0; i < nr_entries; i++) {
- if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
- entries[i].reg, &fops_x32))
- break;
+ entries[i] = ini[i];
+
+ debugfs_create_file_unsafe(entries[i].name, 0444, dent,
+ &entries[i], &fops_x32);
}
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
- struct dentry *dir)
+static void dw_edma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
+ u16 ch, struct dentry *dent)
{
- int nr_entries;
- const struct debugfs_entries debugfs_regs[] = {
- REGISTER(ch_control1),
- REGISTER(ch_control2),
- REGISTER(transfer_size),
- REGISTER(sar.lsb),
- REGISTER(sar.msb),
- REGISTER(dar.lsb),
- REGISTER(dar.msb),
- REGISTER(llp.lsb),
- REGISTER(llp.msb),
+ struct dw_edma_debugfs_entry debugfs_regs[] = {
+ CTX_REGISTER(dw, ch_control1, dir, ch),
+ CTX_REGISTER(dw, ch_control2, dir, ch),
+ CTX_REGISTER(dw, transfer_size, dir, ch),
+ CTX_REGISTER(dw, sar.lsb, dir, ch),
+ CTX_REGISTER(dw, sar.msb, dir, ch),
+ CTX_REGISTER(dw, dar.lsb, dir, ch),
+ CTX_REGISTER(dw, dar.msb, dir, ch),
+ CTX_REGISTER(dw, llp.lsb, dir, ch),
+ CTX_REGISTER(dw, llp.msb, dir, ch),
};
+ int nr_entries;
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent);
}
-static void dw_edma_debugfs_regs_wr(struct dentry *dir)
+static void dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
{
- const struct debugfs_entries debugfs_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
- WR_REGISTER(engine_en),
- WR_REGISTER(doorbell),
- WR_REGISTER(ch_arb_weight.lsb),
- WR_REGISTER(ch_arb_weight.msb),
+ WR_REGISTER(dw, engine_en),
+ WR_REGISTER(dw, doorbell),
+ WR_REGISTER(dw, ch_arb_weight.lsb),
+ WR_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
- WR_REGISTER(int_status),
- WR_REGISTER(int_mask),
- WR_REGISTER(int_clear),
- WR_REGISTER(err_status),
- WR_REGISTER(done_imwr.lsb),
- WR_REGISTER(done_imwr.msb),
- WR_REGISTER(abort_imwr.lsb),
- WR_REGISTER(abort_imwr.msb),
- WR_REGISTER(ch01_imwr_data),
- WR_REGISTER(ch23_imwr_data),
- WR_REGISTER(ch45_imwr_data),
- WR_REGISTER(ch67_imwr_data),
- WR_REGISTER(linked_list_err_en),
+ WR_REGISTER(dw, int_status),
+ WR_REGISTER(dw, int_mask),
+ WR_REGISTER(dw, int_clear),
+ WR_REGISTER(dw, err_status),
+ WR_REGISTER(dw, done_imwr.lsb),
+ WR_REGISTER(dw, done_imwr.msb),
+ WR_REGISTER(dw, abort_imwr.lsb),
+ WR_REGISTER(dw, abort_imwr.msb),
+ WR_REGISTER(dw, ch01_imwr_data),
+ WR_REGISTER(dw, ch23_imwr_data),
+ WR_REGISTER(dw, ch45_imwr_data),
+ WR_REGISTER(dw, ch67_imwr_data),
+ WR_REGISTER(dw, linked_list_err_en),
};
- const struct debugfs_entries debugfs_unroll_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
- WR_REGISTER_UNROLL(engine_chgroup),
- WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
- WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
- WR_REGISTER_UNROLL(ch0_pwr_en),
- WR_REGISTER_UNROLL(ch1_pwr_en),
- WR_REGISTER_UNROLL(ch2_pwr_en),
- WR_REGISTER_UNROLL(ch3_pwr_en),
- WR_REGISTER_UNROLL(ch4_pwr_en),
- WR_REGISTER_UNROLL(ch5_pwr_en),
- WR_REGISTER_UNROLL(ch6_pwr_en),
- WR_REGISTER_UNROLL(ch7_pwr_en),
+ WR_REGISTER_UNROLL(dw, engine_chgroup),
+ WR_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
+ WR_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
+ WR_REGISTER_UNROLL(dw, ch0_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch1_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch2_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch3_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch4_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch5_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch6_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch7_pwr_en),
};
- struct dentry *regs_dir, *ch_dir;
+ struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
- regs_dir = debugfs_create_dir(WRITE_STR, dir);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
- dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
- regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
+ regs_dent);
}
for (i = 0; i < dw->wr_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
- ch_dir = debugfs_create_dir(name, regs_dir);
- if (!ch_dir)
- return;
-
- dw_edma_debugfs_regs_ch(®s->type.unroll.ch[i].wr, ch_dir);
+ ch_dent = debugfs_create_dir(name, regs_dent);
- lim[0][i].start = ®s->type.unroll.ch[i].wr;
- lim[0][i].end = ®s->type.unroll.ch[i].padding_1[0];
+ dw_edma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent);
}
}
-static void dw_edma_debugfs_regs_rd(struct dentry *dir)
+static void dw_edma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent)
{
- const struct debugfs_entries debugfs_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
- RD_REGISTER(engine_en),
- RD_REGISTER(doorbell),
- RD_REGISTER(ch_arb_weight.lsb),
- RD_REGISTER(ch_arb_weight.msb),
+ RD_REGISTER(dw, engine_en),
+ RD_REGISTER(dw, doorbell),
+ RD_REGISTER(dw, ch_arb_weight.lsb),
+ RD_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
- RD_REGISTER(int_status),
- RD_REGISTER(int_mask),
- RD_REGISTER(int_clear),
- RD_REGISTER(err_status.lsb),
- RD_REGISTER(err_status.msb),
- RD_REGISTER(linked_list_err_en),
- RD_REGISTER(done_imwr.lsb),
- RD_REGISTER(done_imwr.msb),
- RD_REGISTER(abort_imwr.lsb),
- RD_REGISTER(abort_imwr.msb),
- RD_REGISTER(ch01_imwr_data),
- RD_REGISTER(ch23_imwr_data),
- RD_REGISTER(ch45_imwr_data),
- RD_REGISTER(ch67_imwr_data),
+ RD_REGISTER(dw, int_status),
+ RD_REGISTER(dw, int_mask),
+ RD_REGISTER(dw, int_clear),
+ RD_REGISTER(dw, err_status.lsb),
+ RD_REGISTER(dw, err_status.msb),
+ RD_REGISTER(dw, linked_list_err_en),
+ RD_REGISTER(dw, done_imwr.lsb),
+ RD_REGISTER(dw, done_imwr.msb),
+ RD_REGISTER(dw, abort_imwr.lsb),
+ RD_REGISTER(dw, abort_imwr.msb),
+ RD_REGISTER(dw, ch01_imwr_data),
+ RD_REGISTER(dw, ch23_imwr_data),
+ RD_REGISTER(dw, ch45_imwr_data),
+ RD_REGISTER(dw, ch67_imwr_data),
};
- const struct debugfs_entries debugfs_unroll_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
- RD_REGISTER_UNROLL(engine_chgroup),
- RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
- RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
- RD_REGISTER_UNROLL(ch0_pwr_en),
- RD_REGISTER_UNROLL(ch1_pwr_en),
- RD_REGISTER_UNROLL(ch2_pwr_en),
- RD_REGISTER_UNROLL(ch3_pwr_en),
- RD_REGISTER_UNROLL(ch4_pwr_en),
- RD_REGISTER_UNROLL(ch5_pwr_en),
- RD_REGISTER_UNROLL(ch6_pwr_en),
- RD_REGISTER_UNROLL(ch7_pwr_en),
+ RD_REGISTER_UNROLL(dw, engine_chgroup),
+ RD_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
+ RD_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
+ RD_REGISTER_UNROLL(dw, ch0_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch1_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch2_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch3_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch4_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch5_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch6_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch7_pwr_en),
};
- struct dentry *regs_dir, *ch_dir;
+ struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
- regs_dir = debugfs_create_dir(READ_STR, dir);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(READ_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
- dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
- regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
+ regs_dent);
}
for (i = 0; i < dw->rd_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
- ch_dir = debugfs_create_dir(name, regs_dir);
- if (!ch_dir)
- return;
-
- dw_edma_debugfs_regs_ch(®s->type.unroll.ch[i].rd, ch_dir);
+ ch_dent = debugfs_create_dir(name, regs_dent);
- lim[1][i].start = ®s->type.unroll.ch[i].rd;
- lim[1][i].end = ®s->type.unroll.ch[i].padding_2[0];
+ dw_edma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent);
}
}
-static void dw_edma_debugfs_regs(void)
+static void dw_edma_debugfs_regs(struct dw_edma *dw)
{
- const struct debugfs_entries debugfs_regs[] = {
- REGISTER(ctrl_data_arb_prior),
- REGISTER(ctrl),
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
+ REGISTER(dw, ctrl_data_arb_prior),
+ REGISTER(dw, ctrl),
};
- struct dentry *regs_dir;
+ struct dentry *regs_dent;
int nr_entries;
- regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
- dw_edma_debugfs_regs_wr(regs_dir);
- dw_edma_debugfs_regs_rd(regs_dir);
+ dw_edma_debugfs_regs_wr(dw, regs_dent);
+ dw_edma_debugfs_regs_rd(dw, regs_dent);
}
-void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
+void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
- dw = _dw;
- if (!dw)
- return;
-
- regs = dw->chip->reg_base;
- if (!regs)
- return;
-
- dw->debugfs = debugfs_create_dir(dw->name, NULL);
- if (!dw->debugfs)
+ if (!debugfs_initialized())
return;
- debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
- debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
- debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
-
- dw_edma_debugfs_regs();
-}
-
-void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
-{
- dw = _dw;
- if (!dw)
- return;
+ debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf);
+ debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt);
+ debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt);
- debugfs_remove_recursive(dw->debugfs);
- dw->debugfs = NULL;
+ dw_edma_debugfs_regs(dw);
}
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma *dw);
-void dw_edma_v0_debugfs_off(struct dw_edma *dw);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
}
-
-static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
-{
-}
#endif /* CONFIG_DEBUG_FS */
#endif /* _DW_EDMA_V0_DEBUG_FS_H */
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
- depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || ARCH_MXC
help
Support for error detection and correction on the Synopsys DDR
memory controller.
Support for error detection and correction on the
SoCs with ARM DMC-520 DRAM controller.
+config EDAC_ZYNQ
+ tristate "Xilinx Zynq A05 DDR Memory Controller"
+ depends on ARCH_ZYNQ || COMPILE_TEST
+ help
+ Support for error detection and correction on the Xilinx Zynq A05
+ DDR memory controller.
+
endif # EDAC
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
+obj-$(CONFIG_EDAC_ZYNQ) += zynq_edac.o
int masks[NUMBER_OF_IRQS];
};
-static int dmc520_mc_idx;
-
static u32 dmc520_read_reg(struct dmc520_edac *pvt, u32 offset)
{
return readl(pvt->reg_base + offset);
layers[0].size = dmc520_get_rank_count(reg_base);
layers[0].is_virt_csrow = true;
- mci = edac_mc_alloc(dmc520_mc_idx++, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+ mci = edac_mc_alloc(EDAC_AUTO_MC_NUM, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"Failed to allocate memory for mc instance\n");
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
+#include <linux/idr.h>
+#include <linux/of.h>
+
#include <asm/page.h>
#include "edac_mc.h"
#include "edac_module.h"
/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
+static DEFINE_IDR(mc_idr);
/*
* Used to lock EDAC MC to just one module, avoiding two drivers e. g.
[MEM_RDR] = "Registered-SDR",
[MEM_DDR] = "Unbuffered-DDR",
[MEM_RDDR] = "Registered-DDR",
+ [MEM_LPDDR] = "Low-Power-(m)DDR-RAM",
[MEM_RMBS] = "RMBS",
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
[MEM_RDDR2] = "Registered-DDR2",
+ [MEM_LPDDR2] = "Low-Power-DDR2-RAM",
[MEM_XDR] = "XDR",
[MEM_DDR3] = "Unbuffered-DDR3",
[MEM_RDDR3] = "Registered-DDR3",
unsigned int pos[EDAC_MAX_LAYERS];
unsigned int row, chn, idx;
int layer;
- void *p;
/*
* Allocate and fill the dimm structs
for (idx = 0; idx < mci->tot_dimms; idx++) {
struct dimm_info *dimm;
struct rank_info *chan;
- int n, len;
chan = mci->csrows[row]->channels[chn];
dimm->mci = mci;
dimm->idx = idx;
- /*
- * Copy DIMM location and initialize it.
- */
- len = sizeof(dimm->label);
- p = dimm->label;
- n = scnprintf(p, len, "mc#%u", mci->mc_idx);
- p += n;
- len -= n;
- for (layer = 0; layer < mci->n_layers; layer++) {
- n = scnprintf(p, len, "%s#%u",
- edac_layer_name[mci->layers[layer].type],
- pos[layer]);
- p += n;
- len -= n;
+ /* Copy DIMM location */
+ for (layer = 0; layer < mci->n_layers; layer++)
dimm->location[layer] = pos[layer];
- }
/* Link it to the csrows old API data */
chan->dimm = dimm;
mutex_unlock(&mem_ctls_mutex);
}
+/**
+ * edac_mc_alloc_id() - Allocate unique Memory Controller identifier
+ *
+ * @mci: pointer to the mci structure to allocate ID for
+ *
+ * Use edac_mc_free_id() to coherently free the MC identifier.
+ *
+ * .. note::
+ * locking model: must be called with the mem_ctls_mutex lock held
+ *
+ * Returns:
+ * 0 on success, or an error code on failure.
+ */
+static int edac_mc_alloc_id(struct mem_ctl_info *mci)
+{
+ struct device_node *np = dev_of_node(mci->pdev);
+ int ret, min, max;
+
+ if (mci->mc_idx == EDAC_AUTO_MC_NUM) {
+ ret = of_alias_get_id(np, "mc");
+ if (ret >= 0) {
+ min = ret;
+ max = ret + 1;
+ } else {
+ min = of_alias_get_highest_id("mc");
+ if (min >= 0)
+ min++;
+ else
+ min = 0;
+
+ max = 0;
+ }
+ } else {
+ min = mci->mc_idx;
+ max = mci->mc_idx + 1;
+ }
+
+ ret = idr_alloc(&mc_idr, mci, min, max, GFP_KERNEL);
+ if (ret < 0)
+ return ret == -ENOSPC ? -EBUSY : ret;
+
+ mci->mc_idx = ret;
+
+ return 0;
+}
+
+/**
+ * edac_mc_free_id() - Free Memory Controller identifier
+ *
+ * @mci: pointer to the mci structure to free ID from
+ *
+ * .. note::
+ * locking model: must be called with the mem_ctls_mutex lock held
+ */
+static void edac_mc_free_id(struct mem_ctl_info *mci)
+{
+ idr_remove(&mc_idr, mci->mc_idx);
+}
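A minimal usage sketch (hypothetical driver code, mirroring the pasemi/ppc4xx conversions below): passing EDAC_AUTO_MC_NUM lets the core derive the index from a "mc" devicetree alias, falling back to the lowest free IDR slot:

	struct edac_mc_layer layers[1];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;

	/* EDAC_AUTO_MC_NUM: ID comes from the "mc" alias or the IDR */
	mci = edac_mc_alloc(EDAC_AUTO_MC_NUM, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;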
+
+/**
+ * edac_mc_init_labels() - Initialize DIMM labels
+ *
+ * @mci: pointer to the mci structure which DIMM labels need to be initialized
+ *
+ * .. note::
+ * locking model: must be called with the mem_ctls_mutex lock held
+ */
+static void edac_mc_init_labels(struct mem_ctl_info *mci)
+{
+ int n, len, layer;
+ unsigned int idx;
+ char *p;
+
+ for (idx = 0; idx < mci->tot_dimms; idx++) {
+ len = sizeof(mci->dimms[idx]->label);
+ p = mci->dimms[idx]->label;
+
+ n = scnprintf(p, len, "mc#%u", mci->mc_idx);
+ for (layer = 0; layer < mci->n_layers; layer++) {
+ n += scnprintf(p + n, len - n, "%s#%u",
+ edac_layer_name[mci->layers[layer].type],
+ mci->dimms[idx]->location[layer]);
+ }
+ }
+}
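For instance, with EDAC_MC_LAYER_CHIP_SELECT and EDAC_MC_LAYER_CHANNEL layers, the DIMM at csrow 1, channel 0 of controller 0 gets the label "mc#0csrow#1channel#0" - the same format the removed per-allocation loop produced, just built after the final mc_idx is known.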
/* Return 0 on success, 1 on failure.
* Before calling this function, caller must
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
const struct attribute_group **groups)
{
- int ret = -EINVAL;
+ int ret;
+
edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
goto fail0;
}
- if (add_mc_to_global_list(mci))
+ ret = edac_mc_alloc_id(mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MC, "failed to allocate MC idx %u\n",
+ mci->mc_idx);
goto fail0;
+ }
+
+ edac_mc_init_labels(mci);
+
+ if (add_mc_to_global_list(mci)) {
+ ret = -EINVAL;
+ goto fail1;
+ }
/* set load time so that error rate can be tracked */
mci->start_time = jiffies;
mci->bus = edac_get_sysfs_subsys();
- if (edac_create_sysfs_mci_device(mci, groups)) {
+ ret = edac_create_sysfs_mci_device(mci, groups);
+ if (ret) {
edac_mc_printk(mci, KERN_WARNING,
"failed to create sysfs device\n");
- goto fail1;
+ goto fail2;
}
if (mci->edac_check) {
mutex_unlock(&mem_ctls_mutex);
return 0;
-fail1:
+fail2:
del_mc_from_global_list(mci);
+fail1:
+ edac_mc_free_id(mci);
+
fail0:
mutex_unlock(&mem_ctls_mutex);
return ret;
if (del_mc_from_global_list(mci))
edac_mc_owner = NULL;
+ edac_mc_free_id(mci);
+
mutex_unlock(&mem_ctls_mutex);
if (mci->edac_check)
#define _EDAC_MC_H_
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/edac.h>
+/* Generate MC identifier automatically */
+#define EDAC_AUTO_MC_NUM UINT_MAX
+
#if PAGE_SHIFT < 20
#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
#define PASEMI_EDAC_ERROR_GRAIN 64
static int last_page_in_mmc;
-static int system_mmc_id;
-
static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
{
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = PASEMI_EDAC_NR_CHANS;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
- 0);
+ mci = edac_mc_alloc(EDAC_AUTO_MC_NUM, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
const struct device_node *np = op->dev.of_node;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- static int ppc4xx_edac_instance;
/*
* At this point, we only support the controller realized on
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = ppc4xx_edac_nr_chans;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
+ mci = edac_mc_alloc(EDAC_AUTO_MC_NUM, ARRAY_SIZE(layers), layers,
sizeof(struct ppc4xx_edac_pdata));
if (mci == NULL) {
ppc4xx_edac_printk(KERN_ERR, "%pOF: "
goto fail1;
}
- ppc4xx_edac_instance++;
-
return 0;
fail1:
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Synopsys DDR ECC Driver
+ * Synopsys DW uMCTL2 DDR ECC Driver
* This driver is based on ppc4xx_edac.c drivers
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/edac.h>
+#include <linux/fs.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
#include <linux/module.h>
+#include <linux/pfn.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <linux/units.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include "edac_module.h"
-/* Number of cs_rows needed per memory controller */
-#define SYNPS_EDAC_NR_CSROWS 1
-
/* Number of channels per memory controller */
-#define SYNPS_EDAC_NR_CHANS 1
-
-/* Granularity of reported error in bytes */
-#define SYNPS_EDAC_ERR_GRAIN 1
-
-#define SYNPS_EDAC_MSG_SIZE 256
-
-#define SYNPS_EDAC_MOD_STRING "synps_edac"
-#define SYNPS_EDAC_MOD_VER "1"
-
-/* Synopsys DDR memory controller registers that are relevant to ECC */
-#define CTRL_OFST 0x0
-#define T_ZQ_OFST 0xA4
-
-/* ECC control register */
-#define ECC_CTRL_OFST 0xC4
-/* ECC log register */
-#define CE_LOG_OFST 0xC8
-/* ECC address register */
-#define CE_ADDR_OFST 0xCC
-/* ECC data[31:0] register */
-#define CE_DATA_31_0_OFST 0xD0
-
-/* Uncorrectable error info registers */
-#define UE_LOG_OFST 0xDC
-#define UE_ADDR_OFST 0xE0
-#define UE_DATA_31_0_OFST 0xE4
-
-#define STAT_OFST 0xF0
-#define SCRUB_OFST 0xF4
-
-/* Control register bit field definitions */
-#define CTRL_BW_MASK 0xC
-#define CTRL_BW_SHIFT 2
-
-#define DDRCTL_WDTH_16 1
-#define DDRCTL_WDTH_32 0
-
-/* ZQ register bit field definitions */
-#define T_ZQ_DDRMODE_MASK 0x2
-
-/* ECC control register bit field definitions */
-#define ECC_CTRL_CLR_CE_ERR 0x2
-#define ECC_CTRL_CLR_UE_ERR 0x1
-
-/* ECC correctable/uncorrectable error log register definitions */
-#define LOG_VALID 0x1
-#define CE_LOG_BITPOS_MASK 0xFE
-#define CE_LOG_BITPOS_SHIFT 1
-
-/* ECC correctable/uncorrectable error address register definitions */
-#define ADDR_COL_MASK 0xFFF
-#define ADDR_ROW_MASK 0xFFFF000
-#define ADDR_ROW_SHIFT 12
-#define ADDR_BANK_MASK 0x70000000
-#define ADDR_BANK_SHIFT 28
-
-/* ECC statistic register definitions */
-#define STAT_UECNT_MASK 0xFF
-#define STAT_CECNT_MASK 0xFF00
-#define STAT_CECNT_SHIFT 8
-
-/* ECC scrub register definitions */
-#define SCRUB_MODE_MASK 0x7
-#define SCRUB_MODE_SECDED 0x4
-
-/* DDR ECC Quirks */
-#define DDR_ECC_INTR_SUPPORT BIT(0)
-#define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
-#define DDR_ECC_INTR_SELF_CLEAR BIT(2)
-
-/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
+#define SNPS_EDAC_NR_CHANS 1
+
+#define SNPS_EDAC_MSG_SIZE 256
+
+#define SNPS_EDAC_MOD_STRING "snps_edac"
+#define SNPS_EDAC_MOD_VER "1"
+
+/* DDR capabilities */
+#define SNPS_CAP_ECC_SCRUB BIT(0)
+#define SNPS_CAP_ECC_SCRUBBER BIT(1)
+#define SNPS_CAP_ZYNQMP BIT(31)
+
+/* Synopsys uMCTL2 DDR controller registers that are relevant to ECC */
+
+/* DDRC Master 0 Register */
+#define DDR_MSTR_OFST 0x0
+
/* ECC Configuration Registers */
#define ECC_CFG0_OFST 0x70
#define ECC_CFG1_OFST 0x74
#define ECC_POISON0_OFST 0xB8
#define ECC_POISON1_OFST 0xBC
-#define ECC_ADDRMAP0_OFFSET 0x200
+/* DDR CRC/Parity Registers */
+#define DDR_CRCPARCTL0_OFST 0xC0
+#define DDR_CRCPARCTL1_OFST 0xC4
+#define DDR_CRCPARCTL2_OFST 0xC8
+#define DDR_CRCPARSTAT_OFST 0xCC
+
+/* DDR Address Map Registers */
+#define DDR_ADDRMAP0_OFST 0x200
+
+/* DDR Software Control Register */
+#define DDR_SWCTL 0x320
+
+/* ECC Poison Pattern Registers */
+#define ECC_POISONPAT0_OFST 0x37C
+#define ECC_POISONPAT1_OFST 0x380
+#define ECC_POISONPAT2_OFST 0x384
+
+/* DDR SAR Registers */
+#define DDR_SARBASE0_OFST 0xF04
+#define DDR_SARSIZE0_OFST 0xF08
+
+/* ECC Scrubber Registers */
+#define ECC_SBRCTL_OFST 0xF24
+#define ECC_SBRSTAT_OFST 0xF28
+#define ECC_SBRWDATA0_OFST 0xF2C
+#define ECC_SBRWDATA1_OFST 0xF30
+
+/* ZynqMP DDR QOS Registers */
+#define ZYNQMP_DDR_QOS_IRQ_STAT_OFST 0x20200
+#define ZYNQMP_DDR_QOS_IRQ_EN_OFST 0x20208
+#define ZYNQMP_DDR_QOS_IRQ_DB_OFST 0x2020C
+
+/* DDR Master register definitions */
+#define DDR_MSTR_DEV_CFG_MASK GENMASK(31, 30)
+#define DDR_MSTR_DEV_X4 0
+#define DDR_MSTR_DEV_X8 1
+#define DDR_MSTR_DEV_X16 2
+#define DDR_MSTR_DEV_X32 3
+#define DDR_MSTR_ACT_RANKS_MASK GENMASK(27, 24)
+#define DDR_MSTR_FREQ_RATIO11 BIT(22)
+#define DDR_MSTR_BURST_RDWR GENMASK(19, 16)
+#define DDR_MSTR_BUSWIDTH_MASK GENMASK(13, 12)
+#define DDR_MSTR_MEM_MASK GENMASK(5, 0)
+#define DDR_MSTR_MEM_LPDDR4 BIT(5)
+#define DDR_MSTR_MEM_DDR4 BIT(4)
+#define DDR_MSTR_MEM_LPDDR3 BIT(3)
+#define DDR_MSTR_MEM_LPDDR2 BIT(2)
+#define DDR_MSTR_MEM_LPDDR BIT(1)
+#define DDR_MSTR_MEM_DDR3 BIT(0)
+#define DDR_MSTR_MEM_DDR2 0
+
+/* ECC CFG0 register definitions */
+#define ECC_CFG0_DIS_SCRUB BIT(4)
+#define ECC_CFG0_MODE_MASK GENMASK(2, 0)
+
+/* ECC CFG1 register definitions */
+#define ECC_CFG1_POISON_BIT BIT(1)
+#define ECC_CFG1_POISON_EN BIT(0)
+
+/* ECC status register definitions */
+#define ECC_STAT_UE_MASK GENMASK(23, 16)
+#define ECC_STAT_CE_MASK GENMASK(15, 8)
+#define ECC_STAT_BITNUM_MASK GENMASK(6, 0)
-/* Control register bitfield definitions */
-#define ECC_CTRL_BUSWIDTH_MASK 0x3000
-#define ECC_CTRL_BUSWIDTH_SHIFT 12
+/* ECC control/clear register definitions */
+#define ECC_CTRL_CLR_CE_ERR BIT(0)
+#define ECC_CTRL_CLR_UE_ERR BIT(1)
#define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
#define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
-
-/* DDR Control Register width definitions */
-#define DDRCTL_EWDTH_16 2
-#define DDRCTL_EWDTH_32 1
-#define DDRCTL_EWDTH_64 0
-
-/* ECC status register definitions */
-#define ECC_STAT_UECNT_MASK 0xF0000
-#define ECC_STAT_UECNT_SHIFT 16
-#define ECC_STAT_CECNT_MASK 0xF00
-#define ECC_STAT_CECNT_SHIFT 8
-#define ECC_STAT_BITNUM_MASK 0x7F
+#define ECC_CTRL_EN_CE_IRQ BIT(8)
+#define ECC_CTRL_EN_UE_IRQ BIT(9)
/* ECC error count register definitions */
-#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
-#define ECC_ERRCNT_UECNT_SHIFT 16
-#define ECC_ERRCNT_CECNT_MASK 0xFFFF
-
-/* DDR QOS Interrupt register definitions */
-#define DDR_QOS_IRQ_STAT_OFST 0x20200
-#define DDR_QOSUE_MASK 0x4
-#define DDR_QOSCE_MASK 0x2
-#define ECC_CE_UE_INTR_MASK 0x6
-#define DDR_QOS_IRQ_EN_OFST 0x20208
-#define DDR_QOS_IRQ_DB_OFST 0x2020C
-
-/* DDR QOS Interrupt register definitions */
-#define DDR_UE_MASK BIT(9)
-#define DDR_CE_MASK BIT(8)
-
-/* ECC Corrected Error Register Mask and Shifts*/
-#define ECC_CEADDR0_RW_MASK 0x3FFFF
-#define ECC_CEADDR0_RNK_MASK BIT(24)
-#define ECC_CEADDR1_BNKGRP_MASK 0x3000000
-#define ECC_CEADDR1_BNKNR_MASK 0x70000
-#define ECC_CEADDR1_BLKNR_MASK 0xFFF
-#define ECC_CEADDR1_BNKGRP_SHIFT 24
-#define ECC_CEADDR1_BNKNR_SHIFT 16
-
-/* ECC Poison register shifts */
-#define ECC_POISON0_RANK_SHIFT 24
-#define ECC_POISON0_RANK_MASK BIT(24)
-#define ECC_POISON0_COLUMN_SHIFT 0
-#define ECC_POISON0_COLUMN_MASK 0xFFF
-#define ECC_POISON1_BG_SHIFT 28
-#define ECC_POISON1_BG_MASK 0x30000000
-#define ECC_POISON1_BANKNR_SHIFT 24
-#define ECC_POISON1_BANKNR_MASK 0x7000000
-#define ECC_POISON1_ROW_SHIFT 0
-#define ECC_POISON1_ROW_MASK 0x3FFFF
-
-/* DDR Memory type defines */
-#define MEM_TYPE_DDR3 0x1
-#define MEM_TYPE_LPDDR3 0x8
-#define MEM_TYPE_DDR2 0x4
-#define MEM_TYPE_DDR4 0x10
-#define MEM_TYPE_LPDDR4 0x20
-
-/* DDRC Software control register */
-#define DDRC_SWCTL 0x320
-
-/* DDRC ECC CE & UE poison mask */
-#define ECC_CEPOISON_MASK 0x3
-#define ECC_UEPOISON_MASK 0x1
-
-/* DDRC Device config masks */
-#define DDRC_MSTR_CFG_MASK 0xC0000000
-#define DDRC_MSTR_CFG_SHIFT 30
-#define DDRC_MSTR_CFG_X4_MASK 0x0
-#define DDRC_MSTR_CFG_X8_MASK 0x1
-#define DDRC_MSTR_CFG_X16_MASK 0x2
-#define DDRC_MSTR_CFG_X32_MASK 0x3
-
-#define DDR_MAX_ROW_SHIFT 18
-#define DDR_MAX_COL_SHIFT 14
-#define DDR_MAX_BANK_SHIFT 3
-#define DDR_MAX_BANKGRP_SHIFT 2
-
-#define ROW_MAX_VAL_MASK 0xF
-#define COL_MAX_VAL_MASK 0xF
-#define BANK_MAX_VAL_MASK 0x1F
-#define BANKGRP_MAX_VAL_MASK 0x1F
-#define RANK_MAX_VAL_MASK 0x1F
+#define ECC_ERRCNT_UECNT_MASK GENMASK(31, 16)
+#define ECC_ERRCNT_CECNT_MASK GENMASK(15, 0)
+
+/* ECC Corrected Error register definitions */
+#define ECC_CEADDR0_RANK_MASK GENMASK(27, 24)
+#define ECC_CEADDR0_ROW_MASK GENMASK(17, 0)
+#define ECC_CEADDR1_BANKGRP_MASK GENMASK(25, 24)
+#define ECC_CEADDR1_BANK_MASK GENMASK(23, 16)
+#define ECC_CEADDR1_COL_MASK GENMASK(11, 0)
+
+/* DDR CRC/Parity register definitions */
+#define DDR_CRCPARCTL0_CLR_ALRT_ERRCNT BIT(2)
+#define DDR_CRCPARCTL0_CLR_ALRT_ERR BIT(1)
+#define DDR_CRCPARCTL0_EN_ALRT_IRQ BIT(0)
+#define DDR_CRCPARSTAT_ALRT_ERR BIT(16)
+#define DDR_CRCPARSTAT_ALRT_CNT_MASK GENMASK(15, 0)
+
+/* ECC Poison register definitions */
+#define ECC_POISON0_RANK_MASK GENMASK(27, 24)
+#define ECC_POISON0_COL_MASK GENMASK(11, 0)
+#define ECC_POISON1_BANKGRP_MASK GENMASK(29, 28)
+#define ECC_POISON1_BANK_MASK GENMASK(26, 24)
+#define ECC_POISON1_ROW_MASK GENMASK(17, 0)
+
+/* DDRC address mapping parameters */
+#define DDR_ADDRMAP_NREGS 12
+
+#define DDR_MAX_HIF_WIDTH 60
+#define DDR_MAX_ROW_WIDTH 18
+#define DDR_MAX_COL_WIDTH 14
+#define DDR_MAX_BANK_WIDTH 3
+#define DDR_MAX_BANKGRP_WIDTH 2
+#define DDR_MAX_RANK_WIDTH 2
+
+#define DDR_ADDRMAP_B0_M15 GENMASK(3, 0)
+#define DDR_ADDRMAP_B8_M15 GENMASK(11, 8)
+#define DDR_ADDRMAP_B16_M15 GENMASK(19, 16)
+#define DDR_ADDRMAP_B24_M15 GENMASK(27, 24)
+
+#define DDR_ADDRMAP_B0_M31 GENMASK(4, 0)
+#define DDR_ADDRMAP_B8_M31 GENMASK(12, 8)
+#define DDR_ADDRMAP_B16_M31 GENMASK(20, 16)
+#define DDR_ADDRMAP_B24_M31 GENMASK(28, 24)
+
+#define DDR_ADDRMAP_UNUSED ((u8)-1)
+#define DDR_ADDRMAP_MAX_15 DDR_ADDRMAP_B0_M15
+#define DDR_ADDRMAP_MAX_31 DDR_ADDRMAP_B0_M31
#define ROW_B0_BASE 6
#define ROW_B1_BASE 7
#define BANKGRP_B1_BASE 3
#define RANK_B0_BASE 6
+#define RANK_B1_BASE 7
+
+/* DDRC System Address parameters */
+#define DDR_MAX_NSAR 4
+#define DDR_MIN_SARSIZE SZ_256M
+
+/* ECC Scrubber registers definitions */
+#define ECC_SBRCTL_SCRUB_INTERVAL GENMASK(20, 8)
+#define ECC_SBRCTL_INTERVAL_STEP 512
+#define ECC_SBRCTL_INTERVAL_MIN 0
+#define ECC_SBRCTL_INTERVAL_SAFE 1
+#define ECC_SBRCTL_INTERVAL_MAX FIELD_MAX(ECC_SBRCTL_SCRUB_INTERVAL)
+#define ECC_SBRCTL_SCRUB_BURST GENMASK(6, 4)
+#define ECC_SBRCTL_SCRUB_MODE_WR BIT(2)
+#define ECC_SBRCTL_SCRUB_EN BIT(0)
+#define ECC_SBRSTAT_SCRUB_DONE BIT(1)
+#define ECC_SBRSTAT_SCRUB_BUSY BIT(0)
+
+/* ZynqMP DDR QOS Interrupt register definitions */
+#define ZYNQMP_DDR_QOS_UE_MASK BIT(2)
+#define ZYNQMP_DDR_QOS_CE_MASK BIT(1)
/**
- * struct ecc_error_info - ECC error log information.
- * @row: Row number.
- * @col: Column number.
- * @bank: Bank number.
- * @bitpos: Bit position.
- * @data: Data causing the error.
- * @bankgrpnr: Bank group number.
- * @blknr: Block number.
+ * enum snps_dq_width - SDRAM DQ bus width (ECC capable).
+ * @SNPS_DQ_32: 32-bit memory data width.
+ * @SNPS_DQ_64: 64-bit memory data width.
*/
-struct ecc_error_info {
- u32 row;
- u32 col;
- u32 bank;
- u32 bitpos;
- u32 data;
- u32 bankgrpnr;
- u32 blknr;
+enum snps_dq_width {
+ SNPS_DQ_32 = 2,
+ SNPS_DQ_64 = 3,
};
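The enum values are log2 of the DQ bus width in bytes, so the width in bits is recovered as 8 << dq_width (32 for SNPS_DQ_32, 64 for SNPS_DQ_64), and a byte address converts to a DQ-word address with a plain addr >> dq_width shift - exactly what snps_map_app_to_hif() below relies on.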
/**
- * struct synps_ecc_status - ECC status information to report.
- * @ce_cnt: Correctable error count.
- * @ue_cnt: Uncorrectable error count.
- * @ceinfo: Correctable error log information.
- * @ueinfo: Uncorrectable error log information.
+ * enum snps_dq_mode - SDRAM DQ bus mode.
+ * @SNPS_DQ_FULL: Full DQ bus width.
+ * @SNPS_DQ_HALF: Half DQ bus width.
+ * @SNPS_DQ_QRTR: Quarter DQ bus width.
*/
-struct synps_ecc_status {
- u32 ce_cnt;
- u32 ue_cnt;
- struct ecc_error_info ceinfo;
- struct ecc_error_info ueinfo;
+enum snps_dq_mode {
+ SNPS_DQ_FULL = 0,
+ SNPS_DQ_HALF = 1,
+ SNPS_DQ_QRTR = 2,
};
/**
- * struct synps_edac_priv - DDR memory controller private instance data.
- * @baseaddr: Base address of the DDR controller.
- * @message: Buffer for framing the event specific info.
- * @stat: ECC status information.
- * @p_data: Platform data.
- * @ce_cnt: Correctable Error count.
- * @ue_cnt: Uncorrectable Error count.
- * @poison_addr: Data poison address.
- * @row_shift: Bit shifts for row bit.
- * @col_shift: Bit shifts for column bit.
- * @bank_shift: Bit shifts for bank bit.
- * @bankgrp_shift: Bit shifts for bank group bit.
- * @rank_shift: Bit shifts for rank bit.
+ * enum snps_burst_length - HIF/SDRAM burst transactions length.
+ * @SNPS_DDR_BL2: Burst length 2xSDRAM-words.
+ * @SNPS_DDR_BL4: Burst length 4xSDRAM-words.
+ * @SNPS_DDR_BL8: Burst length 8xSDRAM-words.
+ * @SNPS_DDR_BL16: Burst length 16xSDRAM-words.
*/
-struct synps_edac_priv {
- void __iomem *baseaddr;
- char message[SYNPS_EDAC_MSG_SIZE];
- struct synps_ecc_status stat;
- const struct synps_platform_data *p_data;
- u32 ce_cnt;
- u32 ue_cnt;
-#ifdef CONFIG_EDAC_DEBUG
- ulong poison_addr;
- u32 row_shift[18];
- u32 col_shift[14];
- u32 bank_shift[3];
- u32 bankgrp_shift[2];
- u32 rank_shift[1];
-#endif
+enum snps_burst_length {
+ SNPS_DDR_BL2 = 2,
+ SNPS_DDR_BL4 = 4,
+ SNPS_DDR_BL8 = 8,
+ SNPS_DDR_BL16 = 16,
};
/**
- * struct synps_platform_data - synps platform data structure.
- * @get_error_info: Get EDAC error info.
- * @get_mtype: Get mtype.
- * @get_dtype: Get dtype.
- * @get_ecc_state: Get ECC state.
- * @quirks: To differentiate IPs.
+ * enum snps_freq_ratio - HIF:SDRAM frequency ratio mode.
+ * @SNPS_FREQ_RATIO11: 1:1 frequency mode.
+ * @SNPS_FREQ_RATIO12: 1:2 frequency mode.
*/
-struct synps_platform_data {
- int (*get_error_info)(struct synps_edac_priv *priv);
- enum mem_type (*get_mtype)(const void __iomem *base);
- enum dev_type (*get_dtype)(const void __iomem *base);
- bool (*get_ecc_state)(void __iomem *base);
- int quirks;
+enum snps_freq_ratio {
+ SNPS_FREQ_RATIO11 = 1,
+ SNPS_FREQ_RATIO12 = 2,
};
/**
- * zynq_get_error_info - Get the current ECC error info.
- * @priv: DDR memory controller private instance data.
- *
- * Return: one if there is no error, otherwise zero.
+ * enum snps_ecc_mode - ECC mode.
+ * @SNPS_ECC_DISABLED: ECC is disabled/unavailable.
+ * @SNPS_ECC_SECDED: SEC/DED over 1 beat ECC (SideBand/Inline).
+ * @SNPS_ECC_ADVX4X8: Advanced ECC X4/X8 (SideBand).
*/
-static int zynq_get_error_info(struct synps_edac_priv *priv)
-{
- struct synps_ecc_status *p;
- u32 regval, clearval = 0;
- void __iomem *base;
-
- base = priv->baseaddr;
- p = &priv->stat;
+enum snps_ecc_mode {
+ SNPS_ECC_DISABLED = 0,
+ SNPS_ECC_SECDED = 4,
+ SNPS_ECC_ADVX4X8 = 5,
+};
- regval = readl(base + STAT_OFST);
- if (!regval)
- return 1;
+/**
+ * enum snps_ref_clk - DW uMCTL2 DDR controller clocks.
+ * @SNPS_CSR_CLK: CSR/APB interface clock.
+ * @SNPS_AXI_CLK: AXI (AHB) Port reference clock.
+ * @SNPS_CORE_CLK: DDR controller (including DFI) clock. The SDRAM clock
+ * runs at this frequency in 1:1 ratio mode and at twice
+ * this frequency in 1:2 ratio mode.
+ * @SNPS_SBR_CLK: Scrubber port reference clock (synchronous to
+ * the core clock).
+ * @SNPS_MAX_NCLK: Total number of clocks.
+ */
+enum snps_ref_clk {
+ SNPS_CSR_CLK,
+ SNPS_AXI_CLK,
+ SNPS_CORE_CLK,
+ SNPS_SBR_CLK,
+ SNPS_MAX_NCLK
+};
- p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
- p->ue_cnt = regval & STAT_UECNT_MASK;
-
- regval = readl(base + CE_LOG_OFST);
- if (!(p->ce_cnt && (regval & LOG_VALID)))
- goto ue_err;
-
- p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
- regval = readl(base + CE_ADDR_OFST);
- p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
- p->ceinfo.col = regval & ADDR_COL_MASK;
- p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
- p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
- edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
- p->ceinfo.data);
- clearval = ECC_CTRL_CLR_CE_ERR;
-
-ue_err:
- regval = readl(base + UE_LOG_OFST);
- if (!(p->ue_cnt && (regval & LOG_VALID)))
- goto out;
-
- regval = readl(base + UE_ADDR_OFST);
- p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
- p->ueinfo.col = regval & ADDR_COL_MASK;
- p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
- p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
- clearval |= ECC_CTRL_CLR_UE_ERR;
-
-out:
- writel(clearval, base + ECC_CTRL_OFST);
- writel(0x0, base + ECC_CTRL_OFST);
+/**
+ * struct snps_ddrc_info - DDR controller platform parameters.
+ * @caps: DDR controller capabilities.
+ * @sdram_mode: Current SDRAM mode selected.
+ * @dev_cfg: Current memory device config (if applicable).
+ * @dq_width: Memory data bus width (width of the DQ signals
+ * connected to SDRAM chips).
+ * @dq_mode: Proportion of the DQ bus utilized to access SDRAM.
+ * @sdram_burst_len: SDRAM burst transaction length.
+ * @hif_burst_len: HIF burst transaction length (Host Interface).
+ * @freq_ratio: HIF/SDRAM frequency ratio mode.
+ * @ecc_mode: ECC mode enabled for the DDR controller (SEC/DED, etc).
+ * @ranks: Number of ranks enabled to access DIMM (1, 2 or 4).
+ */
+struct snps_ddrc_info {
+ unsigned int caps;
+ enum mem_type sdram_mode;
+ enum dev_type dev_cfg;
+ enum snps_dq_width dq_width;
+ enum snps_dq_mode dq_mode;
+ enum snps_burst_length sdram_burst_len;
+ enum snps_burst_length hif_burst_len;
+ enum snps_freq_ratio freq_ratio;
+ enum snps_ecc_mode ecc_mode;
+ unsigned int ranks;
+};
- return 0;
-}
+/**
+ * struct snps_sys_app_map - System/Application mapping table.
+ * @nsar: Number of SARs enabled on the controller (max 4).
+ * @minsize: Minimal block size (from 256MB to 32GB).
+ * @sar.base: SAR base address aligned to minsize.
+ * @sar.size: SAR size aligned to minsize.
+ * @sar.ofst: SAR address offset.
+ */
+struct snps_sys_app_map {
+ u8 nsar;
+ u64 minsize;
+ struct {
+ u64 base;
+ u64 size;
+ u64 ofst;
+ } sar[DDR_MAX_NSAR];
+};
/**
- * zynqmp_get_error_info - Get the current ECC error info.
- * @priv: DDR memory controller private instance data.
+ * struct snps_hif_sdram_map - HIF/SDRAM mapping table.
+ * @row: HIF bit offsets used as row address bits.
+ * @col: HIF bit offsets used as column address bits.
+ * @bank: HIF bit offsets used as bank address bits.
+ * @bankgrp: HIF bit offsets used as bank group address bits.
+ * @rank: HIF bit offsets used as rank address bits.
*
- * Return: one if there is no error otherwise returns zero.
+ * For example, row[0] = 6 means row bit #0 is encoded by the HIF
+ * address bit #6 and vice-versa.
*/
-static int zynqmp_get_error_info(struct synps_edac_priv *priv)
-{
- struct synps_ecc_status *p;
- u32 regval, clearval = 0;
- void __iomem *base;
-
- base = priv->baseaddr;
- p = &priv->stat;
-
- regval = readl(base + ECC_ERRCNT_OFST);
- p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
- p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
- if (!p->ce_cnt)
- goto ue_err;
+struct snps_hif_sdram_map {
+ u8 row[DDR_MAX_ROW_WIDTH];
+ u8 col[DDR_MAX_COL_WIDTH];
+ u8 bank[DDR_MAX_BANK_WIDTH];
+ u8 bankgrp[DDR_MAX_BANKGRP_WIDTH];
+ u8 rank[DDR_MAX_RANK_WIDTH];
+};
- regval = readl(base + ECC_STAT_OFST);
- if (!regval)
- return 1;
+/**
+ * struct snps_sdram_addr - SDRAM address.
+ * @row: Row number.
+ * @col: Column number.
+ * @bank: Bank number.
+ * @bankgrp: Bank group number.
+ * @rank: Rank number.
+ */
+struct snps_sdram_addr {
+ u16 row;
+ u16 col;
+ u8 bank;
+ u8 bankgrp;
+ u8 rank;
+};
- p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
-
- regval = readl(base + ECC_CEADDR0_OFST);
- p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
- regval = readl(base + ECC_CEADDR1_OFST);
- p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
- ECC_CEADDR1_BNKNR_SHIFT;
- p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
- ECC_CEADDR1_BNKGRP_SHIFT;
- p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
- p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
- edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
- readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
- readl(base + ECC_CSYND2_OFST));
-ue_err:
- if (!p->ue_cnt)
- goto out;
-
- regval = readl(base + ECC_UEADDR0_OFST);
- p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
- regval = readl(base + ECC_UEADDR1_OFST);
- p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
- ECC_CEADDR1_BNKGRP_SHIFT;
- p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
- ECC_CEADDR1_BNKNR_SHIFT;
- p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
- p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
-out:
- clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
- clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
- writel(clearval, base + ECC_CLR_OFST);
- writel(0x0, base + ECC_CLR_OFST);
+/**
+ * struct snps_ecc_error_info - ECC error log information.
+ * @ecnt: Number of detected errors.
+ * @sdram: SDRAM address.
+ * @syndrome: Error syndrome.
+ * @bitpos: Bit position.
+ * @data: Data causing the error.
+ * @ecc: Data ECC.
+ */
+struct snps_ecc_error_info {
+ u16 ecnt;
+ struct snps_sdram_addr sdram;
+ u32 syndrome;
+ u32 bitpos;
+ u64 data;
+ u32 ecc;
+};
- return 0;
-}
+/**
+ * struct snps_edac_priv - DDR memory controller private data.
+ * @info: DDR controller config info.
+ * @sys_app_map: Sys/App mapping table.
+ * @hif_sdram_map: HIF/SDRAM mapping table.
+ * @pdev: Platform device.
+ * @baseaddr: Base address of the DDR controller.
+ * @reglock: Concurrent CSRs access lock.
+ * @clks: Controller reference clocks.
+ * @message: Buffer for framing the event specific info.
+ */
+struct snps_edac_priv {
+ struct snps_ddrc_info info;
+ struct snps_sys_app_map sys_app_map;
+ struct snps_hif_sdram_map hif_sdram_map;
+ struct platform_device *pdev;
+ void __iomem *baseaddr;
+ spinlock_t reglock;
+ struct clk_bulk_data clks[SNPS_MAX_NCLK];
+ char message[SNPS_EDAC_MSG_SIZE];
+};
/**
- * handle_error - Handle Correctable and Uncorrectable errors.
- * @mci: EDAC memory controller instance.
- * @p: Synopsys ECC status structure.
+ * snps_map_sys_to_app - Map System address to Application address.
+ * @priv: DDR memory controller private instance data.
+ * @sys: System address (source).
+ * @app: Application address (destination).
+ *
+ * System address space is used to define disjoint memory regions that are
+ * then mapped to the contiguous application memory space:
*
- * Handles ECC correctable and uncorrectable errors.
+ * System Address Space (SAR) <-> Application Address Space
+ * +------+ +------+
+ * | SAR0 |----------------------->| Reg0 |
+ * +------+ -offset +------+
+ * | ... | +----------->| Reg1 |
+ * +------+ | +------+
+ * | SAR1 |-----------+ | ... |
+ * +------+
+ * | ... |
+ *
+ * The translation is done by applying the corresponding SAR offset
+ * to the inbound system address. Note that, according to the hardware
+ * reference manual, the same mapping is applied to the addresses up to
+ * the next SAR base address irrespective of the region size.
*/
-static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
+static void snps_map_sys_to_app(struct snps_edac_priv *priv,
+ dma_addr_t sys, u64 *app)
{
- struct synps_edac_priv *priv = mci->pvt_info;
- struct ecc_error_info *pinf;
-
- if (p->ce_cnt) {
- pinf = &p->ceinfo;
- if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
- "CE", pinf->row, pinf->bank,
- pinf->bankgrpnr, pinf->blknr,
- pinf->bitpos, pinf->data);
- } else {
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
- "CE", pinf->row, pinf->bank, pinf->col,
- pinf->bitpos, pinf->data);
- }
-
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
- p->ce_cnt, 0, 0, 0, 0, 0, -1,
- priv->message, "");
- }
+ struct snps_sys_app_map *map = &priv->sys_app_map;
+ u64 ofst;
+ int i;
- if (p->ue_cnt) {
- pinf = &p->ueinfo;
- if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
- "UE", pinf->row, pinf->bank,
- pinf->bankgrpnr, pinf->blknr);
- } else {
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d Col %d ",
- "UE", pinf->row, pinf->bank, pinf->col);
- }
+ ofst = 0;
+ for (i = 0; i < map->nsar; i++) {
+ if (sys < map->sar[i].base)
+ break;
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
- p->ue_cnt, 0, 0, 0, 0, 0, -1,
- priv->message, "");
+ ofst = map->sar[i].ofst;
}
- memset(p, 0, sizeof(*p));
-}
-
-static void enable_intr(struct synps_edac_priv *priv)
-{
- /* Enable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(DDR_UE_MASK | DDR_CE_MASK,
- priv->baseaddr + ECC_CLR_OFST);
- else
- writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
- priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
-
-}
-
-static void disable_intr(struct synps_edac_priv *priv)
-{
- /* Disable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(0x0, priv->baseaddr + ECC_CLR_OFST);
- else
- writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
- priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
+ *app = sys - ofst;
}
/**
- * intr_handler - Interrupt Handler for ECC interrupts.
- * @irq: IRQ number.
- * @dev_id: Device ID.
+ * snps_map_app_to_sys - Map Application address to System address.
+ * @priv: DDR memory controller private instance data.
+ * @app: Application address (source).
+ * @sys: System address (destination).
*
- * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
+ * Backward App-to-sys translation is easier because the application address
+ * space is contiguous. So we just need to add the offset corresponding
+ * to the region the passed address belongs to. Note the last region offset
+ * is applied to all the addresses above the last available region.
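+ *
+ * Continuing the hypothetical example above, the application address
+ * 0x10001000 falls into the second region, so adding its 0x30000000
+ * offset back yields the system address 0x40001000.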
*/
-static irqreturn_t intr_handler(int irq, void *dev_id)
+static void snps_map_app_to_sys(struct snps_edac_priv *priv,
+ u64 app, dma_addr_t *sys)
{
- const struct synps_platform_data *p_data;
- struct mem_ctl_info *mci = dev_id;
- struct synps_edac_priv *priv;
- int status, regval;
-
- priv = mci->pvt_info;
- p_data = priv->p_data;
-
- /*
- * v3.0 of the controller has the ce/ue bits cleared automatically,
- * so this condition does not apply.
- */
- if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
- regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
- regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
- if (!(regval & ECC_CE_UE_INTR_MASK))
- return IRQ_NONE;
+ struct snps_sys_app_map *map = &priv->sys_app_map;
+ u64 ofst, size;
+ int i;
+
+ ofst = 0;
+ for (i = 0, size = 0; i < map->nsar; i++) {
+ ofst = map->sar[i].ofst;
+ size += map->sar[i].size;
+ if (app < size)
+ break;
}
- status = p_data->get_error_info(priv);
- if (status)
- return IRQ_NONE;
-
- priv->ce_cnt += priv->stat.ce_cnt;
- priv->ue_cnt += priv->stat.ue_cnt;
- handle_error(mci, &priv->stat);
-
- edac_dbg(3, "Total error count CE %d UE %d\n",
- priv->ce_cnt, priv->ue_cnt);
- /* v3.0 of the controller does not have this register */
- if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
- writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
- else
- enable_intr(priv);
-
- return IRQ_HANDLED;
+ *sys = app + ofst;
}
/**
- * check_errors - Check controller for ECC errors.
- * @mci: EDAC memory controller instance.
+ * snps_map_app_to_hif - Map Application address to HIF address.
+ * @priv: DDR memory controller private instance data.
+ * @app: Application address (source).
+ * @hif: HIF address (destination).
*
- * Check and post ECC errors. Called by the polling thread.
+ * HIF address is used to perform the DQ bus width aligned burst transactions.
+ * So in order to perform the Application-to-HIF address translation we just
+ * need to discard the SDRAM-word bits of the Application address.
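+ *
+ * For instance (illustrative values), with a 32-bit DQ bus (dq_width = 2)
+ * the application address 0x10001000 turns into the HIF address
+ * 0x10001000 >> 2 = 0x4000400.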
*/
-static void check_errors(struct mem_ctl_info *mci)
+static void snps_map_app_to_hif(struct snps_edac_priv *priv,
+ u64 app, u64 *hif)
{
- const struct synps_platform_data *p_data;
- struct synps_edac_priv *priv;
- int status;
-
- priv = mci->pvt_info;
- p_data = priv->p_data;
-
- status = p_data->get_error_info(priv);
- if (status)
- return;
-
- priv->ce_cnt += priv->stat.ce_cnt;
- priv->ue_cnt += priv->stat.ue_cnt;
- handle_error(mci, &priv->stat);
-
- edac_dbg(3, "Total error count CE %d UE %d\n",
- priv->ce_cnt, priv->ue_cnt);
+ *hif = app >> priv->info.dq_width;
}
/**
- * zynq_get_dtype - Return the controller memory width.
- * @base: DDR memory controller base address.
- *
- * Get the EDAC device type width appropriate for the current controller
- * configuration.
+ * snps_map_hif_to_app - Map HIF address to Application address.
+ * @priv: DDR memory controller private instance data.
+ * @hif: HIF address (source).
+ * @app: Application address (destination).
*
- * Return: a device type width enumeration.
+ * Backward HIF-to-App translation is just the opposite DQ-width-based
+ * shift operation.
*/
-static enum dev_type zynq_get_dtype(const void __iomem *base)
+static void snps_map_hif_to_app(struct snps_edac_priv *priv,
+ u64 hif, u64 *app)
{
- enum dev_type dt;
- u32 width;
-
- width = readl(base + CTRL_OFST);
- width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
-
- switch (width) {
- case DDRCTL_WDTH_16:
- dt = DEV_X2;
- break;
- case DDRCTL_WDTH_32:
- dt = DEV_X4;
- break;
- default:
- dt = DEV_UNKNOWN;
- }
-
- return dt;
+ *app = hif << priv->info.dq_width;
}
/**
- * zynqmp_get_dtype - Return the controller memory width.
- * @base: DDR memory controller base address.
- *
- * Get the EDAC device type width appropriate for the current controller
- * configuration.
+ * snps_map_hif_to_sdram - Map HIF address to SDRAM address.
+ * @priv: DDR memory controller private instance data.
+ * @hif: HIF address (source).
+ * @sdram: SDRAM address (destination).
*
- * Return: a device type width enumeration.
+ * HIF-SDRAM address mapping is configured with the ADDRMAPx registers. Based
+ * on the CSRs value the HIF address bits are mapped to the corresponding bits
+ * in the SDRAM rank/bank/column/row. If an SDRAM address bit is unused (there
+ * is no HIF address bit corresponding to it) it will be set to zero. Using
+ * this fact we can freely set the output SDRAM address with zeros and walk
+ * over the set HIF address bits only. Similarly the unmapped HIF address bits
+ * are just ignored.
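+ *
+ * For example (hypothetical mapping), if map->col[2] = 0 then HIF address
+ * bit 0 drives SDRAM column bit 2, so the HIF address 0x1 decodes to the
+ * column address 0x4.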
*/
-static enum dev_type zynqmp_get_dtype(const void __iomem *base)
+static void snps_map_hif_to_sdram(struct snps_edac_priv *priv,
+ u64 hif, struct snps_sdram_addr *sdram)
{
- enum dev_type dt;
- u32 width;
-
- width = readl(base + CTRL_OFST);
- width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
- switch (width) {
- case DDRCTL_EWDTH_16:
- dt = DEV_X2;
- break;
- case DDRCTL_EWDTH_32:
- dt = DEV_X4;
- break;
- case DDRCTL_EWDTH_64:
- dt = DEV_X8;
- break;
- default:
- dt = DEV_UNKNOWN;
- }
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ int i;
- return dt;
-}
+ sdram->row = 0;
+ for (i = 0; i < DDR_MAX_ROW_WIDTH; i++) {
+ if (map->row[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->row[i]))
+ sdram->row |= BIT(i);
+ }
-/**
- * zynq_get_ecc_state - Return the controller ECC enable/disable status.
- * @base: DDR memory controller base address.
- *
- * Get the ECC enable/disable status of the controller.
- *
- * Return: true if enabled, otherwise false.
- */
-static bool zynq_get_ecc_state(void __iomem *base)
-{
- enum dev_type dt;
- u32 ecctype;
+ sdram->col = 0;
+ for (i = 0; i < DDR_MAX_COL_WIDTH; i++) {
+ if (map->col[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->col[i]))
+ sdram->col |= BIT(i);
+ }
- dt = zynq_get_dtype(base);
- if (dt == DEV_UNKNOWN)
- return false;
+ sdram->bank = 0;
+ for (i = 0; i < DDR_MAX_BANK_WIDTH; i++) {
+ if (map->bank[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->bank[i]))
+ sdram->bank |= BIT(i);
+ }
- ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
- if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
- return true;
+ sdram->bankgrp = 0;
+ for (i = 0; i < DDR_MAX_BANKGRP_WIDTH; i++) {
+ if (map->bankgrp[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->bankgrp[i]))
+ sdram->bankgrp |= BIT(i);
+ }
- return false;
+ sdram->rank = 0;
+ for (i = 0; i < DDR_MAX_RANK_WIDTH; i++) {
+ if (map->rank[i] != DDR_ADDRMAP_UNUSED && hif & BIT(map->rank[i]))
+ sdram->rank |= BIT(i);
+ }
}
/**
- * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
- * @base: DDR memory controller base address.
+ * snps_map_sdram_to_hif - Map SDRAM address to HIF address.
+ * @priv: DDR memory controller private instance data.
+ * @sdram: SDRAM address (source).
+ * @hif: HIF address (destination).
*
- * Get the ECC enable/disable status for the controller.
+ * SDRAM-HIF address mapping is similar to the HIF-SDRAM mapping procedure, but
+ * we'll traverse each SDRAM rank/bank/column/row bit.
*
- * Return: a ECC status boolean i.e true/false - enabled/disabled.
+ * Note the unmapped bits of the SDRAM address components will be just
+ * ignored. So make sure the source address is valid.
*/
-static bool zynqmp_get_ecc_state(void __iomem *base)
+static void snps_map_sdram_to_hif(struct snps_edac_priv *priv,
+ struct snps_sdram_addr *sdram, u64 *hif)
{
- enum dev_type dt;
- u32 ecctype;
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ unsigned long addr;
+ int i;
- dt = zynqmp_get_dtype(base);
- if (dt == DEV_UNKNOWN)
- return false;
+ *hif = 0;
- ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
- if ((ecctype == SCRUB_MODE_SECDED) &&
- ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
- return true;
+ addr = sdram->row;
+ for_each_set_bit(i, &addr, DDR_MAX_ROW_WIDTH) {
+ if (map->row[i] != DDR_ADDRMAP_UNUSED)
+ *hif |= BIT_ULL(map->row[i]);
+ }
- return false;
-}
+ addr = sdram->col;
+ for_each_set_bit(i, &addr, DDR_MAX_COL_WIDTH) {
+ if (map->col[i] != DDR_ADDRMAP_UNUSED)
+ *hif |= BIT_ULL(map->col[i]);
+ }
-/**
- * get_memsize - Read the size of the attached memory device.
- *
- * Return: the memory size in bytes.
- */
-static u32 get_memsize(void)
-{
- struct sysinfo inf;
+ addr = sdram->bank;
+ for_each_set_bit(i, &addr, DDR_MAX_BANK_WIDTH) {
+ if (map->bank[i] != DDR_ADDRMAP_UNUSED)
+ *hif |= BIT_ULL(map->bank[i]);
+ }
- si_meminfo(&inf);
+ addr = sdram->bankgrp;
+ for_each_set_bit(i, &addr, DDR_MAX_BANKGRP_WIDTH) {
+ if (map->bankgrp[i] != DDR_ADDRMAP_UNUSED)
+ *hif |= BIT_ULL(map->bankgrp[i]);
+ }
- return inf.totalram * inf.mem_unit;
+ addr = sdram->rank;
+ for_each_set_bit(i, &addr, DDR_MAX_RANK_WIDTH) {
+ if (map->rank[i] != DDR_ADDRMAP_UNUSED)
+ *hif |= BIT_ULL(map->rank[i]);
+ }
}
/**
- * zynq_get_mtype - Return the controller memory type.
- * @base: Synopsys ECC status structure.
- *
- * Get the EDAC memory type appropriate for the current controller
- * configuration.
+ * snps_map_sys_to_sdram - Map System address to SDRAM address.
+ * @priv: DDR memory controller private instance data.
+ * @sys: System address (source).
+ * @sdram: SDRAM address (destination).
*
- * Return: a memory type enumeration.
+ * Perform a full mapping of the system address (detected on the controller
+ * ports) to the SDRAM address tuple row/column/bank/etc.
*/
-static enum mem_type zynq_get_mtype(const void __iomem *base)
+static void snps_map_sys_to_sdram(struct snps_edac_priv *priv,
+ dma_addr_t sys, struct snps_sdram_addr *sdram)
{
- enum mem_type mt;
- u32 memtype;
+ u64 app, hif;
- memtype = readl(base + T_ZQ_OFST);
+ snps_map_sys_to_app(priv, sys, &app);
- if (memtype & T_ZQ_DDRMODE_MASK)
- mt = MEM_DDR3;
- else
- mt = MEM_DDR2;
+ snps_map_app_to_hif(priv, app, &hif);
- return mt;
+ snps_map_hif_to_sdram(priv, hif, sdram);
}
/**
- * zynqmp_get_mtype - Returns controller memory type.
- * @base: Synopsys ECC status structure.
- *
- * Get the EDAC memory type appropriate for the current controller
- * configuration.
+ * snps_map_sdram_to_sys - Map SDRAM address to System address.
+ * @priv: DDR memory controller private instance data.
+ * @sdram: SDRAM address (source).
+ * @sys: System address (destination).
*
- * Return: a memory type enumeration.
+ * Perform a full mapping of the SDRAM address (row/column/bank/etc) to
+ * the system address specific to the controller system bus ports.
*/
-static enum mem_type zynqmp_get_mtype(const void __iomem *base)
+static void snps_map_sdram_to_sys(struct snps_edac_priv *priv,
+ struct snps_sdram_addr *sdram, dma_addr_t *sys)
{
- enum mem_type mt;
- u32 memtype;
+ u64 app, hif;
- memtype = readl(base + CTRL_OFST);
+ snps_map_sdram_to_hif(priv, sdram, &hif);
- if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
- mt = MEM_DDR3;
- else if (memtype & MEM_TYPE_DDR2)
- mt = MEM_RDDR2;
- else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
- mt = MEM_DDR4;
- else
- mt = MEM_EMPTY;
+ snps_map_hif_to_app(priv, hif, &app);
- return mt;
+ snps_map_app_to_sys(priv, app, sys);
}
/**
- * init_csrows - Initialize the csrow data.
- * @mci: EDAC memory controller instance.
+ * snps_get_bitpos - Get DQ-bus corrected bit position.
+ * @syndrome: Error syndrome.
+ * @dq_width: Controller DQ-bus width.
*
- * Initialize the chip select rows associated with the EDAC memory
- * controller instance.
+ * Return: actual corrected DQ-bus bit position starting from 0.
*/
-static void init_csrows(struct mem_ctl_info *mci)
+static inline u32 snps_get_bitpos(u32 syndrome, enum snps_dq_width dq_width)
{
- struct synps_edac_priv *priv = mci->pvt_info;
- const struct synps_platform_data *p_data;
- struct csrow_info *csi;
- struct dimm_info *dimm;
- u32 size, row;
- int j;
-
- p_data = priv->p_data;
+ /* ecc[0] bit */
+ if (syndrome == 0)
+ return BITS_PER_BYTE << dq_width;
- for (row = 0; row < mci->nr_csrows; row++) {
- csi = mci->csrows[row];
- size = get_memsize();
+ /* ecc[1:x] bit */
+ if (is_power_of_2(syndrome))
+ return (BITS_PER_BYTE << dq_width) + ilog2(syndrome) + 1;
- for (j = 0; j < csi->nr_channels; j++) {
- dimm = csi->channels[j]->dimm;
- dimm->edac_mode = EDAC_SECDED;
- dimm->mtype = p_data->get_mtype(priv->baseaddr);
- dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
- dimm->grain = SYNPS_EDAC_ERR_GRAIN;
- dimm->dtype = p_data->get_dtype(priv->baseaddr);
- }
- }
+ /* data[0:y] bit */
+ return syndrome - ilog2(syndrome) - 2;
}
/**
- * mc_init - Initialize one driver instance.
- * @mci: EDAC memory controller instance.
- * @pdev: platform device.
+ * snps_ce_irq_handler - Corrected error interrupt handler.
+ * @irq: IRQ number.
+ * @dev_id: Device ID.
*
- * Perform initialization of the EDAC memory controller instance and
- * related driver-private data associated with the memory controller the
- * instance is bound to.
+ * Return: IRQ_NONE if the interrupt isn't set, IRQ_HANDLED otherwise.
*/
-static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
+static irqreturn_t snps_ce_irq_handler(int irq, void *dev_id)
{
- struct synps_edac_priv *priv;
-
- mci->pdev = &pdev->dev;
- priv = mci->pvt_info;
- platform_set_drvdata(pdev, mci);
+ struct mem_ctl_info *mci = dev_id;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ struct snps_ecc_error_info einfo;
+ unsigned long flags;
+ u32 qosval, regval;
+ dma_addr_t sys;
+
+ /* Make sure IRQ is caused by a corrected ECC error */
+ if (priv->info.caps & SNPS_CAP_ZYNQMP) {
+ qosval = readl(priv->baseaddr + ZYNQMP_DDR_QOS_IRQ_STAT_OFST);
+ if (!(qosval & ZYNQMP_DDR_QOS_CE_MASK))
+ return IRQ_NONE;
- /* Initialize controller capabilities and configuration */
- mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->scrub_cap = SCRUB_HW_SRC;
- mci->scrub_mode = SCRUB_NONE;
-
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->ctl_name = "synps_ddr_controller";
- mci->dev_name = SYNPS_EDAC_MOD_STRING;
- mci->mod_name = SYNPS_EDAC_MOD_VER;
-
- if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
- edac_op_state = EDAC_OPSTATE_INT;
- } else {
- edac_op_state = EDAC_OPSTATE_POLL;
- mci->edac_check = check_errors;
+ qosval &= ZYNQMP_DDR_QOS_CE_MASK;
}
- mci->ctl_page_to_phys = NULL;
+ regval = readl(priv->baseaddr + ECC_STAT_OFST);
+ if (!FIELD_GET(ECC_STAT_CE_MASK, regval))
+ return IRQ_NONE;
- init_csrows(mci);
-}
+ /* Read error info like syndrome, bit position, SDRAM address, data */
+ einfo.syndrome = FIELD_GET(ECC_STAT_BITNUM_MASK, regval);
-static int setup_irq(struct mem_ctl_info *mci,
- struct platform_device *pdev)
-{
- struct synps_edac_priv *priv = mci->pvt_info;
- int ret, irq;
+ einfo.bitpos = snps_get_bitpos(einfo.syndrome, priv->info.dq_width);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- edac_printk(KERN_ERR, EDAC_MC,
- "No IRQ %d in DT\n", irq);
- return irq;
- }
+ regval = readl(priv->baseaddr + ECC_ERRCNT_OFST);
+ einfo.ecnt = FIELD_GET(ECC_ERRCNT_CECNT_MASK, regval);
- ret = devm_request_irq(&pdev->dev, irq, intr_handler,
- 0, dev_name(&pdev->dev), mci);
- if (ret < 0) {
- edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
- return ret;
- }
+ regval = readl(priv->baseaddr + ECC_CEADDR0_OFST);
+ einfo.sdram.rank = FIELD_GET(ECC_CEADDR0_RANK_MASK, regval);
+ einfo.sdram.row = FIELD_GET(ECC_CEADDR0_ROW_MASK, regval);
- enable_intr(priv);
+ regval = readl(priv->baseaddr + ECC_CEADDR1_OFST);
+ einfo.sdram.bankgrp = FIELD_GET(ECC_CEADDR1_BANKGRP_MASK, regval);
+ einfo.sdram.bank = FIELD_GET(ECC_CEADDR1_BANK_MASK, regval);
+ einfo.sdram.col = FIELD_GET(ECC_CEADDR1_COL_MASK, regval);
- return 0;
-}
+ einfo.data = readl(priv->baseaddr + ECC_CSYND0_OFST);
+ if (priv->info.dq_width == SNPS_DQ_64)
+ einfo.data |= (u64)readl(priv->baseaddr + ECC_CSYND1_OFST) << 32;
-static const struct synps_platform_data zynq_edac_def = {
- .get_error_info = zynq_get_error_info,
- .get_mtype = zynq_get_mtype,
- .get_dtype = zynq_get_dtype,
- .get_ecc_state = zynq_get_ecc_state,
- .quirks = 0,
-};
+ einfo.ecc = readl(priv->baseaddr + ECC_CSYND2_OFST);
-static const struct synps_platform_data zynqmp_edac_def = {
- .get_error_info = zynqmp_get_error_info,
- .get_mtype = zynqmp_get_mtype,
- .get_dtype = zynqmp_get_dtype,
- .get_ecc_state = zynqmp_get_ecc_state,
- .quirks = (DDR_ECC_INTR_SUPPORT
-#ifdef CONFIG_EDAC_DEBUG
- | DDR_ECC_DATA_POISON_SUPPORT
-#endif
- ),
-};
+ /* Report the detected errors with the corresponding sys address */
+ snps_map_sdram_to_sys(priv, &einfo.sdram, &sys);
-static const struct synps_platform_data synopsys_edac_def = {
- .get_error_info = zynqmp_get_error_info,
- .get_mtype = zynqmp_get_mtype,
- .get_dtype = zynqmp_get_dtype,
- .get_ecc_state = zynqmp_get_ecc_state,
- .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
-#ifdef CONFIG_EDAC_DEBUG
- | DDR_ECC_DATA_POISON_SUPPORT
-#endif
- ),
-};
+ snprintf(priv->message, SNPS_EDAC_MSG_SIZE,
+ "Row %hu Col %hu Bank %hhu Bank Group %hhu Rank %hhu Bit %d Data 0x%08llx:0x%02x",
+ einfo.sdram.row, einfo.sdram.col, einfo.sdram.bank,
+ einfo.sdram.bankgrp, einfo.sdram.rank,
+ einfo.bitpos, einfo.data, einfo.ecc);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, einfo.ecnt,
+ PHYS_PFN(sys), offset_in_page(sys),
+ einfo.syndrome, einfo.sdram.rank, 0, -1,
+ priv->message, "");
-static const struct of_device_id synps_edac_match[] = {
- {
- .compatible = "xlnx,zynq-ddrc-a05",
- .data = (void *)&zynq_edac_def
- },
- {
- .compatible = "xlnx,zynqmp-ddrc-2.40a",
- .data = (void *)&zynqmp_edac_def
- },
- {
- .compatible = "snps,ddrc-3.80a",
- .data = (void *)&synopsys_edac_def
- },
- {
- /* end of table */
- }
-};
+ /* Make sure the CE IRQ status is cleared */
+ spin_lock_irqsave(&priv->reglock, flags);
-MODULE_DEVICE_TABLE(of, synps_edac_match);
+ regval = readl(priv->baseaddr + ECC_CLR_OFST) |
+ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
+ writel(regval, priv->baseaddr + ECC_CLR_OFST);
-#ifdef CONFIG_EDAC_DEBUG
-#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ if (priv->info.caps & SNPS_CAP_ZYNQMP)
+ writel(qosval, priv->baseaddr + ZYNQMP_DDR_QOS_IRQ_STAT_OFST);
+
+ return IRQ_HANDLED;
+}
/**
- * ddr_poison_setup - Update poison registers.
- * @priv: DDR memory controller private instance data.
+ * snps_ue_irq_handler - Uncorrected error interrupt handler.
+ * @irq: IRQ number.
+ * @dev_id: Device ID.
*
- * Update poison registers as per DDR mapping.
- * Return: none.
+ * Return: IRQ_NONE if the interrupt isn't set, IRQ_HANDLED otherwise.
*/
-static void ddr_poison_setup(struct synps_edac_priv *priv)
+static irqreturn_t snps_ue_irq_handler(int irq, void *dev_id)
{
- int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
- int index;
- ulong hif_addr = 0;
-
- hif_addr = priv->poison_addr >> 3;
+ struct mem_ctl_info *mci = dev_id;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ struct snps_ecc_error_info einfo;
+ unsigned long flags;
+ u32 qosval, regval;
+ dma_addr_t sys;
+
+ /* Make sure IRQ is caused by an uncorrected ECC error */
+ if (priv->info.caps & SNPS_CAP_ZYNQMP) {
+ qosval = readl(priv->baseaddr + ZYNQMP_DDR_QOS_IRQ_STAT_OFST);
+ if (!(qosval & ZYNQMP_DDR_QOS_UE_MASK))
+ return IRQ_NONE;
- for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
- if (priv->row_shift[index])
- row |= (((hif_addr >> priv->row_shift[index]) &
- BIT(0)) << index);
- else
- break;
+ qosval &= ZYNQMP_DDR_QOS_UE_MASK;
}
- for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
- if (priv->col_shift[index] || index < 3)
- col |= (((hif_addr >> priv->col_shift[index]) &
- BIT(0)) << index);
- else
- break;
- }
+ regval = readl(priv->baseaddr + ECC_STAT_OFST);
+ if (!FIELD_GET(ECC_STAT_UE_MASK, regval))
+ return IRQ_NONE;
- for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
- if (priv->bank_shift[index])
- bank |= (((hif_addr >> priv->bank_shift[index]) &
- BIT(0)) << index);
- else
- break;
- }
+ /* Read error info like SDRAM address, data and syndrome */
+ regval = readl(priv->baseaddr + ECC_ERRCNT_OFST);
+ einfo.ecnt = FIELD_GET(ECC_ERRCNT_UECNT_MASK, regval);
- for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
- if (priv->bankgrp_shift[index])
- bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
- & BIT(0)) << index);
- else
- break;
- }
+ regval = readl(priv->baseaddr + ECC_UEADDR0_OFST);
+ einfo.sdram.rank = FIELD_GET(ECC_CEADDR0_RANK_MASK, regval);
+ einfo.sdram.row = FIELD_GET(ECC_CEADDR0_ROW_MASK, regval);
- if (priv->rank_shift[0])
- rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);
+ regval = readl(priv->baseaddr + ECC_UEADDR1_OFST);
+ einfo.sdram.bankgrp = FIELD_GET(ECC_CEADDR1_BANKGRP_MASK, regval);
+ einfo.sdram.bank = FIELD_GET(ECC_CEADDR1_BANK_MASK, regval);
+ einfo.sdram.col = FIELD_GET(ECC_CEADDR1_COL_MASK, regval);
- regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
- regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
- writel(regval, priv->baseaddr + ECC_POISON0_OFST);
+ einfo.data = readl(priv->baseaddr + ECC_UESYND0_OFST);
+ if (priv->info.dq_width == SNPS_DQ_64)
+ einfo.data |= (u64)readl(priv->baseaddr + ECC_UESYND1_OFST) << 32;
- regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
- regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
- regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
- writel(regval, priv->baseaddr + ECC_POISON1_OFST);
-}
+ einfo.ecc = readl(priv->baseaddr + ECC_UESYND2_OFST);
-static ssize_t inject_data_error_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct synps_edac_priv *priv = mci->pvt_info;
-
- return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
- "Error injection Address: 0x%lx\n\r",
- readl(priv->baseaddr + ECC_POISON0_OFST),
- readl(priv->baseaddr + ECC_POISON1_OFST),
- priv->poison_addr);
-}
+ /* Report the detected errors with the corresponding sys address */
+ snps_map_sdram_to_sys(priv, &einfo.sdram, &sys);
-static ssize_t inject_data_error_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct synps_edac_priv *priv = mci->pvt_info;
+ snprintf(priv->message, SNPS_EDAC_MSG_SIZE,
+ "Row %hu Col %hu Bank %hhu Bank Group %hhu Rank %hhu Data 0x%08llx:0x%02x",
+ einfo.sdram.row, einfo.sdram.col, einfo.sdram.bank,
+ einfo.sdram.bankgrp, einfo.sdram.rank,
+ einfo.data, einfo.ecc);
- if (kstrtoul(data, 0, &priv->poison_addr))
- return -EINVAL;
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, einfo.ecnt,
+ PHYS_PFN(sys), offset_in_page(sys),
+ 0, einfo.sdram.rank, 0, -1,
+ priv->message, "");
- ddr_poison_setup(priv);
+ /* Make sure the UE IRQ status is cleared */
+ spin_lock_irqsave(&priv->reglock, flags);
- return count;
-}
+ regval = readl(priv->baseaddr + ECC_CLR_OFST) |
+ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ writel(regval, priv->baseaddr + ECC_CLR_OFST);
-static ssize_t inject_data_poison_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct synps_edac_priv *priv = mci->pvt_info;
+ spin_unlock_irqrestore(&priv->reglock, flags);
- return sprintf(data, "Data Poisoning: %s\n\r",
- (((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
- ? ("Correctable Error") : ("UnCorrectable Error"));
+ if (priv->info.caps & SNPS_CAP_ZYNQMP)
+ writel(qosval, priv->baseaddr + ZYNQMP_DDR_QOS_IRQ_STAT_OFST);
+
+ return IRQ_HANDLED;
}
-static ssize_t inject_data_poison_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
+/**
+ * snps_dfi_irq_handler - DFI CRC/Parity error interrupt handler.
+ * @irq: IRQ number.
+ * @dev_id: Device ID.
+ *
+ * Return: IRQ_NONE if the interrupt isn't set, IRQ_HANDLED otherwise.
+ */
+static irqreturn_t snps_dfi_irq_handler(int irq, void *dev_id)
{
- struct mem_ctl_info *mci = to_mci(dev);
- struct synps_edac_priv *priv = mci->pvt_info;
+ struct mem_ctl_info *mci = dev_id;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ unsigned long flags;
+ u32 regval;
+ u16 ecnt;
+
+ /* Make sure IRQ is caused by a DFI alert error */
+ regval = readl(priv->baseaddr + DDR_CRCPARSTAT_OFST);
+ if (!(regval & DDR_CRCPARSTAT_ALRT_ERR))
+ return IRQ_NONE;
- writel(0, priv->baseaddr + DDRC_SWCTL);
- if (strncmp(data, "CE", 2) == 0)
- writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
- else
- writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
- writel(1, priv->baseaddr + DDRC_SWCTL);
+ /* Only the number of CRC/Parity errors is available */
+ ecnt = FIELD_GET(DDR_CRCPARSTAT_ALRT_CNT_MASK, regval);
- return count;
-}
+ /* Report the detected errors with just the custom message */
+ snprintf(priv->message, SNPS_EDAC_MSG_SIZE,
+ "DFI CRC/Parity error detected on dfi_alert_n");
-static DEVICE_ATTR_RW(inject_data_error);
-static DEVICE_ATTR_RW(inject_data_poison);
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, ecnt,
+ 0, 0, 0, 0, 0, -1, priv->message, "");
-static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
-{
- int rc;
+ /* Make sure the DFI alert IRQ status is cleared */
+ spin_lock_irqsave(&priv->reglock, flags);
- rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
- if (rc < 0)
- return rc;
- rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
- if (rc < 0)
- return rc;
- return 0;
-}
+ regval = readl(priv->baseaddr + DDR_CRCPARCTL0_OFST) |
+ DDR_CRCPARCTL0_CLR_ALRT_ERR | DDR_CRCPARCTL0_CLR_ALRT_ERRCNT;
+ writel(regval, priv->baseaddr + DDR_CRCPARCTL0_OFST);
-static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
-{
- device_remove_file(&mci->dev, &dev_attr_inject_data_error);
- device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ return IRQ_HANDLED;
}
-static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
-{
- u32 addrmap_row_b2_10;
- int index;
+/**
+ * snps_sbr_irq_handler - Scrubber Done interrupt handler.
+ * @irq: IRQ number.
+ * @dev_id: Device ID.
+ *
+ * It just checks whether the IRQ has been caused by the Scrubber Done event
+ * and disables the back-to-back scrubbing by falling back to a safe
+ * delay between the Scrubber read commands.
+ *
+ * Return: IRQ_NONE if the interrupt isn't set, IRQ_HANDLED otherwise.
+ */
+static irqreturn_t snps_sbr_irq_handler(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ unsigned long flags;
+ u32 regval, en;
+
+ /* Make sure IRQ is caused by the Scrubber Done event */
+ regval = readl(priv->baseaddr + ECC_SBRSTAT_OFST);
+ if (!(regval & ECC_SBRSTAT_SCRUB_DONE))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ regval = readl(priv->baseaddr + ECC_SBRCTL_OFST);
+ en = regval & ECC_SBRCTL_SCRUB_EN;
+ writel(regval & ~en, priv->baseaddr + ECC_SBRCTL_OFST);
+
+ regval = FIELD_PREP(ECC_SBRCTL_SCRUB_INTERVAL, ECC_SBRCTL_INTERVAL_SAFE);
+ writel(regval, priv->baseaddr + ECC_SBRCTL_OFST);
+
+ writel(regval | en, priv->baseaddr + ECC_SBRCTL_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ edac_mc_printk(mci, KERN_WARNING, "Back-to-back scrubbing disabled\n");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * snps_com_irq_handler - Common (merged) IRQ signal handler.
+ * @irq: IRQ number.
+ * @dev_id: Device ID.
+ *
+ * Return: IRQ_NONE if none of the interrupts is set, IRQ_HANDLED otherwise.
+ */
+static irqreturn_t snps_com_irq_handler(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ irqreturn_t rc = IRQ_NONE;
+
+ rc |= snps_ce_irq_handler(irq, dev_id);
+
+ rc |= snps_ue_irq_handler(irq, dev_id);
+
+ rc |= snps_dfi_irq_handler(irq, dev_id);
+
+ if (priv->info.caps & SNPS_CAP_ECC_SCRUBBER)
+ rc |= snps_sbr_irq_handler(irq, dev_id);
+
+ return rc;
+}
+
+static void snps_enable_irq(struct snps_edac_priv *priv)
+{
+ unsigned long flags;
+
+ /* Enable UE/CE Interrupts */
+ if (priv->info.caps & SNPS_CAP_ZYNQMP) {
+ writel(ZYNQMP_DDR_QOS_UE_MASK | ZYNQMP_DDR_QOS_CE_MASK,
+ priv->baseaddr + ZYNQMP_DDR_QOS_IRQ_EN_OFST);
+
+ return;
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ /*
+ * IRQs Enable/Disable flags have been available since v3.10a.
+ * This is a no-op for the older controllers.
+ */
+ writel(ECC_CTRL_EN_CE_IRQ | ECC_CTRL_EN_UE_IRQ,
+ priv->baseaddr + ECC_CLR_OFST);
+
+ /*
+ * CRC/Parity interrupts control has been available since v2.10a.
+ * This is a no-op for the older controllers.
+ */
+ writel(DDR_CRCPARCTL0_EN_ALRT_IRQ,
+ priv->baseaddr + DDR_CRCPARCTL0_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
+}
+
+static void snps_disable_irq(struct snps_edac_priv *priv)
+{
+ unsigned long flags;
+
+ /* Disable UE/CE Interrupts */
+ if (priv->info.caps & SNPS_CAP_ZYNQMP) {
+ writel(ZYNQMP_DDR_QOS_UE_MASK | ZYNQMP_DDR_QOS_CE_MASK,
+ priv->baseaddr + ZYNQMP_DDR_QOS_IRQ_DB_OFST);
+
+ return;
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ writel(0, priv->baseaddr + ECC_CLR_OFST);
+ writel(0, priv->baseaddr + DDR_CRCPARCTL0_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
+}
+
+/**
+ * snps_get_sdram_bw - Get SDRAM bandwidth.
+ * @priv: DDR memory controller private instance data.
+ *
+ * The SDRAM interface bandwidth is calculated based on the DDRC Core clock rate
+ * and the DW uMCTL2 IP-core parameters like DQ-bus width and mode and
+ * Core/SDRAM clocks frequency ratio. Note it returns the theoretical
+ * bandwidth, which is hardly reachable in practice.
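+ *
+ * For instance (illustrative numbers), a full-width 32-bit DQ bus
+ * (dq_width = 2, dq_mode = 0) with an 800 MHz Core clock in the 1:2 ratio
+ * mode yields (2 << 2) * 1.6e9 = 12.8 GB/s.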
+ *
+ * Return: SDRAM bandwidth or zero if no Core clock specified.
+ */
+static u64 snps_get_sdram_bw(struct snps_edac_priv *priv)
+{
+ unsigned long rate;
+
+ /*
+ * Depending on the ratio mode the SDRAM clock either matches the Core
+ * clock or runs at twice its frequency.
+ */
+ rate = clk_get_rate(priv->clks[SNPS_CORE_CLK].clk);
+ rate *= priv->info.freq_ratio;
+
+ /*
+ * Scale up by 2 since it's DDR (Double Data Rate) and subtract the
+ * DQ-mode since in non-Full mode only a part of the DQ-bus is utilised
+ * on each SDRAM clock edge.
+ */
+ return (2U << (priv->info.dq_width - priv->info.dq_mode)) * (u64)rate;
+}
+
+/**
+ * snps_get_scrub_bw - Get Scrubber bandwidth.
+ * @priv: DDR memory controller private instance data.
+ * @interval: Scrub interval.
+ *
+ * DW uMCTL2 DDRC Scrubber performs periodical progressive burst reads (RMW if
+ * ECC CE is detected) commands from the whole memory space. The read commands
+ * can be delayed by means of the SBRCTL.scrub_interval field. The Scrubber
+ * cycles look as follows:
+ *
+ * |-HIF-burst-read-|-------delay-------|-HIF-burst-read-|------- etc
+ *
+ * Tb = Bl*[DQ]/Bw[RAM], Td = 512*interval/Fc - periods of the HIF-burst-read
+ * and delay stages, where
+ * Bl - HIF burst length, [DQ] - Full DQ-bus width, Bw[RAM] - SDRAM bandwidth,
+ * Fc - Core clock frequency (Scrubber and Core clocks are synchronous).
+ *
+ * After some simple calculations the expressions above yield the following
+ * Scrubber bandwidth formula:
+ *
+ * Bw[Sbr] = Bw[RAM] / (1 + F * interval), where
+ * F = 2 * 512 * Fr / (Bl * [DQ]e) - interval scale factor with
+ * Fr - HIF/SDRAM clock frequency ratio (1 or 2), [DQ]e - DQ-bus width mode.
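+ *
+ * For instance (illustrative numbers): Bl = 8, Fr = 2 and the Full DQ-bus
+ * mode ([DQ]e = 1) give F = 2 * 512 * 2 / 8 = 256, so interval = 1 cuts
+ * the Scrubber bandwidth down to Bw[RAM] / 257.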
+ *
+ * Return: Scrubber bandwidth or zero if no Core clock specified.
+ */
+static u64 snps_get_scrub_bw(struct snps_edac_priv *priv, u32 interval)
+{
+ unsigned long fac;
+ u64 bw_ram;
+
+ fac = (2 * ECC_SBRCTL_INTERVAL_STEP * priv->info.freq_ratio) /
+ (priv->info.hif_burst_len * (1UL << priv->info.dq_mode));
+
+ bw_ram = snps_get_sdram_bw(priv);
+
+ return div_u64(bw_ram, 1 + fac * interval);
+}
+
+/**
+ * snps_get_scrub_interval - Get Scrubber delay interval.
+ * @priv: DDR memory controller private instance data.
+ * @bw: Scrubber bandwidth.
+ *
+ * Similarly to the Scrubber bandwidth, the interval formula can be inferred
+ * from the same expressions:
+ *
+ * interval = (Bw[RAM] - Bw[Sbr]) / (F * Bw[Sbr])
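+ *
+ * Continuing the illustrative numbers above (F = 256), requesting
+ * Bw[Sbr] = 50 MB/s with Bw[RAM] = 12.8 GB/s gives interval ~ 1.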
+ *
+ * Return: Scrubber delay interval or zero if no Core clock specified.
+ */
+static u32 snps_get_scrub_interval(struct snps_edac_priv *priv, u32 bw)
+{
+ unsigned long fac;
+ u64 bw_ram;
+
+ fac = (2 * priv->info.freq_ratio * ECC_SBRCTL_INTERVAL_STEP) /
+ (priv->info.hif_burst_len * (1UL << priv->info.dq_mode));
+
+ bw_ram = snps_get_sdram_bw(priv);
+
+ /* Divide twice so as not to cause an integer overflow in (fac * bw) */
+ return div_u64(div_u64(bw_ram - bw, bw), fac);
+}
+
+/**
+ * snps_set_sdram_scrub_rate - Set the Scrubber bandwidth.
+ * @mci: EDAC memory controller instance.
+ * @bw: Bandwidth.
+ *
+ * It calculates the delay between the Scrubber read commands based on the
+ * specified bandwidth and the Core clock rate. If the Core clock is unavailable
+ * the passed bandwidth will be directly used as the interval value.
+ *
+ * Note the method warns about the back-to-back scrubbing since it may
+ * significantly degrade the system performance. This mode is supposed to be
+ * used for a single SDRAM scrubbing pass only. So it will be turned off in the
+ * Scrubber Done IRQ handler.
+ *
+ * Return: Actually set bandwidth (interval-based approximated bandwidth if the
+ * Core clock is unavailable) or zero if the Scrubber was disabled.
+ */
+static int snps_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
+{
+ struct snps_edac_priv *priv = mci->pvt_info;
+ u32 regval, interval;
+ unsigned long flags;
+ u64 bw_min, bw_max;
+
+ /* Don't bother with the calculations, just disable and return. */
+ if (!bw) {
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ regval = readl(priv->baseaddr + ECC_SBRCTL_OFST);
+ regval &= ~ECC_SBRCTL_SCRUB_EN;
+ writel(regval, priv->baseaddr + ECC_SBRCTL_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ return 0;
+ }
+
+ /* If no Core clock is specified, fall back to the direct interval setup. */
+ bw_max = snps_get_scrub_bw(priv, ECC_SBRCTL_INTERVAL_MIN);
+ if (bw_max) {
+ bw_min = snps_get_scrub_bw(priv, ECC_SBRCTL_INTERVAL_MAX);
+ bw = clamp_t(u64, bw, bw_min, bw_max);
+
+ interval = snps_get_scrub_interval(priv, bw);
+ } else {
+ bw = clamp_val(bw, ECC_SBRCTL_INTERVAL_MIN, ECC_SBRCTL_INTERVAL_MAX);
+
+ interval = ECC_SBRCTL_INTERVAL_MAX - bw;
+ }
+
+ /*
+ * SBRCTL.scrub_en bitfield must be accessed separately from the other
+ * CSR bitfields. It means the flag must be set/cleared with no updates
+ * to the rest of the fields.
+ */
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ regval = FIELD_PREP(ECC_SBRCTL_SCRUB_INTERVAL, interval);
+ writel(regval, priv->baseaddr + ECC_SBRCTL_OFST);
+
+ writel(regval | ECC_SBRCTL_SCRUB_EN, priv->baseaddr + ECC_SBRCTL_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ if (!interval)
+ edac_mc_printk(mci, KERN_WARNING, "Back-to-back scrubbing enabled\n");
+
+ if (!bw_max)
+ return interval ? bw : INT_MAX;
+
+ return snps_get_scrub_bw(priv, interval);
+}
+
+/**
+ * snps_get_sdram_scrub_rate - Get the Scrubber bandwidth.
+ * @mci: EDAC memory controller instance.
+ *
+ * Return: Scrubber bandwidth (interval-based approximated bandwidth if the
+ * Core clock is unavailable) or zero if the Scrubber was disabled.
+ */
+static int snps_get_sdram_scrub_rate(struct mem_ctl_info *mci)
+{
+ struct snps_edac_priv *priv = mci->pvt_info;
+ u32 regval;
+ u64 bw;
+
+ regval = readl(priv->baseaddr + ECC_SBRCTL_OFST);
+ if (!(regval & ECC_SBRCTL_SCRUB_EN))
+ return 0;
+
+ regval = FIELD_GET(ECC_SBRCTL_SCRUB_INTERVAL, regval);
+
+ bw = snps_get_scrub_bw(priv, regval);
+ if (!bw)
+ return regval ? ECC_SBRCTL_INTERVAL_MAX - regval : INT_MAX;
+
+ return bw;
+}
+
+/**
+ * snps_create_data - Create private data.
+ * @pdev: platform device.
+ *
+ * Return: private data instance or an error pointer.
+ */
+static struct snps_edac_priv *snps_create_data(struct platform_device *pdev)
+{
+ struct snps_edac_priv *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->baseaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->baseaddr))
+ return ERR_CAST(priv->baseaddr);
+
+ priv->pdev = pdev;
+ spin_lock_init(&priv->reglock);
+
+ return priv;
+}
+
+/**
+ * snps_get_res - Get platform device resources.
+ * @priv: DDR memory controller private instance data.
+ *
+ * It's supposed to request all the controller resources available for the
+ * particular platform and enable all those required for the driver to work
+ * normally. Note only the CSR and Scrubber clocks are supposed to be
+ * switched on/off by the driver.
+ *
+ * Return: negative errno if failed to get the resources, otherwise - zero.
+ */
+static int snps_get_res(struct snps_edac_priv *priv)
+{
+ const char * const ids[] = {
+ [SNPS_CSR_CLK] = "pclk",
+ [SNPS_AXI_CLK] = "aclk",
+ [SNPS_CORE_CLK] = "core",
+ [SNPS_SBR_CLK] = "sbr",
+ };
+ int i, rc;
+
+ for (i = 0; i < SNPS_MAX_NCLK; i++)
+ priv->clks[i].id = ids[i];
+
+ rc = devm_clk_bulk_get_optional(&priv->pdev->dev, SNPS_MAX_NCLK,
+ priv->clks);
+ if (rc) {
+ edac_printk(KERN_INFO, EDAC_MC, "Failed to get ref clocks\n");
+ return rc;
+ }
+
+ /*
+ * Don't touch the Core and AXI clocks since they are critical for the
+ * stable system functioning and are supposed to have been enabled
+ * anyway.
+ */
+ rc = clk_prepare_enable(priv->clks[SNPS_CSR_CLK].clk);
+ if (rc) {
+ edac_printk(KERN_INFO, EDAC_MC, "Couldn't enable CSR clock\n");
+ return rc;
+ }
+
+ rc = clk_prepare_enable(priv->clks[SNPS_SBR_CLK].clk);
+ if (rc) {
+ edac_printk(KERN_INFO, EDAC_MC, "Couldn't enable Scrubber clock\n");
+ goto err_disable_pclk;
+ }
+
+ return 0;
+
+err_disable_pclk:
+ clk_disable_unprepare(priv->clks[SNPS_CSR_CLK].clk);
+
+ return rc;
+}
+
+/**
+ * snps_put_res - Put platform device resources.
+ * @priv: DDR memory controller private instance data.
+ */
+static void snps_put_res(struct snps_edac_priv *priv)
+{
+ clk_disable_unprepare(priv->clks[SNPS_SBR_CLK].clk);
+
+ clk_disable_unprepare(priv->clks[SNPS_CSR_CLK].clk);
+}
+
+/**
+ * zynqmp_init_plat - ZynqMP-specific platform initialization.
+ * @priv: DDR memory controller private data.
+ *
+ * Return: always zero.
+ */
+static int zynqmp_init_plat(struct snps_edac_priv *priv)
+{
+ priv->info.caps |= SNPS_CAP_ZYNQMP;
+ priv->info.dq_width = SNPS_DQ_64;
+
+ return 0;
+}
+
+/**
+ * bt1_init_plat - Baikal-T1-specific platform initialization.
+ * @priv: DDR memory controller private data.
+ *
+ * Return: always zero.
+ */
+static int bt1_init_plat(struct snps_edac_priv *priv)
+{
+ priv->info.dq_width = SNPS_DQ_32;
+ priv->info.hif_burst_len = SNPS_DDR_BL8;
+ priv->sys_app_map.minsize = SZ_256M;
+
+ return 0;
+}
+
+/**
+ * snps_get_dtype - Return the controller memory width.
+ * @mstr: Master CSR value.
+ *
+ * Get the EDAC device type width appropriate for the current controller
+ * configuration.
+ *
+ * Return: a device type width enumeration.
+ */
+static inline enum dev_type snps_get_dtype(u32 mstr)
+{
+ if (!(mstr & DDR_MSTR_MEM_DDR4))
+ return DEV_UNKNOWN;
+
+ switch (FIELD_GET(DDR_MSTR_DEV_CFG_MASK, mstr)) {
+ case DDR_MSTR_DEV_X4:
+ return DEV_X4;
+ case DDR_MSTR_DEV_X8:
+ return DEV_X8;
+ case DDR_MSTR_DEV_X16:
+ return DEV_X16;
+ case DDR_MSTR_DEV_X32:
+ return DEV_X32;
+ }
+
+ return DEV_UNKNOWN;
+}
+
+/**
+ * snps_get_mtype - Returns controller memory type.
+ * @mstr: Master CSR value.
+ *
+ * Get the EDAC memory type appropriate for the current controller
+ * configuration.
+ *
+ * Return: a memory type enumeration.
+ */
+static inline enum mem_type snps_get_mtype(u32 mstr)
+{
+ switch (FIELD_GET(DDR_MSTR_MEM_MASK, mstr)) {
+ case DDR_MSTR_MEM_DDR2:
+ return MEM_DDR2;
+ case DDR_MSTR_MEM_DDR3:
+ return MEM_DDR3;
+ case DDR_MSTR_MEM_LPDDR:
+ return MEM_LPDDR;
+ case DDR_MSTR_MEM_LPDDR2:
+ return MEM_LPDDR2;
+ case DDR_MSTR_MEM_LPDDR3:
+ return MEM_LPDDR3;
+ case DDR_MSTR_MEM_DDR4:
+ return MEM_DDR4;
+ case DDR_MSTR_MEM_LPDDR4:
+ return MEM_LPDDR4;
+ }
+
+ return MEM_RESERVED;
+}
+
+/**
+ * snps_get_ddrc_info - Get the DDR controller config data.
+ * @priv: DDR memory controller private data.
+ *
+ * Return: negative errno if no ECC detected, otherwise - zero.
+ */
+static int snps_get_ddrc_info(struct snps_edac_priv *priv)
+{
+ int (*init_plat)(struct snps_edac_priv *priv);
+ u32 regval;
+
+ /* Before getting the DDRC parameters make sure ECC is enabled */
+ regval = readl(priv->baseaddr + ECC_CFG0_OFST);
+
+ priv->info.ecc_mode = FIELD_GET(ECC_CFG0_MODE_MASK, regval);
+ if (priv->info.ecc_mode != SNPS_ECC_SECDED) {
+ edac_printk(KERN_INFO, EDAC_MC, "SEC/DED ECC not enabled\n");
+ return -ENXIO;
+ }
+
+ /* Assume HW-src scrub is always available if it isn't disabled */
+ if (!(regval & ECC_CFG0_DIS_SCRUB))
+ priv->info.caps |= SNPS_CAP_ECC_SCRUB;
+
+ /* Auto-detect the scrubber by writing to the SBRWDATA0 CSR */
+ regval = readl(priv->baseaddr + ECC_SBRWDATA0_OFST);
+ writel(~regval, priv->baseaddr + ECC_SBRWDATA0_OFST);
+ if (regval != readl(priv->baseaddr + ECC_SBRWDATA0_OFST)) {
+ priv->info.caps |= SNPS_CAP_ECC_SCRUBBER;
+ writel(regval, priv->baseaddr + ECC_SBRWDATA0_OFST);
+ }
+
+ /* Auto-detect the basic HIF/SDRAM bus parameters */
+ regval = readl(priv->baseaddr + DDR_MSTR_OFST);
+
+ priv->info.sdram_mode = snps_get_mtype(regval);
+ priv->info.dev_cfg = snps_get_dtype(regval);
+
+ priv->info.dq_mode = FIELD_GET(DDR_MSTR_BUSWIDTH_MASK, regval);
+
+ /*
+ * Assume HIF burst length matches the SDRAM burst length since it's
+ * not auto-detectable.
+ */
+ priv->info.sdram_burst_len = FIELD_GET(DDR_MSTR_BURST_RDWR, regval) << 1;
+ priv->info.hif_burst_len = priv->info.sdram_burst_len;
+
+ /* Retrieve the current HIF/SDRAM frequency ratio: 1:1 vs 1:2 */
+ priv->info.freq_ratio = !(regval & DDR_MSTR_FREQ_RATIO11) + 1;
+
+ /* Activated ranks field: set bit corresponds to populated rank */
+ priv->info.ranks = FIELD_GET(DDR_MSTR_ACT_RANKS_MASK, regval);
+ priv->info.ranks = hweight_long(priv->info.ranks);
+
+ /* Auto-detect the DQ bus width by using the ECC-poison pattern CSR */
+ writel(0, priv->baseaddr + DDR_SWCTL);
+
+ /*
+ * If poison pattern bits [63:32] are changeable then DQ is 64-bit wide.
+ * Note the feature has been available since IP-core v2.51a.
+ */
+ regval = readl(priv->baseaddr + ECC_POISONPAT1_OFST);
+ writel(~regval, priv->baseaddr + ECC_POISONPAT1_OFST);
+ if (regval != readl(priv->baseaddr + ECC_POISONPAT1_OFST)) {
+ priv->info.dq_width = SNPS_DQ_64;
+ writel(regval, priv->baseaddr + ECC_POISONPAT1_OFST);
+ } else {
+ priv->info.dq_width = SNPS_DQ_32;
+ }
+
+ writel(1, priv->baseaddr + DDR_SWCTL);
+
+ /* Apply platform setups after all the configs auto-detection */
+ init_plat = device_get_match_data(&priv->pdev->dev);
+
+ return init_plat ? init_plat(priv) : 0;
+}
+
+/**
+ * snps_get_sys_app_map - Get System/Application address map.
+ * @priv: DDR memory controller private instance data.
+ * @sarregs: Array with SAR registers value.
+ *
+ * System address regions are defined by the SARBASEn and SARSIZEn registers.
+ * The controller reference manual requires the base addresses and sizes to
+ * create a set of ascending non-overlapped regions in order to have a linear
+ * application address space. Doing otherwise causes unpredictable results.
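+ *
+ * For example (hypothetical values), with a 256MB block size SARBASE1 = 4
+ * and SARSIZE1 = 0 define a 256MB region based at 0x40000000.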
+ */
+static void snps_get_sys_app_map(struct snps_edac_priv *priv, u32 *sarregs)
+{
+ struct snps_sys_app_map *map = &priv->sys_app_map;
+ int i, ofst;
+
+ /*
+ * SARs are supposed to be initialized in the ascending non-overlapped
+ * order: base[i - 1] < base[i] < etc. If that rule is broken for a SAR,
+ * it's assumed that no more SARs have been enabled, so the detection
+ * procedure halts. Having the very first SAR with a zero base
+ * address only makes sense if there is a subsequent SAR.
+ */
+ for (i = 0, ofst = 0; i < DDR_MAX_NSAR; i++) {
+ map->sar[i].base = sarregs[2 * i] * map->minsize;
+ if (map->sar[i].base)
+ map->nsar = i + 1;
+ else if (i && map->sar[i].base <= map->sar[i - 1].base)
+ break;
+
+ map->sar[i].size = (sarregs[2 * i + 1] + 1) * map->minsize;
+ map->sar[i].ofst = map->sar[i].base - ofst;
+ ofst += map->sar[i].size;
+ }
+
+ /*
+ * SAR block size isn't auto-detectable. If one isn't specified for the
+ * platform, there is a good chance the detected SARs mapping is invalid.
+ * So proceed with the 1:1 mapping then.
+ */
+ if (!map->minsize && map->nsar) {
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "No block size specified. Discard SARs mapping\n");
+ map->nsar = 0;
+ }
+}
+
+/**
+ * snps_get_hif_row_map - Get HIF/SDRAM-row address map.
+ * @priv: DDR memory controller private instance data.
+ * @addrmap: Array with ADDRMAP registers value.
+ *
+ * SDRAM-row address is defined by the fields in the ADDRMAP[5-7,9-11]
+ * registers. Those fields' values indicate the HIF address bits used to
+ * encode the DDR row address.
+ */
+static void snps_get_hif_row_map(struct snps_edac_priv *priv, u32 *addrmap)
+{
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ u8 map_row_b2_10;
+ int i;
+
+ for (i = 0; i < DDR_MAX_ROW_WIDTH; i++)
+ map->row[i] = DDR_ADDRMAP_UNUSED;
+
+ map->row[0] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[5]) + ROW_B0_BASE;
+ map->row[1] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[5]) + ROW_B1_BASE;
+
+ map_row_b2_10 = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[5]);
+ if (map_row_b2_10 != DDR_ADDRMAP_MAX_15) {
+ for (i = 2; i < 11; i++)
+ map->row[i] = map_row_b2_10 + i + ROW_B0_BASE;
+ } else {
+ map->row[2] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[9]) + ROW_B2_BASE;
+ map->row[3] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[9]) + ROW_B3_BASE;
+ map->row[4] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[9]) + ROW_B4_BASE;
+ map->row[5] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[9]) + ROW_B5_BASE;
+ map->row[6] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[10]) + ROW_B6_BASE;
+ map->row[7] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[10]) + ROW_B7_BASE;
+ map->row[8] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[10]) + ROW_B8_BASE;
+ map->row[9] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[10]) + ROW_B9_BASE;
+ map->row[10] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[11]) + ROW_B10_BASE;
+ }
+
+ map->row[11] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[5]);
+ map->row[11] = map->row[11] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->row[11] + ROW_B11_BASE;
+
+ map->row[12] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[6]);
+ map->row[12] = map->row[12] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->row[12] + ROW_B12_BASE;
+
+ map->row[13] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[6]);
+ map->row[13] = map->row[13] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->row[13] + ROW_B13_BASE;
+
+ map->row[14] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[6]);
+ map->row[14] = map->row[14] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->row[14] + ROW_B14_BASE;
+
+ map->row[15] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[6]);
+ map->row[15] = map->row[15] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->row[15] + ROW_B15_BASE;
+
+ if (priv->info.sdram_mode == MEM_DDR4 || priv->info.sdram_mode == MEM_LPDDR4) {
+ map->row[16] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[7]);
+ map->row[16] = map->row[16] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->row[16] + ROW_B16_BASE;
+
+ map->row[17] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[7]);
+ map->row[17] = map->row[17] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->row[17] + ROW_B17_BASE;
+ }
+}
+
+/**
+ * snps_get_hif_col_map - Get HIF/SDRAM-column address map.
+ * @priv: DDR memory controller private instance data.
+ * @addrmap: Array with ADDRMAP registers value.
+ *
+ * SDRAM-column address is defined by the fields in the ADDRMAP[2-4]
+ * registers. Those fields' values indicate the HIF address bits used to
+ * encode the DDR column address.
+ */
+static void snps_get_hif_col_map(struct snps_edac_priv *priv, u32 *addrmap)
+{
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ int i;
+
+ for (i = 0; i < DDR_MAX_COL_WIDTH; i++)
+ map->col[i] = DDR_ADDRMAP_UNUSED;
+
+ map->col[0] = 0;
+ map->col[1] = 1;
+ map->col[2] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[2]) + COL_B2_BASE;
+ map->col[3] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[2]) + COL_B3_BASE;
+
+ map->col[4] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[2]);
+ map->col[4] = map->col[4] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->col[4] + COL_B4_BASE;
+
+ map->col[5] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[2]);
+ map->col[5] = map->col[5] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->col[5] + COL_B5_BASE;
+
+ map->col[6] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[3]);
+ map->col[6] = map->col[6] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->col[6] + COL_B6_BASE;
+
+ map->col[7] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[3]);
+ map->col[7] = map->col[7] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->col[7] + COL_B7_BASE;
+
+ map->col[8] = FIELD_GET(DDR_ADDRMAP_B16_M15, addrmap[3]);
+ map->col[8] = map->col[8] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->col[8] + COL_B8_BASE;
+
+ map->col[9] = FIELD_GET(DDR_ADDRMAP_B24_M15, addrmap[3]);
+ map->col[9] = map->col[9] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->col[9] + COL_B9_BASE;
+
+ map->col[10] = FIELD_GET(DDR_ADDRMAP_B0_M15, addrmap[4]);
+ map->col[10] = map->col[10] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->col[10] + COL_B10_BASE;
+
+ map->col[11] = FIELD_GET(DDR_ADDRMAP_B8_M15, addrmap[4]);
+ map->col[11] = map->col[11] == DDR_ADDRMAP_MAX_15 ?
+ DDR_ADDRMAP_UNUSED : map->col[11] + COL_B11_BASE;
+
+ /*
+ * In case of the non-Full DQ bus mode the lowest columns are
+ * unmapped and used by the controller to read the full DQ word
+ * in multiple cycles (col[0] for the Half bus mode, col[0:1] for
+ * the Quarter bus mode).
+ */
+ if (priv->info.dq_mode) {
+ for (i = 11 + priv->info.dq_mode; i >= priv->info.dq_mode; i--) {
+ map->col[i] = map->col[i - priv->info.dq_mode];
+ map->col[i - priv->info.dq_mode] = DDR_ADDRMAP_UNUSED;
+ }
+ }
+
+ /*
+ * Per JEDEC DDR2/3/4/mDDR specification, column address bit 10 is
+ * reserved for indicating auto-precharge, and hence no source
+ * address bit can be mapped to col[10].
+ */
+ if (priv->info.sdram_mode == MEM_LPDDR || priv->info.sdram_mode == MEM_DDR2 ||
+ priv->info.sdram_mode == MEM_DDR3 || priv->info.sdram_mode == MEM_DDR4) {
+ for (i = 12 + priv->info.dq_mode; i > 10; i--) {
+ map->col[i] = map->col[i - 1];
+ map->col[i - 1] = DDR_ADDRMAP_UNUSED;
+ }
+ }
+
+ /*
+ * Per JEDEC specification, column address bit 12 is reserved
+ * for the Burst-chop status, so no source address bit mapping
+ * for col[12] either.
+ */
+ map->col[13] = map->col[12];
+ map->col[12] = DDR_ADDRMAP_UNUSED;
+}
+
+/**
+ * snps_get_hif_bank_map - Get HIF/SDRAM-bank address map.
+ * @priv: DDR memory controller private instance data.
+ * @addrmap: Array with ADDRMAP registers value.
+ *
+ * SDRAM-bank address is defined by the fields in the ADDRMAP[1]
+ * register. Those fields' values indicate the HIF address bits used to
+ * encode the DDR bank address.
+ */
+static void snps_get_hif_bank_map(struct snps_edac_priv *priv, u32 *addrmap)
+{
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ int i;
+
+ for (i = 0; i < DDR_MAX_BANK_WIDTH; i++)
+ map->bank[i] = DDR_ADDRMAP_UNUSED;
+
+ map->bank[0] = FIELD_GET(DDR_ADDRMAP_B0_M31, addrmap[1]) + BANK_B0_BASE;
+ map->bank[1] = FIELD_GET(DDR_ADDRMAP_B8_M31, addrmap[1]) + BANK_B1_BASE;
+
+ map->bank[2] = FIELD_GET(DDR_ADDRMAP_B16_M31, addrmap[1]);
+ map->bank[2] = map->bank[2] == DDR_ADDRMAP_MAX_31 ?
+ DDR_ADDRMAP_UNUSED : map->bank[2] + BANK_B2_BASE;
+}
+
+/**
+ * snps_get_hif_bankgrp_map - Get HIF/SDRAM-bank group address map.
+ * @priv: DDR memory controller private instance data.
+ * @addrmap: Array with ADDRMAP registers value.
+ *
+ * SDRAM-bank group address is defined by the fields in the ADDRMAP[8]
+ * register. Those fields' values indicate the HIF address bits used to
+ * encode the DDR bank group address.
+ */
+static void snps_get_hif_bankgrp_map(struct snps_edac_priv *priv, u32 *addrmap)
+{
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ int i;
+
+ for (i = 0; i < DDR_MAX_BANKGRP_WIDTH; i++)
+ map->bankgrp[i] = DDR_ADDRMAP_UNUSED;
+
+ /* Bank group signals are available on the DDR4 memory only */
+ if (priv->info.sdram_mode != MEM_DDR4)
+ return;
+
+ map->bankgrp[0] = FIELD_GET(DDR_ADDRMAP_B0_M31, addrmap[8]) + BANKGRP_B0_BASE;
+
+ map->bankgrp[1] = FIELD_GET(DDR_ADDRMAP_B8_M31, addrmap[8]);
+ map->bankgrp[1] = map->bankgrp[1] == DDR_ADDRMAP_MAX_31 ?
+ DDR_ADDRMAP_UNUSED : map->bankgrp[1] + BANKGRP_B1_BASE;
+}
+
+/**
+ * snps_get_hif_rank_map - Get HIF/SDRAM-rank address map.
+ * @priv: DDR memory controller private instance data.
+ * @addrmap: Array with ADDRMAP registers value.
+ *
+ * SDRAM-rank address is defined by the fields in the ADDRMAP[0]
+ * register. Those fields' values indicate the HIF address bits used to
+ * encode the DDR rank address.
+ */
+static void snps_get_hif_rank_map(struct snps_edac_priv *priv, u32 *addrmap)
+{
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ int i;
+
+ for (i = 0; i < DDR_MAX_RANK_WIDTH; i++)
+ map->rank[i] = DDR_ADDRMAP_UNUSED;
+
+ if (priv->info.ranks > 1) {
+ map->rank[0] = FIELD_GET(DDR_ADDRMAP_B0_M31, addrmap[0]);
+ map->rank[0] = map->rank[0] == DDR_ADDRMAP_MAX_31 ?
+ DDR_ADDRMAP_UNUSED : map->rank[0] + RANK_B0_BASE;
+ }
+
+ if (priv->info.ranks > 2) {
+ map->rank[1] = FIELD_GET(DDR_ADDRMAP_B8_M31, addrmap[0]);
+ map->rank[1] = map->rank[1] == DDR_ADDRMAP_MAX_31 ?
+ DDR_ADDRMAP_UNUSED : map->rank[1] + RANK_B1_BASE;
+ }
+}
+
+/**
+ * snps_get_addr_map - Get HIF/SDRAM/etc address map from CSRs.
+ * @priv: DDR memory controller private instance data.
+ *
+ * Parse the controller registers content creating the addresses mapping tables.
+ * They will be used for the erroneous and poison addresses encode/decode.
+ */
+static void snps_get_addr_map(struct snps_edac_priv *priv)
+{
+ u32 regval[max(DDR_ADDRMAP_NREGS, 2 * DDR_MAX_NSAR)];
+ int i;
+
+ for (i = 0; i < 2 * DDR_MAX_NSAR; i++)
+ regval[i] = readl(priv->baseaddr + DDR_SARBASE0_OFST + i * 4);
+
+ snps_get_sys_app_map(priv, regval);
- priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
- priv->row_shift[1] = ((addrmap[5] >> 8) &
- ROW_MAX_VAL_MASK) + ROW_B1_BASE;
+ for (i = 0; i < DDR_ADDRMAP_NREGS; i++)
+ regval[i] = readl(priv->baseaddr + DDR_ADDRMAP0_OFST + i * 4);
- addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
- if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
- for (index = 2; index < 11; index++)
- priv->row_shift[index] = addrmap_row_b2_10 +
- index + ROW_B0_BASE;
+ snps_get_hif_row_map(priv, regval);
+ snps_get_hif_col_map(priv, regval);
+
+ snps_get_hif_bank_map(priv, regval);
+
+ snps_get_hif_bankgrp_map(priv, regval);
+
+ snps_get_hif_rank_map(priv, regval);
+}
+
+/**
+ * snps_get_sdram_size - Calculate SDRAM size.
+ * @priv: DDR memory controller private data.
+ *
+ * The total size of the attached memory is calculated based on the HIF/SDRAM
+ * mapping table. It can be done since the hardware reference manual demands
+ * that no two SDRAM address bits be mapped to the same HIF bit and that the
+ * mapping of the unused SDRAM address bits be disabled.
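+ *
+ * For instance (illustrative config), 16 row + 10 column + 3 bank mapped
+ * bits on a 32-bit DQ bus (dq_width = 2) give 1ULL << (29 + 2) = 2 GiB.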
+ *
+ * Return: the memory size in bytes.
+ */
+static u64 snps_get_sdram_size(struct snps_edac_priv *priv)
+{
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ u64 size = 0;
+ int i;
+
+ for (i = 0; i < DDR_MAX_ROW_WIDTH; i++) {
+ if (map->row[i] != DDR_ADDRMAP_UNUSED)
+ size++;
+ }
+
+ for (i = 0; i < DDR_MAX_COL_WIDTH; i++) {
+ if (map->col[i] != DDR_ADDRMAP_UNUSED)
+ size++;
+ }
+
+ for (i = 0; i < DDR_MAX_BANK_WIDTH; i++) {
+ if (map->bank[i] != DDR_ADDRMAP_UNUSED)
+ size++;
+ }
+
+ for (i = 0; i < DDR_MAX_BANKGRP_WIDTH; i++) {
+ if (map->bankgrp[i] != DDR_ADDRMAP_UNUSED)
+ size++;
+ }
+
+ /* Skip the ranks since the multi-rankness is determined by layer[0] */
+
+ return 1ULL << (size + priv->info.dq_width);
+}
+
+/**
+ * snps_init_csrows - Initialize the csrow data.
+ * @mci: EDAC memory controller instance.
+ *
+ * Initialize the chip select rows associated with the EDAC memory
+ * controller instance.
+ */
+static void snps_init_csrows(struct mem_ctl_info *mci)
+{
+ struct snps_edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ u32 row, width;
+ u64 size;
+ int j;
+
+ /* Actual SDRAM-word width for which ECC is calculated */
+ width = 1U << (priv->info.dq_width - priv->info.dq_mode);
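+ /* e.g. a 32-bit bus (dq_width == 2) in full mode (dq_mode == 0) gives a 4-byte grain */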
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ size = snps_get_sdram_size(priv);
+
+ for (j = 0; j < csi->nr_channels; j++) {
+ dimm = csi->channels[j]->dimm;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = priv->info.sdram_mode;
+ dimm->nr_pages = PHYS_PFN(size) / csi->nr_channels;
+ dimm->grain = width;
+ dimm->dtype = priv->info.dev_cfg;
+ }
+ }
+}
+
+/**
+ * snps_mc_create - Create and initialize MC instance.
+ * @priv: DDR memory controller private data.
+ *
+ * Allocate the EDAC memory controller descriptor and initialize it
+ * using the private data info.
+ *
+ * Return: MC data instance or negative errno.
+ */
+static struct mem_ctl_info *snps_mc_create(struct snps_edac_priv *priv)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = priv->info.ranks;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = SNPS_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+
+ mci = edac_mc_alloc(EDAC_AUTO_MC_NUM, ARRAY_SIZE(layers), layers, 0);
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed memory allocation for mc instance\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mci->pvt_info = priv;
+ mci->pdev = &priv->pdev->dev;
+ platform_set_drvdata(priv->pdev, mci);
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_LPDDR | MEM_FLAG_DDR2 | MEM_FLAG_LPDDR2 |
+ MEM_FLAG_DDR3 | MEM_FLAG_LPDDR3 |
+ MEM_FLAG_DDR4 | MEM_FLAG_LPDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_PARITY;
+ mci->edac_cap = mci->edac_ctl_cap;
+
+ if (priv->info.caps & SNPS_CAP_ECC_SCRUB) {
+ mci->scrub_mode = SCRUB_HW_SRC;
+ mci->scrub_cap = SCRUB_FLAG_HW_SRC;
} else {
- priv->row_shift[2] = (addrmap[9] &
- ROW_MAX_VAL_MASK) + ROW_B2_BASE;
- priv->row_shift[3] = ((addrmap[9] >> 8) &
- ROW_MAX_VAL_MASK) + ROW_B3_BASE;
- priv->row_shift[4] = ((addrmap[9] >> 16) &
- ROW_MAX_VAL_MASK) + ROW_B4_BASE;
- priv->row_shift[5] = ((addrmap[9] >> 24) &
- ROW_MAX_VAL_MASK) + ROW_B5_BASE;
- priv->row_shift[6] = (addrmap[10] &
- ROW_MAX_VAL_MASK) + ROW_B6_BASE;
- priv->row_shift[7] = ((addrmap[10] >> 8) &
- ROW_MAX_VAL_MASK) + ROW_B7_BASE;
- priv->row_shift[8] = ((addrmap[10] >> 16) &
- ROW_MAX_VAL_MASK) + ROW_B8_BASE;
- priv->row_shift[9] = ((addrmap[10] >> 24) &
- ROW_MAX_VAL_MASK) + ROW_B9_BASE;
- priv->row_shift[10] = (addrmap[11] &
- ROW_MAX_VAL_MASK) + ROW_B10_BASE;
+ mci->scrub_mode = SCRUB_SW_SRC;
+ mci->scrub_cap = SCRUB_FLAG_SW_SRC;
+ }
+
+ if (priv->info.caps & SNPS_CAP_ECC_SCRUBBER) {
+ mci->scrub_cap |= SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_TUN;
+ mci->set_sdram_scrub_rate = snps_set_sdram_scrub_rate;
+ mci->get_sdram_scrub_rate = snps_get_sdram_scrub_rate;
}
- priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
- ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
- ROW_MAX_VAL_MASK) + ROW_B11_BASE);
- priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
- ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
- ROW_MAX_VAL_MASK) + ROW_B12_BASE);
- priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
- ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
- ROW_MAX_VAL_MASK) + ROW_B13_BASE);
- priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
- ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
- ROW_MAX_VAL_MASK) + ROW_B14_BASE);
- priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
- ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
- ROW_MAX_VAL_MASK) + ROW_B15_BASE);
- priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
- ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
- ROW_MAX_VAL_MASK) + ROW_B16_BASE);
- priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
- ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
- ROW_MAX_VAL_MASK) + ROW_B17_BASE);
+ mci->ctl_name = "snps_umctl2_ddrc";
+ mci->dev_name = SNPS_EDAC_MOD_STRING;
+ mci->mod_name = SNPS_EDAC_MOD_VER;
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ mci->ctl_page_to_phys = NULL;
+
+ snps_init_csrows(mci);
+
+ return mci;
+}
+
+/**
+ * snps_mc_free - Free MC instance.
+ * @mci: EDAC memory controller instance.
+ *
+ * Revert what was done in snps_mc_create().
+ *
+ * Return: none.
+ */
+static void snps_mc_free(struct mem_ctl_info *mci)
+{
+ struct snps_edac_priv *priv = mci->pvt_info;
+
+ platform_set_drvdata(priv->pdev, NULL);
+
+ edac_mc_free(mci);
}
-static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+/**
+ * snps_request_ind_irq - Request individual DDRC IRQs.
+ * @mci: EDAC memory controller instance.
+ *
+ * Return: 0 if the IRQs were successfully requested, 1 if the individual IRQs
+ * are unavailable, otherwise negative errno.
+ */
+static int snps_request_ind_irq(struct mem_ctl_info *mci)
{
- u32 width, memtype;
- int index;
-
- memtype = readl(priv->baseaddr + CTRL_OFST);
- width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
-
- priv->col_shift[0] = 0;
- priv->col_shift[1] = 1;
- priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
- priv->col_shift[3] = ((addrmap[2] >> 8) &
- COL_MAX_VAL_MASK) + COL_B3_BASE;
- priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
- COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
- COL_MAX_VAL_MASK) + COL_B4_BASE);
- priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
- COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
- COL_MAX_VAL_MASK) + COL_B5_BASE);
- priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
- COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
- COL_MAX_VAL_MASK) + COL_B6_BASE);
- priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
- COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
- COL_MAX_VAL_MASK) + COL_B7_BASE);
- priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
- COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
- COL_MAX_VAL_MASK) + COL_B8_BASE);
- priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
- COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
- COL_MAX_VAL_MASK) + COL_B9_BASE);
- if (width == DDRCTL_EWDTH_64) {
- if (memtype & MEM_TYPE_LPDDR3) {
- priv->col_shift[10] = ((addrmap[4] &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- ((addrmap[4] & COL_MAX_VAL_MASK) +
- COL_B10_BASE);
- priv->col_shift[11] = (((addrmap[4] >> 8) &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- (((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
- COL_B11_BASE);
- } else {
- priv->col_shift[11] = ((addrmap[4] &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- ((addrmap[4] & COL_MAX_VAL_MASK) +
- COL_B10_BASE);
- priv->col_shift[13] = (((addrmap[4] >> 8) &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- (((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
- COL_B11_BASE);
+ struct snps_edac_priv *priv = mci->pvt_info;
+ struct device *dev = &priv->pdev->dev;
+ int rc, irq;
+
+ irq = platform_get_irq_byname_optional(priv->pdev, "ecc_ce");
+ if (irq == -ENXIO)
+ return 1;
+ if (irq < 0)
+ return irq;
+
+ rc = devm_request_irq(dev, irq, snps_ce_irq_handler, 0, "ecc_ce", mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to request ECC CE IRQ\n");
+ return rc;
+ }
+
+ irq = platform_get_irq_byname(priv->pdev, "ecc_ue");
+ if (irq < 0)
+ return irq;
+
+ rc = devm_request_irq(dev, irq, snps_ue_irq_handler, 0, "ecc_ue", mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to request ECC UE IRQ\n");
+ return rc;
+ }
+
+ irq = platform_get_irq_byname_optional(priv->pdev, "dfi_e");
+ if (irq > 0) {
+ rc = devm_request_irq(dev, irq, snps_dfi_irq_handler, 0, "dfi_e", mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to request DFI IRQ\n");
+ return rc;
}
- } else if (width == DDRCTL_EWDTH_32) {
- if (memtype & MEM_TYPE_LPDDR3) {
- priv->col_shift[10] = (((addrmap[3] >> 24) &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
- COL_B9_BASE);
- priv->col_shift[11] = ((addrmap[4] &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- ((addrmap[4] & COL_MAX_VAL_MASK) +
- COL_B10_BASE);
- } else {
- priv->col_shift[11] = (((addrmap[3] >> 24) &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
- COL_B9_BASE);
- priv->col_shift[13] = ((addrmap[4] &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- ((addrmap[4] & COL_MAX_VAL_MASK) +
- COL_B10_BASE);
+ }
+
+ irq = platform_get_irq_byname_optional(priv->pdev, "ecc_sbr");
+ if (irq > 0) {
+ rc = devm_request_irq(dev, irq, snps_sbr_irq_handler, 0, "ecc_sbr", mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to request Sbr IRQ\n");
+ return rc;
}
+ }
+
+ return 0;
+}
+
+/**
+ * snps_request_com_irq - Request common DDRC IRQ.
+ * @mci: EDAC memory controller instance.
+ *
+ * It first attempts to get the named IRQ. If that fails, the method falls
+ * back to the first available one.
+ *
+ * Return: 0 if the IRQ was successfully requested, otherwise negative errno.
+ */
+static int snps_request_com_irq(struct mem_ctl_info *mci)
+{
+ struct snps_edac_priv *priv = mci->pvt_info;
+ struct device *dev = &priv->pdev->dev;
+ int rc, irq;
+
+ irq = platform_get_irq_byname_optional(priv->pdev, "ecc");
+ if (irq < 0) {
+ irq = platform_get_irq(priv->pdev, 0);
+ if (irq < 0)
+ return irq;
+ }
+
+ rc = devm_request_irq(dev, irq, snps_com_irq_handler, 0, "ecc", mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * snps_setup_irq - Request and enable DDRC IRQs.
+ * @mci: EDAC memory controller instance.
+ *
+ * It first tries to get and request the individual IRQs. If that fails, the
+ * method falls back to the common IRQ line case. The IRQs will be enabled
+ * only if at least one of these requests succeeds.
+ *
+ * Return: 0 if the IRQs were successfully set up, otherwise negative errno.
+ */
+static int snps_setup_irq(struct mem_ctl_info *mci)
+{
+ struct snps_edac_priv *priv = mci->pvt_info;
+ int rc;
+
+ rc = snps_request_ind_irq(mci);
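+ /* A positive value means no individual IRQs were declared; try the common one */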
+ if (rc > 0)
+ rc = snps_request_com_irq(mci);
+ if (rc)
+ return rc;
+
+ snps_enable_irq(priv);
+
+ return 0;
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+
+#define SNPS_DEBUGFS_FOPS(__name, __read, __write) \
+ static const struct file_operations __name = { \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = __read, \
+ .write = __write, \
+ }
+
+#define SNPS_DBGFS_BUF_LEN 128
+
+static int snps_ddrc_info_show(struct seq_file *s, void *data)
+{
+ struct mem_ctl_info *mci = s->private;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ unsigned long rate;
+
+ seq_printf(s, "SDRAM: %s\n", edac_mem_types[priv->info.sdram_mode]);
+
+ rate = clk_get_rate(priv->clks[SNPS_CORE_CLK].clk);
+ if (rate) {
+ rate = rate / HZ_PER_MHZ;
+ seq_printf(s, "Clock: Core %luMHz SDRAM %luMHz\n",
+ rate, priv->info.freq_ratio * rate);
+ }
+
+ seq_printf(s, "DQ bus: %u/%s\n", (BITS_PER_BYTE << priv->info.dq_width),
+ priv->info.dq_mode == SNPS_DQ_FULL ? "Full" :
+ priv->info.dq_mode == SNPS_DQ_HALF ? "Half" :
+ priv->info.dq_mode == SNPS_DQ_QRTR ? "Quarter" :
+ "Unknown");
+ seq_printf(s, "Burst: SDRAM %u HIF %u\n", priv->info.sdram_burst_len,
+ priv->info.hif_burst_len);
+
+ seq_printf(s, "Ranks: %u\n", priv->info.ranks);
+
+ seq_printf(s, "ECC: %s\n",
+ priv->info.ecc_mode == SNPS_ECC_SECDED ? "SEC/DED" :
+ priv->info.ecc_mode == SNPS_ECC_ADVX4X8 ? "Advanced X4/X8" :
+ "Unknown");
+
+ seq_puts(s, "Caps:");
+ if (priv->info.caps) {
+ if (priv->info.caps & SNPS_CAP_ECC_SCRUB)
+ seq_puts(s, " +Scrub");
+ if (priv->info.caps & SNPS_CAP_ECC_SCRUBBER)
+ seq_puts(s, " +Scrubber");
+ if (priv->info.caps & SNPS_CAP_ZYNQMP)
+ seq_puts(s, " +ZynqMP");
} else {
- if (memtype & MEM_TYPE_LPDDR3) {
- priv->col_shift[10] = (((addrmap[3] >> 16) &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
- COL_B8_BASE);
- priv->col_shift[11] = (((addrmap[3] >> 24) &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
- COL_B9_BASE);
- priv->col_shift[13] = ((addrmap[4] &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- ((addrmap[4] & COL_MAX_VAL_MASK) +
- COL_B10_BASE);
- } else {
- priv->col_shift[11] = (((addrmap[3] >> 16) &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
- COL_B8_BASE);
- priv->col_shift[13] = (((addrmap[3] >> 24) &
- COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
- (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
- COL_B9_BASE);
+ seq_puts(s, " -");
+ }
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(snps_ddrc_info);
+
+static int snps_sys_app_map_show(struct seq_file *s, void *data)
+{
+ struct mem_ctl_info *mci = s->private;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ struct snps_sys_app_map *map = &priv->sys_app_map;
+ u64 size;
+ int i;
+
+ if (!map->nsar) {
+ seq_puts(s, "No SARs detected\n");
+ return 0;
+ }
+
+ seq_printf(s, "%9s %-37s %-18s %-37s\n",
+ "", "System address", "Offset", "App address");
+
+ for (i = 0, size = 0; i < map->nsar; i++) {
+ seq_printf(s, "Region %d: ", i);
+ seq_printf(s, "0x%016llx-0x%016llx ", map->sar[i].base,
+ map->sar[i].base + map->sar[i].size - 1);
+ seq_printf(s, "0x%016llx ", map->sar[i].ofst);
+ seq_printf(s, "0x%016llx-0x%016llx\n", size,
+ size + map->sar[i].size - 1);
+ size += map->sar[i].size;
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(snps_sys_app_map);
+
+static u8 snps_find_sdram_dim(struct snps_edac_priv *priv, u8 hif, char *dim)
+{
+ struct snps_hif_sdram_map *map = &priv->hif_sdram_map;
+ int i;
+
+ for (i = 0; i < DDR_MAX_ROW_WIDTH; i++) {
+ if (map->row[i] == hif) {
+ *dim = 'r';
+ return i;
+ }
+ }
+
+ for (i = 0; i < DDR_MAX_COL_WIDTH; i++) {
+ if (map->col[i] == hif) {
+ *dim = 'c';
+ return i;
+ }
+ }
+
+ for (i = 0; i < DDR_MAX_BANK_WIDTH; i++) {
+ if (map->bank[i] == hif) {
+ *dim = 'b';
+ return i;
+ }
+ }
+
+ for (i = 0; i < DDR_MAX_BANKGRP_WIDTH; i++) {
+ if (map->bankgrp[i] == hif) {
+ *dim = 'g';
+ return i;
}
}
- if (width) {
- for (index = 9; index > width; index--) {
- priv->col_shift[index] = priv->col_shift[index - width];
- priv->col_shift[index - width] = 0;
+ for (i = 0; i < DDR_MAX_RANK_WIDTH; i++) {
+ if (map->rank[i] == hif) {
+ *dim = 'a';
+ return i;
}
}
+ return DDR_ADDRMAP_UNUSED;
+}
+
+static int snps_hif_sdram_map_show(struct seq_file *s, void *data)
+{
+ struct mem_ctl_info *mci = s->private;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ char dim, buf[SNPS_DBGFS_BUF_LEN];
+ const int line_len = 10;
+ u8 bit;
+ int i;
+
+ seq_printf(s, "%3s", "");
+ for (i = 0; i < line_len; i++)
+ seq_printf(s, " %02d ", i);
+
+ for (i = 0; i < DDR_MAX_HIF_WIDTH; i++) {
+ if (i % line_len == 0)
+ seq_printf(s, "\n%02d ", i);
+
+ bit = snps_find_sdram_dim(priv, i, &dim);
+
+ if (bit != DDR_ADDRMAP_UNUSED)
+ scnprintf(buf, SNPS_DBGFS_BUF_LEN, "%c%hhu", dim, bit);
+ else
+ scnprintf(buf, SNPS_DBGFS_BUF_LEN, "--");
+
+ seq_printf(s, "%3s ", buf);
+ }
+ seq_putc(s, '\n');
+
+ seq_puts(s, "r - row, c - column, b - bank, g - bank group, a - rank\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(snps_hif_sdram_map);
+
+static ssize_t snps_inject_data_error_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct mem_ctl_info *mci = filep->private_data;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ struct snps_sdram_addr sdram;
+ char buf[SNPS_DBGFS_BUF_LEN];
+ dma_addr_t sys;
+ u32 regval;
+ int pos;
+
+ regval = readl(priv->baseaddr + ECC_POISON0_OFST);
+ sdram.rank = FIELD_GET(ECC_POISON0_RANK_MASK, regval);
+ sdram.col = FIELD_GET(ECC_POISON0_COL_MASK, regval);
+
+ regval = readl(priv->baseaddr + ECC_POISON1_OFST);
+ sdram.bankgrp = FIELD_GET(ECC_POISON1_BANKGRP_MASK, regval);
+ sdram.bank = FIELD_GET(ECC_POISON1_BANK_MASK, regval);
+ sdram.row = FIELD_GET(ECC_POISON1_ROW_MASK, regval);
+
+ snps_map_sdram_to_sys(priv, &sdram, &sys);
+
+ pos = scnprintf(buf, sizeof(buf),
+ "%pad: Row %hu Col %hu Bank %hhu Bank Group %hhu Rank %hhu\n",
+ &sys, sdram.row, sdram.col, sdram.bank, sdram.bankgrp,
+ sdram.rank);
+
+ return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
-static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+static ssize_t snps_inject_data_error_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
{
- priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
- priv->bank_shift[1] = ((addrmap[1] >> 8) &
- BANK_MAX_VAL_MASK) + BANK_B1_BASE;
- priv->bank_shift[2] = (((addrmap[1] >> 16) &
- BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
- (((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
- BANK_B2_BASE);
+ struct mem_ctl_info *mci = filep->private_data;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ struct snps_sdram_addr sdram;
+ u32 regval;
+ u64 sys;
+ int rc;
+
+ rc = kstrtou64_from_user(ubuf, size, 0, &sys);
+ if (rc)
+ return rc;
+
+ snps_map_sys_to_sdram(priv, sys, &sdram);
+
+ regval = FIELD_PREP(ECC_POISON0_RANK_MASK, sdram.rank) |
+ FIELD_PREP(ECC_POISON0_COL_MASK, sdram.col);
+ writel(regval, priv->baseaddr + ECC_POISON0_OFST);
+
+ regval = FIELD_PREP(ECC_POISON1_BANKGRP_MASK, sdram.bankgrp) |
+ FIELD_PREP(ECC_POISON1_BANK_MASK, sdram.bank) |
+ FIELD_PREP(ECC_POISON1_ROW_MASK, sdram.row);
+ writel(regval, priv->baseaddr + ECC_POISON1_OFST);
+ return size;
}
-static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+SNPS_DEBUGFS_FOPS(snps_inject_data_error, snps_inject_data_error_read,
+ snps_inject_data_error_write);
+
+static ssize_t snps_inject_data_poison_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
{
- priv->bankgrp_shift[0] = (addrmap[8] &
- BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
- priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
- BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
- & BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
+ struct mem_ctl_info *mci = filep->private_data;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ char buf[SNPS_DBGFS_BUF_LEN];
+ const char *errstr;
+ u32 regval;
+ int pos;
+
+ regval = readl(priv->baseaddr + ECC_CFG1_OFST);
+ if (!(regval & ECC_CFG1_POISON_EN))
+ errstr = "Off";
+ else if (regval & ECC_CFG1_POISON_BIT)
+ errstr = "CE";
+ else
+ errstr = "UE";
+
+ pos = scnprintf(buf, sizeof(buf), "%s\n", errstr);
+ return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
-static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+static ssize_t snps_inject_data_poison_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
{
- priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
- RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
- RANK_MAX_VAL_MASK) + RANK_B0_BASE);
+ struct mem_ctl_info *mci = filep->private_data;
+ struct snps_edac_priv *priv = mci->pvt_info;
+ char buf[SNPS_DBGFS_BUF_LEN] = {0};
+ u32 regval;
+ int rc;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf), offp, ubuf, size);
+ if (rc < 0)
+ return rc;
+
+ writel(0, priv->baseaddr + DDR_SWCTL);
+
+ regval = readl(priv->baseaddr + ECC_CFG1_OFST);
+ if (strncmp(buf, "CE", 2) == 0)
+ regval |= ECC_CFG1_POISON_BIT | ECC_CFG1_POISON_EN;
+ else if (strncmp(buf, "UE", 2) == 0)
+ regval = (regval & ~ECC_CFG1_POISON_BIT) | ECC_CFG1_POISON_EN;
+ else
+ regval &= ~ECC_CFG1_POISON_EN;
+ writel(regval, priv->baseaddr + ECC_CFG1_OFST);
+
+ writel(1, priv->baseaddr + DDR_SWCTL);
+
+ return size;
}
+SNPS_DEBUGFS_FOPS(snps_inject_data_poison, snps_inject_data_poison_read,
+ snps_inject_data_poison_write);
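+
+/*
+ * Usage sketch (assuming the nodes created below): writing "CE" to
+ * inject_data_poison arms a correctable-error poison, "UE" arms an
+ * uncorrectable one, and any other value disables the injection.
+ */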
+
/**
- * setup_address_map - Set Address Map by querying ADDRMAP registers.
- * @priv: DDR memory controller private instance data.
+ * snps_create_debugfs_nodes - Create DebugFS nodes.
+ * @mci: EDAC memory controller instance.
*
- * Set Address Map by querying ADDRMAP registers.
+ * Create DW uMCTL2 EDAC driver DebugFS nodes in the device private
+ * DebugFS directory.
*
* Return: none.
*/
-static void setup_address_map(struct synps_edac_priv *priv)
+static void snps_create_debugfs_nodes(struct mem_ctl_info *mci)
{
- u32 addrmap[12];
- int index;
+ edac_debugfs_create_file("ddrc_info", 0400, mci->debugfs, mci,
+ &snps_ddrc_info_fops);
- for (index = 0; index < 12; index++) {
- u32 addrmap_offset;
+ edac_debugfs_create_file("sys_app_map", 0400, mci->debugfs, mci,
+ &snps_sys_app_map_fops);
- addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
- addrmap[index] = readl(priv->baseaddr + addrmap_offset);
- }
+ edac_debugfs_create_file("hif_sdram_map", 0400, mci->debugfs, mci,
+ &snps_hif_sdram_map_fops);
- setup_row_address_map(priv, addrmap);
+ edac_debugfs_create_file("inject_data_error", 0600, mci->debugfs, mci,
+ &snps_inject_data_error);
- setup_column_address_map(priv, addrmap);
+ edac_debugfs_create_file("inject_data_poison", 0600, mci->debugfs, mci,
+ &snps_inject_data_poison);
+}
- setup_bank_address_map(priv, addrmap);
+#else /* !CONFIG_EDAC_DEBUG */
- setup_bg_address_map(priv, addrmap);
+static inline void snps_create_debugfs_nodes(struct mem_ctl_info *mci) {}
- setup_rank_address_map(priv, addrmap);
-}
-#endif /* CONFIG_EDAC_DEBUG */
+#endif /* !CONFIG_EDAC_DEBUG */
/**
- * mc_probe - Check controller and bind driver.
+ * snps_mc_probe - Check controller and bind driver.
* @pdev: platform device.
*
* Probe a specific controller instance for binding with the driver.
* Return: 0 if the controller instance was successfully bound to the
* driver; otherwise, < 0 on error.
*/
-static int mc_probe(struct platform_device *pdev)
+static int snps_mc_probe(struct platform_device *pdev)
{
- const struct synps_platform_data *p_data;
- struct edac_mc_layer layers[2];
- struct synps_edac_priv *priv;
+ struct snps_edac_priv *priv;
struct mem_ctl_info *mci;
- void __iomem *baseaddr;
- struct resource *res;
int rc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- baseaddr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(baseaddr))
- return PTR_ERR(baseaddr);
+ priv = snps_create_data(pdev);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- p_data = of_device_get_match_data(&pdev->dev);
- if (!p_data)
- return -ENODEV;
+ rc = snps_get_res(priv);
+ if (rc)
+ return rc;
- if (!p_data->get_ecc_state(baseaddr)) {
- edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
- return -ENXIO;
- }
+ rc = snps_get_ddrc_info(priv);
+ if (rc)
+ goto put_res;
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = SYNPS_EDAC_NR_CSROWS;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = SYNPS_EDAC_NR_CHANS;
- layers[1].is_virt_csrow = false;
+ snps_get_addr_map(priv);
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
- sizeof(struct synps_edac_priv));
- if (!mci) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Failed memory allocation for mc instance\n");
- return -ENOMEM;
+ mci = snps_mc_create(priv);
+ if (IS_ERR(mci)) {
+ rc = PTR_ERR(mci);
+ goto put_res;
}
- priv = mci->pvt_info;
- priv->baseaddr = baseaddr;
- priv->p_data = p_data;
-
- mc_init(mci, pdev);
-
- if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
- rc = setup_irq(mci, pdev);
- if (rc)
- goto free_edac_mc;
- }
+ rc = snps_setup_irq(mci);
+ if (rc)
+ goto free_edac_mc;
rc = edac_mc_add_mc(mci);
if (rc) {
goto free_edac_mc;
}
-#ifdef CONFIG_EDAC_DEBUG
- if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
- rc = edac_create_sysfs_attributes(mci);
- if (rc) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Failed to create sysfs entries\n");
- goto free_edac_mc;
- }
- }
-
- if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
- setup_address_map(priv);
-#endif
-
- /*
- * Start capturing the correctable and uncorrectable errors. A write of
- * 0 starts the counters.
- */
- if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
- writel(0x0, baseaddr + ECC_CTRL_OFST);
+ snps_create_debugfs_nodes(mci);
- return rc;
+ return 0;
free_edac_mc:
- edac_mc_free(mci);
+ snps_mc_free(mci);
+
+put_res:
+ snps_put_res(priv);
return rc;
}
/**
- * mc_remove - Unbind driver from controller.
+ * snps_mc_remove - Unbind driver from device.
* @pdev: Platform device.
*
* Return: Unconditionally 0
*/
-static int mc_remove(struct platform_device *pdev)
+static int snps_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
- struct synps_edac_priv *priv = mci->pvt_info;
+ struct snps_edac_priv *priv = mci->pvt_info;
- if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
- disable_intr(priv);
-
-#ifdef CONFIG_EDAC_DEBUG
- if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
- edac_remove_sysfs_attributes(mci);
-#endif
+ snps_disable_irq(priv);
edac_mc_del_mc(&pdev->dev);
- edac_mc_free(mci);
+
+ snps_mc_free(mci);
+
+ snps_put_res(priv);
return 0;
}
-static struct platform_driver synps_edac_mc_driver = {
+static const struct of_device_id snps_edac_match[] = {
+ { .compatible = "xlnx,zynqmp-ddrc-2.40a", .data = zynqmp_init_plat },
+ { .compatible = "baikal,bt1-ddrc", .data = bt1_init_plat },
+ { .compatible = "snps,ddrc-3.80a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, snps_edac_match);
+
+static struct platform_driver snps_edac_mc_driver = {
.driver = {
- .name = "synopsys-edac",
- .of_match_table = synps_edac_match,
+ .name = "snps-edac",
+ .of_match_table = snps_edac_match,
},
- .probe = mc_probe,
- .remove = mc_remove,
+ .probe = snps_mc_probe,
+ .remove = snps_mc_remove,
};
-
-module_platform_driver(synps_edac_mc_driver);
+module_platform_driver(snps_edac_mc_driver);
MODULE_AUTHOR("Xilinx Inc");
-MODULE_DESCRIPTION("Synopsys DDR ECC driver");
+MODULE_DESCRIPTION("Synopsys uMCTL2 DDR ECC driver");
MODULE_LICENSE("GPL v2");
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Zynq DDR ECC Driver
+ * This driver is based on the ppc4xx_edac.c driver
+ *
+ * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ */
+
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "edac_module.h"
+
+/* Number of cs_rows needed per memory controller */
+#define ZYNQ_EDAC_NR_CSROWS 1
+
+/* Number of channels per memory controller */
+#define ZYNQ_EDAC_NR_CHANS 1
+
+/* Granularity of reported error in bytes */
+#define ZYNQ_EDAC_ERR_GRAIN 1
+
+#define ZYNQ_EDAC_MSG_SIZE 256
+
+#define ZYNQ_EDAC_MOD_STRING "zynq_edac"
+#define ZYNQ_EDAC_MOD_VER "1"
+
+/* Zynq DDR memory controller ECC registers */
+#define ZYNQ_CTRL_OFST 0x0
+#define ZYNQ_T_ZQ_OFST 0xA4
+
+/* ECC control register */
+#define ZYNQ_ECC_CTRL_OFST 0xC4
+/* ECC log register */
+#define ZYNQ_CE_LOG_OFST 0xC8
+/* ECC address register */
+#define ZYNQ_CE_ADDR_OFST 0xCC
+/* ECC data[31:0] register */
+#define ZYNQ_CE_DATA_31_0_OFST 0xD0
+
+/* Uncorrectable error info registers */
+#define ZYNQ_UE_LOG_OFST 0xDC
+#define ZYNQ_UE_ADDR_OFST 0xE0
+#define ZYNQ_UE_DATA_31_0_OFST 0xE4
+
+#define ZYNQ_STAT_OFST 0xF0
+#define ZYNQ_SCRUB_OFST 0xF4
+
+/* Control register bit field definitions */
+#define ZYNQ_CTRL_BW_MASK 0xC
+#define ZYNQ_CTRL_BW_SHIFT 2
+
+#define ZYNQ_DDRCTL_WDTH_16 1
+#define ZYNQ_DDRCTL_WDTH_32 0
+
+/* ZQ register bit field definitions */
+#define ZYNQ_T_ZQ_DDRMODE_MASK 0x2
+
+/* ECC control register bit field definitions */
+#define ZYNQ_ECC_CTRL_CLR_CE_ERR 0x2
+#define ZYNQ_ECC_CTRL_CLR_UE_ERR 0x1
+
+/* ECC correctable/uncorrectable error log register definitions */
+#define ZYNQ_LOG_VALID 0x1
+#define ZYNQ_CE_LOG_BITPOS_MASK 0xFE
+#define ZYNQ_CE_LOG_BITPOS_SHIFT 1
+
+/* ECC correctable/uncorrectable error address register definitions */
+#define ZYNQ_ADDR_COL_MASK 0xFFF
+#define ZYNQ_ADDR_ROW_MASK 0xFFFF000
+#define ZYNQ_ADDR_ROW_SHIFT 12
+#define ZYNQ_ADDR_BANK_MASK 0x70000000
+#define ZYNQ_ADDR_BANK_SHIFT 28
+
+/* ECC statistic register definitions */
+#define ZYNQ_STAT_UECNT_MASK 0xFF
+#define ZYNQ_STAT_CECNT_MASK 0xFF00
+#define ZYNQ_STAT_CECNT_SHIFT 8
+
+/* ECC scrub register definitions */
+#define ZYNQ_SCRUB_MODE_MASK 0x7
+#define ZYNQ_SCRUB_MODE_SECDED 0x4
+
+/**
+ * struct zynq_ecc_error_info - ECC error log information.
+ * @row: Row number.
+ * @col: Column number.
+ * @bank: Bank number.
+ * @bitpos: Bit position.
+ * @data: Data causing the error.
+ */
+struct zynq_ecc_error_info {
+ u32 row;
+ u32 col;
+ u32 bank;
+ u32 bitpos;
+ u32 data;
+};
+
+/**
+ * struct zynq_ecc_status - ECC status information to report.
+ * @ce_cnt: Correctable error count.
+ * @ue_cnt: Uncorrectable error count.
+ * @ceinfo: Correctable error log information.
+ * @ueinfo: Uncorrectable error log information.
+ */
+struct zynq_ecc_status {
+ u32 ce_cnt;
+ u32 ue_cnt;
+ struct zynq_ecc_error_info ceinfo;
+ struct zynq_ecc_error_info ueinfo;
+};
+
+/**
+ * struct zynq_edac_priv - DDR memory controller private instance data.
+ * @baseaddr: Base address of the DDR controller.
+ * @message: Buffer for framing the event specific info.
+ * @stat: ECC status information.
+ */
+struct zynq_edac_priv {
+ void __iomem *baseaddr;
+ char message[ZYNQ_EDAC_MSG_SIZE];
+ struct zynq_ecc_status stat;
+};
+
+/**
+ * zynq_get_error_info - Get the current ECC error info.
+ * @priv: DDR memory controller private instance data.
+ *
+ * Return: one if there is no error, otherwise zero.
+ */
+static int zynq_get_error_info(struct zynq_edac_priv *priv)
+{
+ struct zynq_ecc_status *p;
+ u32 regval, clearval = 0;
+ void __iomem *base;
+
+ base = priv->baseaddr;
+ p = &priv->stat;
+
+ regval = readl(base + ZYNQ_STAT_OFST);
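+ /* A zero status register means no CE/UE events have been latched */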
+ if (!regval)
+ return 1;
+
+ p->ce_cnt = (regval & ZYNQ_STAT_CECNT_MASK) >> ZYNQ_STAT_CECNT_SHIFT;
+ p->ue_cnt = regval & ZYNQ_STAT_UECNT_MASK;
+
+ regval = readl(base + ZYNQ_CE_LOG_OFST);
+ if (!(p->ce_cnt && (regval & ZYNQ_LOG_VALID)))
+ goto ue_err;
+
+ p->ceinfo.bitpos = (regval & ZYNQ_CE_LOG_BITPOS_MASK) >> ZYNQ_CE_LOG_BITPOS_SHIFT;
+ regval = readl(base + ZYNQ_CE_ADDR_OFST);
+ p->ceinfo.row = (regval & ZYNQ_ADDR_ROW_MASK) >> ZYNQ_ADDR_ROW_SHIFT;
+ p->ceinfo.col = regval & ZYNQ_ADDR_COL_MASK;
+ p->ceinfo.bank = (regval & ZYNQ_ADDR_BANK_MASK) >> ZYNQ_ADDR_BANK_SHIFT;
+ p->ceinfo.data = readl(base + ZYNQ_CE_DATA_31_0_OFST);
+ edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
+ p->ceinfo.data);
+ clearval = ZYNQ_ECC_CTRL_CLR_CE_ERR;
+
+ue_err:
+ regval = readl(base + ZYNQ_UE_LOG_OFST);
+ if (!(p->ue_cnt && (regval & ZYNQ_LOG_VALID)))
+ goto out;
+
+ regval = readl(base + ZYNQ_UE_ADDR_OFST);
+ p->ueinfo.row = (regval & ZYNQ_ADDR_ROW_MASK) >> ZYNQ_ADDR_ROW_SHIFT;
+ p->ueinfo.col = regval & ZYNQ_ADDR_COL_MASK;
+ p->ueinfo.bank = (regval & ZYNQ_ADDR_BANK_MASK) >> ZYNQ_ADDR_BANK_SHIFT;
+ p->ueinfo.data = readl(base + ZYNQ_UE_DATA_31_0_OFST);
+ clearval |= ZYNQ_ECC_CTRL_CLR_UE_ERR;
+
+out:
+ writel(clearval, base + ZYNQ_ECC_CTRL_OFST);
+ writel(0x0, base + ZYNQ_ECC_CTRL_OFST);
+
+ return 0;
+}
+
+/**
+ * zynq_handle_error - Handle Correctable and Uncorrectable errors.
+ * @mci: EDAC memory controller instance.
+ * @p: Zynq ECC status structure.
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void zynq_handle_error(struct mem_ctl_info *mci, struct zynq_ecc_status *p)
+{
+ struct zynq_edac_priv *priv = mci->pvt_info;
+ struct zynq_ecc_error_info *pinf;
+
+ if (p->ce_cnt) {
+ pinf = &p->ceinfo;
+
+ snprintf(priv->message, ZYNQ_EDAC_MSG_SIZE,
+ "Row %d Bank %d Col %d Bit %d Data 0x%08x",
+ pinf->row, pinf->bank, pinf->col,
+ pinf->bitpos, pinf->data);
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ p->ce_cnt, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ if (p->ue_cnt) {
+ pinf = &p->ueinfo;
+
+ snprintf(priv->message, ZYNQ_EDAC_MSG_SIZE,
+ "Row %d Bank %d Col %d",
+ pinf->row, pinf->bank, pinf->col);
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ p->ue_cnt, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * zynq_check_errors - Check controller for ECC errors.
+ * @mci: EDAC memory controller instance.
+ *
+ * Check and post ECC errors. Called by the polling thread.
+ */
+static void zynq_check_errors(struct mem_ctl_info *mci)
+{
+ struct zynq_edac_priv *priv = mci->pvt_info;
+ int status;
+
+ status = zynq_get_error_info(priv);
+ if (status)
+ return;
+
+ zynq_handle_error(mci, &priv->stat);
+}
+
+/**
+ * zynq_get_dtype - Return the controller memory width.
+ * @base: DDR memory controller base address.
+ *
+ * Get the EDAC device type width appropriate for the current controller
+ * configuration.
+ *
+ * Return: a device type width enumeration.
+ */
+static enum dev_type zynq_get_dtype(const void __iomem *base)
+{
+ enum dev_type dt;
+ u32 width;
+
+ width = readl(base + ZYNQ_CTRL_OFST);
+ width = (width & ZYNQ_CTRL_BW_MASK) >> ZYNQ_CTRL_BW_SHIFT;
+
+ switch (width) {
+ case ZYNQ_DDRCTL_WDTH_16:
+ dt = DEV_X2;
+ break;
+ case ZYNQ_DDRCTL_WDTH_32:
+ dt = DEV_X4;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ return dt;
+}
+
+/**
+ * zynq_get_ecc_state - Return the controller ECC enable/disable status.
+ * @base: DDR memory controller base address.
+ *
+ * Get the ECC enable/disable status of the controller.
+ *
+ * Return: true if enabled, otherwise false.
+ */
+static bool zynq_get_ecc_state(void __iomem *base)
+{
+ enum dev_type dt;
+ u32 ecctype;
+
+ dt = zynq_get_dtype(base);
+ if (dt == DEV_UNKNOWN)
+ return false;
+
+ ecctype = readl(base + ZYNQ_SCRUB_OFST) & ZYNQ_SCRUB_MODE_MASK;
+ if ((ecctype == ZYNQ_SCRUB_MODE_SECDED) && (dt == DEV_X2))
+ return true;
+
+ return false;
+}
+
+/**
+ * zynq_get_memsize - Read the size of the attached memory device.
+ *
+ * Return: the memory size in bytes.
+ */
+static u32 zynq_get_memsize(void)
+{
+ struct sysinfo inf;
+
+ si_meminfo(&inf);
+
+ return inf.totalram * inf.mem_unit;
+}
+
+/**
+ * zynq_get_mtype - Return the controller memory type.
+ * @base: DDR memory controller base address.
+ *
+ * Get the EDAC memory type appropriate for the current controller
+ * configuration.
+ *
+ * Return: a memory type enumeration.
+ */
+static enum mem_type zynq_get_mtype(const void __iomem *base)
+{
+ enum mem_type mt;
+ u32 memtype;
+
+ memtype = readl(base + ZYNQ_T_ZQ_OFST);
+
+ if (memtype & ZYNQ_T_ZQ_DDRMODE_MASK)
+ mt = MEM_DDR3;
+ else
+ mt = MEM_DDR2;
+
+ return mt;
+}
+
+/**
+ * zynq_init_csrows - Initialize the csrow data.
+ * @mci: EDAC memory controller instance.
+ *
+ * Initialize the chip select rows associated with the EDAC memory
+ * controller instance.
+ */
+static void zynq_init_csrows(struct mem_ctl_info *mci)
+{
+ struct zynq_edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ u32 size, row;
+ int j;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ size = zynq_get_memsize();
+
+ for (j = 0; j < csi->nr_channels; j++) {
+ dimm = csi->channels[j]->dimm;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = zynq_get_mtype(priv->baseaddr);
+ dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
+ dimm->grain = ZYNQ_EDAC_ERR_GRAIN;
+ dimm->dtype = zynq_get_dtype(priv->baseaddr);
+ }
+ }
+}
+
+/**
+ * zynq_mc_init - Initialize one driver instance.
+ * @mci: EDAC memory controller instance.
+ * @pdev: platform device.
+ *
+ * Perform initialization of the EDAC memory controller instance and
+ * related driver-private data associated with the memory controller the
+ * instance is bound to.
+ */
+static void zynq_mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
+{
+ mci->pdev = &pdev->dev;
+ platform_set_drvdata(pdev, mci);
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+ mci->scrub_mode = SCRUB_NONE;
+
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "zynq_ddr_controller";
+ mci->dev_name = ZYNQ_EDAC_MOD_STRING;
+ mci->mod_name = ZYNQ_EDAC_MOD_VER;
+
+ edac_op_state = EDAC_OPSTATE_POLL;
+ mci->edac_check = zynq_check_errors;
+
+ mci->ctl_page_to_phys = NULL;
+
+ zynq_init_csrows(mci);
+}
+
+/**
+ * zynq_mc_probe - Check controller and bind driver.
+ * @pdev: platform device.
+ *
+ * Probe a specific controller instance for binding with the driver.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int zynq_mc_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[2];
+ struct zynq_edac_priv *priv;
+ struct mem_ctl_info *mci;
+ void __iomem *baseaddr;
+ int rc;
+
+ baseaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(baseaddr))
+ return PTR_ERR(baseaddr);
+
+ if (!zynq_get_ecc_state(baseaddr)) {
+ edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
+ return -ENXIO;
+ }
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = ZYNQ_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = ZYNQ_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+
+ mci = edac_mc_alloc(EDAC_AUTO_MC_NUM, ARRAY_SIZE(layers), layers,
+ sizeof(struct zynq_edac_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed memory allocation for mc instance\n");
+ return -ENOMEM;
+ }
+
+ priv = mci->pvt_info;
+ priv->baseaddr = baseaddr;
+
+ zynq_mc_init(mci, pdev);
+
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to register with EDAC core\n");
+ goto free_edac_mc;
+ }
+
+ /*
+ * Start capturing the correctable and uncorrectable errors. A write of
+ * 0 starts the counters.
+ */
+ writel(0x0, baseaddr + ZYNQ_ECC_CTRL_OFST);
+
+ return 0;
+
+free_edac_mc:
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+/**
+ * zynq_mc_remove - Unbind driver from controller.
+ * @pdev: Platform device.
+ *
+ * Return: Unconditionally 0
+ */
+static int zynq_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_edac_match[] = {
+ { .compatible = "xlnx,zynq-ddrc-a05" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, zynq_edac_match);
+
+static struct platform_driver zynq_edac_mc_driver = {
+ .driver = {
+ .name = "zynq-edac",
+ .of_match_table = zynq_edac_match,
+ },
+ .probe = zynq_mc_probe,
+ .remove = zynq_mc_remove,
+};
+module_platform_driver(zynq_edac_mc_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Zynq DDR ECC driver");
+MODULE_LICENSE("GPL v2");
}
EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
+/**
+ * i2c_smbus_write_i2c_block_data_or_emulated - write block or emulate
+ * @client: Handle to slave device
+ * @command: Byte interpreted by slave
+ * @length: Size of data block; SMBus allows at most I2C_SMBUS_BLOCK_MAX bytes
+ * @values: Byte array from which the data will be written. SMBus allows at
+ * most I2C_SMBUS_BLOCK_MAX bytes.
+ *
+ * This executes the SMBus "block write" protocol if supported by the adapter.
+ * If block write is not supported, it emulates it using either word or byte
+ * write protocols depending on availability.
+ *
+ * The addresses of the I2C slave device that are accessed with this function
+ * must be mapped to a linear region, so that a block write will have the same
+ * effect as consecutive byte writes. Before using this function you must
+ * double-check that the I2C slave actually supports exchanging a block
+ * transfer for a sequence of byte transfers.
+ */
+s32 i2c_smbus_write_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length, const u8 *values)
+{
+ u16 data;
+ u8 i = 0;
+ int status;
+
+ if (length > I2C_SMBUS_BLOCK_MAX)
+ length = I2C_SMBUS_BLOCK_MAX;
+
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+ status = i2c_smbus_write_i2c_block_data(client, command, length, values);
+ return status ?: length;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -EOPNOTSUPP;
+
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA)) {
+ while ((i + 2) <= length) {
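+ /* SMBus word data is little-endian: low byte first */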
+ data = values[i] | (values[i + 1] << 8);
+ status = i2c_smbus_write_word_data(client, command + i, data);
+ if (status < 0)
+ return status;
+ i += 2;
+ }
+ }
+
+ while (i < length) {
+ status = i2c_smbus_write_byte_data(client, command + i, values[i]);
+ if (status < 0)
+ return status;
+ i++;
+ }
+
+ return i;
+}
+EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data_or_emulated);
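+
+/*
+ * Usage sketch (hypothetical register layout): writing a 6-byte blob at
+ * command 0x10 transparently falls back to word/byte writes on adapters
+ * without I2C block support:
+ *
+ *	u8 blob[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
+ *	s32 n = i2c_smbus_write_i2c_block_data_or_emulated(client, 0x10,
+ *							   sizeof(blob), blob);
+ *	if (n < 0)
+ *		dev_err(&client->dev, "block write failed: %d\n", n);
+ */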
+
/**
* i2c_new_smbus_alert_device - get ara client for SMBus alert support
* @adapter: the target adapter
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct phylink_pcs *phylink_pcs;
- struct mdio_device *mdio_device;
if (dsa_is_unused_port(felix->ds, port))
continue;
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- mdio_device = mdio_device_create(felix->imdio, port);
- if (IS_ERR(mdio_device))
+ phylink_pcs = lynx_pcs_create_mdiodev(felix->imdio, port);
+ if (IS_ERR(phylink_pcs))
continue;
- phylink_pcs = lynx_pcs_create(mdio_device);
- if (!phylink_pcs) {
- mdio_device_free(mdio_device);
- continue;
- }
-
felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct phylink_pcs *phylink_pcs = felix->pcs[port];
- struct mdio_device *mdio_device;
-
- if (!phylink_pcs)
- continue;
- mdio_device = lynx_get_mdio_device(phylink_pcs);
- mdio_device_free(mdio_device);
- lynx_pcs_destroy(phylink_pcs);
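+ /* lynx_pcs_destroy() now also drops the mdiodev reference taken at creation */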
+ if (phylink_pcs)
+ lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
mdiobus_free(felix->imdio);
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct phylink_pcs *phylink_pcs;
- struct mdio_device *mdio_device;
int addr = port + 4;
if (dsa_is_unused_port(felix->ds, port))
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- mdio_device = mdio_device_create(felix->imdio, addr);
- if (IS_ERR(mdio_device))
+ phylink_pcs = lynx_pcs_create_mdiodev(felix->imdio, addr);
+ if (IS_ERR(phylink_pcs))
continue;
- phylink_pcs = lynx_pcs_create(mdio_device);
- if (!phylink_pcs) {
- mdio_device_free(mdio_device);
- continue;
- }
-
felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct phylink_pcs *phylink_pcs = felix->pcs[port];
- struct mdio_device *mdio_device;
-
- if (!phylink_pcs)
- continue;
- mdio_device = lynx_get_mdio_device(phylink_pcs);
- mdio_device_free(mdio_device);
- lynx_pcs_destroy(phylink_pcs);
+ if (phylink_pcs)
+ lynx_pcs_destroy(phylink_pcs);
}
/* mdiobus_unregister and mdiobus_free handled by devres */
}
for (port = 0; port < ds->num_ports; port++) {
- struct mdio_device *mdiodev;
struct dw_xpcs *xpcs;
if (dsa_is_unused_port(ds, port))
priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX)
continue;
- mdiodev = mdio_device_create(bus, port);
- if (IS_ERR(mdiodev)) {
- rc = PTR_ERR(mdiodev);
- goto out_pcs_free;
- }
-
- xpcs = xpcs_create(mdiodev, priv->phy_mode[port]);
+ xpcs = xpcs_create_byaddr(bus, port, priv->phy_mode[port]);
if (IS_ERR(xpcs)) {
rc = PTR_ERR(xpcs);
goto out_pcs_free;
if (!priv->xpcs[port])
continue;
- mdio_device_free(priv->xpcs[port]->mdiodev);
xpcs_destroy(priv->xpcs[port]);
priv->xpcs[port] = NULL;
}
if (!priv->xpcs[port])
continue;
- mdio_device_free(priv->xpcs[port]->mdiodev);
xpcs_destroy(priv->xpcs[port]);
priv->xpcs[port] = NULL;
}
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM
- depends on X86 || ARM64 || COMPILE_TEST
+ depends on X86 || ARM64 || MIPS || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
select BITREVERSE
select CRC32
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
xgbe-ptp.o \
- xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
+ xgbe-i2c.o \
+ xgbe-phy-v1.o xgbe-phy-v2.o xgbe-phy-v3.o \
xgbe-platform.o
amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o
return ret;
}
+ /* Set platform-specific DMA bus settings */
+ if (pdata->vdata->blen)
+ pdata->blen = pdata->vdata->blen;
+ if (pdata->vdata->pbl)
+ pdata->pbl = pdata->vdata->pbl;
+ if (pdata->vdata->rd_osr_limit)
+ pdata->rd_osr_limit = pdata->vdata->rd_osr_limit;
+ if (pdata->vdata->wr_osr_limit)
+ pdata->wr_osr_limit = pdata->vdata->wr_osr_limit;
+
/* Set default max values if not provided */
if (!pdata->tx_max_fifo_size)
pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
--- /dev/null
+/*
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/clk.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define VR_XS_PMA_MII_Gen5_MPLL_CTRL 0x807A
+#define VR_XS_PMA_MII_Gen5_MPLL_CTRL_REF_CLK_SEL_bit (1 << 13)
+#define VR_XS_PCS_DIG_CTRL1 0x8000
+#define VR_XS_PCS_DIG_CTRL1_VR_RST_Bit MDIO_CTRL1_RESET
+#define SR_XC_or_PCS_MMD_Control1 MDIO_CTRL1
+#define SR_XC_or_PCS_MMD_Control1_RST_Bit MDIO_CTRL1_RESET
+#define DWC_GLBL_PLL_MONITOR 0x8010
+#define SDS_PCS_CLOCK_READY_mask 0x1C
+#define SDS_PCS_CLOCK_READY_bit 0x10
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL 0x809C
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_KX4 (4 << 0)
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_MASK 0x0007
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_4 (2 << 8)
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_MASK 0x0700
+#define VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST (1 << 15)
+
+#define DELAY_COUNT 50
+
+/* PHY related configuration information */
+struct xgbe_phy_data {
+ struct phy_device *phydev;
+};
+
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata);
+
+static int xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ DBGPR("%s\n", __func__);
+
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
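+ /* Briefly toggle the low-power bit to power-cycle the PCS */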
+ ret |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ usleep_range(75, 100);
+
+ ret &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ return 0;
+}
+
+static int xgbe_phy_xgmii_mode_kx4(struct xgbe_prv_data *pdata)
+{
+ int ret, count;
+
+ DBGPR_MDIO("%s\n", __func__);
+
+ /* Write 2'b01 to Bits[1:0] of SR PCS Control2 to set the xpcx_kr_0
+ * output to 0.
+ */
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ /* Set bit 13 of the SR PMA MMD Control1 register (for backplane) to 1. */
+ ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+
+ ret |= 0x2000;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_CTRL1, ret);
+
+ /* Set LANE_MODE to KX4 (4). */
+ ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL);
+
+ ret &= ~VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_MASK;
+ ret |= VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_KX4;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL, ret);
+
+ /* Set LINK_WIDTH to 4 lanes per link (field value 2). */
+ ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL);
+
+ ret &= ~VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_MASK;
+ ret |= VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_4;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL, ret);
+
+ /* Initiate Software Reset. */
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1);
+
+ ret |= VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1, ret);
+
+ /* Wait until reset done. */
+ count = DELAY_COUNT;
+ do {
+ msleep(20);
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1);
+ } while (!!(ret & VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST) && --count);
+
+ if (ret & VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int xgbe_phy_xgmii_mode_kr(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ DBGPR("%s\n", __func__);
+
+ /* Set PCS to KR/10G speed */
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBR;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED10G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = xgbe_phy_pcs_power_cycle(pdata);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int xgbe_phy_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XAUI ||
+ pdata->phy_mode == PHY_INTERFACE_MODE_10GBASEX) {
+ DBGPR("xgbe: mode KX4: %s\n", __func__);
+ return xgbe_phy_xgmii_mode_kx4(pdata);
+ }
+
+ DBGPR("xgbe: mode KR: %s\n", __func__);
+ return xgbe_phy_xgmii_mode_kr(pdata);
+}
+
+/* The link change will be picked up by the status read poller */
+static void xgbe_phy_adjust_link(struct net_device *netdev)
+{
+}
+
+static int xgbe_phy_probe(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct phy_device *phydev;
+ int ret;
+
+ phydev = device_phy_find_device(pdata->phy_dev);
+ if (!phydev)
+ return -ENODEV;
+
+ ret = phy_connect_direct(pdata->netdev, phydev, xgbe_phy_adjust_link,
+ pdata->phy_mode);
+ if (ret)
+ return ret;
+
+ /* Initialize supported features */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Backplane_BIT, phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, phydev->supported, 1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, phydev->supported, 1);
+ linkmode_copy(phydev->advertising, phydev->supported);
+
+ XGBE_ZERO_SUP(lks);
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, Backplane);
+ XGBE_SET_SUP(lks, 10000baseKR_Full);
+ /*
+ * XGBE_SET_SUP(lks, 10000baseKX4_Full);
+ * XGBE_SET_SUP(lks, 10000baseT_Full);
+ */
+
+ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
+ pdata->phy.speed = SPEED_10000;
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ phy_data->phydev = phydev;
+
+ return 0;
+}
+
+int xgbe_phy_config_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data;
+ int count = DELAY_COUNT;
+ int ret;
+
+ DBGPR("%s\n", __func__);
+
+ phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+
+ pdata->phy_data = phy_data;
+
+ ret = xgbe_phy_probe(pdata);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to probe external PHY\n");
+ return ret;
+ }
+
+ /* Switch XGMAC PHY PLL to use external ref clock from pad */
+ ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_Gen5_MPLL_CTRL);
+ ret &= ~(VR_XS_PMA_MII_Gen5_MPLL_CTRL_REF_CLK_SEL_bit);
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_Gen5_MPLL_CTRL, ret);
+ wmb();
+
+ /* Make vendor specific soft reset */
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1);
+ ret |= VR_XS_PCS_DIG_CTRL1_VR_RST_Bit;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1, ret);
+ wmb();
+
+ /* Wait reset finish */
+ count = DELAY_COUNT;
+ do {
+ usleep_range(500, 600);
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1);
+	} while ((ret & VR_XS_PCS_DIG_CTRL1_VR_RST_Bit) && count--);
+
+	/*
+	 * Wait for the RST (bit 15) of the "SR XS or PCS MMD Control1"
+	 * register to be cleared. This bit is self-cleared when Bits[4:2] in
+	 * the VR XS or PCS MMD Digital Status register are equal to 3'b100,
+	 * that is, when the Tx/Rx clocks are stable and in the Power_Good
+	 * state.
+	 */
+ count = DELAY_COUNT;
+ do {
+ usleep_range(500, 600);
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, SR_XC_or_PCS_MMD_Control1);
+	} while ((ret & SR_XC_or_PCS_MMD_Control1_RST_Bit) && count--);
+
+	/*
+	 * Wait until the SerDes PCS clocks are reported ready in the
+	 * DWC_GLBL_PLL_MONITOR register, that is, the Tx/Rx clocks are
+	 * stable and in the Power_Good state.
+	 */
+ count = DELAY_COUNT;
+ do {
+ usleep_range(500, 600);
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, DWC_GLBL_PLL_MONITOR);
+	} while (((ret & SDS_PCS_CLOCK_READY_mask) != SDS_PCS_CLOCK_READY_bit) && count--);
+
+ /* Turn off and clear interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ wmb();
+
+ xgbe_phy_config_aneg(pdata);
+
+	ret = xgbe_phy_xgmii_mode(pdata);
+	if (ret)
+		return ret;
+
+	/* Wait for the PCS link status to be set */
+	count = DELAY_COUNT;
+	do {
+		msleep(10);
+		ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+	} while (!(ret & MDIO_STAT1_LSTATUS) && count--);
+
+ return 0;
+}
+
+/* Nothing to do on the PHY exit stage */
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+}
+
+static int xgbe_phy_soft_reset(struct xgbe_prv_data *pdata)
+{
+ /* No real soft-reset for now. Sigh... */
+ DBGPR("%s\n", __FUNCTION__);
+#if 0
+ int count, ret;
+
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
+ ret |= MDIO_CTRL1_RESET;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ count = DELAY_COUNT;
+ do {
+ msleep(20);
+ ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+ } while ((ret & MDIO_CTRL1_RESET) && --count);
+
+ if (ret & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+#endif
+
+ return 0;
+}
+
+static void xgbe_phy_update_link(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct phy_device *phydev = phy_data->phydev;
+ int new_state = 0;
+
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != pdata->phy.tx_pause) {
+ new_state = 1;
+ pdata->hw_if.config_tx_flow_control(pdata);
+ pdata->tx_pause = pdata->phy.tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy.rx_pause) {
+ new_state = 1;
+ pdata->hw_if.config_rx_flow_control(pdata);
+ pdata->rx_pause = pdata->phy.rx_pause;
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed) {
+ new_state = 1;
+ pdata->phy_speed = pdata->phy.speed;
+ }
+
+ if (pdata->phy_link != pdata->phy.link) {
+ new_state = 1;
+ pdata->phy_link = pdata->phy.link;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state && netif_msg_link(pdata))
+ phy_print_status(phydev);
+}
+
+/* Start the external PHY state machine */
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct phy_device *phydev = phy_data->phydev;
+
+ netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
+
+ phy_start(phydev);
+
+ return 0;
+}
+
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct phy_device *phydev = phy_data->phydev;
+
+ netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
+ phy_stop(phydev);
+
+ /* Disable auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+ pdata->phy.link = 0;
+ netif_carrier_off(pdata->netdev);
+
+ xgbe_phy_update_link(pdata);
+}
+
+static int xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+	DBGPR("%s\n", __func__);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1);
+
+ return (reg & MDIO_AN_STAT1_COMPLETE) ? 1 : 0;
+}
+
+static void xgbe_phy_read_status(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct phy_device *phydev = phy_data->phydev;
+ int reg, link_aneg;
+
+ pdata->phy.link = 1;
+
+ if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+ netif_carrier_off(pdata->netdev);
+
+ pdata->phy.link = 0;
+ goto update_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+	if (!phydev->link)
+		pdata->phy.link = 0;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ pdata->phy.link &= (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_STAT1);
+ pdata->phy.link &= (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+ if (pdata->phy.link) {
+		if (link_aneg && !xgbe_phy_aneg_done(pdata))
+			return;
+
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+ clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
+ netif_carrier_on(pdata->netdev);
+ } else {
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ if (link_aneg)
+ return;
+ }
+
+ netif_carrier_off(pdata->netdev);
+ }
+
+update_link:
+ xgbe_phy_update_link(pdata);
+}
+
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+	DBGPR("%s\n", __func__);
+
+ pdata->link_check = jiffies;
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+
+ /* Disable auto negotiation in any case! */
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+
+ return 0;
+}
+
+static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ if (speed == SPEED_10000)
+ return true;
+
+ return false;
+}
+
+/* Dummy AN IRQ handler: the AN interrupts are kept masked */
+static irqreturn_t xgbe_an_isr(struct xgbe_prv_data *pdata)
+{
+ DBGPR("Unhandled AN IRQ\n");
+
+ return IRQ_HANDLED;
+}
+
+void xgbe_init_function_ptrs_phy_v3(struct xgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = xgbe_phy_config_init;
+ phy_if->phy_exit = xgbe_phy_exit;
+
+ phy_if->phy_reset = xgbe_phy_soft_reset;
+ phy_if->phy_start = xgbe_phy_start;
+ phy_if->phy_stop = xgbe_phy_stop;
+
+ phy_if->phy_status = xgbe_phy_read_status;
+ phy_if->phy_config_aneg = xgbe_phy_config_aneg;
+
+ phy_if->phy_valid_speed = xgbe_phy_valid_speed;
+
+ phy_if->an_isr = xgbe_an_isr;
+}
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
: xgbe_of_vdata(pdata);
}
-static int xgbe_platform_probe(struct platform_device *pdev)
+static int xgbe_init_function_plat_amd(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata;
- struct device *dev = &pdev->dev;
+ unsigned int phy_memnum, phy_irqnum, dma_irqnum, dma_irqend;
+ struct platform_device *pdev = pdata->platdev;
struct platform_device *phy_pdev;
- const char *phy_mode;
- unsigned int phy_memnum, phy_irqnum;
- unsigned int dma_irqnum, dma_irqend;
- enum dev_dma_attr attr;
+ struct device *dev = pdata->dev;
int ret;
- pdata = xgbe_alloc_pdata(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto err_alloc;
- }
-
- pdata->platdev = pdev;
- pdata->adev = ACPI_COMPANION(dev);
- platform_set_drvdata(pdev, pdata);
-
- /* Check if we should use ACPI or DT */
- pdata->use_acpi = dev->of_node ? 0 : 1;
-
- /* Get the version data */
- pdata->vdata = xgbe_get_vdata(pdata);
-
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
dev_err(dev, "unable to obtain phy device\n");
- ret = -EINVAL;
- goto err_phydev;
+ return -EINVAL;
}
pdata->phy_platdev = phy_pdev;
pdata->phy_dev = &phy_pdev->dev;
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
- /* Retrieve the MAC address */
- ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
- pdata->mac_addr,
- sizeof(pdata->mac_addr));
- if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
- dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
- if (!ret)
- ret = -EINVAL;
- goto err_io;
- }
-
- /* Retrieve the PHY mode - it must be "xgmii" */
- ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
- &phy_mode);
- if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
- dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
- if (!ret)
- ret = -EINVAL;
- goto err_io;
- }
- pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
-
/* Check for per channel interrupt support */
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) {
pdata->per_channel_irq = 1;
if (ret)
goto err_io;
- /* Set the DMA coherency values */
- attr = device_get_dma_attr(dev);
- if (attr == DEV_DMA_NOT_SUPPORTED) {
- dev_err(dev, "DMA is not supported");
- ret = -ENODEV;
- goto err_io;
- }
- pdata->coherent = (attr == DEV_DMA_COHERENT);
- if (pdata->coherent) {
- pdata->arcr = XGBE_DMA_OS_ARCR;
- pdata->awcr = XGBE_DMA_OS_AWCR;
- } else {
- pdata->arcr = XGBE_DMA_SYS_ARCR;
- pdata->awcr = XGBE_DMA_SYS_AWCR;
- }
-
- /* Set the maximum fifo amounts */
- pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
- pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;
-
- /* Set the hardware channel and queue counts */
- xgbe_set_counts(pdata);
-
/* Always have XGMAC and XPCS (auto-negotiation) interrupts */
pdata->irq_count = 2;
goto err_io;
pdata->an_irq = ret;
+ return 0;
+
+err_io:
+ platform_device_put(phy_pdev);
+
+ return ret;
+}
+
+static void xgbe_init_function_disclk_baikal(void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+
+ clk_disable_unprepare(pdata->sysclk);
+}
+
+static int xgbe_init_function_plat_baikal(struct xgbe_prv_data *pdata)
+{
+ struct platform_device *pdev = pdata->platdev;
+ struct device *dev = pdata->dev;
+ struct device_node *phy_node;
+ struct mdio_device *mdio_dev;
+ int ret;
+
+ phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(dev, "unable to obtain phy node\n");
+ return -ENODEV;
+ }
+
+	/* Nothing more sophisticated is available at the moment... */
+ mdio_dev = of_mdio_find_device(phy_node);
+ of_node_put(phy_node);
+	if (!mdio_dev)
+		return dev_err_probe(dev, -EPROBE_DEFER,
+				     "unable to obtain mdio device\n");
+
+ pdata->phy_platdev = NULL;
+ pdata->phy_dev = &mdio_dev->dev;
+
+ /* Obtain the CSR regions of the device */
+ pdata->xgmac_regs = devm_platform_ioremap_resource_byname(pdev, "stmmaceth");
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = PTR_ERR(pdata->xgmac_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ pdata->xpcs_regs = devm_platform_ioremap_resource_byname(pdev, "xpcs");
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = PTR_ERR(pdata->xpcs_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ /* Obtain the platform clocks setting */
+ pdata->apbclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(pdata->apbclk)) {
+ dev_err(dev, "apb devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->apbclk);
+ goto err_io;
+ }
+
+ pdata->sysclk = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(pdata->sysclk)) {
+ dev_err(dev, "dma devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->sysclk);
+ goto err_io;
+ }
+ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+ pdata->ptpclk = devm_clk_get(dev, "ptp_ref");
+ if (IS_ERR(pdata->ptpclk)) {
+ dev_err(dev, "ptp devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->ptpclk);
+ goto err_io;
+ }
+ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+ pdata->refclk = devm_clk_get(dev, "tx");
+ if (IS_ERR(pdata->refclk)) {
+ dev_err(dev, "ref devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->refclk);
+ goto err_io;
+ }
+
+	/* Even though it's claimed that the CSR clock source is different
+	 * from the application clock, the CSRs are still unavailable until
+	 * the DMA clock signal is enabled.
+	 */
+ ret = clk_prepare_enable(pdata->sysclk);
+ if (ret) {
+ dev_err(dev, "sys clock enable failed\n");
+ goto err_io;
+ }
+
+ ret = devm_add_action_or_reset(dev, xgbe_init_function_disclk_baikal, pdata);
+ if (ret) {
+ dev_err(dev, "sys clock undo registration failed\n");
+ goto err_io;
+ }
+
+ /* Forget about the per-channel IRQs for now... */
+	pdata->per_channel_irq = 0;
+	pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE;
+
+ pdata->irq_count = 1;
+
+ ret = platform_get_irq_byname(pdev, "macirq");
+ if (ret < 0)
+ goto err_io;
+ pdata->dev_irq = ret;
+ pdata->an_irq = pdata->dev_irq;
+
+ return 0;
+
+err_io:
+ put_device(pdata->phy_dev);
+
+ return ret;
+}
+
+static int xgbe_platform_probe(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata;
+ struct device *dev = &pdev->dev;
+ const char *phy_mode;
+ enum dev_dma_attr attr;
+ int ret;
+
+ pdata = xgbe_alloc_pdata(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err_alloc;
+ }
+
+ pdata->platdev = pdev;
+ pdata->adev = ACPI_COMPANION(dev);
+ platform_set_drvdata(pdev, pdata);
+
+ /* Check if we should use ACPI or DT */
+ pdata->use_acpi = dev->of_node ? 0 : 1;
+
+ /* Get the version data */
+ pdata->vdata = xgbe_get_vdata(pdata);
+
+ /* Platform-specific resources setup */
+ ret = pdata->vdata->init_function_plat_impl(pdata);
+ if (ret)
+ goto err_plat;
+
+ /* Activate basic clocks */
+ ret = clk_prepare_enable(pdata->apbclk);
+ if (ret) {
+ dev_err(dev, "apb clock enable failed\n");
+ goto err_apb;
+ }
+
+ ret = clk_prepare_enable(pdata->refclk);
+ if (ret) {
+ dev_err(dev, "ref clock enable failed\n");
+ goto err_ref;
+ }
+
+ /* Retrieve the MAC address */
+ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+ pdata->mac_addr,
+ sizeof(pdata->mac_addr));
+ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Retrieve the PHY mode - "xgmii", "10gbase-r" or "xaui"/"10gbase-x" */
+ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+ &phy_mode);
+ if (ret) {
+ dev_err(dev, "failed to read %s property\n", XGBE_PHY_MODE_PROPERTY);
+ goto err_io;
+ } else if (!strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+ } else if (!strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_10GBASER))) {
+ pdata->phy_mode = PHY_INTERFACE_MODE_10GBASER;
+ } else if (!strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XAUI))) {
+ pdata->phy_mode = PHY_INTERFACE_MODE_XAUI;
+ } else if (!strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_10GBASEX))) {
+ pdata->phy_mode = PHY_INTERFACE_MODE_10GBASEX;
+ } else {
+ ret = -EINVAL;
+ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+ goto err_io;
+ }
+
+ /* Set the DMA coherency values */
+ attr = device_get_dma_attr(dev);
+ if (attr == DEV_DMA_NOT_SUPPORTED) {
+ dev_err(dev, "DMA is not supported");
+ ret = -ENODEV;
+ goto err_io;
+ }
+ pdata->coherent = (attr == DEV_DMA_COHERENT);
+ if (pdata->coherent) {
+ pdata->arcr = XGBE_DMA_OS_ARCR;
+ pdata->awcr = XGBE_DMA_OS_AWCR;
+ } else {
+ pdata->arcr = XGBE_DMA_SYS_ARCR;
+ pdata->awcr = XGBE_DMA_SYS_AWCR;
+ }
+
+ /* Set the maximum fifo amounts */
+ pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
+ pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;
+
+ /* Set the hardware channel and queue counts */
+ xgbe_set_counts(pdata);
+
/* Configure the netdev resource */
ret = xgbe_config_netdev(pdata);
if (ret)
return 0;
err_io:
- platform_device_put(phy_pdev);
+ clk_disable_unprepare(pdata->refclk);
+
+err_ref:
+ clk_disable_unprepare(pdata->apbclk);
-err_phydev:
+err_apb:
+ put_device(pdata->phy_dev);
+
+err_plat:
xgbe_free_pdata(pdata);
err_alloc:
xgbe_deconfig_netdev(pdata);
- platform_device_put(pdata->phy_platdev);
+ clk_disable_unprepare(pdata->refclk);
+
+ clk_disable_unprepare(pdata->apbclk);
+
+ put_device(pdata->phy_dev);
xgbe_free_pdata(pdata);
#endif /* CONFIG_PM_SLEEP */
static const struct xgbe_version_data xgbe_v1 = {
+ .init_function_plat_impl = xgbe_init_function_plat_amd,
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
.xpcs_access = XGBE_XPCS_ACCESS_V1,
.tx_max_fifo_size = 81920,
.tx_tstamp_workaround = 1,
};
+static const struct xgbe_version_data xgbe_v3 = {
+ .init_function_plat_impl = xgbe_init_function_plat_baikal,
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v3,
+ .xpcs_access = XGBE_XPCS_ACCESS_V1,
+ .tx_max_fifo_size = 32768,
+ .rx_max_fifo_size = 32768,
+ .blen = DMA_SBMR_BLEN_16,
+ .pbl = DMA_PBL_256,
+ .rd_osr_limit = 8,
+ .wr_osr_limit = 8,
+ .tx_tstamp_workaround = 1,
+};
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ .id = "AMDI8001",
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a",
.data = &xgbe_v1 },
+ { .compatible = "amd,bt1-xgmac",
+ .data = &xgbe_v3 },
{},
};
};
struct xgbe_version_data {
+ int (*init_function_plat_impl)(struct xgbe_prv_data *);
void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *);
enum xgbe_xpcs_access xpcs_access;
unsigned int mmc_64bit;
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
+ unsigned int blen;
+ unsigned int pbl;
+ unsigned int rd_osr_limit;
+ unsigned int wr_osr_limit;
unsigned int tx_tstamp_workaround;
unsigned int ecc_support;
unsigned int i2c_support;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
/* Device clocks */
+ struct clk *apbclk;
+ struct clk *refclk;
struct clk *sysclk;
unsigned long sysclk_rate;
struct clk *ptpclk;
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *);
+void xgbe_init_function_ptrs_phy_v3(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *);
const struct net_device_ops *xgbe_get_netdev_ops(void);
struct device *dev = &pf->si->pdev->dev;
struct enetc_mdio_priv *mdio_priv;
struct phylink_pcs *phylink_pcs;
- struct mdio_device *mdio_device;
struct mii_bus *bus;
int err;
goto free_mdio_bus;
}
- mdio_device = mdio_device_create(bus, 0);
- if (IS_ERR(mdio_device)) {
- err = PTR_ERR(mdio_device);
- dev_err(dev, "cannot create mdio device (%d)\n", err);
- goto unregister_mdiobus;
- }
-
- phylink_pcs = lynx_pcs_create(mdio_device);
- if (!phylink_pcs) {
- mdio_device_free(mdio_device);
- err = -ENOMEM;
+ phylink_pcs = lynx_pcs_create_mdiodev(bus, 0);
+ if (IS_ERR(phylink_pcs)) {
+ err = PTR_ERR(phylink_pcs);
dev_err(dev, "cannot create lynx pcs (%d)\n", err);
goto unregister_mdiobus;
}
static void enetc_imdio_remove(struct enetc_pf *pf)
{
- struct mdio_device *mdio_device;
-
- if (pf->pcs) {
- mdio_device = lynx_get_mdio_device(pf->pcs);
- mdio_device_free(mdio_device);
+ if (pf->pcs)
lynx_pcs_destroy(pf->pcs);
- }
if (pf->imdio) {
mdiobus_unregister(pf->imdio);
mdiobus_free(pf->imdio);
This selects the Anarion SoC glue layer support for the stmmac driver.
+config DWMAC_BT1
+ tristate "Baikal-T1 GMAC/xGMAC support"
+ depends on OF && (MIPS_BAIKAL_T1 || COMPILE_TEST)
+ help
+ Support for Baikal-T1 (x)GMAC Ethernet controller.
+
+ This selects the Baikal-T1 platform specific glue layer of the
+ STMMAC driver.
+
config DWMAC_INGENIC
tristate "Ingenic MAC support"
default MACH_INGENIC
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
+obj-$(CONFIG_DWMAC_BT1) += dwmac-bt1.o
obj-$(CONFIG_DWMAC_INGENIC) += dwmac-ingenic.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Baikal-T1 GMAC driver
+ *
+ * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+ */
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+/* General Purpose IO */
+#define GMAC_GPIO 0x000000e0
+#define GMAC_GPIO_GPIS BIT(0)
+#define GMAC_GPIO_GPO BIT(8)
+
+struct bt1_xgmac {
+ struct device *dev;
+ struct clk *tx_clk;
+};
+
+typedef int (*bt1_xgmac_plat_init)(struct bt1_xgmac *, struct plat_stmmacenet_data *);
+
+static int bt1_xgmac_clks_config(void *bsp_priv, bool enable)
+{
+ struct bt1_xgmac *btxg = bsp_priv;
+ int ret = 0;
+
+ if (enable) {
+ ret = clk_prepare_enable(btxg->tx_clk);
+ if (ret)
+ dev_err(btxg->dev, "Failed to enable Tx clock\n");
+ } else {
+ clk_disable_unprepare(btxg->tx_clk);
+ }
+
+ return ret;
+}
+
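+/* Reset the PHYs attached to the MDIO bus by toggling the dedicated GMAC
+ * GPIO output (GPO) pin, honouring the reset delays provided by the MDIO
+ * bus descriptor.
+ */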
+static int bt1_gmac_bus_reset(void *bsp_priv)
+{
+ struct bt1_xgmac *btxg = bsp_priv;
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(btxg->dev));
+
+ writel(0, priv->ioaddr + GMAC_GPIO);
+ fsleep(priv->mii->reset_delay_us);
+ writel(GMAC_GPIO_GPO, priv->ioaddr + GMAC_GPIO);
+ if (priv->mii->reset_post_delay_us > 0)
+ fsleep(priv->mii->reset_post_delay_us);
+
+ return 0;
+}
+
+/* Clean the basic MAC registers up. Note the MAC interrupts are enabled by
+ * default after reset. Let's mask them out so as not to have any spurious
+ * MAC-related IRQs generated during the cleanup procedure.
+ */
+static void bt1_gmac_core_clean(struct stmmac_priv *priv)
+{
+ int i;
+
+ writel(0x7FF, priv->ioaddr + GMAC_INT_MASK);
+ writel(0, priv->ioaddr + GMAC_CONTROL);
+ writel(0, priv->ioaddr + GMAC_FRAME_FILTER);
+ writel(0, priv->ioaddr + GMAC_HASH_HIGH);
+ writel(0, priv->ioaddr + GMAC_HASH_LOW);
+ writel(0, priv->ioaddr + GMAC_FLOW_CTRL);
+ writel(0, priv->ioaddr + GMAC_VLAN_TAG);
+ writel(0, priv->ioaddr + GMAC_DEBUG);
+ writel(0x80000000, priv->ioaddr + GMAC_PMT);
+ writel(0, priv->ioaddr + LPI_CTRL_STATUS);
+ writel(0x03e80000, priv->ioaddr + LPI_TIMER_CTRL);
+ for (i = 0; i < 15; ++i) {
+ writel(0x0000ffff, priv->ioaddr + GMAC_ADDR_HIGH(i));
+ writel(0xffffffff, priv->ioaddr + GMAC_ADDR_LOW(i));
+ }
+ writel(0, priv->ioaddr + GMAC_PCS_BASE);
+ writel(0, priv->ioaddr + GMAC_RGSMIIIS);
+ writel(0x1, priv->ioaddr + GMAC_MMC_CTRL);
+ readl(priv->ioaddr + GMAC_INT_STATUS);
+ readl(priv->ioaddr + GMAC_PMT);
+ readl(priv->ioaddr + LPI_CTRL_STATUS);
+}
+
+/* Clean the basic DMA registers up */
+static void bt1_gmac_dma_clean(struct stmmac_priv *priv)
+{
+ writel(0, priv->ioaddr + DMA_INTR_ENA);
+ writel(0x00020100, priv->ioaddr + DMA_BUS_MODE);
+ writel(0, priv->ioaddr + DMA_RCV_BASE_ADDR);
+ writel(0, priv->ioaddr + DMA_TX_BASE_ADDR);
+ writel(0x00100000, priv->ioaddr + DMA_CONTROL);
+ writel(0x00110001, priv->ioaddr + DMA_AXI_BUS_MODE);
+ writel(0x0001FFFF, priv->ioaddr + DMA_STATUS);
+}
+
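+/* Emulate the DMA software reset by manually restoring the MAC and DMA
+ * CSRs to their reset-default values (presumably the self-clearing SWR
+ * flag can't be relied upon on this platform).
+ */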
+static int bt1_gmac_swr_reset(void *bsp_priv)
+{
+ struct bt1_xgmac *btxg = bsp_priv;
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(btxg->dev));
+
+ bt1_gmac_core_clean(priv);
+
+ bt1_gmac_dma_clean(priv);
+
+ return 0;
+}
+
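+/* Per-link-speed Tx clock rates. Note these are the SoC-side rates, which
+ * are assumed to be divided down to the standard 125/25/2.5 MHz GMII/MII
+ * rates on the way to the MAC.
+ */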
+static void bt1_gmac_fix_mac_speed(void *bsp_priv, unsigned int speed)
+{
+ struct bt1_xgmac *btxg = bsp_priv;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 250000000;
+ break;
+ case SPEED_100:
+ rate = 50000000;
+ break;
+ case SPEED_10:
+ rate = 5000000;
+ break;
+ default:
+ dev_err(btxg->dev, "Unsupported speed %u\n", speed);
+ return;
+ }
+
+ /* The clock must be gated to successfully update the rate */
+ clk_disable_unprepare(btxg->tx_clk);
+
+ ret = clk_set_rate(btxg->tx_clk, rate);
+ if (ret)
+ dev_err(btxg->dev, "Failed to update Tx clock rate %lu\n", rate);
+
+ ret = clk_prepare_enable(btxg->tx_clk);
+ if (ret)
+ dev_err(btxg->dev, "Failed to re-enable Tx clock\n");
+}
+
+static int bt1_gmac_plat_data_init(struct bt1_xgmac *btxg,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->has_gmac = 1;
+ plat->host_dma_width = 32;
+ plat->tx_fifo_size = SZ_16K;
+ plat->rx_fifo_size = SZ_16K;
+ plat->enh_desc = 1; /* cap.enh_desc */
+ plat->tx_coe = 1;
+ plat->rx_coe = 1;
+ plat->pmt = 1;
+ plat->unicast_filter_entries = 8;
+ plat->multicast_filter_bins = 0;
+ plat->clks_config = bt1_xgmac_clks_config;
+ plat->bus_reset = bt1_gmac_bus_reset;
+ plat->swr_reset = bt1_gmac_swr_reset;
+ plat->fix_mac_speed = bt1_gmac_fix_mac_speed;
+ plat->mdio_bus_data->needs_reset = true;
+
+ return 0;
+}
+
+static int bt1_xgmac_plat_data_init(struct bt1_xgmac *btxg,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->has_xgmac = 1;
+ plat->host_dma_width = 40; /* = Cap */
+ plat->tx_fifo_size = SZ_32K; /* = Cap */
+ plat->rx_fifo_size = SZ_32K; /* = Cap */
+ plat->tx_coe = 1; /* = Cap */
+ plat->rx_coe = 1; /* = Cap */
+ plat->tso_en = 1; /* & cap.tsoen */
+ plat->rss_en = 1; /* & cap.rssen */
+ plat->sph_disable = 0; /* Default */
+	/* plat->pmt = 0; (cap.pmt_rwk) */
+ plat->unicast_filter_entries = 8;
+ plat->multicast_filter_bins = 64;
+ plat->clks_config = bt1_xgmac_clks_config;
+ plat->multi_msi_en = true;
+
+ return 0;
+}
+
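+/* A minimal sketch of a DT node this glue driver is supposed to bind to.
+ * The unit address, interrupt specifier and clock phandles are purely
+ * illustrative; only the compatible string and the "pclk"/"stmmaceth"/
+ * "tx"/"macirq" names follow from the code in this file and the generic
+ * STMMAC platform layer:
+ *
+ *	ethernet@1f054000 {
+ *		compatible = "baikal,bt1-xgmac";
+ *		reg = <0x1f054000 0x4000>;
+ *		interrupts = <...>;
+ *		interrupt-names = "macirq";
+ *		clocks = <&ccu ...>, <&ccu ...>, <&ccu ...>;
+ *		clock-names = "pclk", "stmmaceth", "tx";
+ *	};
+ */
+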
+static int bt1_xgmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources stmmac_res;
+ bt1_xgmac_plat_init plat_init;
+ struct bt1_xgmac *btxg;
+ int ret;
+
+ btxg = devm_kzalloc(&pdev->dev, sizeof(*btxg), GFP_KERNEL);
+ if (!btxg)
+ return -ENOMEM;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat), "DT config failed\n");
+
+ btxg->dev = &pdev->dev;
+
+ plat->bsp_priv = btxg;
+
+ btxg->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(btxg->tx_clk)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(btxg->tx_clk),
+ "Failed to get Tx clock\n");
+ goto err_remove_config_dt;
+ }
+
+ ret = clk_prepare_enable(btxg->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to pre-enable Tx clock\n");
+ goto err_remove_config_dt;
+ }
+
+	plat_init = (bt1_xgmac_plat_init)device_get_match_data(&pdev->dev);
+ if (plat_init) {
+ ret = plat_init(btxg, plat);
+ if (ret)
+ goto err_disable_tx_clk;
+ }
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &stmmac_res);
+ if (ret)
+ goto err_disable_tx_clk;
+
+ return 0;
+
+err_disable_tx_clk:
+ clk_disable_unprepare(btxg->tx_clk);
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat);
+
+ return ret;
+}
+
+static const struct of_device_id bt1_xgmac_match[] = {
+ { .compatible = "baikal,bt1-gmac", .data = (void *)bt1_gmac_plat_data_init },
+ { .compatible = "baikal,bt1-xgmac", .data = (void *)bt1_xgmac_plat_data_init },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_xgmac_match);
+
+static struct platform_driver bt1_xgmac_driver = {
+ .probe = bt1_xgmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "bt1-xgmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(bt1_xgmac_match),
+ },
+};
+module_platform_driver(bt1_xgmac_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 GMAC/XGMAC glue driver");
+MODULE_LICENSE("GPL");
#include <linux/clk-provider.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
/* Intel mgbe SGMII interface uses pcs-xcps */
if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
+ struct mdio_board_info *xpcs_info;
+
+ xpcs_info = devm_kzalloc(&pdev->dev,
+ sizeof(*xpcs_info) + MII_BUS_ID_SIZE,
+ GFP_KERNEL);
+ if (!xpcs_info) {
+ ret = -ENOMEM;
+ goto err_alloc_info;
+ }
+
+ xpcs_info->bus_id = (void *)xpcs_info + sizeof(*xpcs_info);
+ snprintf((char *)xpcs_info->bus_id, MII_BUS_ID_SIZE,
+ "stmmac-%x", plat->bus_id);
+
+ snprintf(xpcs_info->modalias, MDIO_NAME_SIZE, "dwxpcs");
+
+ xpcs_info->mdio_addr = INTEL_MGBE_XPCS_ADDR;
+
+ ret = mdiobus_register_board_info(xpcs_info, 1);
+ if (ret)
+ goto err_alloc_info;
+
plat->mdio_bus_data->has_xpcs = true;
plat->mdio_bus_data->xpcs_an_inband = true;
}
fwnode_handle_put(fixed_node);
}
- /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
+ /* Ensure mdio bus PHY-scan skips intel serdes and pcs-xpcs */
plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
plat->msi_tx_base_vec = 1;
return 0;
+
+err_alloc_info:
+ clk_disable_unprepare(clk);
+ clk_unregister_fixed_rate(clk);
+
+ return ret;
}
static int ehl_common_data(struct pci_dev *pdev,
{
void __iomem *ioaddr = (void __iomem *)dev->base_addr;
unsigned int value = 0;
- unsigned int perfect_addr_number = hw->unicast_filter_entries;
u32 mc_filter[8];
int mcbitslog2 = hw->mcast_bits_log2;
} else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
/* Fall back to all multicast if we've no filter */
value = GMAC_FRAME_FILTER_PM;
- } else if (!netdev_mc_empty(dev)) {
+ } else if (!netdev_mc_empty(dev) && dev->flags & IFF_MULTICAST) {
struct netdev_hw_addr *ha;
/* Hash filter for multicast */
dwmac1000_set_mchash(ioaddr, mc_filter, mcbitslog2);
/* Handle multiple unicast addresses (perfect filtering) */
- if (netdev_uc_count(dev) > perfect_addr_number)
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries - 1)
/* Switch to promiscuous mode if more than unicast
* addresses are requested than supported by hardware.
*/
reg++;
}
- while (reg < perfect_addr_number) {
+ for (; reg < hw->unicast_filter_entries; reg++) {
writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
writel(0, ioaddr + GMAC_ADDR_LOW(reg));
- reg++;
}
}
value |= GMAC_PACKET_FILTER_HPF;
/* Handle multiple unicast addresses */
- if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries - 1) {
/* Switch to promiscuous mode if more than 128 addrs
* are required
*/
reg++;
}
- while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
+ for (; reg < GMAC_MAX_PERFECT_ADDRESSES; reg++) {
writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
writel(0, ioaddr + GMAC_ADDR_LOW(reg));
- reg++;
}
}
#define XGMAC_HWFEAT_MMCSEL BIT(8)
#define XGMAC_HWFEAT_MGKSEL BIT(7)
#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_SMASEL BIT(5)
#define XGMAC_HWFEAT_VLHASH BIT(4)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
/* DMA Registers */
#define XGMAC_DMA_MODE 0x00003000
+#define XGMAC_INTM GENMASK(13, 12)
+#define XGMAC_INTM_MODE1 0x1
#define XGMAC_SWR BIT(0)
#define XGMAC_DMA_SYSBUS_MODE 0x00003004
#define XGMAC_WR_OSR_LMT GENMASK(29, 24)
u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
int mcbitslog2 = hw->mcast_bits_log2;
u32 mc_filter[8];
- int i;
value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
value |= XGMAC_FILTER_HPF;
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > hw->multicast_filter_bins)) {
value |= XGMAC_FILTER_PM;
-
- for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
- writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
+ memset(mc_filter, 0xff, sizeof(mc_filter));
} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
struct netdev_hw_addr *ha;
dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
/* Handle multiple unicast addresses */
- if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries - 1) {
value |= XGMAC_FILTER_PR;
} else {
struct netdev_hw_addr *ha;
p->des7 = 0;
}
+static void dwxgmac2_display_ring(void *head, unsigned int size, bool rx,
+ dma_addr_t dma_rx_phy, unsigned int desc_size)
+{
+ struct dma_desc *p = (struct dma_desc *)head;
+ dma_addr_t dma_addr;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ dma_addr = dma_rx_phy + i * sizeof(*p);
+ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ p++;
+ }
+}
+
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
.set_vlan_tag = dwxgmac2_set_vlan_tag,
.set_vlan = dwxgmac2_set_vlan,
.set_tbs = dwxgmac2_set_tbs,
+ .display_ring = dwxgmac2_display_ring,
};
value |= XGMAC_EAME;
writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+
+ if (dma_cfg->multi_msi_en) {
+ value = readl(ioaddr + XGMAC_DMA_MODE);
+ value &= ~XGMAC_INTM;
+ value |= FIELD_PREP(XGMAC_INTM, XGMAC_INTM_MODE1);
+ writel(value, ioaddr + XGMAC_DMA_MODE);
+ }
}
static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
- if (likely(intr_status & XGMAC_NIS)) {
+ if (likely(intr_status & XGMAC_NIS))
x->normal_irq_n++;
- if (likely(intr_status & XGMAC_RI)) {
- x->rx_normal_irq_n++;
- ret |= handle_rx;
- }
- if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- x->tx_normal_irq_n++;
- ret |= handle_tx;
- }
+ if (likely(intr_status & XGMAC_RI)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+
+ if (likely(intr_status & XGMAC_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
}
+ if (unlikely(intr_status & XGMAC_TBU))
+ ret |= handle_tx;
+
/* Clear interrupts */
writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->sma_mdio = (hw_cap & XGMAC_HWFEAT_SMASEL) >> 5;
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
-int stmmac_xpcs_setup(struct mii_bus *mii);
+int stmmac_xpcs_setup(struct net_device *ndev);
+void stmmac_xpcs_clean(struct net_device *ndev);
void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
{
u32 clk_rate;
- clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+ /* If APB clock has been specified then it is supposed to be used
+ * to select the CSR mode. Otherwise the application clock is the
+ * source of the periodic signal for the CSR interface.
+ */
+ clk_rate = clk_get_rate(priv->plat->pclk) ?:
+ clk_get_rate(priv->plat->stmmac_clk);
/* Platform provided default clk_csr would be assumed valid
* for all other cases except for the below mentioned ones.
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
- if (priv->synopsys_id < DWMAC_CORE_4_10)
- ts_event_en = PTP_TCR_TSEVNTENA;
+
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
ptp_over_ethernet = PTP_TCR_TSIPENA;
old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl = old_ctrl & ~priv->hw->link.speed_mask;
- if (interface == PHY_INTERFACE_MODE_USXGMII) {
+ if (interface == PHY_INTERFACE_MODE_XGMII ||
+ interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_10GBASEX ||
+ interface == PHY_INTERFACE_MODE_XAUI) {
+ switch (speed) {
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ default:
+ return;
+ }
+ } else if (interface == PHY_INTERFACE_MODE_USXGMII) {
switch (speed) {
case SPEED_10000:
ctrl |= priv->hw->link.xgmii.speed10000;
case SPEED_2500:
ctrl |= priv->hw->link.xgmii.speed2500;
break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
default:
return;
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
atds = 1;
- ret = stmmac_reset(priv, priv->ioaddr);
+ if (priv->plat->swr_reset)
+ ret = priv->plat->swr_reset(priv->plat->bsp_priv);
+ else
+ ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
dev_err(priv->device, "Failed to reset the dma\n");
return ret;
switch (irq_err) {
case REQ_IRQ_ERR_ALL:
- irq_idx = priv->plat->tx_queues_to_use;
+ irq_idx = priv->plat->multi_msi_en ? priv->plat->tx_queues_to_use : 0;
fallthrough;
case REQ_IRQ_ERR_TX:
for (j = irq_idx - 1; j >= 0; j--) {
free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
}
}
- irq_idx = priv->plat->rx_queues_to_use;
+ irq_idx = priv->plat->multi_msi_en ? priv->plat->rx_queues_to_use : 0;
fallthrough;
case REQ_IRQ_ERR_RX:
for (j = irq_idx - 1; j >= 0; j--) {
else if (priv->dma_cap.rx_coe_type1)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+ /* Override the SMA availability flag with the actual caps */
+ priv->plat->sma = priv->dma_cap.sma_mdio;
} else {
dev_info(priv->device, "No HW DMA feature register supported\n");
+
+		/* TODO: Temporarily assume the SMA is always available */
+ priv->plat->sma = 1;
}
if (priv->plat->rx_coe) {
if (priv->plat->speed_mode_2500)
priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
- if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
- ret = stmmac_xpcs_setup(priv->mii);
- if (ret)
- goto error_xpcs_setup;
- }
+ ret = stmmac_xpcs_setup(ndev);
+ if (ret)
+ goto error_xpcs_setup;
ret = stmmac_phy_setup(priv);
if (ret) {
error_netdev_register:
phylink_destroy(priv->phylink);
-error_xpcs_setup:
error_phy_setup:
+ stmmac_xpcs_clean(ndev);
+error_xpcs_setup:
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
stmmac_exit_fs(ndev);
#endif
phylink_destroy(priv->phylink);
- if (priv->plat->stmmac_rst)
- reset_control_assert(priv->plat->stmmac_rst);
- reset_control_assert(priv->plat->stmmac_ahb_rst);
+
+ stmmac_xpcs_clean(ndev);
+
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
+
+ if (priv->plat->stmmac_rst)
+ reset_control_assert(priv->plat->stmmac_rst);
+
+ reset_control_assert(priv->plat->stmmac_ahb_rst);
+
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
+ int ret;
+
+	/* TODO: Temporary solution. Remove this once the generic DW MAC
+	 * GPIOs support is implemented.
+	 */
+ if (priv->plat->bus_reset) {
+ ret = priv->plat->bus_reset(priv->plat->bsp_priv);
+ if (ret)
+ return ret;
+ }
#ifdef CONFIG_OF
if (priv->device->of_node) {
return 0;
}
-int stmmac_xpcs_setup(struct mii_bus *bus)
+int stmmac_xpcs_setup(struct net_device *ndev)
{
- struct net_device *ndev = bus->priv;
- struct mdio_device *mdiodev;
+ struct fwnode_handle *fwnode;
struct stmmac_priv *priv;
struct dw_xpcs *xpcs;
- int mode, addr;
+ int ret, mode, addr;
priv = netdev_priv(ndev);
mode = priv->plat->phy_interface;
-
- /* Try to probe the XPCS by scanning all addresses. */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- mdiodev = mdio_device_create(bus, addr);
- if (IS_ERR(mdiodev))
- continue;
-
- xpcs = xpcs_create(mdiodev, mode);
- if (IS_ERR_OR_NULL(xpcs)) {
- mdio_device_free(mdiodev);
- continue;
+ fwnode = of_fwnode_handle(priv->plat->phylink_node);
+
+ /* If PCS-node is specified use it to create the XPCS descriptor */
+ if (fwnode_property_present(fwnode, "pcs-handle")) {
+ xpcs = xpcs_create_bynode(fwnode, mode);
+ ret = PTR_ERR_OR_ZERO(xpcs);
+ } else if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
+ /* Try to probe the XPCS by scanning all addresses */
+ for (ret = -ENODEV, addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ xpcs = xpcs_create_byaddr(priv->mii, addr, mode);
+ if (IS_ERR(xpcs))
+ continue;
+
+ ret = 0;
+ break;
}
-
- priv->hw->xpcs = xpcs;
- break;
+ } else {
+ return 0;
}
- if (!priv->hw->xpcs) {
- dev_warn(priv->device, "No xPCS found\n");
- return -ENODEV;
- }
+ if (ret)
+ return dev_err_probe(priv->device, ret, "No xPCS found\n");
+
+ priv->hw->xpcs = xpcs;
return 0;
}
+void stmmac_xpcs_clean(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (!priv->hw->xpcs)
+ return;
+
+ xpcs_destroy(priv->hw->xpcs);
+ priv->hw->xpcs = NULL;
+}
+
/**
* stmmac_mdio_register
* @ndev: net device structure
struct fwnode_handle *fixed_node;
int addr, found, max_addr;
- if (!mdio_bus_data)
+ if (!mdio_bus_data || !priv->plat->sma)
return 0;
- new_bus = mdiobus_alloc();
+ priv->mii = new_bus = mdiobus_alloc();
if (!new_bus)
return -ENOMEM;
}
bus_register_done:
- priv->mii = new_bus;
-
return 0;
no_phy_found:
mdiobus_unregister(new_bus);
bus_register_fail:
mdiobus_free(new_bus);
+ priv->mii = NULL;
return err;
}
if (!priv->mii)
return 0;
- if (priv->hw->xpcs) {
- mdio_device_free(priv->hw->xpcs->mdiodev);
- xpcs_destroy(priv->hw->xpcs);
- }
-
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
mdiobus_free(priv->mii);
int x = mcast_bins;
switch (x) {
+ case 0:
case HASH_TABLE_SIZE:
case 128:
case 256:
struct device_node *np;
struct stmmac_axi *axi;
- np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
+ np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0) ?:
+ of_get_child_by_name(pdev->dev.of_node, "axi-bus-config");
if (!np)
return NULL;
plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
- rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
+ rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0) ?:
+ of_get_child_by_name(pdev->dev.of_node, "rx-queues-config");
if (!rx_node)
return ret;
- tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
+ tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0) ?:
+ of_get_child_by_name(pdev->dev.of_node, "tx-queues-config");
if (!tx_node) {
of_node_put(rx_node);
return ret;
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
{
+ char irq_name[IFNAMSIZ];
+ int irq, i;
+
memset(stmmac_res, 0, sizeof(*stmmac_res));
/* Get IRQ information early to have an ability to ask for deferred
dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
}
+	/* Request optional MTL per-queue IRQs. Note in fact these are the
+	 * DMA per-channel IRQs, the driver just maps them one-to-one.
+	 */
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++) {
+ snprintf(irq_name, IFNAMSIZ, "dma_rx%d", i);
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (irq < 0)
+ break;
+
+ stmmac_res->rx_irq[i] = irq;
+ }
+
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ snprintf(irq_name, IFNAMSIZ, "dma_tx%d", i);
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (irq < 0)
+ break;
+
+ stmmac_res->tx_irq[i] = irq;
+ }
+
stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);
return PTR_ERR_OR_ZERO(stmmac_res->addr);
This driver supports the MDIO interface found in the network
interface units of the IPQ8064 SoC
+config MDIO_DW_XPCS
+ tristate "Synopsys DesignWare XPCS MI bus support"
+ depends on HAS_IOMEM
+ select MDIO_DEVRES
+ help
+ This driver supports the MCI/APB3 Management Interface responsible
+ for communicating with the Synopsys DesignWare XPCS devices.
+
config MDIO_THUNDER
tristate "ThunderX SOCs MDIO buses"
depends on 64BIT
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
+obj-$(CONFIG_MDIO_DW_XPCS) += mdio-dw-xpcs.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare XPCS Management Interface driver
+ *
+ * Copyright (C) 2023 BAIKAL ELECTRONICS, JSC
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/sizes.h>
+
+/* Page select register for the indirect MMIO CSRs access */
+#define DW_VR_CSR_VIEWPORT 0xff
+
+struct dw_xpcs_mi {
+ struct platform_device *pdev;
+ struct mii_bus *bus;
+ bool reg_indir;
+	u32 reg_width;
+ void __iomem *reg_base;
+ struct clk *pclk;
+};
+
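+/* The XPCS CSRs can be mapped in one of two ways: "direct", where the
+ * whole Clause 45 address space (5-bit MMD + 16-bit register, 2M words)
+ * is visible at once, and "indirect", where only a 256-register page is
+ * accessible and the upper address bits are selected via the viewport
+ * register at the page offset 0xff.
+ */
+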
+static inline ptrdiff_t dw_xpcs_mmio_addr_format(int dev, int reg)
+{
+ return FIELD_PREP(0x1f0000, dev) | FIELD_PREP(0xffff, reg);
+}
+
+static inline u16 dw_xpcs_mmio_addr_page(ptrdiff_t csr)
+{
+ return FIELD_GET(0x1fff00, csr);
+}
+
+static inline ptrdiff_t dw_xpcs_mmio_addr_offset(ptrdiff_t csr)
+{
+ return FIELD_GET(0xff, csr);
+}
+
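+/* For example, accessing MDIO_MMD_PMAPMD (dev 1) register 0x8000 yields
+ * csr = 0x18000, which in the indirect mode is split into the viewport
+ * page 0x180 and the in-page offset 0x00.
+ */
+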
+static int dw_xpcs_mmio_read_reg_indirect(struct dw_xpcs_mi *dxmi,
+ int dev, int reg)
+{
+ ptrdiff_t csr, ofs;
+ u16 page;
+ int ret;
+
+ csr = dw_xpcs_mmio_addr_format(dev, reg);
+ page = dw_xpcs_mmio_addr_page(csr);
+ ofs = dw_xpcs_mmio_addr_offset(csr);
+
+ ret = pm_runtime_resume_and_get(&dxmi->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (dxmi->reg_width) {
+ case 4:
+ writel(page, dxmi->reg_base + (DW_VR_CSR_VIEWPORT << 2));
+ ret = readl(dxmi->reg_base + (ofs << 2));
+ break;
+ default:
+ writew(page, dxmi->reg_base + (DW_VR_CSR_VIEWPORT << 1));
+ ret = readw(dxmi->reg_base + (ofs << 1));
+ break;
+ }
+
+ pm_runtime_put(&dxmi->pdev->dev);
+
+ return ret;
+}
+
+static int dw_xpcs_mmio_write_reg_indirect(struct dw_xpcs_mi *dxmi,
+ int dev, int reg, u16 val)
+{
+ ptrdiff_t csr, ofs;
+ u16 page;
+ int ret;
+
+ csr = dw_xpcs_mmio_addr_format(dev, reg);
+ page = dw_xpcs_mmio_addr_page(csr);
+ ofs = dw_xpcs_mmio_addr_offset(csr);
+
+ ret = pm_runtime_resume_and_get(&dxmi->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (dxmi->reg_width) {
+ case 4:
+ writel(page, dxmi->reg_base + (DW_VR_CSR_VIEWPORT << 2));
+ writel(val, dxmi->reg_base + (ofs << 2));
+ break;
+ default:
+ writew(page, dxmi->reg_base + (DW_VR_CSR_VIEWPORT << 1));
+ writew(val, dxmi->reg_base + (ofs << 1));
+ break;
+ }
+
+ pm_runtime_put(&dxmi->pdev->dev);
+
+ return 0;
+}
+
+static int dw_xpcs_mmio_read_reg_direct(struct dw_xpcs_mi *dxmi,
+ int dev, int reg)
+{
+ ptrdiff_t csr;
+ int ret;
+
+ csr = dw_xpcs_mmio_addr_format(dev, reg);
+
+ ret = pm_runtime_resume_and_get(&dxmi->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (dxmi->reg_width) {
+ case 4:
+ ret = readl(dxmi->reg_base + (csr << 2));
+ break;
+ default:
+ ret = readw(dxmi->reg_base + (csr << 1));
+ break;
+ }
+
+ pm_runtime_put(&dxmi->pdev->dev);
+
+ return ret;
+}
+
+static int dw_xpcs_mmio_write_reg_direct(struct dw_xpcs_mi *dxmi,
+ int dev, int reg, u16 val)
+{
+ ptrdiff_t csr;
+ int ret;
+
+ csr = dw_xpcs_mmio_addr_format(dev, reg);
+
+ ret = pm_runtime_resume_and_get(&dxmi->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (dxmi->reg_width) {
+ case 4:
+ writel(val, dxmi->reg_base + (csr << 2));
+ break;
+ default:
+ writew(val, dxmi->reg_base + (csr << 1));
+ break;
+ }
+
+ pm_runtime_put(&dxmi->pdev->dev);
+
+ return 0;
+}
+
+static int dw_xpcs_mmio_read_c22(struct mii_bus *bus, int addr, int reg)
+{
+ struct dw_xpcs_mi *dxmi = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (dxmi->reg_indir)
+ return dw_xpcs_mmio_read_reg_indirect(dxmi, MDIO_MMD_VEND2, reg);
+ else
+ return dw_xpcs_mmio_read_reg_direct(dxmi, MDIO_MMD_VEND2, reg);
+}
+
+static int dw_xpcs_mmio_write_c22(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct dw_xpcs_mi *dxmi = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (dxmi->reg_indir)
+ return dw_xpcs_mmio_write_reg_indirect(dxmi, MDIO_MMD_VEND2, reg, val);
+ else
+ return dw_xpcs_mmio_write_reg_direct(dxmi, MDIO_MMD_VEND2, reg, val);
+}
+
+static int dw_xpcs_mmio_read_c45(struct mii_bus *bus, int addr, int dev, int reg)
+{
+ struct dw_xpcs_mi *dxmi = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (dxmi->reg_indir)
+ return dw_xpcs_mmio_read_reg_indirect(dxmi, dev, reg);
+ else
+ return dw_xpcs_mmio_read_reg_direct(dxmi, dev, reg);
+}
+
+static int dw_xpcs_mmio_write_c45(struct mii_bus *bus, int addr, int dev,
+ int reg, u16 val)
+{
+ struct dw_xpcs_mi *dxmi = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (dxmi->reg_indir)
+ return dw_xpcs_mmio_write_reg_indirect(dxmi, dev, reg, val);
+ else
+ return dw_xpcs_mmio_write_reg_direct(dxmi, dev, reg, val);
+}
+
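+/* Generic MDIO bus callbacks: a Clause 45 access is flagged by the
+ * MII_ADDR_C45 bit in the reg argument, with the MMD number packed into
+ * bits [20:16] and the register offset into bits [15:0].
+ */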
+static int dw_xpcs_mmio_read(struct mii_bus *bus, int addr, int reg)
+{
+ if (reg & MII_ADDR_C45) {
+ u8 c45_dev = (reg >> 16) & 0x1F;
+ u16 c45_reg = reg & 0xFFFF;
+
+ return dw_xpcs_mmio_read_c45(bus, addr, c45_dev, c45_reg);
+ }
+
+ return dw_xpcs_mmio_read_c22(bus, addr, reg);
+}
+
+static int dw_xpcs_mmio_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ if (reg & MII_ADDR_C45) {
+ u8 c45_dev = (reg >> 16) & 0x1F;
+ u16 c45_reg = reg & 0xFFFF;
+
+ return dw_xpcs_mmio_write_c45(bus, addr, c45_dev, c45_reg, val);
+ }
+
+ return dw_xpcs_mmio_write_c22(bus, addr, reg, val);
+}
+
+static struct dw_xpcs_mi *dw_xpcs_mi_create_data(struct platform_device *pdev)
+{
+ struct dw_xpcs_mi *dxmi;
+
+ dxmi = devm_kzalloc(&pdev->dev, sizeof(*dxmi), GFP_KERNEL);
+ if (!dxmi)
+ return ERR_PTR(-ENOMEM);
+
+ dxmi->pdev = pdev;
+
+ dev_set_drvdata(&pdev->dev, dxmi);
+
+ return dxmi;
+}
+
+static int dw_xpcs_mi_init_res(struct dw_xpcs_mi *dxmi)
+{
+ struct device *dev = &dxmi->pdev->dev;
+ struct resource *res;
+
+ if (!device_property_read_u32(dev, "reg-io-width", &dxmi->reg_width)) {
+ if (dxmi->reg_width != 2 && dxmi->reg_width != 4) {
+ dev_err(dev, "Invalid regspace data width\n");
+ return -EINVAL;
+ }
+ } else {
+ dxmi->reg_width = 2;
+ }
+
+ res = platform_get_resource_byname(dxmi->pdev, IORESOURCE_MEM, "direct") ?:
+ platform_get_resource_byname(dxmi->pdev, IORESOURCE_MEM, "indirect");
+ if (!res) {
+ dev_err(dev, "No regspace found\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(res->name, "indirect"))
+ dxmi->reg_indir = true;
+
+ if ((dxmi->reg_indir && resource_size(res) < dxmi->reg_width * SZ_256) ||
+ (!dxmi->reg_indir && resource_size(res) < dxmi->reg_width * SZ_2M)) {
+ dev_err(dev, "Invalid regspace size\n");
+ return -EINVAL;
+ }
+
+ dxmi->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dxmi->reg_base)) {
+ dev_err(dev, "Failed to map regspace\n");
+ return PTR_ERR(dxmi->reg_base);
+ }
+
+ return 0;
+}
+
+static int dw_xpcs_mi_init_clk(struct dw_xpcs_mi *dxmi)
+{
+ struct device *dev = &dxmi->pdev->dev;
+ int ret;
+
+ dxmi->pclk = devm_clk_get_optional(dev, "pclk");
+ if (IS_ERR(dxmi->pclk))
+		return dev_err_probe(dev, PTR_ERR(dxmi->pclk),
+				     "Failed to get peripheral clock\n");
+
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable runtime-PM\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_xpcs_mi_init_mdio(struct dw_xpcs_mi *dxmi)
+{
+ struct device *dev = &dxmi->pdev->dev;
+ static atomic_t id = ATOMIC_INIT(-1);
+ int ret;
+
+ dxmi->bus = devm_mdiobus_alloc_size(dev, 0);
+ if (!dxmi->bus)
+ return -ENOMEM;
+
+ dxmi->bus->name = "DW XPCS MI";
+ dxmi->bus->read = dw_xpcs_mmio_read;
+ dxmi->bus->write = dw_xpcs_mmio_write;
+ dxmi->bus->probe_capabilities = MDIOBUS_C22_C45;
+ dxmi->bus->phy_mask = ~0;
+ dxmi->bus->parent = dev;
+ dxmi->bus->priv = dxmi;
+
+ snprintf(dxmi->bus->id, MII_BUS_ID_SIZE,
+ "dwxpcs-%x", atomic_inc_return(&id));
+
+ ret = devm_of_mdiobus_register(dev, dxmi->bus, dev_of_node(dev));
+ if (ret) {
+ dev_err(dev, "Failed to create MDIO bus\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_xpcs_mi_probe(struct platform_device *pdev)
+{
+ struct dw_xpcs_mi *dxmi;
+ int ret;
+
+ dxmi = dw_xpcs_mi_create_data(pdev);
+ if (IS_ERR(dxmi))
+ return PTR_ERR(dxmi);
+
+ ret = dw_xpcs_mi_init_res(dxmi);
+ if (ret)
+ return ret;
+
+ ret = dw_xpcs_mi_init_clk(dxmi);
+ if (ret)
+ return ret;
+
+ ret = dw_xpcs_mi_init_mdio(dxmi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __maybe_unused dw_xpcs_mi_pm_runtime_suspend(struct device *dev)
+{
+ struct dw_xpcs_mi *dxmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dxmi->pclk);
+
+ return 0;
+}
+
+static int __maybe_unused dw_xpcs_mi_pm_runtime_resume(struct device *dev)
+{
+ struct dw_xpcs_mi *dxmi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(dxmi->pclk);
+}
+
+static const struct dev_pm_ops dw_xpcs_mi_pm_ops = {
+ SET_RUNTIME_PM_OPS(dw_xpcs_mi_pm_runtime_suspend, dw_xpcs_mi_pm_runtime_resume, NULL)
+};
+
+static const struct of_device_id dw_xpcs_mi_of_ids[] = {
+ { .compatible = "snps,dw-xpcs-mi" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, dw_xpcs_mi_of_ids);
+
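+/* A minimal sketch of a DT node for this driver. The unit address, size
+ * and clock phandle are illustrative; the compatible string, reg-names,
+ * reg-io-width and the "pclk" clock name follow from the code above:
+ *
+ *	mdio@1f05d000 {
+ *		compatible = "snps,dw-xpcs-mi";
+ *		reg = <0x1f05d000 0x400>;
+ *		reg-names = "indirect";
+ *		reg-io-width = <4>;
+ *		clocks = <&ccu ...>;
+ *		clock-names = "pclk";
+ *	};
+ */
+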
+static struct platform_driver dw_xpcs_mi_driver = {
+ .probe = dw_xpcs_mi_probe,
+ .driver = {
+ .name = "dw-xpcs-mi",
+ .pm = &dw_xpcs_mi_pm_ops,
+ .of_match_table = dw_xpcs_mi_of_ids,
+ },
+};
+
+module_platform_driver(dw_xpcs_mi_driver);
+
+MODULE_DESCRIPTION("Synopsys DesignWare XPCS Management Interface driver");
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_LICENSE("GPL v2");
menu "PCS device drivers"
config PCS_XPCS
- tristate
+ tristate "Synopsys DesignWare Ethernet XPCS"
select PHYLINK
help
- This module provides helper functions for Synopsys DesignWare XPCS
- controllers.
+ This module provides a driver and helper functions for Synopsys
+ DesignWare XPCS controllers.
config PCS_LYNX
tristate
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux PCS drivers
-pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o
+pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-pma.o pcs-xpcs-nxp.o
obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
if (!lynx)
return NULL;
+ mdio_device_get(mdio);
lynx->mdio = mdio;
lynx->pcs.ops = &lynx_pcs_phylink_ops;
lynx->pcs.poll = true;
}
EXPORT_SYMBOL(lynx_pcs_create);
+struct phylink_pcs *lynx_pcs_create_mdiodev(struct mii_bus *bus, int addr)
+{
+ struct mdio_device *mdio;
+ struct phylink_pcs *pcs;
+
+ mdio = mdio_device_create(bus, addr);
+ if (IS_ERR(mdio))
+ return ERR_CAST(mdio);
+
+ pcs = lynx_pcs_create(mdio);
+
+ /* Convert failure to create the PCS to an error pointer, so this
+ * function has a consistent return value strategy.
+ */
+ if (!pcs)
+ pcs = ERR_PTR(-ENOMEM);
+
+	/* lynx_pcs_create() has taken a refcount on the mdiodev if it was
+	 * successful. If lynx_pcs_create() fails, this will free the mdio
+	 * device here. In any case, we don't need to hold our reference
+	 * anymore, and putting it here will allow mdio_device_put() in
+	 * lynx_pcs_destroy() to automatically free the mdio device.
+	 */
+ mdio_device_put(mdio);
+
+ return pcs;
+}
+EXPORT_SYMBOL(lynx_pcs_create_mdiodev);
+
void lynx_pcs_destroy(struct phylink_pcs *pcs)
{
struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+ mdio_device_put(lynx->mdio);
kfree(lynx);
}
EXPORT_SYMBOL(lynx_pcs_destroy);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 BAIKAL ELECTRONICS, JSC
+ *
+ * Author: Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mdio.h>
+#include <linux/pcs/pcs-xpcs.h>
+
+#include "pcs-xpcs.h"
+
+/* The DesignWare Gen5 10G PHY can be clocked by an external clock source,
+ * which, if specified, will be used instead of the internal one.
+ */
+static int xpcs_gen5_10g_ref_clock_select(struct dw_xpcs *xpcs, bool reset)
+{
+ int ret;
+
+ ret = xpcs_read_vendor(xpcs, MDIO_MMD_PMAPMD,
+ DW_VR_XS_PMA_GEN5_10G_MPLL_CTRL);
+ if (ret < 0)
+ return ret;
+
+ if (xpcs->clks[DW_XPCS_CLK_PAD].clk)
+ ret &= ~DW_VR_XS_PMA_REF_CLK_SEL_CORE;
+ else if (xpcs->clks[DW_XPCS_CLK_CORE].clk)
+ ret |= DW_VR_XS_PMA_REF_CLK_SEL_CORE;
+ else if (reset)
+ goto out_vendor_reset;
+ else
+ return 0;
+
+ ret = xpcs_write_vendor(xpcs, MDIO_MMD_PMAPMD,
+ DW_VR_XS_PMA_GEN5_10G_MPLL_CTRL, ret);
+ if (ret < 0)
+ return ret;
+
+	/* The vendor reset must be performed immediately. Note it will only
+	 * work out if the Tx/Rx clocks are stable (Power_Good state).
+	 */
+out_vendor_reset:
+ return xpcs_vendor_reset(xpcs);
+}
+
+static int xpcs_10gbaser_gen5_10g_pma_config(struct dw_xpcs *xpcs)
+{
+ int ret;
+
+ ret = xpcs_read_vendor(xpcs, MDIO_MMD_PMAPMD,
+ DW_VR_XS_PMA_GEN5_10G_GEN_CTRL);
+ if (ret < 0)
+ return ret;
+
+ /* Select KR-mode of the PHY lanes */
+ ret &= ~DW_VR_XS_PMA_LANE_MODE;
+ ret |= FIELD_PREP(DW_VR_XS_PMA_LANE_MODE, DW_VR_XS_PMA_LANE_MODE_KR);
+
+ /* Activate 1 lane per link */
+ ret &= ~DW_VR_XS_PMA_LINK_WIDTH;
+ ret |= FIELD_PREP(DW_VR_XS_PMA_LINK_WIDTH, DW_VR_XS_PMA_LINK_WIDTH_1);
+
+	/* Power off the unused lanes 1-3 */
+ ret &= ~DW_VR_XS_PMA_LANE_PWR_OFF;
+ ret |= FIELD_PREP(DW_VR_XS_PMA_LANE_PWR_OFF, 0xe);
+
+ ret = xpcs_write_vendor(xpcs, MDIO_MMD_PMAPMD,
+ DW_VR_XS_PMA_GEN5_10G_GEN_CTRL, ret);
+ if (ret < 0)
+ return ret;
+
+ /* Select PCS/PMA refclk source: Pad or Core. Vendor-reset the device
+ * so that the update is picked up by the core.
+ */
+ return xpcs_gen5_10g_ref_clock_select(xpcs, true);
+}
+
+int xpcs_10gbaser_pma_config(struct dw_xpcs *xpcs)
+{
+ switch (xpcs->info.pma) {
+ case DW_XPCS_PMA_GEN5_10G:
+ return xpcs_10gbaser_gen5_10g_pma_config(xpcs);
+ default:
+ return 0;
+ }
+}
+
+static int xpcs_10gbasex_gen5_10g_pma_config(struct dw_xpcs *xpcs)
+{
+ int ret;
+
+ ret = xpcs_read_vendor(xpcs, MDIO_MMD_PMAPMD,
+ DW_VR_XS_PMA_GEN5_10G_GEN_CTRL);
+ if (ret < 0)
+ return ret;
+
+ /* Select KX4-mode of the PHY lanes */
+ ret &= ~DW_VR_XS_PMA_LANE_MODE;
+ ret |= FIELD_PREP(DW_VR_XS_PMA_LANE_MODE, DW_VR_XS_PMA_LANE_MODE_KX4);
+
+ /* Activate 4 lanes per link */
+ ret &= ~DW_VR_XS_PMA_LINK_WIDTH;
+ ret |= FIELD_PREP(DW_VR_XS_PMA_LINK_WIDTH, DW_VR_XS_PMA_LINK_WIDTH_4);
+
+ /* Enable all 4 lanes since it's X4 */
+ ret &= ~DW_VR_XS_PMA_LANE_PWR_OFF;
+ ret |= FIELD_PREP(DW_VR_XS_PMA_LANE_PWR_OFF, 0x0);
+
+ ret = xpcs_write_vendor(xpcs, MDIO_MMD_PMAPMD,
+ DW_VR_XS_PMA_GEN5_10G_GEN_CTRL, ret);
+ if (ret < 0)
+ return ret;
+
+ /* Select PCS/PMA refclk source: Pad or Core. Vendor-reset the device
+ * so that the update is picked up by the core.
+ */
+ return xpcs_gen5_10g_ref_clock_select(xpcs, true);
+}
+
+int xpcs_10gbasex_pma_config(struct dw_xpcs *xpcs)
+{
+ switch (xpcs->info.pma) {
+ case DW_XPCS_PMA_GEN5_10G:
+ return xpcs_10gbasex_gen5_10g_pma_config(xpcs);
+ default:
+ return 0;
+ }
+}
* Author: Jose Abreu <Jose.Abreu@synopsys.com>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/pcs/pcs-xpcs.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/phy.h>
#include <linux/phylink.h>
-#include <linux/workqueue.h>
+#include <linux/property.h>
+
#include "pcs-xpcs.h"
#define phylink_pcs_to_xpcs(pl_pcs) \
container_of((pl_pcs), struct dw_xpcs, pcs)
+static const int xpcs_xgmii_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const int xpcs_usxgmii_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
};
static const int xpcs_10gkr_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_10gbaser_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ /* Speed-compatible modes for the link setup in the external PHYs */
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ /* ETHTOOL_LINK_MODE_10000baseCX4_Full_BIT, */
+ /* ETHTOOL_LINK_MODE_10000baseLX4_Full_BIT, */
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_10gbasex_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ /* ETHTOOL_LINK_MODE_10000baseCX4_Full_BIT, */
+ /* ETHTOOL_LINK_MODE_10000baseLX4_Full_BIT, */
+ /* Speed-compatible modes for the link setup in the external PHYs */
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const phy_interface_t xpcs_xgmii_interfaces[] = {
+ PHY_INTERFACE_MODE_XGMII,
+};
+
static const phy_interface_t xpcs_usxgmii_interfaces[] = {
PHY_INTERFACE_MODE_USXGMII,
};
PHY_INTERFACE_MODE_10GKR,
};
+static const phy_interface_t xpcs_10gbaser_interfaces[] = {
+ PHY_INTERFACE_MODE_10GBASER,
+};
+
+static const phy_interface_t xpcs_10gbasex_interfaces[] = {
+ PHY_INTERFACE_MODE_10GBASEX,
+ PHY_INTERFACE_MODE_XAUI,
+};
+
static const phy_interface_t xpcs_xlgmii_interfaces[] = {
PHY_INTERFACE_MODE_XLGMII,
};
static const phy_interface_t xpcs_2500basex_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
- PHY_INTERFACE_MODE_MAX,
};
enum {
+ DW_XPCS_XGMII,
DW_XPCS_USXGMII,
DW_XPCS_10GKR,
+ DW_XPCS_10GBASER,
+ DW_XPCS_10GBASEX,
DW_XPCS_XLGMII,
DW_XPCS_SGMII,
DW_XPCS_1000BASEX,
return mdiobus_c45_write(bus, addr, dev, reg, val);
}
-static int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg,
- u16 mask, u16 set)
+int xpcs_modify(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set)
+{
+ u32 reg_addr = mdiobus_c45_addr(dev, reg);
+
+ return mdiodev_modify(xpcs->mdiodev, reg_addr, mask, set);
+}
+
+int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set)
{
u32 reg_addr = mdiobus_c45_addr(dev, reg);
return mdiodev_modify_changed(xpcs->mdiodev, reg_addr, mask, set);
}
-static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
+int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
{
return xpcs_read(xpcs, dev, DW_VENDOR | reg);
}
-static int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg,
- u16 val)
+int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg, u16 val)
{
return xpcs_write(xpcs, dev, DW_VENDOR | reg, val);
}
return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val);
}
-static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev)
+int xpcs_poll_val(struct dw_xpcs *xpcs, int dev, int reg, u16 mask, u16 val)
{
- /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
+ /* Poll until the expected value is read back (50ms per retry == 0.6 sec) */
unsigned int retries = 12;
int ret;
do {
msleep(50);
- ret = xpcs_read(xpcs, dev, MDIO_CTRL1);
+ ret = xpcs_read(xpcs, dev, reg);
if (ret < 0)
return ret;
- } while (ret & MDIO_CTRL1_RESET && --retries);
+ } while ((ret & mask) != val && --retries);
- return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0;
+ return ((ret & mask) != val) ? -ETIMEDOUT : 0;
}
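+
+/* For example, the former reset polling is now expressed as
+ *
+ *	xpcs_poll_val(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET, 0);
+ *
+ * i.e. wait until the self-clearing reset bit is read back as zero.
+ */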
-static int xpcs_soft_reset(struct dw_xpcs *xpcs,
- const struct xpcs_compat *compat)
+static int __xpcs_soft_reset(struct dw_xpcs *xpcs, bool vendor)
{
- int ret, dev;
+ int ret, dev, reg;
- switch (compat->an_mode) {
- case DW_AN_C73:
- dev = MDIO_MMD_PCS;
- break;
- case DW_AN_C37_SGMII:
- case DW_2500BASEX:
- case DW_AN_C37_1000BASEX:
+ if (xpcs->mmd_ctrl & DW_SR_CTRL_MII_MMD_EN)
dev = MDIO_MMD_VEND2;
- break;
- default:
- return -1;
- }
+ else if (xpcs->mmd_ctrl & DW_SR_CTRL_PCS_XS_MMD_EN)
+ dev = MDIO_MMD_PCS;
+ else if (xpcs->mmd_ctrl & DW_SR_CTRL_PMA_MMD_EN)
+ dev = MDIO_MMD_PMAPMD;
+ else
+ return -EINVAL;
+
+ reg = MDIO_CTRL1;
+ if (vendor)
+ reg |= DW_VENDOR;
- ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
+ ret = xpcs_modify(xpcs, dev, reg, MDIO_CTRL1_RESET, MDIO_CTRL1_RESET);
if (ret < 0)
return ret;
- return xpcs_poll_reset(xpcs, dev);
+ return xpcs_poll_val(xpcs, dev, reg, MDIO_CTRL1_RESET, 0);
+}
+
+int xpcs_soft_reset(struct dw_xpcs *xpcs)
+{
+ return __xpcs_soft_reset(xpcs, false);
+}
+
+int xpcs_vendor_reset(struct dw_xpcs *xpcs)
+{
+ return __xpcs_soft_reset(xpcs, true);
}
#define xpcs_warn(__xpcs, __state, __args...) \
})
static int xpcs_read_fault_c73(struct dw_xpcs *xpcs,
- struct phylink_link_state *state)
+ struct phylink_link_state *state,
+ u16 pcs_stat1)
{
int ret;
- ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
- if (ret < 0)
- return ret;
-
- if (ret & MDIO_STAT1_FAULT) {
+ if (pcs_stat1 & MDIO_STAT1_FAULT) {
xpcs_warn(xpcs, state, "Link fault condition detected!\n");
return -EFAULT;
}
return 0;
}
-static int xpcs_read_link_c73(struct dw_xpcs *xpcs)
+static void xpcs_config_xgmii(struct dw_xpcs *xpcs, int speed)
{
- bool link = true;
- int ret;
-
- ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
- if (ret < 0)
- return ret;
-
- if (!(ret & MDIO_STAT1_LSTATUS))
- link = false;
-
- return link;
-}
-
-static int xpcs_get_max_usxgmii_speed(const unsigned long *supported)
-{
- int max = SPEED_UNKNOWN;
-
- if (phylink_test(supported, 1000baseKX_Full))
- max = SPEED_1000;
- if (phylink_test(supported, 2500baseX_Full))
- max = SPEED_2500;
- if (phylink_test(supported, 10000baseKX4_Full))
- max = SPEED_10000;
- if (phylink_test(supported, 10000baseKR_Full))
- max = SPEED_10000;
-
- return max;
+ /* TODO: add XGMII-related configuration */
}
static void xpcs_config_usxgmii(struct dw_xpcs *xpcs, int speed)
static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state,
- const struct xpcs_compat *compat)
+ const struct xpcs_compat *compat, u16 an_stat1)
{
int ret;
- ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
- if (ret < 0)
- return ret;
-
- if (ret & MDIO_AN_STAT1_COMPLETE) {
- ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1);
+ if (an_stat1 & MDIO_AN_STAT1_COMPLETE) {
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_AN_LPA);
if (ret < 0)
return ret;
}
static int xpcs_read_lpa_c73(struct dw_xpcs *xpcs,
- struct phylink_link_state *state)
+ struct phylink_link_state *state, u16 an_stat1)
{
- int ret;
-
- ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
- if (ret < 0)
- return ret;
+ u16 lpa[3];
+ int i, ret;
- if (!(ret & MDIO_AN_STAT1_LPABLE)) {
+ if (!(an_stat1 & MDIO_AN_STAT1_LPABLE)) {
phylink_clear(state->lp_advertising, Autoneg);
return 0;
}
phylink_set(state->lp_advertising, Autoneg);
- /* Clause 73 outcome */
- ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL3);
- if (ret < 0)
- return ret;
-
- if (ret & DW_C73_2500KX)
- phylink_set(state->lp_advertising, 2500baseX_Full);
-
- ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL2);
- if (ret < 0)
- return ret;
-
- if (ret & DW_C73_1000KX)
- phylink_set(state->lp_advertising, 1000baseKX_Full);
- if (ret & DW_C73_10000KX4)
- phylink_set(state->lp_advertising, 10000baseKX4_Full);
- if (ret & DW_C73_10000KR)
- phylink_set(state->lp_advertising, 10000baseKR_Full);
+ /* Read Clause 73 link partner advertisement */
+ for (i = ARRAY_SIZE(lpa); --i >= 0; ) {
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_AN_LPA + i);
+ if (ret < 0)
+ return ret;
- ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1);
- if (ret < 0)
- return ret;
+ lpa[i] = ret;
+ }
- if (ret & DW_C73_PAUSE)
- phylink_set(state->lp_advertising, Pause);
- if (ret & DW_C73_ASYM_PAUSE)
- phylink_set(state->lp_advertising, Asym_Pause);
+ mii_c73_mod_linkmode(state->lp_advertising, lpa);
- linkmode_and(state->lp_advertising, state->lp_advertising,
- state->advertising);
return 0;
}
-static void xpcs_resolve_lpa_c73(struct dw_xpcs *xpcs,
- struct phylink_link_state *state)
-{
- int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising);
-
- state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
- state->speed = max_speed;
- state->duplex = DUPLEX_FULL;
-}
-
static int xpcs_get_max_xlgmii_speed(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
state->duplex = DUPLEX_FULL;
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
- state->speed = SPEED_10000;
- break;
case PHY_INTERFACE_MODE_XLGMII:
state->speed = xpcs_get_max_xlgmii_speed(xpcs, state);
break;
xpcs = phylink_pcs_to_xpcs(pcs);
compat = xpcs_find_compat(xpcs->id, state->interface);
+ if (!compat)
+ return -EINVAL;
/* Populate the supported link modes for this PHY interface type.
* FIXME: what about the port modes and autoneg bit? This masks
* all those away.
*/
- if (compat)
- for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
- set_bit(compat->supported[i], xpcs_supported);
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(compat->supported[i], xpcs_supported);
linkmode_and(supported, supported, xpcs_supported);
const struct xpcs_compat *compat = &xpcs->id->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
- if (compat->interface[j] < PHY_INTERFACE_MODE_MAX)
- __set_bit(compat->interface[j], interfaces);
+ __set_bit(compat->interface[j], interfaces);
}
}
EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
return changed;
}
+static int xpcs_config_10gbaser(struct dw_xpcs *xpcs)
+{
+ int ret;
+
+ /* Disable Clause 73 AN in any case */
+ ret = xpcs_modify(xpcs, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_ENABLE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Disable RXAUI if it's enabled by default */
+ ret = xpcs_read_vpcs(xpcs, DW_VR_XS_PCS_XAUI_MODE_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DW_VR_XS_PCS_RXAUI_MODE;
+ ret = xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_XAUI_MODE_CTRL, ret);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to 10G speed and 10GBASE-R PCS */
+ ret = xpcs_modify(xpcs, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_CTRL1_SPEEDSEL, MDIO_CTRL1_SPEED10G);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_modify(xpcs, MDIO_MMD_PCS, MDIO_CTRL2,
+ MDIO_PCS_CTRL2_TYPE, MDIO_PCS_CTRL2_10GBR);
+ if (ret < 0)
+ return ret;
+
+ /* Make sure the specified PCS type is picked up by the core */
+ return xpcs_vendor_reset(xpcs);
+}
+
+static int xpcs_config_10gbasex(struct dw_xpcs *xpcs)
+{
+ int ret;
+
+ /* Disable Clause 73 AN in any case */
+ ret = xpcs_modify(xpcs, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_ENABLE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Disable RXAUI if it's enabled by default */
+ ret = xpcs_read_vpcs(xpcs, DW_VR_XS_PCS_XAUI_MODE_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DW_VR_XS_PCS_RXAUI_MODE;
+ ret = xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_XAUI_MODE_CTRL, ret);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to 10G speed and 10GBASE-X PCS */
+ ret = xpcs_modify(xpcs, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_CTRL1_SPEEDSEL, MDIO_CTRL1_SPEED10G);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_modify(xpcs, MDIO_MMD_PCS, MDIO_CTRL2,
+ MDIO_PCS_CTRL2_TYPE, MDIO_PCS_CTRL2_10GBX);
+ if (ret < 0)
+ return ret;
+
+ /* Make sure the specified PCS type is picked up by the core */
+ return xpcs_vendor_reset(xpcs);
+}
+
static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
{
int ret;
if (ret)
return ret;
break;
+ case DW_10GBASER:
+ ret = xpcs_config_10gbaser(xpcs);
+ if (ret)
+ return ret;
+ break;
+ case DW_10GBASEX:
+ ret = xpcs_config_10gbasex(xpcs);
+ if (ret)
+ return ret;
+ break;
case DW_2500BASEX:
ret = xpcs_config_2500basex(xpcs);
if (ret)
return ret;
break;
default:
- return -1;
+ return -EINVAL;
}
if (compat->pma_config) {
struct phylink_link_state *state,
const struct xpcs_compat *compat)
{
+ int pcs_stat1;
+ int an_stat1;
int ret;
+ /* The link status bit is latching-low, so it is important to
+ * avoid unnecessary re-reads of this register to avoid missing
+ * a link-down event.
+ */
+ pcs_stat1 = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
+ if (pcs_stat1 < 0) {
+ state->link = false;
+ return pcs_stat1;
+ }
+
/* Link needs to be read first ... */
- state->link = xpcs_read_link_c73(xpcs) > 0 ? 1 : 0;
+ state->link = !!(pcs_stat1 & MDIO_STAT1_LSTATUS);
/* ... and then we check the faults. */
- ret = xpcs_read_fault_c73(xpcs, state);
+ ret = xpcs_read_fault_c73(xpcs, state, pcs_stat1);
if (ret) {
- ret = xpcs_soft_reset(xpcs, compat);
+ ret = xpcs_soft_reset(xpcs);
if (ret)
return ret;
return xpcs_do_config(xpcs, state->interface, MLO_AN_INBAND, NULL);
}
- if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state, compat)) {
- state->an_complete = true;
- xpcs_read_lpa_c73(xpcs, state);
- xpcs_resolve_lpa_c73(xpcs, state);
- } else if (state->an_enabled) {
- state->link = 0;
- } else if (state->link) {
+ /* There is no point doing anything else if the link is down. */
+ if (!state->link)
+ return 0;
+
+ if (state->an_enabled) {
+ /* The link status bit is latching-low, so it is important to
+ * avoid unnecessary re-reads of this register to avoid missing
+ * a link-down event.
+ */
+ an_stat1 = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (an_stat1 < 0) {
+ state->link = false;
+ return an_stat1;
+ }
+
+ state->an_complete = xpcs_aneg_done_c73(xpcs, state, compat,
+ an_stat1);
+ if (!state->an_complete) {
+ state->link = false;
+ return 0;
+ }
+
+ ret = xpcs_read_lpa_c73(xpcs, state, an_stat1);
+ if (ret < 0) {
+ state->link = false;
+ return ret;
+ }
+
+ phylink_resolve_c73(state);
+ } else {
xpcs_resolve_pma(xpcs, state);
}
return;
switch (compat->an_mode) {
+ case DW_10GBASER:
+ phylink_mii_c45_pcs_get_state(xpcs->mdiodev, state);
+ break;
case DW_AN_C73:
ret = xpcs_get_state_c73(xpcs, state, compat);
if (ret) {
}
}
+static void xpcs_link_up_check(struct dw_xpcs *xpcs, int speed, int duplex)
+{
+ int ret;
+
+ ret = xpcs_poll_val(xpcs, MDIO_MMD_PCS, MDIO_STAT1,
+ MDIO_STAT1_LSTATUS, MDIO_STAT1_LSTATUS);
+ if (ret < 0) {
+ dev_err(&xpcs->mdiodev->dev, "Link is malfunctioning\n");
+ return;
+ }
+
+ if (speed != SPEED_10000)
+ dev_warn(&xpcs->mdiodev->dev, "Incompatible speed\n");
+
+ if (duplex != DUPLEX_FULL)
+ dev_warn(&xpcs->mdiodev->dev, "Incompatible duplex\n");
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT2);
+ if (ret < 0) {
+ dev_err(&xpcs->mdiodev->dev, "Failed to read PCS status\n");
+ return;
+ }
+
+ if (ret & MDIO_STAT2_RXFAULT)
+ dev_dbg(&xpcs->mdiodev->dev, "Receiver fault detected\n");
+
+ if (ret & MDIO_STAT2_TXFAULT)
+ dev_dbg(&xpcs->mdiodev->dev, "Transmitter fault detected\n");
+}
+
static void xpcs_link_up_sgmii(struct dw_xpcs *xpcs, unsigned int mode,
int speed, int duplex)
{
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+ if (interface == PHY_INTERFACE_MODE_XGMII)
+ return xpcs_config_xgmii(xpcs, speed);
if (interface == PHY_INTERFACE_MODE_USXGMII)
return xpcs_config_usxgmii(xpcs, speed);
+ if (interface == PHY_INTERFACE_MODE_10GBASER)
+ return xpcs_link_up_check(xpcs, speed, duplex);
+ if (interface == PHY_INTERFACE_MODE_10GBASEX)
+ return xpcs_link_up_check(xpcs, speed, duplex);
if (interface == PHY_INTERFACE_MODE_SGMII)
return xpcs_link_up_sgmii(xpcs, mode, speed, duplex);
if (interface == PHY_INTERFACE_MODE_1000BASEX)
}
static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+ [DW_XPCS_XGMII] = {
+ .supported = xpcs_xgmii_features,
+ .interface = xpcs_xgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_xgmii_interfaces),
+ .an_mode = DW_AN_C73,
+ },
[DW_XPCS_USXGMII] = {
.supported = xpcs_usxgmii_features,
.interface = xpcs_usxgmii_interfaces,
.num_interfaces = ARRAY_SIZE(xpcs_10gkr_interfaces),
.an_mode = DW_AN_C73,
},
+ [DW_XPCS_10GBASER] = {
+ .supported = xpcs_10gbaser_features,
+ .interface = xpcs_10gbaser_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_10gbaser_interfaces),
+ .an_mode = DW_10GBASER,
+ .pma_config = xpcs_10gbaser_pma_config,
+ },
+ [DW_XPCS_10GBASEX] = {
+ .supported = xpcs_10gbasex_features,
+ .interface = xpcs_10gbasex_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_10gbasex_interfaces),
+ .an_mode = DW_10GBASEX,
+ .pma_config = xpcs_10gbasex_pma_config,
+ },
[DW_XPCS_XLGMII] = {
.supported = xpcs_xlgmii_features,
.interface = xpcs_xlgmii_interfaces,
},
};
+static const struct xpcs_compat bt1_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+ [DW_XPCS_XGMII] = {
+ .supported = xpcs_xgmii_features,
+ .interface = xpcs_xgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_xgmii_interfaces),
+ .an_mode = DW_AN_C73,
+ },
+ [DW_XPCS_10GBASER] = {
+ .supported = xpcs_10gbaser_features,
+ .interface = xpcs_10gbaser_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_10gbaser_interfaces),
+ .an_mode = DW_10GBASER,
+ .pma_config = xpcs_10gbaser_pma_config,
+ },
+ [DW_XPCS_10GBASEX] = {
+ .supported = xpcs_10gbasex_features,
+ .interface = xpcs_10gbasex_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_10gbasex_interfaces),
+ .an_mode = DW_10GBASEX,
+ .pma_config = xpcs_10gbasex_pma_config,
+ },
+};
+
static const struct xpcs_id xpcs_id_list[] = {
{
- .id = SYNOPSYS_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .id = DW_XPCS_ID,
+ .mask = DW_XPCS_ID_MASK,
.compat = synopsys_xpcs_compat,
}, {
.id = NXP_SJA1105_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .mask = DW_XPCS_ID_MASK,
.compat = nxp_sja1105_xpcs_compat,
}, {
.id = NXP_SJA1110_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .mask = DW_XPCS_ID_MASK,
.compat = nxp_sja1110_xpcs_compat,
+ }, {
+ .id = BT1_XGMAC_XPCS_ID,
+ .mask = DW_XPCS_ID_MASK,
+ .compat = bt1_xpcs_compat,
},
};
.pcs_link_up = xpcs_link_up,
};
-struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
- phy_interface_t interface)
+static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev)
{
struct dw_xpcs *xpcs;
- u32 xpcs_id;
- int i, ret;
xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
if (!xpcs)
return ERR_PTR(-ENOMEM);
xpcs->mdiodev = mdiodev;
+ xpcs->pcs.ops = &xpcs_phylink_ops;
+ xpcs->pcs.poll = true;
+
+ return xpcs;
+}
+
+static void xpcs_free_data(struct dw_xpcs *xpcs)
+{
+ kfree(xpcs);
+}
+
+static int xpcs_init_clks(struct dw_xpcs *xpcs)
+{
+ static const char *ids[DW_XPCS_NUM_CLKS] = {
+ [DW_XPCS_CLK_CORE] = "core",
+ [DW_XPCS_CLK_PAD] = "pad",
+ };
+ struct device *dev = &xpcs->mdiodev->dev;
+ int ret, i;
+
+ for (i = 0; i < DW_XPCS_NUM_CLKS; ++i)
+ xpcs->clks[i].id = ids[i];
+
+ ret = clk_bulk_get_optional(dev, DW_XPCS_NUM_CLKS, xpcs->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ ret = clk_bulk_prepare_enable(DW_XPCS_NUM_CLKS, xpcs->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable clocks\n");
+
+ return 0;
+}
+
+static void xpcs_clear_clks(struct dw_xpcs *xpcs)
+{
+ clk_bulk_disable_unprepare(DW_XPCS_NUM_CLKS, xpcs->clks);
+
+ clk_bulk_put(DW_XPCS_NUM_CLKS, xpcs->clks);
+}
- xpcs_id = xpcs_get_id(xpcs);
+static int xpcs_init_id(struct dw_xpcs *xpcs)
+{
+ const struct dw_xpcs_info *info;
+ int i;
+
+ info = device_get_match_data(&xpcs->mdiodev->dev) ?:
+ dev_get_platdata(&xpcs->mdiodev->dev);
+ if (!info) {
+ xpcs->info.did = DW_XPCS_ID_NATIVE;
+ xpcs->info.pma = DW_XPCS_PMA_UNKNOWN;
+ } else {
+ xpcs->info = *info;
+ }
+
+ if (xpcs->info.did == DW_XPCS_ID_NATIVE)
+ xpcs->info.did = xpcs_get_id(xpcs);
for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) {
const struct xpcs_id *entry = &xpcs_id_list[i];
- const struct xpcs_compat *compat;
- if ((xpcs_id & entry->mask) != entry->id)
+ if ((xpcs->info.did & entry->mask) != entry->id)
continue;
xpcs->id = entry;
- compat = xpcs_find_compat(entry, interface);
- if (!compat) {
- ret = -ENODEV;
- goto out;
- }
+ return 0;
+ }
- xpcs->pcs.ops = &xpcs_phylink_ops;
- xpcs->pcs.poll = true;
+ return -ENODEV;
+}
- ret = xpcs_soft_reset(xpcs, compat);
- if (ret)
- goto out;
+static int xpcs_init_iface(struct dw_xpcs *xpcs, phy_interface_t interface)
+{
+ const struct xpcs_compat *compat;
+ int ret;
+
+ compat = xpcs_find_compat(xpcs->id, interface);
+ if (!compat)
+ return -EINVAL;
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND1, DW_SR_CTRL_MMD_CTRL);
+ if (ret < 0)
+ return ret;
+
+ xpcs->mmd_ctrl = ret;
+
+ return xpcs_soft_reset(xpcs);
+}
+
+static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
+ phy_interface_t interface)
+{
+ struct dw_xpcs *xpcs;
+ int ret;
+
+ ret = device_attach(&mdiodev->dev);
+ if (ret < 0 && ret != -ENODEV)
+ return ERR_PTR(ret);
+
+ xpcs = xpcs_create_data(mdiodev);
+ if (IS_ERR(xpcs))
return xpcs;
- }
- ret = -ENODEV;
+ ret = xpcs_init_clks(xpcs);
+ if (ret)
+ goto out_free_data;
+
+ ret = xpcs_init_id(xpcs);
+ if (ret)
+ goto out_clear_clks;
-out:
- kfree(xpcs);
+ ret = xpcs_init_iface(xpcs, interface);
+ if (ret)
+ goto out_clear_clks;
+
+ return xpcs;
+
+out_clear_clks:
+ xpcs_clear_clks(xpcs);
+
+out_free_data:
+ xpcs_free_data(xpcs);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(xpcs_create);
+
+struct dw_xpcs *xpcs_create_bynode(const struct fwnode_handle *fwnode,
+ phy_interface_t interface)
+{
+ struct fwnode_handle *pcs_node;
+ struct mdio_device *mdiodev;
+ struct dw_xpcs *xpcs;
+
+ pcs_node = fwnode_find_reference(fwnode, "pcs-handle", 0);
+ if (IS_ERR(pcs_node))
+ return ERR_CAST(pcs_node);
+
+ mdiodev = fwnode_mdio_find_device(pcs_node);
+ fwnode_handle_put(pcs_node);
+ if (!mdiodev)
+ return ERR_PTR(-ENODEV);
+
+ xpcs = xpcs_create(mdiodev, interface);
+ if (IS_ERR(xpcs))
+ mdio_device_put(mdiodev);
+
+ return xpcs;
+}
+EXPORT_SYMBOL_GPL(xpcs_create_bynode);
+
+struct dw_xpcs *xpcs_create_byaddr(struct mii_bus *bus, int addr,
+ phy_interface_t interface)
+{
+ struct mdio_device *mdiodev;
+ struct dw_xpcs *xpcs;
+
+ if (addr >= PHY_MAX_ADDR)
+ return ERR_PTR(-EINVAL);
+
+ if (mdiobus_is_registered_device(bus, addr)) {
+ mdiodev = bus->mdio_map[addr];
+ mdio_device_get(mdiodev);
+ } else {
+ mdiodev = mdio_device_create(bus, addr);
+ if (IS_ERR(mdiodev))
+ return ERR_CAST(mdiodev);
+ }
+
+ xpcs = xpcs_create(mdiodev, interface);
+ if (IS_ERR(xpcs))
+ mdio_device_put(mdiodev);
+
+ return xpcs;
+}
+EXPORT_SYMBOL_GPL(xpcs_create_byaddr);
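+
+/* A usage sketch (hypothetical caller, for illustration only): a MAC driver
+ * can bind the XPCS either via a "pcs-handle" fwnode reference or directly
+ * by the MDIO bus address:
+ *
+ *	xpcs = xpcs_create_byaddr(bus, addr, PHY_INTERFACE_MODE_10GBASER);
+ *	if (IS_ERR(xpcs))
+ *		return PTR_ERR(xpcs);
+ *	...
+ *	xpcs_destroy(xpcs);
+ */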
void xpcs_destroy(struct dw_xpcs *xpcs)
{
- kfree(xpcs);
+ if (!xpcs)
+ return;
+
+ mdio_device_put(xpcs->mdiodev);
+
+ xpcs_clear_clks(xpcs);
+
+ xpcs_free_data(xpcs);
}
EXPORT_SYMBOL_GPL(xpcs_destroy);
+DW_XPCS_INFO_DECLARE(xpcs_generic, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_UNKNOWN);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen1_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN1_3G);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen2_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN2_3G);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen2_6g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN2_6G);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen4_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN4_3G);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen4_6g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN4_6G);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen5_10g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN5_10G);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen5_12g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN5_12G);
+DW_XPCS_INFO_DECLARE(xpcs_bt1, BT1_XGMAC_XPCS_ID, DW_XPCS_PMA_GEN5_10G);
+
+static const struct of_device_id xpcs_of_ids[] = {
+ { .compatible = "snps,dw-xpcs", .data = &xpcs_generic },
+ { .compatible = "snps,dw-xpcs-gen1-3g", .data = &xpcs_pma_gen1_3g },
+ { .compatible = "snps,dw-xpcs-gen2-3g", .data = &xpcs_pma_gen2_3g },
+ { .compatible = "snps,dw-xpcs-gen2-6g", .data = &xpcs_pma_gen2_6g },
+ { .compatible = "snps,dw-xpcs-gen4-3g", .data = &xpcs_pma_gen4_3g },
+ { .compatible = "snps,dw-xpcs-gen4-6g", .data = &xpcs_pma_gen4_6g },
+ { .compatible = "snps,dw-xpcs-gen5-10g", .data = &xpcs_pma_gen5_10g },
+ { .compatible = "snps,dw-xpcs-gen5-12g", .data = &xpcs_pma_gen5_12g },
+ { .compatible = "baikal,bt1-xpcs", .data = &xpcs_bt1 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, xpcs_of_ids);
+
+static struct mdio_device_id __maybe_unused xpcs_mdio_ids[] = {
+ { DW_XPCS_ID, DW_XPCS_ID_MASK },
+ { NXP_SJA1105_XPCS_ID, DW_XPCS_ID_MASK },
+ { NXP_SJA1110_XPCS_ID, DW_XPCS_ID_MASK },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, xpcs_mdio_ids);
+
+static struct mdio_driver xpcs_driver = {
+ .mdiodrv.driver = {
+ .name = "dwxpcs",
+ .of_match_table = xpcs_of_ids,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+};
+mdio_module_driver(xpcs_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet XPCS platform driver");
+MODULE_AUTHOR("Jose Abreu <Jose.Abreu@synopsys.com>");
MODULE_LICENSE("GPL v2");
* Author: Jose Abreu <Jose.Abreu@synopsys.com>
*/
-#define SYNOPSYS_XPCS_ID 0x7996ced0
-#define SYNOPSYS_XPCS_MASK 0xffffffff
+#include <linux/bits.h>
+#include <linux/pcs/pcs-xpcs.h>
/* Vendor regs access */
#define DW_VENDOR BIT(15)
+/* VR_XS_PMA */
+#define DW_VR_XS_PMA_GEN5_10G_MPLL_CTRL 0x007a
+#define DW_VR_XS_PMA_REF_CLK_SEL_CORE BIT(13)
+#define DW_VR_XS_PMA_GEN5_10G_GEN_CTRL 0x009c
+#define DW_VR_XS_PMA_LANE_MODE GENMASK(3, 0)
+#define DW_VR_XS_PMA_LANE_MODE_KX 0x3
+#define DW_VR_XS_PMA_LANE_MODE_KX4 0x4
+#define DW_VR_XS_PMA_LANE_MODE_KR 0x5
+#define DW_VR_XS_PMA_LANE_MODE_SGMII 0x6
+#define DW_VR_XS_PMA_LANE_MODE_RXAUI 0x8
+#define DW_VR_XS_PMA_LINK_WIDTH GENMASK(10, 8)
+#define DW_VR_XS_PMA_LINK_WIDTH_1 0x0
+#define DW_VR_XS_PMA_LINK_WIDTH_2 0x1
+#define DW_VR_XS_PMA_LINK_WIDTH_4 0x2
+#define DW_VR_XS_PMA_LANE_PWR_OFF GENMASK(15, 12)
+
/* VR_XS_PCS */
#define DW_USXGMII_RST BIT(10)
#define DW_USXGMII_EN BIT(9)
+#define DW_VR_XS_PCS_XAUI_MODE_CTRL 0x0004
+#define DW_VR_XS_PCS_RXAUI_MODE BIT(0)
+#define DW_VR_XS_PCS_MRVL_RXAUI BIT(1)
#define DW_VR_XS_PCS_DIG_STS 0x0010
+#define DW_PSEQ_STATE GENMASK(4, 2)
+#define DW_PSEQ_TXRX_STABLE 0x4
#define DW_RXFIFO_ERR GENMASK(6, 5)
/* SR_MII */
#define DW_SR_AN_ADV1 0x10
#define DW_SR_AN_ADV2 0x11
#define DW_SR_AN_ADV3 0x12
-#define DW_SR_AN_LP_ABL1 0x13
-#define DW_SR_AN_LP_ABL2 0x14
-#define DW_SR_AN_LP_ABL3 0x15
/* Clause 73 Defines */
/* AN_LP_ABL1 */
#define DW_C73_2500KX BIT(0)
#define DW_C73_5000KR BIT(1)
+/* VR_CTRL_MMD */
+#define DW_SR_CTRL_MMD_CTRL 0x0009
+#define DW_SR_CTRL_AN_MMD_EN BIT(0)
+#define DW_SR_CTRL_PCS_XS_MMD_EN BIT(1)
+#define DW_SR_CTRL_MII_MMD_EN BIT(2)
+#define DW_SR_CTRL_PMA_MMD_EN BIT(3)
+
/* Clause 37 Defines */
/* VR MII MMD registers offsets */
#define DW_VR_MII_MMD_CTRL 0x0000
/* VR MII EEE Control 1 defines */
#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+#define DW_XPCS_INFO_DECLARE(_name, _did, _pma) \
+ static const struct dw_xpcs_info _name = { .did = _did, .pma = _pma }
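+
+/* For instance, DW_XPCS_INFO_DECLARE(xpcs_bt1, BT1_XGMAC_XPCS_ID,
+ * DW_XPCS_PMA_GEN5_10G) declares the match data bound to the
+ * "baikal,bt1-xpcs" OF-device entry in pcs-xpcs.c.
+ */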
+
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val);
+int xpcs_modify(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set);
+int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set);
+int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg);
+int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg, u16 val);
+int xpcs_poll_val(struct dw_xpcs *xpcs, int dev, int reg, u16 mask, u16 val);
+int xpcs_soft_reset(struct dw_xpcs *xpcs);
+int xpcs_vendor_reset(struct dw_xpcs *xpcs);
+
+int xpcs_10gbaser_pma_config(struct dw_xpcs *xpcs);
+int xpcs_10gbasex_pma_config(struct dw_xpcs *xpcs);
int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs);
int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs);
int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs);
Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet
Transceiver.
+config MARVELL_88X2222_GPIO
+ bool "Marvell 88X2222 SFP+ GPIOs support"
+ depends on MARVELL_88X2222_PHY
+ depends on GPIOLIB && GPIOLIB_IRQCHIP
+ help
+ Support for the Marvell 88X2222 PHY SFP+/GPIO signals.
+
+config MARVELL_88X2222_I2C
+ bool "Marvell 88X2222 SFP+ I2C support"
+ depends on MARVELL_88X2222_PHY
+ depends on I2C
+ help
+ Support for the Marvell 88X2222 PHY SFP+/I2C signals.
+
config MAXLINEAR_GPHY
tristate "Maxlinear Ethernet PHYs"
select POLYNOMIAL if HWMON
* Marvell 88x2222 dual-port multi-speed ethernet transceiver.
*
* Supports:
- * XAUI on the host side.
+ * XAUI or 10GBase-R on the host side.
* 1000Base-X or 10GBase-R on the line side.
* SGMII over 1000Base-X.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/phy.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/delay.h>
#include <linux/mdio.h>
#include <linux/marvell_phy.h>
#include <linux/sfp.h>
#include <linux/netdevice.h>
+#include <dt-bindings/net/mv-phy-88x2222.h>
+
/* Port PCS Configuration */
#define MV_PCS_CONFIG 0xF002
+#define MV_PCS_HOST_PCS_SELECT GENMASK(6, 0)
+#define MV_PCS_HOST_10GBR 0x71
#define MV_PCS_HOST_XAUI 0x73
-#define MV_PCS_LINE_10GBR (0x71 << 8)
-#define MV_PCS_LINE_1GBX_AN (0x7B << 8)
-#define MV_PCS_LINE_SGMII_AN (0x7F << 8)
+#define MV_PCS_LINE_PCS_SELECT GENMASK(14, 8)
+#define MV_PCS_LINE_10GBR 0x71
+#define MV_PCS_LINE_1GBX_AN 0x7B
+#define MV_PCS_LINE_SGMII_AN 0x7F
/* Port Reset and Power Down */
#define MV_PORT_RST 0xF003
#define MV_HOST_RST_SW BIT(7)
#define MV_PORT_RST_SW (MV_LINE_RST_SW | MV_HOST_RST_SW)
+/* GPIO Interrupt Enable */
+#define MV_GPIO_INT_EN 0xF010
+
+/* GPIO Interrupt Status */
+#define MV_GPIO_INT_STAT 0xF011
+
+/* GPIO Input/Output data */
+#define MV_GPIO_DATA 0xF012
+
+/* GPIO Tristate control */
+#define MV_GPIO_TRISTATE_CTRL 0xF013
+
+/* GPIO Interrupt type 1 (GPIOs 0 - 3) */
+#define MV_GPIO_INT_TYPE1 0xF014
+#define MV_GPIO_INT_TYPE_PIN0 GENMASK(2, 0)
+#define MV_GPIO_INT_NO_IRQ 0
+#define MV_GPIO_INT_LEVEL_LOW 2
+#define MV_GPIO_INT_LEVEL_HIGH 3
+#define MV_GPIO_INT_EDGE_FALLING 4
+#define MV_GPIO_INT_EDGE_RISING 5
+#define MV_GPIO_INT_EDGE_BOTH 7
+#define MV_GPIO_INT_TYPE_PIN1 GENMASK(6, 4)
+#define MV_GPIO_FUNC_TX_FLT_PIN1 BIT(7)
+#define MV_GPIO_INT_TYPE_PIN2 GENMASK(10, 8)
+#define MV_GPIO_FUNC_RX_LOS_PIN2 BIT(11)
+#define MV_GPIO_INT_TYPE_PIN3 GENMASK(14, 12)
+
+/* GPIO Interrupt type 2 (GPIOs 4 - 7) */
+#define MV_GPIO_INT_TYPE2 0xF015
+#define MV_GPIO_INT_TYPE_PIN4 GENMASK(2, 0)
+#define MV_GPIO_FUNC_LED0_PIN4 BIT(3)
+#define MV_GPIO_INT_TYPE_PIN5 GENMASK(6, 4)
+#define MV_GPIO_FUNC_LED1_PIN5 BIT(7)
+#define MV_GPIO_INT_TYPE_PIN6 GENMASK(10, 8)
+#define MV_GPIO_FUNC_MPC_PIN6 BIT(11)
+#define MV_GPIO_INT_TYPE_PIN7 GENMASK(14, 12)
+#define MV_GPIO_FUNC_TOD_PIN7 BIT(15)
+
+/* GPIO Interrupt type 3 (GPIOs 8, 10, 11) */
+#define MV_GPIO_INT_TYPE3 0xF016
+#define MV_GPIO_INT_TYPE_PIN8 GENMASK(2, 0)
+#define MV_GPIO_FUNC_TX_DIS_PIN8 GENMASK(4, 3)
+#define MV_GPIO_FUNC_TX_DIS_LED 0
+#define MV_GPIO_FUNC_TX_DIS_PIN 1
+#define MV_GPIO_FUNC_TX_DIS_REG 2
+#define MV_GPIO_INT_TYPE_PIN10 GENMASK(10, 8)
+#define MV_GPIO_FUNC_SDA_PIN10 BIT(11)
+#define MV_GPIO_INT_TYPE_PIN11 GENMASK(14, 12)
+#define MV_GPIO_FUNC_SCL_PIN11 BIT(15)
+
+/* GPIO Interrupt type 1-3 helpers */
+#define MV_GPIO_INT_TYPE_REG(_pin) \
+ ((_pin) / 4)
+#define MV_GPIO_INT_TYPE_PREP(_pin, _val) \
+ ((_val) << ((_pin) % 4) * 4)
+#define MV_GPIO_INT_TYPE_MASK(_pin) \
+ MV_GPIO_INT_TYPE_PREP(_pin, GENMASK(2, 0))
+#define MV_GPIO_FUNC_PREP(_pin, _val) \
+ ((_val) << (3 + ((_pin) % 4) * 4))
+#define MV_GPIO_FUNC_MASK(_pin) \
+ MV_GPIO_FUNC_PREP(_pin, (_pin) != 8 ? BIT(0) : GENMASK(1, 0))
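+
+/* Worked example: for pin 5, MV_GPIO_INT_TYPE_REG(5) selects register index
+ * 1 (MV_GPIO_INT_TYPE2), MV_GPIO_INT_TYPE_MASK(5) evaluates to GENMASK(6, 4)
+ * and MV_GPIO_FUNC_MASK(5) to BIT(7), matching the PIN5 fields above. Pin 8
+ * is the only one with a two-bit function field, hence the GENMASK(1, 0)
+ * special case.
+ */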
+
+/* Port Interrupt Status */
+#define MV_PORT_INT_STAT 0xF040
+#define MV_PORT_INT_PCS_LINE BIT(0)
+#define MV_PORT_INT_PCS_HOST BIT(2)
+#define MV_PORT_INT_GPIO BIT(3)
+
/* PMD Receive Signal Detect */
#define MV_RX_SIGNAL_DETECT 0x000A
#define MV_RX_SIGNAL_DETECT_GLOBAL BIT(0)
/* 1000Base-X Auto-Negotiation Advertisement Register */
#define MV_1GBX_ADVERTISE (0x2000 + MII_ADVERTISE)
+/* Two Wire Interface Caching Control/Status Register */
+#define MV_TWSI_CACHE_CTRL 0x8000
+#define MV_TWSI_CACHE_A0_CTRL GENMASK(1, 0)
+#define MV_TWSI_CACHE_NO_AUTO 0
+#define MV_TWSI_CACHE_AT_PLUGIN 1
+#define MV_TWSI_CACHE_AT_PLUGIN_POLL 2
+#define MV_TWSI_CACHE_MANUAL 3
+#define MV_TWSI_CACHE_A0_CMD_STAT GENMASK(3, 2)
+#define MV_TWSI_CACHE_NO_UPDATE 0
+#define MV_TWSI_CACHE_UPDATED_ONCE 1
+#define MV_TWSI_CACHE_IS_LOADING 2
+#define MV_TWSI_CACHE_FAILED 3
+#define MV_TWSI_CACHE_A0_VALID BIT(9)
+#define MV_TWSI_RESET BIT(10)
+#define MV_TWSI_CACHE_A2_CTRL GENMASK(12, 11)
+#define MV_TWSI_CACHE_A2_CMD_STAT GENMASK(14, 13)
+#define MV_TWSI_CACHE_A2_VALID BIT(15)
+
+/* Two Wire Interface Memory Address Register */
+#define MV_TWSI_MEM_ADDR 0x8001
+#define MV_TWSI_BYTE_ADDR GENMASK(7, 0)
+#define MV_TWSI_BYTE_READ BIT(8)
+#define MV_TWSI_SLV_ADDR GENMASK(15, 9)
+
+/* Two Wire Interface Memory Read Data and Status Register */
+#define MV_TWSI_MEM_READ_STAT 0x8002
+#define MV_TWSI_MEM_READ_DATA GENMASK(7, 0)
+#define MV_TWSI_STAT GENMASK(10, 8)
+#define MV_TWSI_READY 0
+#define MV_TWSI_CMD_DONE 1
+#define MV_TWSI_CMD_IN_PROG 2
+#define MV_TWSI_CMD_WDONE_RFAIL 3
+#define MV_TWSI_CMD_FAIL 5
+#define MV_TWSI_BUSY 7
+#define MV_TWSI_CACHE_ECC_UNCOR_ERR BIT(11)
+#define MV_TWSI_CACHE_ECC_COR_ERR BIT(12)
+
+/* Two Wire Interface Memory Write Data and Control Register */
+#define MV_TWSI_MEM_WRITE_CTRL 0x8003
+#define MV_TWSI_MEM_WRITE_DATA GENMASK(7, 0)
+#define MV_TWSI_MEM_AUTO_RBACK BIT(9)
+#define MV_TWSI_MEM_WRITE_TIME GENMASK(15, 12)
+
+/* Two Wire Interface Caching Delay Register */
+#define MV_TWSI_CACHE_DELAY 0x8004
+#define MV_TWSI_CACHE_A2_ADDR_MSB BIT(0)
+#define MV_TWSI_CACHE_A2_SLV_ADDR GENMASK(7, 1)
+#define MV_TWSI_CACHE_RELOAD_FREQ GENMASK(10, 9)
+#define MV_TWSI_CACHE_RELOAD_250MS 0
+#define MV_TWSI_CACHE_RELOAD_500MS 1
+#define MV_TWSI_CACHE_RELOAD_1S 2
+#define MV_TWSI_CACHE_RELOAD_2S 3
+#define MV_TWSI_CACHE_ECC_UNCOR_INT_EN BIT(11)
+#define MV_TWSI_CACHE_ECC_COR_INT_EN BIT(12)
+#define MV_TWSI_CACHE_AUTO_DELAY GENMASK(15, 13)
+#define MV_TWSI_CACHE_NO_DELAY 0
+#define MV_TWSI_CACHE_DELAY_250MS 1
+#define MV_TWSI_CACHE_DELAY_500MS 2
+#define MV_TWSI_CACHE_DELAY_1S 3
+#define MV_TWSI_CACHE_DELAY_2S 4
+#define MV_TWSI_CACHE_DELAY_4S 5
+#define MV_TWSI_CACHE_DELAY_8S 6
+#define MV_TWSI_CACHE_AUTO_DIS 7
+
+/* Two Wire Interface EEPROM Cache Page A0 Registers */
+#define MV_TWSI_CACHE_EEPROM_A0 0x8007
+
+/* Two Wire Interface EEPROM Cache Page A2 Registers */
+#define MV_TWSI_CACHE_EEPROM_A2 0x8087
+
+/* 10GBase-R Interrupt Enable Register */
+#define MV_10GBR_INT_EN 0x8000
+#define MV_10GBR_INT_BLOCK_LOCK BIT(0)
+#define MV_10GBR_INT_HIGH_BER BIT(1)
+#define MV_10GBR_INT_LINK_STAT BIT(2)
+#define MV_10GBR_INT_LOCAL_RXFAULT BIT(10)
+#define MV_10GBR_INT_LOCAL_TXFAULT BIT(11)
+#define MV_10GBR_INT_MASK (MV_10GBR_INT_LINK_STAT | \
+ MV_10GBR_INT_LOCAL_RXFAULT | \
+ MV_10GBR_INT_LOCAL_TXFAULT)
+
+/* 10GBase-R Interrupt Status Register */
+#define MV_10GBR_INT_STAT 0x8001
+
+/* 1000Base-X Interrupt Enable Register */
+#define MV_1GBX_INT_EN 0xA001
+#define MV_1GBX_INT_FALSE_CARRIER BIT(7)
+#define MV_1GBX_INT_SYMBOL_ERROR BIT(8)
+#define MV_1GBX_INT_LINK_UP BIT(9)
+#define MV_1GBX_INT_LINK_DOWN BIT(10)
+#define MV_1GBX_INT_AN_COMPLETED BIT(11)
+#define MV_1GBX_INT_PAGE_RECEIVED BIT(12)
+#define MV_1GBX_INT_DUPLEX_CHANGED BIT(13)
+#define MV_1GBX_INT_SPEED_CHANGED BIT(14)
+#define MV_1GBX_INT_MASK (MV_1GBX_INT_FALSE_CARRIER | \
+ MV_1GBX_INT_SYMBOL_ERROR | \
+ MV_1GBX_INT_LINK_UP | \
+ MV_1GBX_INT_LINK_DOWN | \
+ MV_1GBX_INT_AN_COMPLETED | \
+ MV_1GBX_INT_DUPLEX_CHANGED | \
+ MV_1GBX_INT_SPEED_CHANGED)
+
+/* 1000Base-X Interrupt Status Register */
+#define MV_1GBX_INT_STAT 0xA002
+
/* 1000Base-X PHY Specific Status Register */
#define MV_1GBX_PHY_STAT 0xA003
#define MV_1GBX_PHY_STAT_AN_RESOLVED BIT(11)
#define MV_1GBX_PHY_STAT_SPEED100 BIT(14)
#define MV_1GBX_PHY_STAT_SPEED1000 BIT(15)
+/* Host-side 10GBase-R PCS Status 1 Register */
+#define MV_HOST_10GBR_PCS_STAT1 (0x0 + MDIO_STAT1)
+
+/* Host-side XAUI PCS Status 1 Register */
+#define MV_HOST_XAUI_PCS_STAT1 (0x1000 + MDIO_STAT1)
+
+/* Host-side 10GBase-R Interrupt Enable Register */
+#define MV_HOST_10GBR_INT_EN 0x8000
+#define MV_HOST_10GBR_INT_BLOCK_LOCK BIT(0)
+#define MV_HOST_10GBR_INT_HIGH_BER BIT(1)
+#define MV_HOST_10GBR_INT_LINK_STAT BIT(2)
+#define MV_HOST_10GBR_INT_LOCAL_RXFAULT BIT(10)
+#define MV_HOST_10GBR_INT_LOCAL_TXFAULT BIT(11)
+#define MV_HOST_10GBR_INT_MASK (MV_HOST_10GBR_INT_LINK_STAT | \
+ MV_HOST_10GBR_INT_LOCAL_RXFAULT | \
+ MV_HOST_10GBR_INT_LOCAL_TXFAULT)
+
+/* Host-side 10GBase-R Interrupt Status Register */
+#define MV_HOST_10GBR_INT_STAT 0x8001
+
+/* Host-side XAUI Interrupt Enable 1 Register */
+#define MV_HOST_XAUI_INT_EN1 0x9001
+#define MV_HOST_XAUI_INT_LINK_UP BIT(2)
+#define MV_HOST_XAUI_INT_LINK_DOWN BIT(3)
+#define MV_HOST_XAUI_INT_MASK1 (MV_HOST_XAUI_INT_LINK_UP | \
+ MV_HOST_XAUI_INT_LINK_DOWN)
+
+/* Host-side XAUI Interrupt Enable 2 Register */
+#define MV_HOST_XAUI_INT_EN2 0x9002
+#define MV_HOST_XAUI_INT_LANE0_SYNC BIT(0)
+#define MV_HOST_XAUI_INT_LANE1_SYNC BIT(1)
+#define MV_HOST_XAUI_INT_LANE2_SYNC BIT(2)
+#define MV_HOST_XAUI_INT_LANE3_SYNC BIT(3)
+#define MV_HOST_XAUI_INT_LANE0_ENERGY BIT(4)
+#define MV_HOST_XAUI_INT_LANE1_ENERGY BIT(5)
+#define MV_HOST_XAUI_INT_LANE2_ENERGY BIT(6)
+#define MV_HOST_XAUI_INT_LANE3_ENERGY BIT(7)
+#define MV_HOST_XAUI_INT_TXFAULT BIT(8)
+#define MV_HOST_XAUI_INT_RXFAULT BIT(9)
+#define MV_HOST_XAUI_INT_MASK2 (MV_HOST_XAUI_INT_TXFAULT | \
+ MV_HOST_XAUI_INT_RXFAULT)
+
+/* Host-side XAUI Interrupt Status 1 Register */
+#define MV_HOST_XAUI_INT_STAT1 0x9003
+
+/* Host-side XAUI Interrupt Status 2 Register */
+#define MV_HOST_XAUI_INT_STAT2 0x9004
+
+/* GPIO controller settings (#9 always invalid, #0 and #3 always available) */
+#define MV_GPIO_NUM 12
+#define MV_GPIO_VAL_MASK 0xDFF
+#define MV_GPIO_INT_UPD_FLAG BIT(31)
+
+/* I2C controller settings */
+#define MV_I2C_POLL_NUM 3
+
#define AUTONEG_TIMEOUT 3
struct mv2222_data {
bool sfp_link;
};
+#ifdef CONFIG_MARVELL_88X2222_GPIO
+struct mv2222_gpio {
+ struct gpio_chip gc;
+ struct phy_device *phydev;
+ unsigned int irq;
+ u32 cache_val_mask;
+ u32 cache_data;
+ u32 cache_tri_ctrl;
+ u32 cache_int_en;
+ u32 cache_int_type[MV_GPIO_INT_TYPE_REG(MV_GPIO_NUM)];
+ struct mutex cache_lock;
+};
+#endif
+
+#ifdef CONFIG_MARVELL_88X2222_I2C
+struct mv2222_i2c {
+ struct i2c_adapter ia;
+ struct phy_device *phydev;
+};
+#endif
+
/* SFI PMA transmit enable */
static int mv2222_tx_enable(struct phy_device *phydev)
{
MDIO_PMD_TXDIS_GLOBAL);
}
-static int mv2222_soft_reset(struct phy_device *phydev)
+static int __mv2222_soft_reset(struct phy_device *phydev, u16 mask)
{
int val, ret;
- ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PORT_RST,
- MV_PORT_RST_SW);
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PORT_RST, mask);
if (ret < 0)
return ret;
return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND2, MV_PORT_RST,
- val, !(val & MV_PORT_RST_SW),
+ val, !(val & mask),
5000, 1000000, true);
}
+static inline int mv2222_host_reset(struct phy_device *phydev)
+{
+ return __mv2222_soft_reset(phydev, MV_HOST_RST_SW);
+}
+
+static inline int mv2222_line_reset(struct phy_device *phydev)
+{
+ return __mv2222_soft_reset(phydev, MV_LINE_RST_SW);
+}
+
+static inline int mv2222_soft_reset(struct phy_device *phydev)
+{
+ return __mv2222_soft_reset(phydev, MV_PORT_RST_SW);
+}
+
static int mv2222_disable_aneg(struct phy_device *phydev)
{
int ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL,
if (ret < 0)
return ret;
- return mv2222_soft_reset(phydev);
+ return mv2222_line_reset(phydev);
}
static int mv2222_enable_aneg(struct phy_device *phydev)
if (ret < 0)
return ret;
- return mv2222_soft_reset(phydev);
+ return mv2222_line_reset(phydev);
}
static int mv2222_set_sgmii_speed(struct phy_device *phydev)
priv->supported));
}
+static int mv2222_config_host(struct phy_device *phydev)
+{
+ u16 val;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_XAUI:
+ val = FIELD_PREP(MV_PCS_HOST_PCS_SELECT, MV_PCS_HOST_XAUI);
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ val = FIELD_PREP(MV_PCS_HOST_PCS_SELECT, MV_PCS_HOST_10GBR);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG,
+ MV_PCS_HOST_PCS_SELECT, val);
+}
+
static int mv2222_config_line(struct phy_device *phydev)
{
struct mv2222_data *priv = phydev->priv;
+ u16 val;
switch (priv->line_interface) {
case PHY_INTERFACE_MODE_10GBASER:
- return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG,
- MV_PCS_HOST_XAUI | MV_PCS_LINE_10GBR);
+ val = FIELD_PREP(MV_PCS_LINE_PCS_SELECT, MV_PCS_LINE_10GBR);
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
- return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG,
- MV_PCS_HOST_XAUI | MV_PCS_LINE_1GBX_AN);
+ val = FIELD_PREP(MV_PCS_LINE_PCS_SELECT, MV_PCS_LINE_1GBX_AN);
+ break;
case PHY_INTERFACE_MODE_SGMII:
- return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG,
- MV_PCS_HOST_XAUI | MV_PCS_LINE_SGMII_AN);
+ val = FIELD_PREP(MV_PCS_LINE_PCS_SELECT, MV_PCS_LINE_SGMII_AN);
+ break;
default:
return -EINVAL;
}
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG,
+ MV_PCS_LINE_PCS_SELECT, val);
}
/* Switch between 1G (1000Base-X/SGMII) and 10G (10GBase-R) modes */
return mv2222_enable_aneg(phydev);
}
+/* The link state and some other fields in the status registers are latched
+ * low/high so that momentary events can be detected. Do not double-read
+ * the status in polling mode, so that such short flag changes aren't missed,
+ * unless the re-read is explicitly required (i.e. when the link was already
+ * down).
+ */
+static int mv2222_read_mmd_latched(struct phy_device *phydev, int devad, u32 reg,
+ bool force)
+{
+ int ret;
+
+ if (!phy_polling_mode(phydev) || force) {
+ ret = phy_read_mmd(phydev, devad, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ return phy_read_mmd(phydev, devad, reg);
+}
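+
+/* E.g. the LSTATUS bit of MDIO_STAT1 latches low on a link drop: the first
+ * read returns the latched state, the second one the current state. In
+ * polling mode a single read suffices, since the phylib state machine
+ * re-reads the register periodically anyway.
+ */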
+
static int mv2222_aneg_done(struct phy_device *phydev)
{
int ret;
if (mv2222_is_10g_capable(phydev)) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ ret = mv2222_read_mmd_latched(phydev, MDIO_MMD_PCS, MDIO_STAT1,
+ !phydev->link);
if (ret < 0)
return ret;
static int timeout;
int val, link = 0;
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ val = mv2222_read_mmd_latched(phydev, MDIO_MMD_PCS, MDIO_STAT1,
+ !phydev->link);
if (val < 0)
return val;
static int timeout;
int val, link = 0;
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT);
+ val = mv2222_read_mmd_latched(phydev, MDIO_MMD_PCS, MV_1GBX_STAT,
+ !phydev->link);
if (val < 0)
return val;
return link;
}
+static bool mv2222_iface_is_operational(struct phy_device *phydev)
+{
+ int reg, val;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_XAUI:
+ reg = MV_HOST_XAUI_PCS_STAT1;
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ reg = MV_HOST_10GBR_PCS_STAT1;
+ break;
+ default:
+ return false;
+ }
+
+ val = mv2222_read_mmd_latched(phydev, MDIO_MMD_PHYXS, reg,
+ !phydev->link);
+ if (val < 0 || !(val & MDIO_STAT1_LSTATUS))
+ return false;
+
+ return true;
+}
+
static bool mv2222_link_is_operational(struct phy_device *phydev)
{
struct mv2222_data *priv = phydev->priv;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
- if (!mv2222_link_is_operational(phydev))
+ if (!mv2222_iface_is_operational(phydev)) {
+ phydev_dbg(phydev, "Host interface isn't operational\n");
return 0;
+ }
+
+ if (!mv2222_link_is_operational(phydev)) {
+ phydev_dbg(phydev, "Line side isn't operational\n");
+ return 0;
+ }
if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER)
link = mv2222_read_status_10g(phydev);
return 0;
}
-static int mv2222_resume(struct phy_device *phydev)
+static int mv2222_config_intr_host_10gbr(struct phy_device *phydev)
{
- return mv2222_tx_enable(phydev);
-}
+ int ret;
-static int mv2222_suspend(struct phy_device *phydev)
-{
- return mv2222_tx_disable(phydev);
+ if (phydev->interface == PHY_INTERFACE_MODE_10GBASER &&
+ phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_10GBR_INT_STAT);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_10GBR_INT_EN,
+ MV_HOST_10GBR_INT_MASK);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_10GBR_INT_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_10GBR_INT_STAT);
+ }
+
+ return ret;
}
-static int mv2222_get_features(struct phy_device *phydev)
+static int mv2222_config_intr_host_xaui(struct phy_device *phydev)
{
- /* All supported linkmodes are set at probe */
+ int ret;
- return 0;
+ if (phydev->interface == PHY_INTERFACE_MODE_XAUI &&
+ phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_STAT1);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_STAT2);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_EN1,
+ MV_HOST_XAUI_INT_MASK1);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_EN2,
+ MV_HOST_XAUI_INT_MASK2);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_EN2, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_EN1, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_STAT2);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_STAT1);
+ }
+
+ return ret;
}
-static int mv2222_config_init(struct phy_device *phydev)
+static int mv2222_config_intr_10g(struct phy_device *phydev)
{
- if (phydev->interface != PHY_INTERFACE_MODE_XAUI)
- return -EINVAL;
+ int ret;
- return 0;
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_10GBR_INT_STAT);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MV_10GBR_INT_EN,
+ MV_10GBR_INT_MASK);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MV_10GBR_INT_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_10GBR_INT_STAT);
+ }
+
+ return ret;
}
-static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+static int mv2222_config_intr_1g(struct phy_device *phydev)
{
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- struct phy_device *phydev = upstream;
- phy_interface_t sfp_interface;
- struct mv2222_data *priv;
- struct device *dev;
int ret;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_supported) = { 0, };
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_INT_STAT);
+ if (ret < 0)
+ return ret;
- priv = (struct mv2222_data *)phydev->priv;
- dev = &phydev->mdio.dev;
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_INT_EN,
+ MV_1GBX_INT_MASK);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_INT_EN, 0);
+ if (ret < 0)
+ return ret;
- sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces);
- phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
- sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_INT_STAT);
+ }
- dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface));
+ return ret;
+}
- if (sfp_interface != PHY_INTERFACE_MODE_10GBASER &&
- sfp_interface != PHY_INTERFACE_MODE_1000BASEX &&
- sfp_interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(dev, "Incompatible SFP module inserted\n");
+static int mv2222_config_intr(struct phy_device *phydev)
+{
+ int ret;
- return -EINVAL;
- }
+ ret = mv2222_config_intr_10g(phydev);
+ if (ret < 0)
+ return ret;
- priv->line_interface = sfp_interface;
- linkmode_and(priv->supported, phydev->supported, sfp_supported);
+ ret = mv2222_config_intr_1g(phydev);
+ if (ret < 0)
+ return ret;
- ret = mv2222_config_line(phydev);
+ ret = mv2222_config_intr_host_10gbr(phydev);
if (ret < 0)
return ret;
- if (mutex_trylock(&phydev->lock)) {
- ret = mv2222_config_aneg(phydev);
- mutex_unlock(&phydev->lock);
- }
+ ret = mv2222_config_intr_host_xaui(phydev);
+ if (ret < 0)
+ return ret;
- return ret;
+ return 0;
}
-static void mv2222_sfp_remove(void *upstream)
+static int mv2222_handle_interrupt_host_10gbr(struct phy_device *phydev)
{
- struct phy_device *phydev = upstream;
- struct mv2222_data *priv;
+ int val;
- priv = (struct mv2222_data *)phydev->priv;
+ val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_10GBR_INT_STAT);
+ if (val < 0)
+ return val;
- priv->line_interface = PHY_INTERFACE_MODE_NA;
- linkmode_zero(priv->supported);
- phydev->port = PORT_NONE;
+ if (val & MV_HOST_10GBR_INT_LINK_STAT)
+ phydev_dbg(phydev, "Host link status changed\n");
+
+ if (val & MV_HOST_10GBR_INT_LOCAL_RXFAULT)
+ phydev_dbg(phydev, "Host Rx fault detected\n");
+
+ if (val & MV_HOST_10GBR_INT_LOCAL_TXFAULT)
+ phydev_dbg(phydev, "Host Tx fault detected\n");
+
+ return 0;
}
-static void mv2222_sfp_link_up(void *upstream)
+static int mv2222_handle_interrupt_host_xaui(struct phy_device *phydev)
{
- struct phy_device *phydev = upstream;
- struct mv2222_data *priv;
+ int val1, val2;
- priv = phydev->priv;
- priv->sfp_link = true;
+ val1 = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_STAT1);
+ if (val1 < 0)
+ return val1;
+
+ val2 = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MV_HOST_XAUI_INT_STAT2);
+ if (val2 < 0)
+ return val2;
+
+ if (val1 & MV_HOST_XAUI_INT_LINK_UP)
+ phydev_dbg(phydev, "Host link is up\n");
+
+ if (val1 & MV_HOST_XAUI_INT_LINK_DOWN)
+ phydev_dbg(phydev, "Host link is down\n");
+
+ if (val2 & MV_HOST_XAUI_INT_TXFAULT)
+ phydev_dbg(phydev, "Host Tx fault detected\n");
+
+ if (val2 & MV_HOST_XAUI_INT_RXFAULT)
+ phydev_dbg(phydev, "Host Rx fault detected\n");
+
+ return 0;
}
-static void mv2222_sfp_link_down(void *upstream)
+static int mv2222_handle_interrupt_10g(struct phy_device *phydev)
{
- struct phy_device *phydev = upstream;
- struct mv2222_data *priv;
+ int val;
- priv = phydev->priv;
- priv->sfp_link = false;
-}
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_10GBR_INT_STAT);
+ if (val < 0)
+ return val;
-static const struct sfp_upstream_ops sfp_phy_ops = {
- .module_insert = mv2222_sfp_insert,
- .module_remove = mv2222_sfp_remove,
- .link_up = mv2222_sfp_link_up,
- .link_down = mv2222_sfp_link_down,
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
-};
+ if (val & MV_10GBR_INT_LINK_STAT)
+ phydev_dbg(phydev, "Line link status changed\n");
-static int mv2222_probe(struct phy_device *phydev)
+ if (val & MV_10GBR_INT_LOCAL_RXFAULT)
+ phydev_dbg(phydev, "Line Rx fault detected\n");
+
+ if (val & MV_10GBR_INT_LOCAL_TXFAULT)
+ phydev_dbg(phydev, "Line Tx fault detected\n");
+
+ return 0;
+}
+
+static int mv2222_handle_interrupt_1g(struct phy_device *phydev)
{
- struct device *dev = &phydev->mdio.dev;
- struct mv2222_data *priv = NULL;
+ int val;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_INT_STAT);
+ if (val < 0)
+ return val;
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported);
+ if (val & MV_1GBX_INT_FALSE_CARRIER)
+ phydev_dbg(phydev, "Line false carrier detected\n");
- linkmode_copy(phydev->supported, supported);
+ if (val & MV_1GBX_INT_SYMBOL_ERROR)
+ phydev_dbg(phydev, "Line symbol error detected\n");
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (val & MV_1GBX_INT_LINK_UP)
+ phydev_dbg(phydev, "Line link is up\n");
- priv->line_interface = PHY_INTERFACE_MODE_NA;
- phydev->priv = priv;
+ if (val & MV_1GBX_INT_LINK_DOWN)
+ phydev_dbg(phydev, "Line link is down\n");
- return phy_sfp_probe(phydev, &sfp_phy_ops);
+ if (val & MV_1GBX_INT_AN_COMPLETED)
+ phydev_dbg(phydev, "Line AN is completed\n");
+
+ if (val & MV_1GBX_INT_DUPLEX_CHANGED)
+ phydev_dbg(phydev, "Line link duplex changed\n");
+
+ if (val & MV_1GBX_INT_SPEED_CHANGED)
+ phydev_dbg(phydev, "Line link speed changed\n");
+
+ return 0;
}
-static struct phy_driver mv2222_drivers[] = {
- {
- .phy_id = MARVELL_PHY_ID_88X2222,
- .phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88X2222",
+static irqreturn_t mv2222_handle_interrupt(struct phy_device *phydev)
+{
+ int val, ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_PORT_INT_STAT);
+ if (val < 0)
+ goto err_set_state;
+
+ if (!(val & (MV_PORT_INT_PCS_HOST | MV_PORT_INT_PCS_LINE)))
+ return IRQ_NONE;
+
+ if (val & MV_PORT_INT_PCS_HOST) {
+ ret = mv2222_handle_interrupt_host_10gbr(phydev);
+ if (ret < 0)
+ goto err_set_state;
+
+ ret = mv2222_handle_interrupt_host_xaui(phydev);
+ if (ret < 0)
+ goto err_set_state;
+ }
+
+ if (val & MV_PORT_INT_PCS_LINE) {
+ ret = mv2222_handle_interrupt_10g(phydev);
+ if (ret < 0)
+ goto err_set_state;
+
+ ret = mv2222_handle_interrupt_1g(phydev);
+ if (ret < 0)
+ goto err_set_state;
+ }
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+
+err_set_state:
+ phy_error(phydev);
+
+ return IRQ_NONE;
+}
+
+static int mv2222_resume(struct phy_device *phydev)
+{
+ return mv2222_tx_enable(phydev);
+}
+
+static int mv2222_suspend(struct phy_device *phydev)
+{
+ return mv2222_tx_disable(phydev);
+}
+
+static int mv2222_get_features(struct phy_device *phydev)
+{
+ /* All supported linkmodes are set at probe */
+
+ return 0;
+}
+
+static int mv2222_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = mv2222_config_host(phydev);
+ if (ret)
+ return ret;
+
+ return mv2222_host_reset(phydev);
+}
+
+#ifdef CONFIG_MARVELL_88X2222_GPIO
+/*
+ * Access to the Marvell 88x2222 GPIO control registers is implemented by
+ * means of private data caches, for several reasons. First of all, the
+ * IRQ chip implementation requires the IRQ-related settings to be applied
+ * in a non-atomic context for the slow, asynchronously accessed bus. Thus
+ * caching the IRQ masks and types is necessary in order to flush the
+ * respective CSRs data in the may-sleep context. Second, the implementation
+ * decreases bus traffic, which improves access performance, especially in
+ * case of a bit-banged MDIO bus. Finally, it prevents a race condition in
+ * the output value setup on the output GPIO-mode settings. The GPIO data
+ * register has multiplexed input/output data semantics. Any read from the
+ * CSR returns either the input GPIO state or the output GPIO value.
+ * Meanwhile any write to the data register updates the output GPIO value.
+ * So a race condition may happen if the output value is updated before the
+ * output GPIO mode is activated and a parallel read-modify-write is
+ * performed on the GPIO data register. In that case the input GPIO state
+ * would be read and written back as the output GPIO value. Using the
+ * private data cache prevents that.
+ */
+
+static inline bool mv2222_gpio_offset_is_valid(unsigned int ofs)
+{
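+	/* Pin #9 is undefined in the 88x2222 pinout (MV_88X2222_UNDEF) */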
+ return ofs < MV_GPIO_NUM && ofs != 9;
+}
+
+static int mv2222_gpio_read_tri_ctrl(struct mv2222_gpio *gpio)
+{
+ int val;
+
+ mutex_lock(&gpio->cache_lock);
+
+ val = phy_read_mmd(gpio->phydev, MDIO_MMD_VEND2, MV_GPIO_TRISTATE_CTRL);
+ if (val < 0)
+ goto err_mutex_unlock;
+
+ gpio->cache_tri_ctrl = val;
+
+err_mutex_unlock:
+ mutex_unlock(&gpio->cache_lock);
+
+ return val;
+}
+
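+/*
+ * Write-through helper: update the cached Tri-state Control CSR under the
+ * cache lock and immediately flush the new value to the hardware.
+ */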
+static int mv2222_gpio_wthru_tri_ctrl(struct mv2222_gpio *gpio, u16 mask, u16 val)
+{
+ int ret;
+
+ mutex_lock(&gpio->cache_lock);
+
+ gpio->cache_tri_ctrl &= ~mask;
+ gpio->cache_tri_ctrl |= val;
+
+ ret = phy_write_mmd(gpio->phydev, MDIO_MMD_VEND2,
+ MV_GPIO_TRISTATE_CTRL, gpio->cache_tri_ctrl);
+
+ mutex_unlock(&gpio->cache_lock);
+
+ return ret;
+}
+
+static int mv2222_gpio_read_data(struct mv2222_gpio *gpio)
+{
+ int val;
+
+ mutex_lock(&gpio->cache_lock);
+
+ val = phy_read_mmd(gpio->phydev, MDIO_MMD_VEND2, MV_GPIO_DATA);
+ if (val < 0)
+ goto err_mutex_unlock;
+
+ gpio->cache_data = val;
+
+err_mutex_unlock:
+ mutex_unlock(&gpio->cache_lock);
+
+ return val;
+}
+
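+/*
+ * Write-through helper: update the cached GPIO Data CSR and immediately
+ * flush it to the hardware, thus avoiding the input/output data race.
+ */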
+static int mv2222_gpio_wthru_data(struct mv2222_gpio *gpio, u16 mask, u16 val)
+{
+ int ret;
+
+ mutex_lock(&gpio->cache_lock);
+
+ gpio->cache_data &= ~mask;
+ gpio->cache_data |= val;
+
+ ret = phy_write_mmd(gpio->phydev, MDIO_MMD_VEND2,
+ MV_GPIO_DATA, gpio->cache_data);
+
+ mutex_unlock(&gpio->cache_lock);
+
+ return ret;
+}
+
+static int mv2222_gpio_cache_int_en(struct mv2222_gpio *gpio,
+ unsigned int ofs, bool int_en)
+{
+ if (!mv2222_gpio_offset_is_valid(ofs))
+ return -EINVAL;
+
+ if (int_en)
+ gpio->cache_int_en |= BIT(ofs);
+ else
+ gpio->cache_int_en &= ~BIT(ofs);
+
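+	/* Mark the cache as dirty so the next flush writes the CSR out */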
+ gpio->cache_int_en |= MV_GPIO_INT_UPD_FLAG;
+
+ return 0;
+}
+
+static int mv2222_gpio_cache_func(struct mv2222_gpio *gpio,
+ unsigned int ofs, bool gpio_en)
+{
+ u16 val;
+ int reg;
+
+ if (!mv2222_gpio_offset_is_valid(ofs))
+ return -EINVAL;
+
+	/* Pins #0 and #3 always work as GPIOs */
+	if (ofs == 0 || ofs == 3)
+		return gpio_en ? 0 : -EINVAL;
+
+ if (ofs == 8 && !gpio_en)
+ val = MV_GPIO_FUNC_TX_DIS_REG;
+ else
+ val = !!gpio_en;
+
+ reg = MV_GPIO_INT_TYPE_REG(ofs);
+ gpio->cache_int_type[reg] &= ~MV_GPIO_FUNC_MASK(ofs);
+ gpio->cache_int_type[reg] |= MV_GPIO_FUNC_PREP(ofs, val);
+
+ gpio->cache_int_type[reg] |= MV_GPIO_INT_UPD_FLAG;
+
+ return 0;
+}
+
+static int mv2222_gpio_cache_int_type(struct mv2222_gpio *gpio,
+ unsigned int ofs, unsigned int type)
+{
+ u16 val;
+ int reg;
+
+ if (!mv2222_gpio_offset_is_valid(ofs))
+ return -EINVAL;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ val = MV_GPIO_INT_NO_IRQ;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = MV_GPIO_INT_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = MV_GPIO_INT_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val = MV_GPIO_INT_EDGE_BOTH;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = MV_GPIO_INT_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = MV_GPIO_INT_LEVEL_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = MV_GPIO_INT_TYPE_REG(ofs);
+ gpio->cache_int_type[reg] &= ~MV_GPIO_INT_TYPE_MASK(ofs);
+ gpio->cache_int_type[reg] |= MV_GPIO_INT_TYPE_PREP(ofs, val);
+
+ gpio->cache_int_type[reg] |= MV_GPIO_INT_UPD_FLAG;
+
+ return 0;
+}
+
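+/*
+ * Flush the cached IRQ-enable and IRQ-type/function CSRs to the hardware.
+ * Only the entries marked with the update flag are written out; the flag
+ * is cleared once the corresponding CSR has been synced.
+ */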
+static int mv2222_gpio_cache_int_flush(struct mv2222_gpio *gpio)
+{
+ int i, ret;
+
+ if (gpio->cache_int_en & MV_GPIO_INT_UPD_FLAG) {
+ ret = phy_write_mmd(gpio->phydev, MDIO_MMD_VEND2,
+ MV_GPIO_INT_EN, gpio->cache_int_en);
+ if (ret < 0)
+ return ret;
+
+ gpio->cache_int_en &= ~MV_GPIO_INT_UPD_FLAG;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gpio->cache_int_type); ++i) {
+ if (!(gpio->cache_int_type[i] & MV_GPIO_INT_UPD_FLAG))
+ continue;
+
+ ret = phy_write_mmd(gpio->phydev, MDIO_MMD_VEND2,
+ MV_GPIO_INT_TYPE1 + i, gpio->cache_int_type[i]);
+ if (ret < 0)
+ return ret;
+
+ gpio->cache_int_type[i] &= ~MV_GPIO_INT_UPD_FLAG;
+ }
+
+ return 0;
+}
+
+static int mv2222_gpio_init(struct mv2222_gpio *gpio)
+{
+ int i, ret;
+
+ /* Setup GPIO function and default IRQs state for all valid GPIOs */
+ for (i = 0; i < MV_GPIO_NUM; ++i) {
+ mv2222_gpio_cache_int_en(gpio, i, false);
+
+ if (gpio->cache_val_mask & BIT(i))
+ mv2222_gpio_cache_func(gpio, i, true);
+ else
+ mv2222_gpio_cache_func(gpio, i, false);
+
+ mv2222_gpio_cache_int_type(gpio, i, IRQ_TYPE_NONE);
+ }
+
+ ret = mv2222_gpio_cache_int_flush(gpio);
+ if (ret < 0)
+ return ret;
+
+ ret = mv2222_gpio_read_tri_ctrl(gpio);
+ if (ret < 0)
+ return ret;
+
+ ret = mv2222_gpio_read_data(gpio);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int mv2222_gpio_get_direction(struct gpio_chip *gc, unsigned int ofs)
+{
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
+
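+	/* A set tri-state control bit means the pin output driver is enabled */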
+ mutex_lock(&gpio->cache_lock);
+ ret = !(gpio->cache_tri_ctrl & BIT(ofs));
+ mutex_unlock(&gpio->cache_lock);
+
+ return ret;
+}
+
+static int mv2222_gpio_direction_input(struct gpio_chip *gc, unsigned int ofs)
+{
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ u16 mask = BIT(ofs);
+
+ return mv2222_gpio_wthru_tri_ctrl(gpio, mask, 0);
+}
+
+static int mv2222_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int ofs, int val)
+{
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ u16 mask = BIT(ofs);
+ int ret;
+
+ ret = mv2222_gpio_wthru_data(gpio, mask, val ? mask : 0);
+ if (ret < 0)
+ return ret;
+
+ return mv2222_gpio_wthru_tri_ctrl(gpio, mask, mask);
+}
+
+static int mv2222_gpio_get(struct gpio_chip *gc, unsigned int ofs)
+{
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ u16 mask = BIT(ofs);
+ int val;
+
+ val = mv2222_gpio_read_data(gpio);
+ if (val < 0)
+ return val;
+
+ return !!(val & mask);
+}
+
+static int mv2222_gpio_get_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ int val;
+
+ val = mv2222_gpio_read_data(gpio);
+ if (val < 0)
+ return val;
+
+ *bits &= ~*mask;
+ *bits |= val & *mask;
+
+ return 0;
+}
+
+static void mv2222_gpio_set(struct gpio_chip *gc, unsigned int ofs, int val)
+{
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ u16 mask = BIT(ofs);
+ int ret;
+
+ ret = mv2222_gpio_wthru_data(gpio, mask, val ? mask : 0);
+ if (ret < 0)
+		phydev_err(gpio->phydev, "Failed to set GPIO %u\n", ofs);
+}
+
+static void mv2222_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
+
+ ret = mv2222_gpio_wthru_data(gpio, *mask, *bits);
+ if (ret < 0)
+ phydev_err(gpio->phydev, "Failed to set GPIOs 0x%04lx\n", *bits);
+}
+
+static int mv2222_gpio_set_config(struct gpio_chip *gc, unsigned int ofs,
+ unsigned long cfg)
+{
+ enum pin_config_param mode = pinconf_to_config_param(cfg);
+
+ /* All output pins operate as open drain */
+ if (mode != PIN_CONFIG_DRIVE_OPEN_DRAIN)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static int mv2222_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
+
+ if (ngpios > MV_GPIO_NUM)
+ return -EINVAL;
+
+ *valid_mask &= MV_GPIO_VAL_MASK;
+
+ gpio->cache_val_mask = *valid_mask;
+
+ ret = mv2222_gpio_init(gpio);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mv2222_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ mv2222_gpio_cache_int_en(gpio, hwirq, false);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void mv2222_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ mv2222_gpio_cache_int_en(gpio, hwirq, true);
+}
+
+static int mv2222_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ return mv2222_gpio_cache_int_type(gpio, hwirq, type);
+}
+
+static int mv2222_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+
+ return irq_set_irq_wake(gpio->irq, on);
+}
+
+static void mv2222_gpio_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+
+ mutex_lock(&gpio->cache_lock);
+}
+
+static void mv2222_gpio_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mv2222_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
+
+ ret = mv2222_gpio_cache_int_flush(gpio);
+ if (ret < 0)
+ phydev_err(gpio->phydev, "Failed to flush GPIO IRQs state\n");
+
+ mutex_unlock(&gpio->cache_lock);
+}
+
+static void mv2222_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+	seq_printf(p, "%s", dev_name(gc->parent));
+}
+
+static const struct irq_chip mv2222_gpio_irq_chip = {
+ .name = "mv88x2222",
+ .irq_mask = mv2222_gpio_irq_mask,
+ .irq_unmask = mv2222_gpio_irq_unmask,
+ .irq_set_wake = mv2222_gpio_irq_set_wake,
+ .irq_set_type = mv2222_gpio_irq_set_type,
+ .irq_bus_lock = mv2222_gpio_irq_bus_lock,
+ .irq_bus_sync_unlock = mv2222_gpio_irq_bus_sync_unlock,
+ .irq_print_chip = mv2222_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static irqreturn_t mv2222_gpio_handle_interrupt(int irq, void *devid)
+{
+ struct mv2222_gpio *gpio = devid;
+ unsigned long pending;
+ int i, val;
+
+ val = phy_read_mmd(gpio->phydev, MDIO_MMD_VEND2, MV_GPIO_INT_STAT);
+ if (val < 0)
+ return IRQ_NONE;
+
+ /* The interrupt status register exports the raw IRQ status */
+ mutex_lock(&gpio->cache_lock);
+ pending = val & gpio->cache_int_en;
+ mutex_unlock(&gpio->cache_lock);
+
+ for_each_set_bit(i, &pending, gpio->gc.ngpio)
+ handle_nested_irq(irq_find_mapping(gpio->gc.irq.domain, i));
+
+ return IRQ_RETVAL(pending);
+}
+
+static const char * const mv2222_gpio_names[MV_GPIO_NUM] = {
+ [MV_88X2222_MOD_ABS] = "MOD_ABS",
+ [MV_88X2222_TX_FAULT] = "TX_FAULT",
+ [MV_88X2222_RX_LOS] = "RX_LOS",
+ [MV_88X2222_GPIO] = "GPIO",
+ [MV_88X2222_LED0] = "LED0",
+ [MV_88X2222_LED1] = "LED1",
+ [MV_88X2222_MPC] = "MPC",
+ [MV_88X2222_TOD] = "TOD",
+ [MV_88X2222_TX_DISABLE] = "TX_DISABLE",
+ [MV_88X2222_UNDEF] = NULL,
+ [MV_88X2222_SDA] = "SDA",
+ [MV_88X2222_SCL] = "SCL",
+};
+
+static int mv2222_gpio_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct mv2222_gpio *gpio;
+ struct gpio_chip *gc;
+ int ret;
+
+ /*
+ * No GPIO-chip registration if the PHY-device isn't marked as a
+ * GPIO-controller. This is another level of protection for the
+	 * backward compatibility in case the platform relies on the
+	 * default/pre-initialized pin functions.
+ */
+ if (!device_property_present(dev, "gpio-controller"))
+ return 0;
+
+ /*
+ * Marvell 88x2222 GPIO CSRs are tolerant to the soft-resets. They are
+	 * marked as "Retain" in the "SW Rst" column of the registers
+	 * description table defined in the HW-databook. On the contrary, a
+	 * hard-reset will reset all the GPIO CSRs to their default states,
+	 * which in turn will break the driver GPIO functionality for sure.
+ */
+ if (phydev->mdio.reset_gpio || phydev->mdio.reset_ctrl) {
+ phydev_warn(phydev, "Hard-reset detected, GPIOs unsupported\n");
+ return 0;
+ }
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->phydev = phydev;
+
+ mutex_init(&gpio->cache_lock);
+
+ gc = &gpio->gc;
+ gc->label = "mv88x2222";
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->get_direction = mv2222_gpio_get_direction;
+ gc->direction_input = mv2222_gpio_direction_input;
+ gc->direction_output = mv2222_gpio_direction_output;
+ gc->get = mv2222_gpio_get;
+ gc->get_multiple = mv2222_gpio_get_multiple;
+ gc->set = mv2222_gpio_set;
+ gc->set_multiple = mv2222_gpio_set_multiple;
+ gc->set_config = mv2222_gpio_set_config;
+ gc->init_valid_mask = mv2222_gpio_init_valid_mask;
+ gc->base = -1;
+ gc->ngpio = MV_GPIO_NUM;
+ gc->names = mv2222_gpio_names;
+ gc->can_sleep = true;
+
+ if (phy_interrupt_is_valid(phydev)) {
+ struct gpio_irq_chip *girq = &gc->irq;
+
+ gpio->irq = phydev->irq;
+
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->threaded = true;
+
+ gpio_irq_chip_set_chip(girq, &mv2222_gpio_irq_chip);
+
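+		/* The line is shared with the PHY core ISR, hence IRQF_SHARED */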
+ ret = devm_request_threaded_irq(dev, gpio->irq, NULL,
+ mv2222_gpio_handle_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED,
+ phydev_name(phydev), gpio);
+ if (ret) {
+ phydev_err(phydev, "Failed to request GPIO IRQ\n");
+ return ret;
+ }
+ } else {
+ gpio->irq = IRQ_NOTCONNECTED;
+ }
+
+ ret = devm_gpiochip_add_data(dev, gc, gpio);
+ if (ret)
+ phydev_err(phydev, "Failed to register GPIO chip\n");
+
+ return ret;
+}
+#else
+static int mv2222_gpio_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* !CONFIG_MARVELL_88X2222_GPIO */
+
+#ifdef CONFIG_MARVELL_88X2222_I2C
+static int mv2222_i2c_init(struct mv2222_i2c *i2c)
+{
+ struct i2c_timings t;
+ u16 val;
+ int ret;
+
+ /* The I2C bus is available if the SDA/SCL pins function isn't GPIO */
+ ret = phy_read_mmd(i2c->phydev, MDIO_MMD_VEND2, MV_GPIO_INT_TYPE3);
+ if (ret < 0)
+ return ret;
+
+	if (ret & (MV_GPIO_FUNC_SDA_PIN10 | MV_GPIO_FUNC_SCL_PIN11))
+ return -EBUSY;
+
+ /* Make sure the only supported bus speed is specified */
+ i2c_parse_fw_timings(&i2c->phydev->mdio.dev, &t, true);
+ if (t.bus_freq_hz != I2C_MAX_STANDARD_MODE_FREQ)
+ return -EINVAL;
+
+	/* Disable the EEPROM caching. It'd interfere with the I2C-adapter work */
+ val = FIELD_PREP(MV_TWSI_CACHE_A0_CTRL, MV_TWSI_CACHE_NO_AUTO) |
+ FIELD_PREP(MV_TWSI_CACHE_A2_CTRL, MV_TWSI_CACHE_NO_AUTO);
+
+ ret = phy_write_mmd(i2c->phydev, MDIO_MMD_PMAPMD, MV_TWSI_CACHE_CTRL, val);
+ if (ret < 0)
+ return ret;
+
+ /* Disable the Read-back-after-write functionality */
+ ret = phy_write_mmd(i2c->phydev, MDIO_MMD_PMAPMD, MV_TWSI_MEM_WRITE_CTRL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Once again disable the auto-caching */
+ val = FIELD_PREP(MV_TWSI_CACHE_AUTO_DELAY, MV_TWSI_CACHE_AUTO_DIS);
+
+ ret = phy_write_mmd(i2c->phydev, MDIO_MMD_PMAPMD, MV_TWSI_CACHE_DELAY, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static inline unsigned long mv2222_i2c_get_udelay(char read_write)
+{
+ const unsigned long p = USEC_PER_SEC / I2C_MAX_STANDARD_MODE_FREQ;
+ unsigned long ud;
+
+	/* S, addr, Ack, cmd, Ack, data, Ack/Nack and P: 29 bits together */
+	ud = 29 * p;
+
+	/* Additional Sr, addr and Ack bits (10 more) in case of the read op */
+	if (read_write == I2C_SMBUS_READ)
+		ud += 10 * p;
+
+ return ud;
+}
+
+static inline bool mv2222_i2c_ready(int val)
+{
+	return FIELD_GET(MV_TWSI_STAT, val) == MV_TWSI_READY;
+}
+
+static inline bool mv2222_i2c_cmd_done(int val)
+{
+	return FIELD_GET(MV_TWSI_STAT, val) != MV_TWSI_CMD_IN_PROG;
+}
+
+static int mv2222_i2c_xfer(struct i2c_adapter *ia,
+ u16 addr, unsigned short flags, char read_write,
+ u8 cmd, int size, union i2c_smbus_data *data)
+{
+ struct mv2222_i2c *i2c = ia->algo_data;
+ unsigned long ud;
+ int val, ret;
+
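+	/* Only 7-bit addresses and the SMBus byte-data protocol are supported */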
+	if ((flags & I2C_CLIENT_TEN) || size != I2C_SMBUS_BYTE_DATA)
+ return -EOPNOTSUPP;
+
+ /* Make sure the interface is ready to execute the command */
+ ud = mv2222_i2c_get_udelay(I2C_SMBUS_READ);
+
+ ret = phy_read_mmd_poll_timeout(i2c->phydev, MDIO_MMD_PMAPMD,
+ MV_TWSI_MEM_READ_STAT, val,
+ mv2222_i2c_ready(val),
+ ud, MV_I2C_POLL_NUM * ud, false);
+ if (ret < 0)
+ return ret;
+
+ /* Collect the command parameters */
+ if (read_write == I2C_SMBUS_WRITE) {
+ ret = phy_write_mmd(i2c->phydev, MDIO_MMD_PMAPMD,
+ MV_TWSI_MEM_WRITE_CTRL, data->byte);
+ if (ret < 0)
+ return ret;
+
+ val = 0;
+ } else {
+ val = MV_TWSI_BYTE_READ;
+ }
+
+ val |= FIELD_PREP(MV_TWSI_BYTE_ADDR, cmd) |
+ FIELD_PREP(MV_TWSI_SLV_ADDR, addr);
+
+ ret = phy_write_mmd(i2c->phydev, MDIO_MMD_PMAPMD, MV_TWSI_MEM_ADDR, val);
+ if (ret < 0)
+ return ret;
+
+ /* Issue the command and wait until the CMD in-progress status is set */
+ ud = mv2222_i2c_get_udelay(read_write);
+
+ ret = phy_read_mmd_poll_timeout(i2c->phydev, MDIO_MMD_PMAPMD,
+ MV_TWSI_MEM_READ_STAT, val,
+ mv2222_i2c_cmd_done(val),
+ ud, MV_I2C_POLL_NUM * ud, true);
+ if (ret < 0)
+ return ret;
+
+	/* The interface may respond with the READY status at the early stage
+	 * of the SFP-module attachment. In that case ask the caller to retry.
+	 */
+ switch (FIELD_GET(MV_TWSI_STAT, val)) {
+ case MV_TWSI_CMD_FAIL:
+ return -EIO;
+ case MV_TWSI_BUSY:
+ return -ENXIO;
+ case MV_TWSI_READY:
+ return -EAGAIN;
+ }
+
+ if (read_write == I2C_SMBUS_READ)
+ data->byte = FIELD_GET(MV_TWSI_MEM_READ_DATA, val);
+
+ return 0;
+}
+
+static u32 mv2222_i2c_func(struct i2c_adapter *ia)
+{
+ return I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static const struct i2c_algorithm mv2222_i2c_algo = {
+ .smbus_xfer = mv2222_i2c_xfer,
+ .functionality = mv2222_i2c_func,
+};
+
+static int mv2222_i2c_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct mv2222_i2c *i2c;
+ struct i2c_adapter *ia;
+ int ret;
+
+ /*
+	 * Marvell 88x2222 I2C CSRs are tolerant to the soft-resets. Alas, the
+	 * same can't be said about the hard-resets. Those halt any
+	 * communications with possibly no error reported. So it isn't safe
+	 * to use the interface if a sudden hard-reset might be performed.
+ */
+ if (phydev->mdio.reset_gpio || phydev->mdio.reset_ctrl) {
+ phydev_warn(phydev, "Hard-reset detected, I2C unsupported\n");
+ return 0;
+ }
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ i2c->phydev = phydev;
+
+	/* Make sure the I2C-interface is ready before proceeding */
+ ret = mv2222_i2c_init(i2c);
+ if (ret == -EBUSY) {
+ phydev_warn(phydev, "I2C interface unavailable\n");
+ return 0;
+ } else if (ret) {
+ phydev_err(phydev, "Failed to init I2C-interface\n");
+ return ret;
+ }
+
+ ia = &i2c->ia;
+ strlcpy(ia->name, "mv88x2222", sizeof(ia->name));
+ ia->dev.parent = dev;
+ ia->owner = THIS_MODULE;
+ ia->algo = &mv2222_i2c_algo;
+ ia->algo_data = i2c;
+
+ ret = devm_i2c_add_adapter(dev, ia);
+ if (ret)
+ phydev_err(phydev, "Failed to register I2C adapter\n");
+
+ return ret;
+}
+#else
+static int mv2222_i2c_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* !CONFIG_MARVELL_88X2222_I2C */
+
+static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct phy_device *phydev = upstream;
+ phy_interface_t sfp_interface;
+ struct mv2222_data *priv;
+ int ret;
+
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_supported) = { 0, };
+
+ priv = (struct mv2222_data *)phydev->priv;
+
+ sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces);
+ phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
+ sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
+
+ phydev_info(phydev, "%s SFP module inserted\n", phy_modes(sfp_interface));
+
+ if (sfp_interface != PHY_INTERFACE_MODE_10GBASER &&
+ sfp_interface != PHY_INTERFACE_MODE_1000BASEX &&
+ sfp_interface != PHY_INTERFACE_MODE_SGMII) {
+ phydev_err(phydev, "Incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ priv->line_interface = sfp_interface;
+ linkmode_and(priv->supported, phydev->supported, sfp_supported);
+
+ ret = mv2222_config_line(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (mutex_trylock(&phydev->lock)) {
+ ret = mv2222_config_aneg(phydev);
+ mutex_unlock(&phydev->lock);
+ }
+
+ return ret;
+}
+
+static void mv2222_sfp_remove(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct mv2222_data *priv;
+
+ priv = (struct mv2222_data *)phydev->priv;
+
+ priv->line_interface = PHY_INTERFACE_MODE_NA;
+ linkmode_zero(priv->supported);
+ phydev->port = PORT_NONE;
+}
+
+static void mv2222_sfp_link_up(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct mv2222_data *priv;
+
+ priv = phydev->priv;
+ priv->sfp_link = true;
+}
+
+static void mv2222_sfp_link_down(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct mv2222_data *priv;
+
+ priv = phydev->priv;
+ priv->sfp_link = false;
+}
+
+static const struct sfp_upstream_ops sfp_phy_ops = {
+ .module_insert = mv2222_sfp_insert,
+ .module_remove = mv2222_sfp_remove,
+ .link_up = mv2222_sfp_link_up,
+ .link_down = mv2222_sfp_link_down,
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+};
+
+static int mv2222_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct mv2222_data *priv = NULL;
+ int ret;
+
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported);
+
+ linkmode_copy(phydev->supported, supported);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->line_interface = PHY_INTERFACE_MODE_NA;
+ phydev->priv = priv;
+
+ ret = mv2222_gpio_probe(phydev);
+ if (ret)
+ return ret;
+
+ ret = mv2222_i2c_probe(phydev);
+ if (ret)
+ return ret;
+
+ return phy_sfp_probe(phydev, &sfp_phy_ops);
+}
+
+static struct phy_driver mv2222_drivers[] = {
+ {
+ .phy_id = MARVELL_PHY_ID_88X2222,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88X2222",
+ .get_features = mv2222_get_features,
+ .soft_reset = mv2222_soft_reset,
+ .config_init = mv2222_config_init,
+ .config_aneg = mv2222_config_aneg,
+ .aneg_done = mv2222_aneg_done,
+ .probe = mv2222_probe,
+ .suspend = mv2222_suspend,
+ .resume = mv2222_resume,
+ .read_status = mv2222_read_status,
+ .config_intr = mv2222_config_intr,
+ .handle_interrupt = mv2222_handle_interrupt,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88X2222R,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88X2222R",
.get_features = mv2222_get_features,
.soft_reset = mv2222_soft_reset,
.config_init = mv2222_config_init,
.suspend = mv2222_suspend,
.resume = mv2222_resume,
.read_status = mv2222_read_status,
+ .config_intr = mv2222_config_intr,
+ .handle_interrupt = mv2222_handle_interrupt,
},
};
module_phy_driver(mv2222_drivers);
static struct mdio_device_id __maybe_unused mv2222_tbl[] = {
{ MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88X2222R, MARVELL_PHY_ID_MASK },
{ }
};
MODULE_DEVICE_TABLE(mdio, mv2222_tbl);
case PHY_INTERFACE_MODE_SMII:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GBASEX:
case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_25GBASER:
return SPEED_5000;
case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_10GBASEX:
case PHY_INTERFACE_MODE_RXAUI:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_10GBASER:
break;
case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_10GBASEX:
case PHY_INTERFACE_MODE_RXAUI:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_10GBASER:
phylink_set(pl->supported, 25000baseKR_Full);
phylink_set(pl->supported, 25000baseSR_Full);
fallthrough;
+ case PHY_INTERFACE_MODE_XGMII:
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10GBASEX:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
phylink_set(pl->supported, 10baseT_Half);
state->pause = pl->link_config.pause;
}
-static void phylink_resolve_flow(struct phylink_link_state *state)
+static void phylink_resolve_an_pause(struct phylink_link_state *state)
{
bool tx_pause, rx_pause;
- state->pause = MLO_PAUSE_NONE;
if (state->duplex == DUPLEX_FULL) {
linkmode_resolve_pause(state->advertising,
state->lp_advertising,
else if (pl->link_gpio)
state->link = !!gpiod_get_value_cansleep(pl->link_gpio);
- phylink_resolve_flow(state);
+ state->pause = MLO_PAUSE_NONE;
+ phylink_resolve_an_pause(state);
}
static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
/* Helpers for MAC drivers */
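+/*
+ * 802.3 clause 73 (backplane auto-negotiation) priority resolution table,
+ * ordered from the highest-priority (fastest) technology down to the
+ * lowest one. The first mode advertised by both link partners wins.
+ */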
+static struct {
+ int bit;
+ int speed;
+} phylink_c73_priority_resolution[] = {
+ { ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, SPEED_100000 },
+ { ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, SPEED_100000 },
+ /* 100GBASE-KP4 and 100GBASE-CR10 not supported */
+ { ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, SPEED_40000 },
+ { ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, SPEED_40000 },
+ { ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, SPEED_10000 },
+ { ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, SPEED_10000 },
+ /* 5GBASE-KR not supported */
+ { ETHTOOL_LINK_MODE_2500baseX_Full_BIT, SPEED_2500 },
+ { ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, SPEED_1000 },
+};
+
+void phylink_resolve_c73(struct phylink_link_state *state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_c73_priority_resolution); i++) {
+		int bit = phylink_c73_priority_resolution[i].bit;
+
+		if (linkmode_test_bit(bit, state->advertising) &&
+ linkmode_test_bit(bit, state->lp_advertising))
+ break;
+ }
+
+ if (i < ARRAY_SIZE(phylink_c73_priority_resolution)) {
+ state->speed = phylink_c73_priority_resolution[i].speed;
+ state->duplex = DUPLEX_FULL;
+ } else {
+ /* negotiation failure */
+ state->link = false;
+ }
+
+ phylink_resolve_an_pause(state);
+}
+EXPORT_SYMBOL_GPL(phylink_resolve_c73);
+
static void phylink_decode_c37_word(struct phylink_link_state *state,
uint16_t config_reg, int speed)
{
- bool tx_pause, rx_pause;
int fd_bit;
if (speed == SPEED_2500)
state->link = false;
}
- linkmode_resolve_pause(state->advertising, state->lp_advertising,
- &tx_pause, &rx_pause);
-
- if (tx_pause)
- state->pause |= MLO_PAUSE_TX;
- if (rx_pause)
- state->pause |= MLO_PAUSE_RX;
+ phylink_resolve_an_pause(state);
}
static void phylink_decode_sgmii_word(struct phylink_link_state *state,
return;
switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GBASEX:
case PHY_INTERFACE_MODE_10GBASER:
state->speed = SPEED_10000;
state->duplex = DUPLEX_FULL;
return ret;
}
+static int rtl8211e_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+ u16 val)
+{
+ int ret;
+
+ /* Write to the MMD registers by using the standard control/data pair.
+ * The only difference is that we need to perform a dummy read after
+ * the PC1R.CLKSTOP_EN bit is set. It's required to workaround an issue
+ * of a partial core freeze so LED2 stops blinking in EEE mode, PHY
+ * stops detecting the link change and raising IRQs until any read from
+ * its registers performed. That happens only if and right after the PHY
+ * is enabled to stop RXC in LPI mode.
+ */
+ ret = __phy_write(phydev, MII_MMD_CTRL, devnum);
+ if (ret)
+ return ret;
+
+ ret = __phy_write(phydev, MII_MMD_DATA, regnum);
+ if (ret)
+ return ret;
+
+ ret = __phy_write(phydev, MII_MMD_CTRL, devnum | MII_MMD_CTRL_NOINCR);
+ if (ret)
+ return ret;
+
+ ret = __phy_write(phydev, MII_MMD_DATA, val);
+ if (ret)
+ return ret;
+
+ if (devnum == MDIO_MMD_PCS && regnum == MDIO_CTRL1 &&
+ val & MDIO_PCS_CTRL1_CLKSTOP_EN)
+ ret = __phy_read(phydev, MII_MMD_DATA);
+
+ return ret < 0 ? ret : 0;
+}
+
static int rtl822x_get_features(struct phy_device *phydev)
{
int val;
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ .write_mmd = rtl8211e_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
struct sfp {
struct device *dev;
struct i2c_adapter *i2c;
+ struct i2c_client *i2c_a0;
+ struct i2c_client *i2c_a2;
struct mii_bus *i2c_mii;
struct sfp_bus *sfp_bus;
enum mdio_i2c_proto mdio_protocol;
static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
size_t len)
{
- struct i2c_msg msgs[2];
- u8 bus_addr = a2 ? 0x51 : 0x50;
+ struct i2c_client *ee = a2 ? sfp->i2c_a2 : sfp->i2c_a0;
size_t block_size = sfp->i2c_block_size;
- size_t this_len;
+ size_t this_len, i = 0;
int ret;
- msgs[0].addr = bus_addr;
- msgs[0].flags = 0;
- msgs[0].len = 1;
- msgs[0].buf = &dev_addr;
- msgs[1].addr = bus_addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = buf;
-	while (len) {
-		this_len = len;
-		if (this_len > block_size)
-			this_len = block_size;
-
-		msgs[1].len = this_len;
-
-		ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
+	while (i < len) {
+		this_len = min(len - i, block_size);
+
+		ret = i2c_smbus_read_i2c_block_data_or_emulated(ee, dev_addr + i,
+								this_len, buf + i);
if (ret < 0)
return ret;
- if (ret != ARRAY_SIZE(msgs))
- break;
-
- msgs[1].buf += this_len;
- dev_addr += this_len;
- len -= this_len;
+ i += ret;
}
- return msgs[1].buf - (u8 *)buf;
+ return i;
}
static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
- size_t len)
+ size_t len)
{
-	struct i2c_msg msgs[1];
-	u8 bus_addr = a2 ? 0x51 : 0x50;
-	int ret;
-
-	msgs[0].addr = bus_addr;
-	msgs[0].flags = 0;
-	msgs[0].len = 1 + len;
-	msgs[0].buf = kmalloc(1 + len, GFP_KERNEL);
-	if (!msgs[0].buf)
-		return -ENOMEM;
-
-	msgs[0].buf[0] = dev_addr;
-	memcpy(&msgs[0].buf[1], buf, len);
-
-	ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
-
-	kfree(msgs[0].buf);
-
-	if (ret < 0)
-		return ret;
-
-	return ret == ARRAY_SIZE(msgs) ? len : 0;
+	struct i2c_client *ee = a2 ? sfp->i2c_a2 : sfp->i2c_a0;
+
+	return i2c_smbus_write_i2c_block_data_or_emulated(ee, dev_addr, len, buf);
}
static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
{
- if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
+ if (!i2c_check_functionality(i2c, I2C_FUNC_I2C) &&
+ !i2c_check_functionality(i2c, I2C_FUNC_SMBUS_I2C_BLOCK) &&
+ !i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA))
return -EINVAL;
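+	/* SFP EEPROMs live at the standard 0x50/0x51 I2C bus addresses */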
+ sfp->i2c_a0 = devm_i2c_new_dummy_device(sfp->dev, i2c, 0x50);
+ if (IS_ERR(sfp->i2c_a0))
+ return PTR_ERR(sfp->i2c_a0);
+
+ sfp->i2c_a2 = devm_i2c_new_dummy_device(sfp->dev, i2c, 0x51);
+ if (IS_ERR(sfp->i2c_a2))
+ return PTR_ERR(sfp->i2c_a2);
+
sfp->i2c = i2c;
sfp->read = sfp_i2c_read;
sfp->write = sfp_i2c_write;
.info = sfp_hwmon_info,
};
+static bool sfp_hwmon_access_is_coherent(struct sfp *sfp)
+{
+ return sfp->i2c_block_size >= 2 &&
+ (i2c_check_functionality(sfp->i2c, I2C_FUNC_I2C) ||
+ i2c_check_functionality(sfp->i2c, I2C_FUNC_SMBUS_I2C_BLOCK) ||
+ i2c_check_functionality(sfp->i2c, I2C_FUNC_SMBUS_WORD_DATA));
+}
+
static void sfp_hwmon_probe(struct work_struct *work)
{
struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
/* hwmon interface needs to access 16bit registers in atomic way to
* guarantee coherency of the diagnostic monitoring data. If it is not
- * possible to guarantee coherency because EEPROM is broken in such way
- * that does not support atomic 16bit read operation then we have to
- * skip registration of hwmon device.
+	 * possible to guarantee the coherency (because of a broken EEPROM or
+	 * an incapable I2C-controller), we have to skip the registration of
+	 * the hwmon device.
*/
- if (sfp->i2c_block_size < 2) {
- dev_info(sfp->dev,
- "skipping hwmon device registration due to broken EEPROM\n");
+ if (!sfp_hwmon_access_is_coherent(sfp)) {
dev_info(sfp->dev,
- "diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n");
+			 "skipping hwmon device registration due to an incapable interface\n");
return;
}
*/
static void pci_clip_resource_to_region(struct pci_bus *bus,
struct resource *res,
- struct pci_bus_region *region)
+ struct pci_bus_region *region,
+ resource_size_t *align)
{
struct pci_bus_region r;
+ resource_size_t new_align, offset;
pcibios_resource_to_bus(bus, &r, res);
+
+ offset = res->start - r.start;
+	if ((offset & (*align - 1)) && (r.start & (*align - 1)) == 0) {
+ /*
+ * a) CPU address (resource) differs from PCI bus address
+ * (pci_bus_region), i.e. address translation is in effect;
+ * b) PCI bus address is aligned as required;
+ * c) CPU address is not aligned.
+ * So, we can relax alignment requirement for CPU address.
+ */
+ new_align = 1 << __ffs(offset);
+ dev_info(&bus->dev,
+ "pci_clip_resource_to_region: relaxing alignment from %pa to %pa\n",
+ align, &new_align);
+ *align = new_align;
+ }
+
if (r.start < region->start)
r.start = region->start;
if (r.end > region->end)
pci_bus_for_each_resource(bus, r, i) {
resource_size_t min_used = min;
+ resource_size_t res_align = align;
if (!r)
continue;
continue;
avail = *r;
- pci_clip_resource_to_region(bus, &avail, region);
+ pci_clip_resource_to_region(bus, &avail, region, &res_align);
/*
* "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
/* Ok, try it out.. */
ret = allocate_resource(r, res, size, min_used, max,
- align, alignf, alignf_data);
+ res_align, alignf, alignf_data);
if (ret == 0)
return 0;
}
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
+config PCIE_BT1
+ tristate "Baikal-T1 PCIe controller"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Baikal-T1 SoC to work
+ in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
+
config PCIE_ROCKCHIP_DW_HOST
bool "Rockchip DesignWare PCIe controller"
select PCIE_DW
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Vadim Vlasov <Vadim.Vlasov@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 PCIe controller driver
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* Baikal-T1 System CCU control registers */
+#define BT1_CCU_PCIE_CLKC 0x140
+#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16)
+#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17)
+#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18)
+
+#define BT1_CCU_PCIE_RSTC 0x144
+#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13)
+#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14)
+#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16)
+#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24)
+#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26)
+#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27)
+
+#define BT1_CCU_PCIE_PMSC 0x148
+#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00
+#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01
+#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02
+#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03
+#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04
+#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05
+#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a
+#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b
+#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c
+#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d
+#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e
+#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f
+#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10
+#define BT1_CCU_PCIE_LTSSM_L0 0x11
+#define BT1_CCU_PCIE_LTSSM_L0S 0x12
+#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13
+#define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14
+#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15
+#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16
+#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17
+#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18
+#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19
+#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a
+#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d
+#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e
+#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23
+#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6)
+#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10)
+#define BT1_CCU_PCIE_L1_PENDING BIT(12)
+#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14)
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15)
+#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16)
+#define BT1_CCU_PCIE_PM_PME_EN BIT(20)
+#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21)
+#define BT1_CCU_PCIE_AUX_PM_EN BIT(22)
+#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23)
+#define BT1_CCU_PCIE_WAKE_DET BIT(24)
+#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30)
+#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31)
+
+#define BT1_CCU_PCIE_GENC 0x14c
+#define BT1_CCU_PCIE_LTSSM_EN BIT(1)
+#define BT1_CCU_PCIE_DBI2_MODE BIT(2)
+#define BT1_CCU_PCIE_MGMT_EN BIT(3)
+#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16)
+#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17)
+#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24)
+#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25)
+#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26)
+#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27)
+
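+/* The link is up when the LTSSM is in any state from L0 to L2-wake */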
+#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \
+({ \
+ int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \
+ __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \
+})
+
+/* Baikal-T1 PCIe specific control registers */
+#define BT1_PCIE_AXI2MGM_LANENUM 0xd04
+#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0)
+
+#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08
+#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0)
+#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29)
+#define BT1_PCIE_AXI2MGM_DONE BIT(30)
+#define BT1_PCIE_AXI2MGM_BUSY BIT(31)
+
+#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c
+#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0)
+
+#define BT1_PCIE_AXI2MGM_READDATA 0xd10
+#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0)
+
+/* Generic Baikal-T1 PCIe interface resources */
+#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks)
+#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks)
+#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts)
+#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts)
+
+/* PCIe bus setup delays and timeouts */
+#define BT1_PCIE_RST_DELAY_US 100
+#define BT1_PCIE_RST_DELAY_MS 100
+#define BT1_PCIE_RUN_DELAY_US 100
+#define BT1_PCIE_REQ_DELAY_US 1
+#define BT1_PCIE_REQ_TIMEOUT_US 1000
+#define BT1_PCIE_LNK_DELAY_US 1000
+#define BT1_PCIE_LNK_TIMEOUT_US 1000000
+
+static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = {
+ DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK,
+};
+
+static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = {
+ DW_PCIE_REF_CLK,
+};
+
+static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = {
+ DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST,
+};
+
+static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = {
+ DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST,
+};
+
+struct bt1_pcie {
+ struct dw_pcie dw;
+ struct platform_device *pdev;
+ struct regmap *sys_regs;
+};
+#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw)
+
+/*
+ * Baikal-T1 MMIO space must be read/written by dword-aligned instructions.
+ * Note the methods below are optimized to perform the dword accesses with
+ * the minimum overhead since these are the most frequently used ones.
+ */
+static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
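+	/* Read the containing dword and shift the requested bytes into place */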
+ *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE;
+ if (size == 4) {
+ return 0;
+ } else if (size == 2) {
+ *val &= 0xffff;
+ return 0;
+ } else if (size == 1) {
+ *val &= 0xff;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+ u32 tmp, mask;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
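+	/* Sub-dword writes are emulated with a read-modify-write of the dword */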
+ if (size == 4) {
+ writel(val, addr);
+ return 0;
+ } else if (size == 2 || size == 1) {
+ mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
+ tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE);
+ tmp |= (val & mask) << ofs * BITS_PER_BYTE;
+ writel(tmp, addr - ofs);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
+{
+ int ret;
+ u32 val;
+
+ ret = bt1_pcie_read_mmio(base + reg, size, &val);
+ if (ret) {
+ dev_err(pci->dev, "Read DBI address failed\n");
+ return ~0U;
+ }
+
+ return val;
+}
+
+static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ int ret;
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI address failed\n");
+}
+
+static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
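+	/* Route the upcoming IO to the shadow (DBI2/CS2) registers */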
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE);
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI2 address failed\n");
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, 0);
+}
+
+static int bt1_pcie_link_up(struct dw_pcie *pci)
+{
+	struct bt1_pcie *btpci = to_bt1_pcie(pci);
+	u32 val;
+
+	regmap_read(btpci->sys_regs, BT1_CCU_PCIE_PMSC, &val);
+
+	return (val & BT1_CCU_PCIE_SMLH_LINKUP) &&
+	       (val & BT1_CCU_PCIE_RDLH_LINKUP);
+}
+
+static int bt1_pcie_start_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ u32 val;
+ int ret;
+
+ /*
+ * Enable LTSSM and make sure it was able to establish both PHY and
+ * data links. This procedure shall work fine to reach 2.5 GT/s speed.
+ */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_SMLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set PHY link up\n");
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_RDLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set data link up\n");
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ BT1_CCU_PCIE_LTSSM_LINKUP(val),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to get into L0 state\n");
+ return ret;
+ }
+
+ /*
+ * Wait for the speed change enabled by default and by the DW PCIe core
+ * driver to be finished.
+ */
+ ret = read_poll_timeout(dw_pcie_readl_dbi, val, !(val & PORT_LOGIC_SPEED_CHANGE),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US, false,
+ pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ if (ret) {
+		dev_err(pci->dev, "Initial speed change hung up\n");
+ return ret;
+ }
+
+ /*
+ * Activate the direct speed change one more time after the link is
+ * established in order to reach a higher bus performance. This is
+ * required at least to get 8.0 GT/s speed.
+ */
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ ret = read_poll_timeout(dw_pcie_readl_dbi, val, !(val & PORT_LOGIC_SPEED_CHANGE),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US, false,
+ pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ if (ret) {
+		dev_err(pci->dev, "Speed change hung up\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bt1_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+}
+
+static const struct dw_pcie_ops bt1_pcie_ops = {
+ .read_dbi = bt1_pcie_read_dbi,
+ .write_dbi = bt1_pcie_write_dbi,
+ .write_dbi2 = bt1_pcie_write_dbi2,
+ .start_link = bt1_pcie_start_link,
+ .stop_link = bt1_pcie_stop_link,
+};
+
+static struct pci_ops bt1_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static int bt1_pcie_get_resources(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ int i;
+
+ /* DBI access is supposed to be performed by the dword-aligned IOs */
+ btpci->dw.pp.bridge->ops = &bt1_pci_ops;
+
+	/* These CSRs are MMIO-mapped, so the regmap-methods status isn't checked */
+ btpci->sys_regs =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon");
+ if (IS_ERR(btpci->sys_regs))
+ return dev_err_probe(dev, PTR_ERR(btpci->sys_regs),
+ "Failed to get syscon\n");
+
+ /* Make sure all the required resources have been specified */
+ for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) {
+ if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) {
+ dev_err(dev, "App clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) {
+ if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) {
+ dev_err(dev, "Core clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) {
+ if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) {
+ dev_err(dev, "App resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) {
+ if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) {
+ dev_err(dev, "Core resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ int ret;
+
+ /* Make sure LTSSM and DBI CS2 are disabled by default */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE | BT1_CCU_PCIE_LTSSM_EN, 0);
+
+	/*
+	 * If a link setup has already been attempted, the PCIe M/S AXI
+	 * clocks _must_ be enabled for the application resets to succeed.
+	 */
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ if (ret)
+ dev_err(dev, "Failed to enable APP clocks\n");
+
+ ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
+ if (ret)
+ dev_err(dev, "Failed to reset APP domains\n");
+
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+
+	/* The PCIe core resets can now be safely asserted */
+ ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts);
+ if (ret)
+ dev_err(dev, "Failed to assert core resets\n");
+
+	/* Wait a bit for the resets to take effect */
+ udelay(BT1_PCIE_RST_DELAY_US);
+
+	/*
+	 * On the init stage the clocks are expected to be already disabled
+	 * (at least judging by the clock enable counter value), so skip
+	 * disabling them in that case.
+	 */
+ if (!init)
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+	/*
+	 * The application domains _must_ be reset one more time so the
+	 * further initializations don't freeze and are performed from
+	 * scratch.
+	 */
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ if (ret)
+ dev_err(dev, "Failed to post-enable APP clocks\n");
+
+ ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
+ if (ret)
+ dev_err(dev, "Failed to post-reset APP domains\n");
+
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+
+ /* The peripheral devices are unavailable anyway so reset them too */
+ gpiod_set_value_cansleep(pci->pe_rst, 1);
+
+ /* Make sure all the resets are settled */
+ msleep(BT1_PCIE_RST_DELAY_MS);
+}
+
+/*
+ * Implements the cold reset procedure in accordance with the reference manual
+ * and available PM signals.
+ */
+static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ u32 val;
+ int ret;
+
+ /* First get out of the Power/Hot reset state */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert pwr reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert hot reset\n");
+ goto err_assert_pwr_rst;
+ }
+
+	/* Clocks can now be enabled, but the ref one is crucial at this stage */
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable app clocks\n");
+ goto err_assert_hot_rst;
+ }
+
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable ref clocks\n");
+ goto err_disable_app_clk;
+ }
+
+ /* Wait for the PM-core to stop requesting the PHY reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_PHY_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n");
+ goto err_disable_core_clk;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PHY reset\n");
+ goto err_disable_core_clk;
+ }
+
+ /* Wait for the PM to stop requesting the controller core reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_CORE_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop core resetting\n");
+ goto err_assert_phy_rst;
+ }
+
+ /* Core and PCS-PIPE interface can be now activated */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core reset\n");
+ goto err_assert_phy_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PIPE reset\n");
+ goto err_assert_core_rst;
+ }
+
+ /* Sticky/Non-sticky CSR flags can be now unreset too */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert sticky reset\n");
+ goto err_assert_pipe_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert non-sticky reset\n");
+ goto err_assert_sticky_rst;
+ }
+
+ /* Activate the PCIe bus peripheral devices */
+ gpiod_set_value_cansleep(pci->pe_rst, 0);
+
+ /* Make sure the state is settled (LTSSM is still disabled though) */
+ usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100);
+
+ return 0;
+
+err_assert_sticky_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+
+err_assert_pipe_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+
+err_assert_core_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+
+err_assert_phy_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+
+err_disable_core_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+err_disable_app_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+
+err_assert_hot_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+
+err_assert_pwr_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+
+ return ret;
+}
+
+static int bt1_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
+ ret = bt1_pcie_get_resources(btpci);
+ if (ret)
+ return ret;
+
+ /* Don't stop bus if it has already been set up by firmware */
+ if (!bt1_pcie_link_up(pci))
+ bt1_pcie_full_stop_bus(btpci, true);
+
+ /*
+ * Perform bus start procedure in any case at least to re-initialize
+ * the kernel clock handlers.
+ */
+ return bt1_pcie_cold_start_bus(btpci);
+}
+
+static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ bt1_pcie_full_stop_bus(btpci, false);
+}
+
+static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
+ .host_init = bt1_pcie_host_init,
+ .host_deinit = bt1_pcie_host_deinit,
+};
+
+static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL);
+ if (!btpci)
+ return ERR_PTR(-ENOMEM);
+
+ btpci->pdev = pdev;
+
+ platform_set_drvdata(pdev, btpci);
+
+ return btpci;
+}
+
+static int bt1_pcie_add_port(struct bt1_pcie *btpci)
+{
+ struct device *dev = &btpci->pdev->dev;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ btpci->dw.version = DW_PCIE_VER_460A;
+ btpci->dw.dev = dev;
+ btpci->dw.ops = &bt1_pcie_ops;
+
+ btpci->dw.pp.num_vectors = MAX_MSI_IRQS;
+ btpci->dw.pp.ops = &bt1_pcie_host_ops;
+
+ dw_pcie_cap_set(&btpci->dw, REQ_RES);
+
+ ret = dw_pcie_host_init(&btpci->dw.pp);
+ if (ret)
+ dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n");
+
+ return ret;
+}
+
+static void bt1_pcie_del_port(struct bt1_pcie *btpci)
+{
+ dw_pcie_host_deinit(&btpci->dw.pp);
+}
+
+static int bt1_pcie_probe(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = bt1_pcie_create_data(pdev);
+ if (IS_ERR(btpci))
+ return PTR_ERR(btpci);
+
+ return bt1_pcie_add_port(btpci);
+}
+
+static int bt1_pcie_remove(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci = platform_get_drvdata(pdev);
+
+ bt1_pcie_del_port(btpci);
+
+ return 0;
+}
+
+static const struct of_device_id bt1_pcie_of_match[] = {
+ { .compatible = "baikal,bt1-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
+
+static struct platform_driver bt1_pcie_driver = {
+ .probe = bt1_pcie_probe,
+ .remove = bt1_pcie_remove,
+ .driver = {
+ .name = "bt1-pcie",
+ .of_match_table = bt1_pcie_of_match,
+ },
+};
+module_platform_driver(bt1_pcie_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 PCIe driver");
+MODULE_LICENSE("GPL");
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
-#include "../../pci.h"
-
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
+ cpu_addr, bar);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
+ dw_pcie_edma_remove(pci);
+
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
INIT_LIST_HEAD(&ep->func_list);
- if (!pci->dbi_base) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
- }
-
- if (!pci->dbi_base2) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- if (!res) {
- pci->dbi_base2 = pci->dbi_base + SZ_4K;
- } else {
- pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
- }
- }
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -ENOMEM;
ep->outbound_addr = addr;
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
-
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
goto err_exit_epc_mem;
}
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ goto err_free_epc_mem;
+
if (ep->ops->get_features) {
epc_features = ep->ops->get_features(ep);
if (epc_features->core_init_notifier)
ret = dw_pcie_ep_init_complete(ep);
if (ret)
- goto err_free_epc_mem;
+ goto err_remove_edma;
return 0;
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
err_free_epc_mem:
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
-#include "../../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
dw_chained_msi_isr, pp);
}
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	/*
+	 * Even though the iMSI-RX Module supports 64-bit addresses, some
+	 * peripheral PCIe devices may lack 64-bit message support. In order
+	 * not to miss MSI TLPs from those devices, the MSI target address
+	 * has to be reserved within the lowest 4GB.
+	 *
+	 * Note, until a better alternative is found, the reservation is done
+	 * by allocating from the artificially limited DMA-coherent memory.
+	 */
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret)
dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
raw_spin_lock_init(&pp->lock);
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (res) {
pp->cfg0_size = resource_size(res);
return -ENODEV;
}
- if (!pci->dbi_base) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
- }
-
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
pp->io_base = pci_pio_to_address(win->res->start);
}
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
-
/* Set default bus ops */
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
dw_pcie_iatu_detect(pci);
- ret = dw_pcie_setup_rc(pp);
+ ret = dw_pcie_edma_detect(pci);
if (ret)
goto err_free_msi;
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_remove_edma;
+
if (!dw_pcie_link_up(pci)) {
ret = dw_pcie_start_link(pci);
if (ret)
- goto err_free_msi;
+ goto err_remove_edma;
}
/* Ignore errors, the link may come up later */
err_stop_link:
dw_pcie_stop_link(pci);
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
err_free_msi:
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
dw_pcie_stop_link(pci);
+ dw_pcie_edma_remove(pci);
+
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
}
/*
- * Ensure all outbound windows are disabled before proceeding with
- * the MEM/IO ranges setups.
+ * Ensure all out/inbound windows are disabled before proceeding with
+ * the MEM/IO (dma-)ranges setups.
*/
for (i = 0; i < pci->num_ob_windows; i++)
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+ for (i = 0; i < pci->num_ib_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);
+
i = 0;
resource_list_for_each_entry(entry, &pp->bridge->windows) {
if (resource_type(entry->res) != IORESOURCE_MEM)
}
if (pci->num_ob_windows <= i)
- dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+ dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
pci->num_ob_windows);
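+	/*
+	 * Program an inbound iATU window for each specified dma-range so
+	 * that the upstream memory accesses get translated to the
+	 * corresponding CPU addresses.
+	 */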
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ib_windows <= i)
+ break;
+
+ ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set DMA range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pci->num_ib_windows <= i)
+ dev_warn(pci->dev, "DMA-ranges exceed inbound iATU size (%u)\n",
+ pci->num_ib_windows);
+
return 0;
}
#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma/edma.h>
+#include <linux/gpio/consumer.h>
+#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/sizes.h>
#include "../../pci.h"
#include "pcie-designware.h"
+static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = {
+ [DW_PCIE_DBI_CLK] = "dbi",
+ [DW_PCIE_MSTR_CLK] = "mstr",
+ [DW_PCIE_SLV_CLK] = "slv",
+};
+
+static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = {
+ [DW_PCIE_PIPE_CLK] = "pipe",
+ [DW_PCIE_CORE_CLK] = "core",
+ [DW_PCIE_AUX_CLK] = "aux",
+ [DW_PCIE_REF_CLK] = "ref",
+};
+
+static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = {
+ [DW_PCIE_DBI_RST] = "dbi",
+ [DW_PCIE_MSTR_RST] = "mstr",
+ [DW_PCIE_SLV_RST] = "slv",
+};
+
+static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
+ [DW_PCIE_NON_STICKY_RST] = "non-sticky",
+ [DW_PCIE_STICKY_RST] = "sticky",
+ [DW_PCIE_CORE_RST] = "core",
+ [DW_PCIE_PIPE_RST] = "pipe",
+ [DW_PCIE_PHY_RST] = "phy",
+ [DW_PCIE_HOT_RST] = "hot",
+ [DW_PCIE_PWR_RST] = "pwr",
+};
+
+static int dw_pcie_get_clocks(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++)
+ pci->app_clks[i].id = dw_pcie_app_clks[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++)
+ pci->core_clks[i].id = dw_pcie_core_clks[i];
+
+ ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS,
+ pci->app_clks);
+ if (ret)
+ return ret;
+
+ return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS,
+ pci->core_clks);
+}
+
+static int dw_pcie_get_resets(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++)
+ pci->app_rsts[i].id = dw_pcie_app_rsts[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++)
+ pci->core_rsts[i].id = dw_pcie_core_rsts[i];
+
+ ret = devm_reset_control_bulk_get_optional_shared(pci->dev,
+ DW_PCIE_NUM_APP_RSTS,
+ pci->app_rsts);
+ if (ret)
+ return ret;
+
+ ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev,
+ DW_PCIE_NUM_CORE_RSTS,
+ pci->core_rsts);
+ if (ret)
+ return ret;
+
+ pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pci->pe_rst))
+ return PTR_ERR(pci->pe_rst);
+
+ return 0;
+}
+
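Note the bulk descriptors are merely requested here; their state is still owned by the glue driver. A hedged sketch of the enable side using the generic bulk helpers follows; example_pcie_enable is a made-up name, and real hardware typically imposes a stricter ordering between the individual clocks and resets.

static int example_pcie_enable(struct dw_pcie *pci)
{
	int ret;

	/* Release the application resets before clocking the interface */
	ret = reset_control_bulk_deassert(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
	if (ret)
		reset_control_bulk_assert(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);

	return ret;
}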
+int dw_pcie_get_resources(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ struct device_node *np = dev_of_node(pci->dev);
+ struct resource *res;
+ int ret;
+
+ if (!pci->dbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ /* DBI2 is mainly useful for the endpoint controller */
+ if (!pci->dbi_base2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ if (res) {
+ pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+ } else {
+ pci->dbi_base2 = pci->dbi_base + SZ_4K;
+ }
+ }
+
+ /* For non-unrolled iATU/eDMA platforms this range will be ignored */
+ if (!pci->atu_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res) {
+ pci->atu_size = resource_size(res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+ } else {
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ }
+
+ /* Set a default value suitable for at most 8 in and 8 out windows */
+ if (!pci->atu_size)
+ pci->atu_size = SZ_4K;
+
+ /* eDMA region can be mapped to a custom base address */
+ if (!pci->edma.reg_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+ if (res) {
+ pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->edma.reg_base))
+ return PTR_ERR(pci->edma.reg_base);
+ } else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
+ pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
+ }
+ }
+
+	/* The LLDD is supposed to switch the clocks and resets state manually */
+ if (dw_pcie_cap_is(pci, REQ_RES)) {
+ ret = dw_pcie_get_clocks(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_get_resets(pci);
+ if (ret)
+ return ret;
+ }
+
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
+
+ of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+
+ if (of_property_read_bool(np, "snps,enable-cdm-check"))
+ dw_pcie_cap_set(pci, CDM_CHECK);
+
+ return 0;
+}
+
void dw_pcie_version_detect(struct dw_pcie *pci)
{
u32 ver;
static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
u32 index)
{
- if (pci->iatu_unroll_enabled)
+ if (dw_pcie_cap_is(pci, IATU_UNROLL))
return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
+{
+ u64 limit_addr = pci_addr + size - 1;
+ u32 retries, val;
+
+ if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(pci_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
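+	/*
+	 * If the limit address crosses a 4GB boundary, the region can only
+	 * be described on v4.60a and newer cores by additionally setting
+	 * the INCREASE_REGION_SIZE flag, which enables the upper limit
+	 * register.
+	 */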
+ val = type;
+ if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ mdelay(LINK_WAIT_IATU);
+ }
+
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
+}
+
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u8 bar)
{
u32 retries, val;
cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
-
-}
-
-static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
- if (val == 0xffffffff)
- return true;
-
- return false;
}
-static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
int max_region, ob, ib;
u32 val, min, dir;
u64 max;
- if (pci->iatu_unroll_enabled) {
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+ if (val == 0xFFFFFFFF) {
+ dw_pcie_cap_set(pci, IATU_UNROLL);
+
max_region = min((int)pci->atu_size / 512, 256);
} else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
}
pci->num_ib_windows = ib;
pci->region_align = 1 << fls(min);
pci->region_limit = (max << 32) | (SZ_4G - 1);
+
+ dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n",
+ dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F",
+ pci->num_ob_windows, pci->num_ib_windows,
+ pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
-void dw_pcie_iatu_detect(struct dw_pcie *pci)
+static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
{
- struct platform_device *pdev = to_platform_device(pci->dev);
+ u32 val = 0;
+ int ret;
- pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
- if (pci->iatu_unroll_enabled) {
- if (!pci->atu_base) {
- struct resource *res =
- platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
- if (res) {
- pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(pci->dev, res);
- }
- if (!pci->atu_base || IS_ERR(pci->atu_base))
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
- }
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);
+
+ ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read DMA address failed\n");
+
+ return val;
+}
+
+static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[6];
+ int ret;
+
+ if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
+ return -EINVAL;
- if (!pci->atu_size)
- /* Pick a minimal default, enough for 8 in and 8 out windows */
- pci->atu_size = SZ_4K;
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0)
+ return ret;
+
+ snprintf(name, sizeof(name), "dma%u", nr);
+
+ return platform_get_irq_byname_optional(pdev, name);
+}
+
+static struct dw_edma_core_ops dw_pcie_edma_ops = {
+ .irq_vector = dw_pcie_edma_irq_vector,
+};
+
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ u32 val;
+
+	/*
+	 * Indirect eDMA CSRs access has been completely removed since v5.40a,
+	 * thus no space is reserved for the eDMA channels viewport anymore
+	 * and the former DMA CTRL register is no longer fixed to all-Fs.
+	 */
+ if (dw_pcie_ver_is_ge(pci, 540A))
+ val = 0xFFFFFFFF;
+ else
+ val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
+
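+	/*
+	 * A readable DMA CTRL register in the Port Logic space indicates
+	 * the legacy viewport-based mapping. All-Fs read back there
+	 * together with a dedicated eDMA region implies the unrolled
+	 * mapping.
+	 */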
+ if (val == 0xFFFFFFFF && pci->edma.reg_base) {
+ pci->edma.mf = EDMA_MF_EDMA_UNROLL;
+
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
+ } else if (val != 0xFFFFFFFF) {
+ pci->edma.mf = EDMA_MF_EDMA_LEGACY;
+
+ pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
} else {
- pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
- pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+ return -ENODEV;
}
- dw_pcie_iatu_detect_regions(pci);
+ pci->edma.dev = pci->dev;
- dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
- "enabled" : "disabled");
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
- dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
- pci->num_ob_windows, pci->num_ib_windows,
- pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+
+	/* Sanity check the channels count in case the mapping was incorrect */
+ if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
+ !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
+ char name[6];
+ int ret;
+
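+	/*
+	 * Three layouts are accepted: a single IRQ line shared by all the
+	 * eDMA events, one IRQ line per channel (the count must then match
+	 * the total channels number), or the IRQ lines auto-detected from
+	 * the "dma"/"dmaN" resource names.
+	 */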
+ if (pci->edma.nr_irqs == 1)
+ return 0;
+ else if (pci->edma.nr_irqs > 1)
+ return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0) {
+ pci->edma.nr_irqs = 1;
+ return 0;
+ }
+
+ for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
+ snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);
+
+ ret = platform_get_irq_byname_optional(pdev, name);
+ if (ret <= 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
+{
+ struct dw_edma_region *ll;
+ dma_addr_t paddr;
+ int i;
+
+ for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
+ ll = &pci->edma.ll_region_wr[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
+ ll = &pci->edma.ll_region_rd[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ return 0;
+}
+
+int dw_pcie_edma_detect(struct dw_pcie *pci)
+{
+ int ret;
+
+	/* Don't fail if no eDMA was found (for backward compatibility) */
+ ret = dw_pcie_edma_find_chip(pci);
+ if (ret)
+ return 0;
+
+	/* Don't fail on the IRQs verification (for backward compatibility) */
+ ret = dw_pcie_edma_irq_verify(pci);
+ if (ret) {
+ dev_err(pci->dev, "Invalid eDMA IRQs found\n");
+ return 0;
+ }
+
+ ret = dw_pcie_edma_ll_alloc(pci);
+ if (ret) {
+ dev_err(pci->dev, "Couldn't allocate LLP memory\n");
+ return ret;
+ }
+
+ /* Don't fail if the DW eDMA driver can't find the device */
+ ret = dw_edma_probe(&pci->edma);
+ if (ret && ret != -ENODEV) {
+ dev_err(pci->dev, "Couldn't register eDMA device\n");
+ return ret;
+ }
+
+ dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
+ pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
+ pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
+
+ return 0;
+}
+
+void dw_pcie_edma_remove(struct dw_pcie *pci)
+{
+ dw_edma_remove(&pci->edma);
}
void dw_pcie_setup(struct dw_pcie *pci)
{
- struct device_node *np = pci->dev->of_node;
u32 val;
if (pci->link_gen > 0)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
- if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+ if (dw_pcie_cap_is(pci, CDM_CHECK)) {
val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
PCIE_PL_CHK_REG_CHK_REG_START;
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
- of_property_read_u32(np, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");
return;
}
/* Set the number of lanes */
- val &= ~PORT_LINK_FAST_LINK_MODE;
val &= ~PORT_LINK_MODE_MASK;
switch (pci->num_lanes) {
case 1:
#define _PCIE_DESIGNWARE_H
#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/edma.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/reset.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#define DW_PCIE_VER_480A 0x3438302a
#define DW_PCIE_VER_490A 0x3439302a
#define DW_PCIE_VER_520A 0x3532302a
+#define DW_PCIE_VER_540A 0x3534302a
#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
((_pci)->version _op DW_PCIE_VER_ ## _ver)
+#define __dw_pcie_ver_type_cmp(_pci, _type, _op) \
+ ((_pci)->type _op DW_PCIE_VER_TYPE_ ## _type)
+
#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
#define dw_pcie_ver_type_is(_pci, _ver, _type) \
(__dw_pcie_ver_cmp(_pci, _ver, ==) && \
- __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
+ __dw_pcie_ver_type_cmp(_pci, _type, ==))
#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
(__dw_pcie_ver_cmp(_pci, _ver, ==) && \
- __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
+ __dw_pcie_ver_type_cmp(_pci, _type, >=))
+
+/* DWC PCIe controller capabilities */
+#define DW_PCIE_CAP_REQ_RES 0
+#define DW_PCIE_CAP_IATU_UNROLL 1
+#define DW_PCIE_CAP_CDM_CHECK 2
+
+#define dw_pcie_cap_is(_pci, _cap) \
+ test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
+
+#define dw_pcie_cap_set(_pci, _cap) \
+ set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
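+/*
+ * For instance, a glue driver expecting the core to request the platform
+ * clocks and resets sets the respective capability before
+ * dw_pcie_get_resources() is executed:
+ *
+ *  dw_pcie_cap_set(pci, REQ_RES);
+ */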
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define PCIE_MSIX_DOORBELL 0x948
#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
+/*
+ * eDMA CSRs. DW PCIe IP-cores v4.70a and older have the eDMA registers
+ * accessible via the Port Logic registers space. Afterwards the unrolled
+ * mapping was introduced, so the eDMA and iATU could be accessed via
+ * dedicated registers spaces.
+ */
+#define PCIE_DMA_VIEWPORT_BASE 0x970
+#define PCIE_DMA_UNROLL_BASE 0x80000
+#define PCIE_DMA_CTRL 0x008
+#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0)
+#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16)
+
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
* this offset, if atu_base not set.
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
+#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
+/* Default eDMA LLP memory size */
+#define DMA_LLP_MEM_SIZE PAGE_SIZE
+
struct dw_pcie;
struct dw_pcie_rp;
struct dw_pcie_ep;
DW_PCIE_RC_TYPE,
};
+enum dw_pcie_app_clk {
+ DW_PCIE_DBI_CLK,
+ DW_PCIE_MSTR_CLK,
+ DW_PCIE_SLV_CLK,
+ DW_PCIE_NUM_APP_CLKS
+};
+
+enum dw_pcie_core_clk {
+ DW_PCIE_PIPE_CLK,
+ DW_PCIE_CORE_CLK,
+ DW_PCIE_AUX_CLK,
+ DW_PCIE_REF_CLK,
+ DW_PCIE_NUM_CORE_CLKS
+};
+
+enum dw_pcie_app_rst {
+ DW_PCIE_DBI_RST,
+ DW_PCIE_MSTR_RST,
+ DW_PCIE_SLV_RST,
+ DW_PCIE_NUM_APP_RSTS
+};
+
+enum dw_pcie_core_rst {
+ DW_PCIE_NON_STICKY_RST,
+ DW_PCIE_STICKY_RST,
+ DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST,
+ DW_PCIE_PHY_RST,
+ DW_PCIE_HOT_RST,
+ DW_PCIE_PWR_RST,
+ DW_PCIE_NUM_CORE_RSTS
+};
+
struct dw_pcie_host_ops {
int (*host_init)(struct dw_pcie_rp *pp);
void (*host_deinit)(struct dw_pcie_rp *pp);
const struct dw_pcie_ops *ops;
u32 version;
u32 type;
+ unsigned long caps;
int num_lanes;
int link_gen;
u8 n_fts[2];
- bool iatu_unroll_enabled: 1;
+ struct dw_edma_chip edma;
+ struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
+ struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
+ struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
+ struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
+ struct gpio_desc *pe_rst;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
#define to_dw_pcie_from_ep(endpoint) \
container_of((endpoint), struct dw_pcie, ep)
+int dw_pcie_get_resources(struct dw_pcie *pci);
+
void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
int type, u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u8 bar);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
+int dw_pcie_edma_detect(struct dw_pcie *pci);
+void dw_pcie_edma_remove(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
void __iomem *ulreg_base;
void __iomem *smu_base;
void __iomem *mpu_base;
- struct clk *refclk;
- struct clk *coreclk;
- struct clk *auxclk;
};
#define PCIE_UL_REG_S_PCIE_MODE 0x00F4
int err;
u32 val;
+ if (!pcie->pci.core_clks[DW_PCIE_REF_CLK].clk) {
+ dev_err(pci->dev, "Missing ref clock source\n");
+ return -ENOENT;
+ }
+
+ if (!pcie->pci.core_clks[DW_PCIE_CORE_CLK].clk) {
+ dev_err(pci->dev, "Missing core clock source\n");
+ return -ENOENT;
+ }
+
+ if (!pcie->pci.core_clks[DW_PCIE_AUX_CLK].clk) {
+ dev_err(pci->dev, "Missing aux clock source\n");
+ return -ENOENT;
+ }
+
visconti_smu_writel(pcie,
PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK,
PISMU_CKON_PCIE);
static int visconti_get_resources(struct platform_device *pdev,
struct visconti_pcie *pcie)
{
- struct device *dev = &pdev->dev;
-
pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg");
if (IS_ERR(pcie->ulreg_base))
return PTR_ERR(pcie->ulreg_base);
if (IS_ERR(pcie->mpu_base))
return PTR_ERR(pcie->mpu_base);
- pcie->refclk = devm_clk_get(dev, "ref");
- if (IS_ERR(pcie->refclk))
- return dev_err_probe(dev, PTR_ERR(pcie->refclk),
- "Failed to get ref clock\n");
-
- pcie->coreclk = devm_clk_get(dev, "core");
- if (IS_ERR(pcie->coreclk))
- return dev_err_probe(dev, PTR_ERR(pcie->coreclk),
- "Failed to get core clock\n");
-
- pcie->auxclk = devm_clk_get(dev, "aux");
- if (IS_ERR(pcie->auxclk))
- return dev_err_probe(dev, PTR_ERR(pcie->auxclk),
- "Failed to get aux clock\n");
-
return 0;
}
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ dw_pcie_cap_set(pci, REQ_RES);
+
ret = visconti_get_resources(pdev, pcie);
if (ret)
return ret;
resource_size_t min_align = 0;
int order;
- for (order = 0; order <= max_order; order++) {
+ for (order = 0; order < max_order; order++) {
resource_size_t align1 = 1;
+ if (!aligns[order])
+ continue;
+
align1 <<= (order + 20);
- if (!align)
+ if (!min_align)
min_align = align1;
else if (ALIGN(align + min_align, min_align) < align1)
min_align = align1 >> 1;
struct list_head *realloc_head)
{
struct pci_dev *dev;
- resource_size_t min_align, align, size, size0, size1;
+ resource_size_t min_align, align, size, size0, size1, max_align;
resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
int order, max_order;
struct resource *b_res = find_bus_resource_of_type(bus,
min_align = calculate_mem_align(aligns, max_order);
min_align = max(min_align, window_alignment(bus, b_res->flags));
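+	/*
+	 * Use the alignment of the largest populated order as the window
+	 * start alignment, unless the minimal alignment already reaches at
+	 * least half of it.
+	 */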
+	max_align = (resource_size_t)1 << (max_order + 20);
+	if (min_align >= max_align / 2)
+		max_align = min_align;
size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
add_align = max(min_align, add_align);
size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
b_res->flags = 0;
return 0;
}
- b_res->start = min_align;
- b_res->end = size0 + min_align - 1;
+ b_res->start = max_align;
+ b_res->end = size0 + max_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
bus_state = &rhub->bus_state;
wake_enabled = hcd->self.root_hub->do_remote_wakeup;
+	/* FIXME: Work around CSRTimeout errors caused by accessing PORTSC too early */
+ if (time_before(jiffies, bus_state->next_statechange))
+ msleep(5);
+
spin_lock_irqsave(&xhci->lock, flags);
if (wake_enabled) {
}
#endif
+/**
+ * tlb_prefetch - preload the TLB entry for the passed virtual address
+ * @addr: Virtual address
+ *
+ * Architectures that implement software TLB prefetching are supposed to
+ * override this stub.
+ */
+#ifndef tlb_prefetch
+static inline void tlb_prefetch(unsigned long addr)
+{
+}
+#endif
+
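Architectures that actually implement TLB prefetching are expected to provide their own version ahead of this fallback, following the usual generic-header override pattern. A purely hypothetical override sketch (the mechanism is arch-specific and only hinted at in the comment):

/* e.g. in an arch-specific asm/mmu_context.h, seen before the generic header */
#define tlb_prefetch tlb_prefetch
static inline void tlb_prefetch(unsigned long addr)
{
	/*
	 * Arch-specific: preload the TLB entry covering @addr so the
	 * refill cost isn't paid on the upcoming hot-path access.
	 */
}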
#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Marvell 88x2222 PHY constants
+ *
+ * Copyright (C) 2023 BAIKAL ELECTRONICS, JSC
+ */
+
+#ifndef _DT_BINDINGS_MV_88X2222_H
+#define _DT_BINDINGS_MV_88X2222_H
+
+/* Marvell 88x2222 GPIOs mapping */
+#define MV_88X2222_MOD_ABS 0
+#define MV_88X2222_TX_FAULT 1
+#define MV_88X2222_RX_LOS 2
+#define MV_88X2222_GPIO 3
+#define MV_88X2222_LED0 4
+#define MV_88X2222_LED1 5
+#define MV_88X2222_MPC 6
+#define MV_88X2222_TOD 7
+#define MV_88X2222_TX_DISABLE 8
+#define MV_88X2222_UNDEF 9
+#define MV_88X2222_SDA 10
+#define MV_88X2222_SCL 11
+
+#endif /* _DT_BINDINGS_MV_88X2222_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 Boot Modes
+ */
+#ifndef __DT_BINDINGS_SOC_BT1_BOOT_MODE_H
+#define __DT_BINDINGS_SOC_BT1_BOOT_MODE_H
+
+#define RCR_BOOT_NORMAL 0x1
+#define RCR_BOOT_LOADER 0x2
+#define RCR_BOOT_RECOVERY 0x3
+
+#endif /* __DT_BINDINGS_SOC_BT1_BOOT_MODE_H */
struct dw_edma;
struct dw_edma_region {
- phys_addr_t paddr;
- void __iomem *vaddr;
+ u64 paddr;
+ union {
+ void *mem;
+ void __iomem *io;
+ } vaddr;
size_t sz;
};
+/**
+ * struct dw_edma_core_ops - platform-specific eDMA methods
+ * @irq_vector:	Get the IRQ number of the passed eDMA channel. Note the
+ *			method accepts the channel id in the end-to-end
+ *			numbering with the eDMA write channels numbered first.
+ * @pci_address:	Get the PCIe bus address corresponding to the passed
+ *			CPU address. Note there is no need to specify this
+ *			method if the address translation is performed by the
+ *			DW PCIe RP/EP controller hosting the DW eDMA device
+ *			and DMA_BYPASS isn't set for any of the outbound iATU
+ *			windows. In that case the translation will be done by
+ *			the controller automatically.
+ */
struct dw_edma_core_ops {
int (*irq_vector)(struct device *dev, unsigned int nr);
+ u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
};
enum dw_edma_map_format {
*/
struct dw_edma_chip {
struct device *dev;
- int id;
int nr_irqs;
const struct dw_edma_core_ops *ops;
u32 flags;
};
/* Export to the platform drivers */
-#if IS_ENABLED(CONFIG_DW_EDMA)
+#if IS_REACHABLE(CONFIG_DW_EDMA)
int dw_edma_probe(struct dw_edma_chip *chip);
int dw_edma_remove(struct dw_edma_chip *chip);
#else
* should be read (RX), if the source is memory this argument is
* ignored.
* @dst_addr: this is the physical address where DMA slave data
- * should be written (TX), if the source is memory this argument
+ * should be written (TX), if the destination is memory this argument
* is ignored.
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* This is a variant of the DDR memories.
* A registered memory has a buffer inside it, hiding
* part of the memory details to the memory controller.
+ * @MEM_LPDDR: Low-Power DDR memory (mDDR).
* @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers.
* @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F.
* Those memories are labeled as "PC2-" instead of "PC" to
* a chip select signal.
* @MEM_RDDR2: Registered DDR2 RAM
* This is a variant of the DDR2 memories.
+ * @MEM_LPDDR2: Low-Power DDR2 memory.
* @MEM_XDR: Rambus XDR
* It is an evolution of the original RAMBUS memories,
* created to compete with DDR2. Weren't used on any
MEM_RDR,
MEM_DDR,
MEM_RDDR,
+ MEM_LPDDR,
MEM_RMBS,
MEM_DDR2,
MEM_FB_DDR2,
MEM_RDDR2,
+ MEM_LPDDR2,
MEM_XDR,
MEM_DDR3,
MEM_RDDR3,
#define MEM_FLAG_RDR BIT(MEM_RDR)
#define MEM_FLAG_DDR BIT(MEM_DDR)
#define MEM_FLAG_RDDR BIT(MEM_RDDR)
+#define MEM_FLAG_LPDDR BIT(MEM_LPDDR)
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_LPDDR2 BIT(MEM_LPDDR2)
#define MEM_FLAG_XDR BIT(MEM_XDR)
#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
u8 command, u8 length,
u8 *values);
+s32 i2c_smbus_write_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length,
+ const u8 *values);
int i2c_get_device_id(const struct i2c_client *client,
struct i2c_device_identity *id);
#endif /* I2C */
#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
#define MARVELL_PHY_ID_88X2222 0x01410f10
+#define MARVELL_PHY_ID_88X2222R 0x014131b0
/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
void mdio_driver_unregister(struct mdio_driver *drv);
int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
+static inline void mdio_device_get(struct mdio_device *mdiodev)
+{
+ get_device(&mdiodev->dev);
+}
+
+static inline void mdio_device_put(struct mdio_device *mdiodev)
+{
+ mdio_device_free(mdiodev);
+}
+
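These helpers follow the usual get/put idiom: mdio_device_get() pins the underlying struct device, while mdio_device_put() drops the reference by way of mdio_device_free(). A small, hypothetical consumer sketch (struct example_consumer and both functions are illustrative only):

struct example_consumer {
	struct mdio_device *mdiodev;
};

static void example_attach(struct example_consumer *con,
			   struct mdio_device *mdiodev)
{
	/* Pin the MDIO device for as long as the consumer uses it */
	mdio_device_get(mdiodev);
	con->mdiodev = mdiodev;
}

static void example_detach(struct example_consumer *con)
{
	mdio_device_put(con->mdiodev);
	con->mdiodev = NULL;
}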
static inline bool mdio_phy_id_is_c45(int phy_id)
{
return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK);
return result;
}
+/**
+ * mii_c73_mod_linkmode - convert a Clause 73 advertisement to linkmodes
+ * @adv: linkmode advertisement setting
+ * @lpa: array of three u16s containing the advertisement
+ *
+ * Convert an IEEE 802.3 Clause 73 advertisement to ethtool link modes.
+ */
+static inline void mii_c73_mod_linkmode(unsigned long *adv, u16 *lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_PAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_ASM_DIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_1000BASE_KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_CR4);
+ /* 100GBASE_CR10 and 100GBASE_KP4 not implemented */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_CR4);
+ /* 25GBASE_R_S not implemented */
+ /* The 25GBASE_R bit can be used for 25Gbase KR or CR modes */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ adv, lpa[2] & MDIO_AN_C73_2_2500BASE_KX);
+ /* 5GBASE_KR not implemented */
+}
+
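A hedged sketch of feeding the helper: fetch the link partner's Clause 73 base page and the two following ability words over C45 MDIO, then convert them to link modes. The function name and the assumption that the three words sit at MDIO_AN_LPA onwards are illustrative only; the helper itself only requires the filled lpa[] array.

static int example_read_c73_lpa(struct mii_bus *bus, int addr,
				unsigned long *adv)
{
	u16 lpa[3];
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = mdiobus_c45_read(bus, addr, MDIO_MMD_AN,
				       MDIO_AN_LPA + i);
		if (ret < 0)
			return ret;

		lpa[i] = ret;
	}

	mii_c73_mod_linkmode(adv, lpa);

	return 0;
}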
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
struct mdio_device *lynx_get_mdio_device(struct phylink_pcs *pcs);
struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio);
+struct phylink_pcs *lynx_pcs_create_mdiodev(struct mii_bus *bus, int addr);
void lynx_pcs_destroy(struct phylink_pcs *pcs);
#ifndef __LINUX_PCS_XPCS_H
#define __LINUX_PCS_XPCS_H
+#include <linux/clk.h>
+#include <linux/fwnode.h>
+#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#define DW_XPCS_ID_NATIVE 0x00000000
#define NXP_SJA1105_XPCS_ID 0x00000010
#define NXP_SJA1110_XPCS_ID 0x00000020
+#define BT1_XGMAC_XPCS_ID 0x00000030
+#define DW_XPCS_ID 0x7996ced0
+#define DW_XPCS_ID_MASK 0xffffffff
/* AN mode */
#define DW_AN_C73 1
#define DW_AN_C37_SGMII 2
#define DW_2500BASEX 3
#define DW_AN_C37_1000BASEX 4
+#define DW_10GBASER 5
+#define DW_10GBASEX 6
struct xpcs_id;
+enum dw_xpcs_pma {
+ DW_XPCS_PMA_UNKNOWN = 0,
+ DW_XPCS_PMA_GEN1_3G,
+ DW_XPCS_PMA_GEN2_3G,
+ DW_XPCS_PMA_GEN2_6G,
+ DW_XPCS_PMA_GEN4_3G,
+ DW_XPCS_PMA_GEN4_6G,
+ DW_XPCS_PMA_GEN5_10G,
+ DW_XPCS_PMA_GEN5_12G,
+};
+
+enum dw_xpcs_clock {
+ DW_XPCS_CLK_CORE,
+ DW_XPCS_CLK_PAD,
+ DW_XPCS_NUM_CLKS,
+};
+
+struct dw_xpcs_info {
+ u32 did;
+ u32 pma;
+};
+
struct dw_xpcs {
struct mdio_device *mdiodev;
+ struct dw_xpcs_info info;
const struct xpcs_id *id;
+ struct clk_bulk_data clks[DW_XPCS_NUM_CLKS];
+ u16 mmd_ctrl;
struct phylink_pcs pcs;
};
void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
int enable);
-struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
- phy_interface_t interface);
+struct dw_xpcs *xpcs_create_bynode(const struct fwnode_handle *fwnode,
+ phy_interface_t interface);
+struct dw_xpcs *xpcs_create_byaddr(struct mii_bus *bus, int addr,
+ phy_interface_t interface);
void xpcs_destroy(struct dw_xpcs *xpcs);
#endif /* __LINUX_PCS_XPCS_H */
* @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
* @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
* @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX
+ * @PHY_INTERFACE_MODE_10GBASEX: 10G BaseX
* @PHY_INTERFACE_MODE_5GBASER: 5G BaseR
* @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
* @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
PHY_INTERFACE_MODE_100BASEX,
PHY_INTERFACE_MODE_1000BASEX,
PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_10GBASEX,
PHY_INTERFACE_MODE_5GBASER,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
return "1000base-kx";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
+ case PHY_INTERFACE_MODE_10GBASEX:
+ return "10gbase-x";
case PHY_INTERFACE_MODE_5GBASER:
return "5gbase-r";
case PHY_INTERFACE_MODE_RXAUI:
const unsigned long *advertising);
void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
+void phylink_resolve_c73(struct phylink_link_state *state);
+
void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state);
int rx_coe;
int bugged_jumbo;
int pmt;
+ int sma;
int force_sf_dma_mode;
int force_thresh_dma_mode;
int riwt_off;
void (*exit)(struct platform_device *pdev, void *priv);
struct mac_device_info *(*setup)(void *priv);
int (*clks_config)(void *priv, bool enabled);
+ int (*bus_reset)(void *priv);
+ int (*swr_reset)(void *priv);
int (*crosststamp)(ktime_t *device, struct system_counterval_t *system,
void *ctx);
void (*dump_debug_regs)(void *priv);
#define MDIO_PMA_EXTABLE_BT1 0x0800 /* BASE-T1 ability */
#define MDIO_PMA_EXTABLE_NBT 0x4000 /* 2.5/5GBASE-T ability */
+/* AN Clause 73 linkword */
+#define MDIO_AN_C73_0_S_MASK GENMASK(4, 0)
+#define MDIO_AN_C73_0_E_MASK GENMASK(9, 5)
+#define MDIO_AN_C73_0_PAUSE BIT(10)
+#define MDIO_AN_C73_0_ASM_DIR BIT(11)
+#define MDIO_AN_C73_0_C2 BIT(12)
+#define MDIO_AN_C73_0_RF BIT(13)
+#define MDIO_AN_C73_0_ACK BIT(14)
+#define MDIO_AN_C73_0_NP BIT(15)
+#define MDIO_AN_C73_1_T_MASK GENMASK(4, 0)
+#define MDIO_AN_C73_1_1000BASE_KX BIT(5)
+#define MDIO_AN_C73_1_10GBASE_KX4 BIT(6)
+#define MDIO_AN_C73_1_10GBASE_KR BIT(7)
+#define MDIO_AN_C73_1_40GBASE_KR4 BIT(8)
+#define MDIO_AN_C73_1_40GBASE_CR4 BIT(9)
+#define MDIO_AN_C73_1_100GBASE_CR10 BIT(10)
+#define MDIO_AN_C73_1_100GBASE_KP4 BIT(11)
+#define MDIO_AN_C73_1_100GBASE_KR4 BIT(12)
+#define MDIO_AN_C73_1_100GBASE_CR4 BIT(13)
+#define MDIO_AN_C73_1_25GBASE_R_S BIT(14)
+#define MDIO_AN_C73_1_25GBASE_R BIT(15)
+#define MDIO_AN_C73_2_2500BASE_KX BIT(0)
+#define MDIO_AN_C73_2_5GBASE_KR BIT(1)
+
/* PHY XGXS lane state register. */
#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002
if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
return;
+ tlb_prefetch((unsigned long)ac);
+
if (ac->avail < ac->limit) {
STATS_INC_FREEHIT(cachep);
} else {