git.baikalelectronics.ru Git - kernel.git/commitdiff
SDK 5.8.2  (baikal/mips/5.4.y, baikal/mips/sdk5.8.2)
author     Baikal Electronics <support@baikalelectronics.ru>
           Mon, 19 Jun 2023 08:23:15 +0000 (11:23 +0300)
committer  Baikal Electronics <support@baikalelectronics.ru>
           Mon, 19 Jun 2023 08:26:02 +0000 (11:26 +0300)
114 files changed:
Documentation/pvt [new file with mode: 0644]
arch/mips/Kbuild.platforms
arch/mips/Kconfig
arch/mips/baikal/Kconfig [new file with mode: 0644]
arch/mips/baikal/Makefile [new file with mode: 0644]
arch/mips/baikal/Platform [new file with mode: 0644]
arch/mips/baikal/baikal-apb.c [new file with mode: 0644]
arch/mips/baikal/baikal-axi.c [new file with mode: 0644]
arch/mips/baikal/baikal-bc.c [new file with mode: 0644]
arch/mips/baikal/baikal-console.c [new file with mode: 0644]
arch/mips/baikal/baikal-efuse.c [new file with mode: 0644]
arch/mips/baikal/baikal-init.c [new file with mode: 0644]
arch/mips/baikal/baikal-int.c [new file with mode: 0644]
arch/mips/baikal/baikal-memory.c [new file with mode: 0644]
arch/mips/baikal/baikal-of.c [new file with mode: 0644]
arch/mips/baikal/baikal-setup.c [new file with mode: 0644]
arch/mips/baikal/baikal-time.c [new file with mode: 0644]
arch/mips/baikal/common.h [new file with mode: 0644]
arch/mips/boot/compressed/uart-16550.c
arch/mips/boot/dts/Makefile
arch/mips/boot/dts/baikal/Makefile [new file with mode: 0644]
arch/mips/boot/dts/baikal/baikal_bfk3.dts [new file with mode: 0644]
arch/mips/boot/dts/baikal/baikal_mdio.dtsi [new file with mode: 0644]
arch/mips/boot/dts/baikal/baikal_t1_clocks.dtsi [new file with mode: 0644]
arch/mips/boot/dts/baikal/baikal_t1_soc.dtsi [new file with mode: 0644]
arch/mips/configs/baikal_bfk3_defconfig [new file with mode: 0644]
arch/mips/include/asm/cpu-info.h
arch/mips/include/asm/dma.h
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/maar.h
arch/mips/include/asm/mach-baikal/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-baikal/efuse.h [new file with mode: 0644]
arch/mips/include/asm/mach-baikal/hardware.h [new file with mode: 0644]
arch/mips/include/asm/mach-baikal/ioremap.h [new file with mode: 0644]
arch/mips/include/asm/mach-baikal/irq.h [new file with mode: 0644]
arch/mips/include/asm/mach-baikal/kernel-entry-init.h [new file with mode: 0644]
arch/mips/include/asm/mach-baikal/pci-t1.h [new file with mode: 0644]
arch/mips/include/asm/mach-baikal/spaces.h [new file with mode: 0644]
arch/mips/include/asm/mach-baikal/war.h [new file with mode: 0644]
arch/mips/include/asm/mips-boards/baikal.h [new file with mode: 0644]
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/setup.h
arch/mips/include/asm/smp-ops.h
arch/mips/kernel/setup.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/mm/c-r4k.c
arch/mips/mm/cache.c
arch/mips/mm/sc-mips.c
arch/mips/pci/pci-baikal.h [new file with mode: 0644]
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/clk-baikal.c [new file with mode: 0644]
drivers/clocksource/mips-gic-timer.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Makefile
drivers/cpufreq/baikal-t1-cpufreq.c [new file with mode: 0644]
drivers/edac/Kconfig
drivers/edac/Makefile
drivers/edac/baikal_mc_edac.c [new file with mode: 0644]
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/pvt.c [new file with mode: 0644]
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/xgbe/Makefile
drivers/net/ethernet/amd/xgbe/baikal-mdio.c [new file with mode: 0644]
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-platform.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/dwmac-baikal.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_xsk.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/stmmac_xsk.h [new file with mode: 0644]
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/mdio-be-gpio.c [new file with mode: 0644]
drivers/net/phy/mv88x2222.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
drivers/pci/Kconfig
drivers/pci/bus.c
drivers/pci/controller/dwc/Kconfig
drivers/pci/controller/dwc/Makefile
drivers/pci/controller/dwc/pcie-baikal.c [new file with mode: 0644]
drivers/pci/setup-bus.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-baikal-boot.c [new file with mode: 0644]
drivers/spi/spi-baikal-dma.c [new file with mode: 0644]
drivers/spi/spi-baikal.c [new file with mode: 0644]
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/Makefile
drivers/usb/dwc3/dwc3-baikal.c [new file with mode: 0644]
drivers/watchdog/dw_wdt.c
include/asm-generic/gpio.h
include/linux/marvell_phy.h
include/uapi/linux/if_xdp.h
mm/slab.c
samples/Kconfig
samples/Makefile
samples/bpf/Makefile
tools/build/Makefile.include
tools/include/uapi/linux/if_xdp.h

diff --git a/Documentation/pvt b/Documentation/pvt
new file mode 100644 (file)
index 0000000..df4eeaf
--- /dev/null
@@ -0,0 +1,61 @@
+Kernel driver pvt
+==================
+
+Supported chips:
+  * Analog Bits' PVT Sensor in Baikal-T SoC
+    Prefix: 'pvt'
+    Addresses scanned: 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+    Datasheet:  Analog Bits. PVT Sensor Datasheet. Version: 2014.07.23
+                BE-T-B_M-AS-006-PVT_db.pdf
+
+Author: Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>
+
+Description
+-----------
+Analog Bits' PVT Sensor is a highly integrated macro for on-chip monitoring of process, voltage and
+temperature variation, allowing very high precision even when used untrimmed. It consumes very little
+power in operational mode and only leakage power once a measurement is complete. In addition to the
+temperature mode, a voltage sample mode allows supply-voltage monitoring, and a process monitor mode
+assesses transistor performance. The block includes a simple digital interface that works with standard
+core and IO level power supplies. The macro uses core and thick-oxide devices.
+
+
+Linux Usage
+-----------
+/ # sensors 
+
+pvt-baikal-isa-0000
+Adapter: ISA adapter
+in1:          +0.93 V  (min =  +0.80 V, max =  +1.00 V)
+temp1:        +44.1 C  (low  =  -0.0 C, high = +99.8 C)
+
+
+Parameters for configuring the PVT sensor are exposed via sysfs. Temperature values are reported in millidegrees Celsius and voltages in mV.
+/ # ls  /sys/class/hwmon/hwmon1/device/
+RO:
+    name             
+    temp1_input
+    in1_input        
+    svt_input
+    hvt_input        
+    lvt_input        
+
+RW:    
+    temp1_min
+    temp1_max
+    in1_min          
+    in1_max          
+    mon_mod          
+
+The temperature (voltage) can be read from the file temp1_input (in1_input); a minimal user-space
+example is sketched below, after this hunk.
+The PVT sensor can monitor either temperature or voltage. Switch the monitoring mode by writing
+0 (Temperature, default) or 1 (Voltage) to the file mon_mod.
+If the temperature or voltage exceeds the limits set via temp1_min, temp1_max, in1_min and in1_max,
+an interrupt is generated and the following message appears on the console:
+/ # PVT WARNING Hi(Lo) Temperature(Voltage)
+
+
+/ # cat /proc/interrupts 
+           CPU0       CPU1       
+................................................
+ 23:          1          0  MIPS GIC  23  be-apb
+................................................
+
index 0de8398821066868ad01b2f2a5497852c287fdfb..01b2ade90eb4ee368ab8b79e4c48598610c3aefc 100644 (file)
@@ -5,6 +5,7 @@ platforms += alchemy
 platforms += ar7
 platforms += ath25
 platforms += ath79
+platforms += baikal
 platforms += bcm47xx
 platforms += bcm63xx
 platforms += bmips
index 2811ecc1f3c71304937baaef1ca186110e6051fb..d53622f3fae918db2ed0a7b5e9306abe7f202cad 100644 (file)
@@ -87,6 +87,7 @@ config MIPS
        select SYSCTL_EXCEPTION_TRACE
        select VIRT_TO_BUS
        select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
+       select HAVE_GCC_PLUGINS
 
 menu "Machine selection"
 
@@ -222,6 +223,57 @@ config ATH79
        help
          Support for the Atheros AR71XX/AR724X/AR913X SoCs.
 
+config MIPS_BAIKAL
+       bool "MIPS Baikal-T SoC"
+       select BOOT_ELF32
+       select BOOT_RAW
+       select LIBFDT
+       select USE_OF
+       select GENERIC_ISA_DMA
+       select DMA_NONCOHERENT
+       select SCHED_HRTICK
+       select HAVE_PCI
+       select MIPS_CPU_SCACHE
+       select MIPS_MACHINE
+       select MIPS_GIC
+       select EDAC_SUPPORT
+       select IRQ_MIPS_CPU
+       select IRQ_DOMAIN
+       select SWAP_IO_SPACE
+       select HAVE_MACH_CLKDEV
+       select COMMON_CLK
+       select CLKDEV_LOOKUP
+       select CLKSRC_MIPS_GIC
+       select CPU_MIPSR2_IRQ_VI
+       select CPU_MIPSR2_IRQ_EI
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_UNCACHED_ACCELERATED
+       select CPU_HAS_SYNC
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_MSA
+       select CPU_SUPPORTS_CPUFREQ
+       select CPU_FREQ
+       select MIPS_EXTERNAL_TIMER
+       select PCI_DRIVERS_GENERIC
+       select PCI_MSI_IRQ_DOMAIN
+       select SYS_HAS_CPU_MIPS32_R2
+       select SYS_HAS_CPU_MIPS32_R3_5
+       select SYS_HAS_CPU_MIPS32_R5
+       select SYS_HAS_EARLY_PRINTK
+       select SYS_HAS_32BIT_KERNEL
+       select CPU_SUPPORTS_CPUFREQ
+       select SYS_SUPPORTS_HIGHMEM
+       select SYS_SUPPORTS_32BIT_KERNEL
+       select SYS_SUPPORTS_LITTLE_ENDIAN
+       select SYS_SUPPORTS_ZBOOT
+       select SYS_SUPPORTS_ZBOOT_UART16550
+       select SYS_SUPPORTS_MIPS_CPS
+       select SYS_SUPPORTS_SMP
+       select ARCH_KEEP_MEMBLOCK
+        help
+          Support for the Baikal-T SoCs.
+
 config BMIPS_GENERIC
        bool "Broadcom Generic BMIPS kernel"
        select ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
@@ -1026,6 +1078,7 @@ endchoice
 source "arch/mips/alchemy/Kconfig"
 source "arch/mips/ath25/Kconfig"
 source "arch/mips/ath79/Kconfig"
+source "arch/mips/baikal/Kconfig"
 source "arch/mips/bcm47xx/Kconfig"
 source "arch/mips/bcm63xx/Kconfig"
 source "arch/mips/bmips/Kconfig"
diff --git a/arch/mips/baikal/Kconfig b/arch/mips/baikal/Kconfig
new file mode 100644 (file)
index 0000000..f94c4ef
--- /dev/null
@@ -0,0 +1,271 @@
+#
+# Baikal-T/T1 SOC platform support code.
+#
+# Copyright (C) 2014-2017 Baikal Electronics JSC
+#
+# Authors:
+# Dmitry Dunaev <dmitry.dunaev@baikalelectronics.com>
+# Alexander Sazonov <Alexander.Sazonov@baikalelectronics.ru>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+if MIPS_BAIKAL
+
+choice
+    prompt "Baikal-T SoC Family Support"
+    default MIPS_BAIKAL_T  if MACH_BAIKAL_BFK
+    default MIPS_BAIKAL_T1 if MACH_BAIKAL_BFK3
+    default MIPS_BAIKAL_T1
+
+config MIPS_BAIKAL_T
+    bool "BAIKAL-T MIPS CPU"
+    select OF_EARLY_FLATTREE
+    select GENERIC_SCHED_CLOCK
+    select NO_IOPORT
+    select SOC_BUS
+    help
+      Baikal-T SoC with MIPS32 P5600 core (engineering version)
+
+config MIPS_BAIKAL_T1
+    bool "BAIKAL-T1 MIPS CPU"
+    select OF_EARLY_FLATTREE
+    select GENERIC_SCHED_CLOCK
+    select NO_IOPORT
+    select SOC_BUS
+    select BAIKAL_EFUSE
+    select SMP_UP if SMP
+    select SWIOTLB if ARCH_DMA_ADDR_T_64BIT
+    select GENERIC_ISA_DMA
+    select DMA_NONCOHERENT
+    select SYS_SUPPORTS_HIGHMEM
+    select SYS_SUPPORTS_32BIT_KERNEL
+    select MIPS_L1_CACHE_SHIFT_5
+    select SYS_HAS_CPU_P5600
+    select SYS_SUPPORTS_LITTLE_ENDIAN
+    select SYS_SUPPORTS_32BIT_KERNEL
+    select SYS_HAS_CPU_MIPS32_R5
+    select ARCH_HAS_RESET_CONTROLLER
+    help
+      Baikal-T1 SoC with MIPS32 P5600 core (production version)
+
+endchoice
+
+config BT1_SWIOTLB_SIZE
+    int "SWIOTLB size in MiB" if SWIOTLB
+    range 4 64
+    default 8
+    help
+      Due to a peculiarity of the Baikal-T1 main interconnect controller
+      synthesis configuration, only the PCIe bus is able to access physical
+      memory above 4GiB. So if XPA is enabled and the bootloader reports
+      more than 4GiB of physical memory, SWIOTLB must be declared. Since the
+      default SWIOTLB consumes too much memory, a custom table with a
+      compile-time configurable buffer size is created instead.
+
+config BAIKAL_T_COMMON
+    bool "Baikal-T SoC common features"
+
+    # Boot Controller support
+    select BE_BC
+
+    # Hardware features
+    select HW_HAS_PCI
+    select HW_PERF_EVENTS
+    select CPU_FREQ_GOV_PERFORMANCE
+    select CPU_FREQ_GOV_POWERSAVE
+
+    # Common
+    select PHYLIB
+    select DEBUG_LL
+    select TIMER_GENERIC
+    select ARCH_WANT_OPTIONAL_GPIOLIB
+    select SYS_HAS_CPU_MIPS32_R5
+    select GENERIC_CLOCKEVENTS_MIN_ADJUST
+
+    # HW performance counters
+    select HAVE_PERF_EVENTS
+    select PERF_EVENTS
+    select HW_PERF_EVENTS
+
+    # Clock source
+    select COMMON_CLK_BAIKAL
+    select WDT_MIPS_GIC
+
+    # Serial UART
+    select SERIAL_8250
+    select SERIAL_8250_CONSOLE
+    select SERIAL_8250_EXTENDED
+    select SERIAL_8250_DW
+
+    # WatchDog
+    select DW_WATCHDOG
+
+    # Timers
+    select CLKSRC_OF
+    select DW_APB_TIMER
+    select DW_APB_TIMER_OF
+
+    # I2C Bus support
+    select I2C_DESIGNWARE_CORE
+    select I2C_DESIGNWARE_PLATFORM
+
+    # GPIO support
+    select GPIOLIB
+    select OF_GPIO
+    select GPIO_SYSFS
+    select GPIO_DWAPB
+    select GPIO_GENERIC
+
+    # Gigabit Ethernet support
+    select STMMAC_ETH
+    select STMMAC_PLATFORM
+    select STMMAC_DEBUG_FS
+
+    # SPI bus support
+    select SPI_DESIGNWARE
+    select SPI_DW_MMIO
+    select SPI_BAIKAL
+
+    # USB Support
+    select USB_DWC3
+    select USB_DWC3_HOST
+    select USB_DWC3_BAIKAL
+    select USB_PHY
+
+    help
+      Common features of Baikal-T SoC based boards
+
+choice
+    prompt "Baikal-T SoC based Boards"
+    default MACH_BAIKAL_BFK
+
+config MACH_BAIKAL_BFK
+    bool "Baikal-T BFK board"
+
+    # Basic SoC features
+    select MIPS_BAIKAL_T
+    select BAIKAL_T_COMMON
+
+    # Memory mapping granularity
+    select PAGE_SIZE_16KB
+
+    # DTB select
+    select DTB_BAIKAL_BFK
+
+config MACH_BAIKAL_BFK3
+    bool "Baikal-T1 BFK3 board"
+
+    # Basic SoC features
+    select MIPS_BAIKAL_T1
+    select BAIKAL_T_COMMON
+
+    # Memory mapping granularity
+    select PAGE_SIZE_16KB
+
+    # DTB select
+    select DTB_BAIKAL_BFK3
+
+    # MMC
+    select MMC
+    select MMC_SPI
+
+
+endchoice
+
+choice
+    prompt "Baikal-T based Board Configs"
+    default DTB_BAIKAL_BFK
+    help
+      Select device tree for Baikal-T/T1 SoC based board
+
+config DTB_BAIKAL_BFK
+    bool "Baikal-T BFK board"
+    help
+      Device tree for BFK boards (for versions 1.0, 1.5, 1.6)
+      based on Baikal-T SOC.
+
+config DTB_BAIKAL_BFK3
+    bool "Baikal-T1 BFK3 board"
+    help
+      Device tree for BFK3 boards (for versions 2.0, 3.x)
+      based on Baikal-T1 SOC.
+
+config DTB_BAIKAL_BFKX
+    bool "Baikal-T1 BFKX board"
+    help
+      Device tree for BFKX boards (for version BFK1.6+Baikal-T1)
+      based on Baikal-T1 SOC.
+
+endchoice
+
+config ARCH_NR_GPIOS
+    int "Baikal GPIOs plus architecture specific ones"
+    range 32 512
+    default 43 if MACH_BAIKAL_BFK
+    default 43 if MACH_BAIKAL_BFK3
+    default 35
+    help
+      Maximum number of GPIOs in the system.
+
+      If unsure, leave the default value.
+
+menuconfig BAIKAL_ERRATA
+    bool "Baikal-T SoC Errata Fix"
+    help
+      Select this option if you want to fix known Baikal Errata
+
+if BAIKAL_ERRATA
+
+config BAIKAL_ERRATA_XGMAC
+    bool "Fix for MMD Devices in package registers swap in XGMAC"
+    help
+      Fix for MMD Devices in package registers swap in XGMAC in
+      early Baikal-T SoCs
+
+      If unsure, say Y.
+
+config BAIKAL_ERRATA_GMAC
+    bool "Fix for GMAC block speed 10/100M"
+    help
+      Fix for GMAC block speed 10/100M in early Baikal-T SoCs
+
+      If unsure, say N.
+
+config BAIKAL_ERRATA_GBE_PHY_MICREL
+    bool "Fix for Micrel PHY RX delay on BFK board"
+    help
+      Fix for Micrel PHY RX delay on early revisions of BFK board (v1.0)
+
+      If unsure, say N.
+
+endif # BAIKAL_ERRATA
+endif # MIPS_BAIKAL
+
+config SYS_HAS_CPU_MIPS32_R5
+    bool
+    default n
+
+config WDT_MIPS_GIC
+    bool
+    default n
+
+config BE_BC
+    tristate "boot controller"
+
+config BAIKAL_EFUSE
+    tristate "Baikal-T1 eFUSE driver"
+
+# source "drivers/dma/baikal/Kconfig"
diff --git a/arch/mips/baikal/Makefile b/arch/mips/baikal/Makefile
new file mode 100644 (file)
index 0000000..69bc287
--- /dev/null
@@ -0,0 +1,34 @@
+#
+# Baikal-T SOC platform support code.
+#
+# Copyright (C) 2014-2016 Baikal Electronics JSC
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+cflags-y += -I$(srctree)/arch/mips/include/asm/mach-baikal
+
+obj-y                          := baikal-init.o baikal-int.o baikal-memory.o \
+                                  baikal-setup.o baikal-time.o \
+                                  baikal-apb.o baikal-axi.o
+
+# baikal-emc.o
+
+obj-$(CONFIG_EARLY_PRINTK)     += baikal-console.o
+obj-$(CONFIG_OF)               += baikal-of.o
+#obj-$(CONFIG_WDT_MIPS_GIC)    += baikal-wdt.o
+obj-$(CONFIG_BE_BC)            += baikal-bc.o
+#obj-$(CONFIG_KEXEC)           += baikal-kexec.o
+obj-$(CONFIG_BAIKAL_EFUSE)     += baikal-efuse.o
diff --git a/arch/mips/baikal/Platform b/arch/mips/baikal/Platform
new file mode 100644 (file)
index 0000000..5545f6a
--- /dev/null
@@ -0,0 +1,32 @@
+# 
+# Baikal-T SOC platform support code.
+#
+# Copyright (C) 2014-2018 Baikal Electronics JSC
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+platform-$(CONFIG_MIPS_BAIKAL) += baikal/
+cflags-$(CONFIG_MIPS_BAIKAL)   += -I$(srctree)/arch/mips/include/asm/mach-baikal
+cflags-$(CONFIG_CPU_MIPS32_R5) += -mfp64 -Wa,-mmsa -mips32r5 -mtune=p5600
+ifdef CONFIG_KVM_GUEST
+    load-$(CONFIG_MIPS_BAIKAL) += 0x0000000040100000
+    zload-$(CONFIG_MIPS_BAIKAL)        += 0xffffffff46100000
+else
+    load-$(CONFIG_MIPS_BAIKAL) += 0xffffffff80100000
+    zload-$(CONFIG_MIPS_BAIKAL)        += 0xffffffff86100000
+endif
+all-$(CONFIG_MIPS_BAIKAL)      := $(COMPRESSION_FNAME).bin
+
diff --git a/arch/mips/baikal/baikal-apb.c b/arch/mips/baikal/baikal-apb.c
new file mode 100644 (file)
index 0000000..ccda06a
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Baikal-T SOC platform support code. APB Terminator driver.
+ *
+ * Copyright (C) 2014  Baikal Electronics OJSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+
+#define VERSION        "1.02"
+
+#define BE_APB_IRQ_CTRL                0x00
+#define BE_APB_FAULT_ADDR      0x04
+#define BE_APB_FAULT_TEST      0x10
+
+#define BE_APB_IRQ_MASK                (1 << 1)
+#define BE_APB_IRQ_PEND                (1 << 0)
+
+struct be_apb {
+       struct device *dev;
+       void __iomem *regs;
+       int     irq;
+       unsigned int count;
+       unsigned int addr;
+};
+
+#ifdef CONFIG_SYSFS
+static ssize_t show_count(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct be_apb *apb = dev_get_drvdata(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", apb->count);
+}
+static DEVICE_ATTR(errors, S_IWUSR | S_IRUGO, show_count, NULL);
+
+static ssize_t show_addr(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct be_apb *apb = dev_get_drvdata(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%08x\n", apb->addr);
+}
+static DEVICE_ATTR(addr, S_IWUSR | S_IRUGO, show_addr, NULL);
+
+static ssize_t show_test(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return scnprintf(buf, PAGE_SIZE, "Test APB exception\n");
+}
+static ssize_t store_test(struct device *dev, struct device_attribute *attr,
+                 const char *buf, size_t count)
+{
+       struct be_apb *apb = dev_get_drvdata(dev);
+       /* Dummy write */
+       writel(0, apb->regs + BE_APB_FAULT_TEST);
+       /* Never occurs */
+       return count;
+}
+static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, show_test, store_test);
+
+static void be_apb_sysfs_init(struct device *dev)
+{
+       int ret;
+       /* Errors count */
+       ret = sysfs_create_file(&dev->kobj, &dev_attr_errors.attr);
+       if (ret)
+               return;
+       /* Last error address */
+       ret = sysfs_create_file(&dev->kobj, &dev_attr_addr.attr);
+       if (ret)
+               goto __err2;
+       /* Test entry */
+       ret = sysfs_create_file(&dev->kobj, &dev_attr_test.attr);
+       if (ret)
+               goto __err1;
+       return;
+__err1:
+       sysfs_remove_file(&dev->kobj, &dev_attr_addr.attr);
+__err2:
+       sysfs_remove_file(&dev->kobj, &dev_attr_errors.attr);
+}
+
+static void be_apb_sysfs_remove(struct device *dev)
+{
+       sysfs_remove_file(&dev->kobj, &dev_attr_errors.attr);
+       sysfs_remove_file(&dev->kobj, &dev_attr_addr.attr);
+       sysfs_remove_file(&dev->kobj, &dev_attr_test.attr);
+}
+#else
+static void be_apb_sysfs_init(struct device *dev) {}
+static void be_apb_sysfs_remove(struct device *dev) {}
+#endif
+
+static irqreturn_t be_apb_irq(int irq, void *data)
+{
+       struct be_apb *apb = (struct be_apb *)data;
+       /* Get fault address */
+       apb->addr = readl(apb->regs + BE_APB_FAULT_ADDR);
+       /* Alert */
+       dev_crit_ratelimited(apb->dev,
+               "Peripheral Bus IOMEM access error handled at %08x\n", apb->addr);
+       /* Increase counter (in irq handler it is atomic) */
+       apb->count += 1;
+       /* Unmask and clear IRQ */
+       writel(BE_APB_IRQ_MASK, apb->regs + BE_APB_IRQ_CTRL);
+       /* Return success */
+       return IRQ_HANDLED;
+}
+
+static int be_apb_probe(struct platform_device *pdev)
+{
+       struct be_apb *apb;
+       struct resource *res;
+       int ret;
+
+       apb = devm_kzalloc(&pdev->dev, sizeof(*apb), GFP_KERNEL);
+       if (!apb)
+               return -ENOMEM;
+
+       apb->dev = &pdev->dev;
+       
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       apb->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apb->regs))
+               return PTR_ERR(apb->regs);
+
+       /* Try to get IRQ resource */
+       apb->irq = platform_get_irq(pdev, 0);
+       if (apb->irq < 0)
+               return -EIO;
+
+       /* Request IRQ */
+       ret = request_irq(apb->irq, be_apb_irq,
+                                       IRQF_SHARED, "be-apb", (void *)apb);
+       if (ret)
+               return ret;
+
+       /* Unmask and clear IRQ */
+       writel(BE_APB_IRQ_MASK, apb->regs + BE_APB_IRQ_CTRL);
+       dev_set_drvdata(&pdev->dev, apb);
+       /* Register sysfs entries */
+       be_apb_sysfs_init(&pdev->dev);
+
+       dev_info(&pdev->dev, "Baikal Peripheral Bus Error handler\n");
+       dev_info(&pdev->dev, "Version " VERSION "\n");
+
+       return 0;
+}
+
+static int be_apb_remove(struct platform_device *pdev)
+{
+       struct be_apb *apb = platform_get_drvdata(pdev);
+       /* Free IRQ resource */
+       free_irq(apb->irq, (void *)apb);
+       /* Free sysfs */
+       be_apb_sysfs_remove(apb->dev);
+       /* Return success */
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id be_apb_of_match[] = {
+       { .compatible = "be,apb-ehb", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, be_apb_of_match);
+#endif
+
+static struct platform_driver be_apb_driver = {
+       .probe          = be_apb_probe,
+       .remove         = be_apb_remove,
+       .driver         = {
+               .name   = "be-apb",
+               .owner  = THIS_MODULE,
+#ifdef CONFIG_OF
+               .of_match_table = of_match_ptr(be_apb_of_match),
+#endif /* CONFIG_OF */
+       },
+};
+module_platform_driver(be_apb_driver);
+MODULE_VERSION(VERSION);
+MODULE_AUTHOR("Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal Electronics APB Terminator Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:be_apb");
diff --git a/arch/mips/baikal/baikal-axi.c b/arch/mips/baikal/baikal-axi.c
new file mode 100644 (file)
index 0000000..162ad95
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+ * Baikal-T SOC platform support code. AXI Terminator driver.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>              /* dev_err */
+#include <linux/module.h>
+#include <linux/of_platform.h> /* open firmware functions */
+#include <linux/sysfs.h>               /* sysfs functions */
+
+#define VERSION        "1.02"
+
+#define BE_AXI_ADDRL_OFS               0x00
+#define BE_AXI_ADDRH_OFS               0x04
+
+#define BE_AXI_ADDRH_MASK              0xff
+#define BE_AXI_ADDRH_SHFT              24
+#define BE_AXI_TYPE_MASK               0x01
+#define BE_AXI_TYPE_SHFT               23
+
+#define BE_MSG_NOERROR                 "No interconnect errors detected"
+#define BE_MSG_SLAVE_ERROR             "Slave returns internal error"
+#define BE_MSG_DECODE_ERROR            "No slave at selected address"
+
+struct be_axi {
+       struct device *dev;
+       void __iomem *regs;
+       int     irq;
+       unsigned int count;
+       unsigned long long addr;
+       unsigned int type;
+};
+
+#ifdef CONFIG_SYSFS
+static ssize_t show_count(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct be_axi *axi = dev_get_drvdata(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", axi->count);
+}
+static DEVICE_ATTR(count, S_IWUSR | S_IRUGO, show_count, NULL);
+
+static ssize_t show_addr(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct be_axi *axi = dev_get_drvdata(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%08llx\n", axi->addr);
+}
+static DEVICE_ATTR(addr, S_IWUSR | S_IRUGO, show_addr, NULL);
+
+static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct be_axi *axi = dev_get_drvdata(dev);
+
+       if (!axi->count)
+               return scnprintf(buf, PAGE_SIZE, "%s\n", BE_MSG_NOERROR);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", axi->type ? BE_MSG_DECODE_ERROR : BE_MSG_SLAVE_ERROR);
+}
+static DEVICE_ATTR(type, S_IWUSR | S_IRUGO, show_type, NULL);
+
+static ssize_t show_test(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return scnprintf(buf, PAGE_SIZE, "Test interconnect error "
+                               "(0 - Slave internal error, 1 - No slave error)\n");
+}
+static ssize_t store_test(struct device *dev, struct device_attribute *attr,
+                 const char *buf, size_t count)
+{
+       struct be_axi *axi = dev_get_drvdata(dev);
+
+       /* Dummy byte read */
+       if (*buf == '0')
+               readb(axi->regs + BE_AXI_ADDRL_OFS);
+       if (*buf == '1')
+               readb(axi->regs + 1);
+       return count;
+}
+static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, show_test, store_test);
+
+static void be_axi_sysfs_init(struct device *dev)
+{
+       int ret;
+       /* Errors count */
+       ret = sysfs_create_file(&dev->kobj, &dev_attr_count.attr);
+       if (ret)
+               return;
+       /* Last error address */
+       ret = sysfs_create_file(&dev->kobj, &dev_attr_addr.attr);
+       if (ret)
+               goto __err3;
+       /* Last error type */
+       ret = sysfs_create_file(&dev->kobj, &dev_attr_type.attr);
+       if (ret)
+               goto __err2;
+       /* Test entry */
+       ret = sysfs_create_file(&dev->kobj, &dev_attr_test.attr);
+       if (ret)
+               goto __err1;
+       return;
+__err1:
+       sysfs_remove_file(&dev->kobj, &dev_attr_type.attr);
+__err2:
+       sysfs_remove_file(&dev->kobj, &dev_attr_addr.attr);
+__err3:
+       sysfs_remove_file(&dev->kobj, &dev_attr_count.attr);
+}
+
+static void be_axi_sysfs_remove(struct device *dev)
+{
+       sysfs_remove_file(&dev->kobj, &dev_attr_count.attr);
+       sysfs_remove_file(&dev->kobj, &dev_attr_addr.attr);
+       sysfs_remove_file(&dev->kobj, &dev_attr_type.attr);
+       sysfs_remove_file(&dev->kobj, &dev_attr_test.attr);
+}
+#else
+static void be_axi_sysfs_init(struct device *dev) {}
+static void be_axi_sysfs_remove(struct device *dev) {}
+#endif
+
+static irqreturn_t be_axi_irq(int irq, void *data)
+{
+       struct be_axi *axi = (struct be_axi *)data;
+       unsigned long long addr;
+       /* Get low part of fault address */
+       axi->addr = readl(axi->regs + BE_AXI_ADDRL_OFS);
+       /* Get high part of fault address */
+       addr = readl(axi->regs + BE_AXI_ADDRH_OFS);
+       /* Add high bits to fault address */
+       axi->addr |= ((addr >> BE_AXI_ADDRH_SHFT) & BE_AXI_ADDRH_MASK) << 32;
+       /* Get fault type */
+       axi->type = (addr >> BE_AXI_TYPE_SHFT) & BE_AXI_TYPE_MASK;
+       /* Alert */
+       dev_crit_ratelimited(axi->dev, "Interconnect: %s (handled at %08llx)\n",
+               axi->type ? BE_MSG_DECODE_ERROR : BE_MSG_SLAVE_ERROR, axi->addr);
+       /* Increase counter (in irq handler it is atomic) */
+       axi->count += 1;
+       /* Return success */
+       return IRQ_HANDLED;
+}
+
+static int be_axi_probe(struct platform_device *pdev)
+{
+       struct be_axi *axi;
+       struct resource *res;
+       int ret;
+
+       axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
+       if (!axi)
+               return -ENOMEM;
+
+       axi->dev = &pdev->dev;
+       
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       axi->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(axi->regs))
+               return PTR_ERR(axi->regs);
+
+       /* Try to get IRQ resource */
+       axi->irq = platform_get_irq(pdev, 0);
+       if (axi->irq < 0)
+               return -EIO;
+
+       /* Request IRQ */
+       ret = request_irq(axi->irq, be_axi_irq,
+                       IRQF_SHARED, "be-axi", (void *)axi);
+       if (ret)
+               return ret;
+
+       dev_set_drvdata(&pdev->dev, axi);
+       /* Register sysfs entries */
+       be_axi_sysfs_init(&pdev->dev);
+
+       dev_info(&pdev->dev, "Baikal Interconnect Error handler\n");
+       dev_info(&pdev->dev, "Version " VERSION "\n");
+
+       return 0;
+}
+
+static int be_axi_remove(struct platform_device *pdev)
+{
+       struct be_axi *axi = platform_get_drvdata(pdev);
+       /* Free IRQ resource */
+       free_irq(axi->irq, axi);
+       /* Free sysfs */
+       be_axi_sysfs_remove(axi->dev);
+       /* Return success */
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id be_axi_of_match[] = {
+       { .compatible = "be,axi-ehb", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, be_axi_of_match);
+#endif
+
+static struct platform_driver be_axi_driver = {
+       .probe          = be_axi_probe,
+       .remove         = be_axi_remove,
+       .driver         = {
+               .name   = "be-axi",
+               .owner  = THIS_MODULE,
+#ifdef CONFIG_OF
+               .of_match_table = of_match_ptr(be_axi_of_match),
+#endif /* CONFIG_OF */
+       },
+};
+module_platform_driver(be_axi_driver);
+MODULE_VERSION(VERSION);
+MODULE_AUTHOR("Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal Electronics Interconnect error handler driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:be_axi");
diff --git a/arch/mips/baikal/baikal-bc.c b/arch/mips/baikal/baikal-bc.c
new file mode 100644 (file)
index 0000000..cb74d8c
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Baikal-T SOC platform support code. Boot Controller driver.
+ *
+ * Copyright (C) 2017 T-platforms JSC
+ *
+ * Author:
+ *   Sergey Semin <Sergey.Semin@t-platforms.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <linux/property.h>
+
+
+#define BE_BC_CSR              0x00
+#define BE_BC_MAR              0x04
+#define BE_BC_DRID             0x08
+#define BE_BC_VID              0x0C
+#define BE_BC_CSR_BMODE                (0x3 << 0)
+#define BE_BC_CSR_SPI_RDA      (0x1 << 8)
+#define BE_BC_CSR_SPI_MDELAY   1
+#define BE_BC_OFF              0   /* transparent mode of spi memory */
+#define BE_BC_ON               1   /* not transparent */
+#define BE_BC_RESET            1
+
+struct be_bc {
+       void __iomem *regs;
+};
+
+/* control and status register */
+typedef struct {
+       uint32_t mode           :1-0  +1;  /* boot method */
+       uint32_t __             :7-2  +1;
+       uint32_t spi_rda        :8-8  +1;  /* operation mode: 0- transparent, 1- not transparent */
+       uint32_t _              :31-9 +1;
+} boot_csr_t;
+
+/* memory access control register */
+typedef struct {
+       uint32_t bsab           :0-0  +1;  /* reset when writing to the register */
+       uint32_t _              :31-1 +1;
+} boot_mar_t;
+
+static int be_bc_enable_spi(struct be_bc *c)
+{
+       boot_csr_t *csr;
+       if(!c)
+               return -1;
+       csr = (void*) ((uint32_t)c->regs + BE_BC_CSR);
+       csr->spi_rda = BE_BC_ON;
+       mdelay(100);
+       return 0;
+}
+static int be_bc_disable_spi(struct be_bc *c)
+{
+       boot_csr_t *csr;
+       if(!c)
+               return -1;
+       csr = (void*) ((uint32_t)c->regs + BE_BC_CSR);
+       csr->spi_rda = BE_BC_OFF;
+       mdelay(100);
+       return 0;
+}
+
+static int be_bc_drv_probe(struct platform_device *pdev)
+{
+       struct be_bc *bc;
+       struct resource *res;
+       unsigned int mode;
+       u32 vid, drid;
+
+       bc = devm_kzalloc(&pdev->dev, sizeof(*bc), GFP_KERNEL);
+       if (!bc)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       bc->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(bc->regs))
+               return PTR_ERR(bc->regs);
+
+       platform_set_drvdata(pdev, bc);
+       be_bc_enable_spi(bc);
+
+       vid  = readl(bc->regs + BE_BC_VID);
+       drid = readl(bc->regs + BE_BC_DRID);
+       mode = readl(bc->regs + BE_BC_CSR) & BE_BC_CSR_BMODE;
+       dev_info(&pdev->dev, "VID: 0x%08x, DRID: 0x%08x, MODE: 0x%08x\n", vid, drid, mode);
+
+       return 0;
+}
+static int be_bc_drv_remove(struct platform_device *pdev)
+{
+       struct be_bc *bc;
+       bc = platform_get_drvdata(pdev);
+       be_bc_disable_spi(bc);
+       return 0;
+}
+
+static const struct of_device_id be_bc_of_match[] = {
+       { .compatible = "be,boot-controller", },
+       { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, be_bc_of_match);
+
+static struct platform_driver be_bc_driver = {
+       .probe  = be_bc_drv_probe,
+       .remove = be_bc_drv_remove,
+       .driver = {
+               .name = "be,boot-controller",
+               .of_match_table = be_bc_of_match,
+       },
+};
+module_platform_driver(be_bc_driver);
+
+MODULE_AUTHOR("Sergey Semin <Sergey.Semin@t-platforms.ru>");
+MODULE_DESCRIPTION("Baikal Electronics Boot Controller Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:be_bc");
+
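
For reference, the CSR access in be_bc_enable_spi()/be_bc_disable_spi() above is done through a C bitfield struct overlaid on the mapped registers. A sketch of the same operation using the mask macros already defined in this file together with readl()/writel() (illustration only, not part of the commit) looks like this:

    /* Sketch: set or clear CSR.SPI_RDA (bit 8) with explicit masks instead of
     * the bitfield overlay; not part of the commit above.
     */
    static void be_bc_set_spi_rda(struct be_bc *c, bool on)
    {
            u32 csr = readl(c->regs + BE_BC_CSR);

            if (on)
                    csr |= BE_BC_CSR_SPI_RDA;       /* non-transparent mode */
            else
                    csr &= ~BE_BC_CSR_SPI_RDA;      /* transparent (direct) mode */

            writel(csr, c->regs + BE_BC_CSR);
            mdelay(100);                            /* same settle delay as the driver */
    }
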
diff --git a/arch/mips/baikal/baikal-console.c b/arch/mips/baikal/baikal-console.c
new file mode 100644 (file)
index 0000000..3bb2627
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/serial_8250.h>
+
+#include <asm/io.h>
+#include <asm/mach-baikal/hardware.h>
+
+#define DW_UART_THR            0x00
+#define DW_UART_DLL            0x00
+#define DW_UART_FCR            0x08
+#define DW_UART_LCR            0x0C
+#define DW_UART_LSR            0x14
+
+#define DW_UART_LSR_TEMT       (1 << 6)
+#define DW_UART_LSR_THRE       (1 << 5)
+
+#define EARLY_CONSOLE_BASE BAIKAL_UART0_START
+
+static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_CONSOLE_BASE);
+
+static inline void uart_write32(u32 val, unsigned reg)
+{
+       writel(val, uart_membase + reg);
+}
+
+static inline u32 uart_read32(unsigned reg)
+{
+       return readl(uart_membase + reg);
+}
+
+void prom_putchar(unsigned char ch)
+{
+       while ((uart_read32(DW_UART_LSR) & DW_UART_LSR_TEMT) == 0)
+               ;
+       uart_write32(ch, DW_UART_THR);
+}
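
prom_putchar() above busy-waits for the UART transmitter-empty bit (LSR.TEMT) and then writes the byte to THR. A hypothetical helper built on top of it (not part of the commit) could emit whole strings during early boot, adding the carriage return serial consoles usually expect:

    /* Hypothetical early-boot helper built on prom_putchar(); sketch only. */
    static void prom_putstr(const char *s)
    {
            while (*s) {
                    if (*s == '\n')
                            prom_putchar('\r');     /* CR before LF for serial consoles */
                    prom_putchar(*s++);
            }
    }
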
diff --git a/arch/mips/baikal/baikal-efuse.c b/arch/mips/baikal/baikal-efuse.c
new file mode 100644 (file)
index 0000000..1a0c368
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+ * Baikal-T1 SOC platform support code. EFUSE driver.
+ *
+ * Copyright (C) 2014-2017 Baikal Electronics JSC
+ * 
+ * Author:
+ *   Georgiy Vlasov <Georgy.Vlasov@baikalelectronics.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <asm/io.h>
+#include "efuse.h"
+
+#define BE_EFUSE_VERSION       "1.00"
+
+/*#define DEBUG_EFUSE FALSE*/
+#define STATIC_PART_OF_MAC 0x4ca515 /* !TODO: this field can change in the next revisions! */
+
+/* Current EFuse format */
+typedef struct
+{
+       /* Field Name */   
+       u32 Locks       ; 
+        u8 Version     ; 
+        u8 Fab     : 4 ; 
+        u8 Process : 4 ; 
+        u8 LotId       ; 
+        u8 Revision    ; 
+       u32 SerialNum   ; 
+       u32 CornerId: 4 ; 
+       u32 CPUFreq : 4 ; 
+       u32 Pad:24      ; 
+       u32 Reserved[28]; 
+}EFUSE_Structure;
+
+typedef struct
+{
+       struct  device *dev;
+       EFUSE_Structure *EFUSE_Format;
+       void    __iomem *efuse;
+       dev_t   first;                         /* Variable for the first device number        */
+       struct  cdev c_dev;                    /* Variable for the character device structure */
+       struct  class *cl;                     /* Variable for the device class               */
+       int     is_device_open;
+}be_efuse;
+
+be_efuse *be_apb_efuse;
+
+/* Baikal-EFUSE access functions (API) */
+
+u32 be_efuse_getLocks(void)
+{
+       return be_apb_efuse->EFUSE_Format->Locks;
+}
+
+u8 be_efuse_getVersion(void)
+{
+       return be_apb_efuse->EFUSE_Format->Version;
+}
+
+u8 be_efuse_getFab(void)
+{
+       return be_apb_efuse->EFUSE_Format->Fab;
+}
+
+u8 be_efuse_getProcess(void)
+{
+       return be_apb_efuse->EFUSE_Format->Process;
+}
+
+u8 be_efuse_getLotID(void)
+{
+       return be_apb_efuse->EFUSE_Format->LotId;
+}
+
+u8 be_efuse_getRevision(void)
+{
+       return be_apb_efuse->EFUSE_Format->Revision;
+}
+
+u32 be_efuse_getSerialNum(void)
+{
+       return be_apb_efuse->EFUSE_Format->SerialNum;
+}
+
+u32 be_efuse_getCornerID(void)
+{
+       return be_apb_efuse->EFUSE_Format->CornerId;
+}
+
+u32 be_efuse_getCPUFreq(void)
+{
+       return be_apb_efuse->EFUSE_Format->CPUFreq;
+}
+
+u32 be_efuse_getPad(void)
+{
+       return be_apb_efuse->EFUSE_Format->Pad;
+}
+
+u64 be_efuse_getMAC(u8 id)
+{ 
+       u64 ret;
+       u32 devId = be_apb_efuse->EFUSE_Format->SerialNum << 2;
+       devId |= (id & 0x3);
+       ret = ((u64)(be_apb_efuse->EFUSE_Format->Revision) & 0xFFFFFF) << 24;
+       ret = STATIC_PART_OF_MAC;
+       ret = ret << 24;
+       return  (ret | (devId & 0xFFFFFF)); 
+}
+
+/* EFUSE debug output */
+static void get_efuse(u32 *Raw)
+{
+       be_apb_efuse->EFUSE_Format->Locks = *Raw;
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "Locks field = %x\n",be_apb_efuse->EFUSE_Format->Locks);
+#endif
+
+       be_apb_efuse->EFUSE_Format->Version = *(Raw + 1) >> 24;
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "Version field = %x\n",be_apb_efuse->EFUSE_Format->Version);
+#endif
+
+       be_apb_efuse->EFUSE_Format->Fab= ((*(Raw + 1) >> 16) & 0xF0) >> 4;
+#ifdef DEBUG_EFUSE 
+       printk(KERN_INFO "Fab field = %x\n",be_apb_efuse->EFUSE_Format->Fab);
+#endif
+
+       be_apb_efuse->EFUSE_Format->Process = (*(Raw + 1) >> 16) & 0x0F;
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "Process field = %x\n",be_apb_efuse->EFUSE_Format->Fab);
+#endif
+
+       be_apb_efuse->EFUSE_Format->LotId = *(Raw + 1) >> 8;
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "Lotid field = %x\n",be_apb_efuse->EFUSE_Format->LotId);
+#endif
+
+       be_apb_efuse->EFUSE_Format->Revision = *(Raw + 1);
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "Revision field = %x\n",be_apb_efuse->EFUSE_Format->Revision);
+#endif
+
+       be_apb_efuse->EFUSE_Format->SerialNum = *(Raw + 2);
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "Serial field = %x\n",be_apb_efuse->EFUSE_Format->SerialNum);
+#endif
+
+       be_apb_efuse->EFUSE_Format->CornerId = (*(Raw + 3) >> 28);
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "Corner field = %x\n",be_apb_efuse->EFUSE_Format->CornerId);
+#endif
+
+       be_apb_efuse->EFUSE_Format->CPUFreq = (*(Raw + 3) >> 24) & 0x0F;
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "CpuFrequency field = %x\n",be_apb_efuse->EFUSE_Format->CPUFreq);
+#endif
+
+       be_apb_efuse->EFUSE_Format->Pad = (*(Raw + 3)) & 0x00FFFFFF;
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "Pad field = %x\n",be_apb_efuse->EFUSE_Format->Pad);
+#endif
+}
+
+/* Read a dump from efuse memory into the structure 'Raw' */
+static int read_EFUSE(void)
+{
+       u32 *Raw = NULL;
+       int i = 0;
+       u32 reg;
+       u32 addr;
+
+       Raw = (u32 *) kmalloc(sizeof(u32) * 32, GFP_KERNEL);
+       if (Raw == NULL) {
+               printk (KERN_ERR "be-efuse: EFUSE buffer allocation failure!");
+               return -ENOMEM;
+       }
+       addr = 0xFFFFFFE0;
+
+       /* Second condition for protection against looping */
+       while ((addr <= 0xFFFFFFFF) && (addr > 0xFFFFFF00)) {
+               /* 1)writing addr of string which we will read */
+               reg = 0xFFFFFFFF;
+               reg &= addr; //reading from zero addr
+               iowrite32(reg, (u32 *)(be_apb_efuse->efuse) + EFUSE_ADDR/4); /* push data to the register */
+               /* 2)set read mode*/
+               reg = ioread32((u32 *)(be_apb_efuse->efuse) + EFUSE_MODES/4); /* pull register */
+               reg |= (1<<0); /* set 0 bit in 1 */
+               reg &= ~(1<<1); /* set 1 bit in 0 , it's a read mode */
+               iowrite32(reg, (u32 *)(be_apb_efuse->efuse) + EFUSE_MODES/4); /* push data into the register */
+               /* 3)set enable reg */
+               reg = ioread32((u32 *)(be_apb_efuse->efuse) + EFUSE_ENABLE/4); 
+               reg |= (1<<0); 
+               iowrite32(reg, (u32 *)(be_apb_efuse->efuse) + EFUSE_ENABLE/4);
+               /* 4) delay to wait for the pready signal */
+               udelay(2);
+               /* 5)set power down mode */
+               reg = ioread32((u32 *)(be_apb_efuse->efuse) + EFUSE_ENABLE/4);
+               reg &= ~(1<<0);
+               iowrite32(reg, (u32 *)(be_apb_efuse->efuse) + EFUSE_ENABLE/4);
+               /* 6)pull from reg efuse_rdata */
+               Raw[i] = ioread32((u32 *)(be_apb_efuse->efuse) + EFUSE_RDATA/4);
+               i++;
+               addr++;
+       }
+
+       /* 7)close work session with efuse */
+       reg = ioread32((u32 *)(be_apb_efuse->efuse) + EFUSE_MODES/4);
+       reg &= ~(1<<0); /* set power down mode */
+       iowrite32(reg, (u32 *)(be_apb_efuse->efuse) + EFUSE_MODES/4);
+       /* set enable */
+       reg = ioread32((u32 *)(be_apb_efuse->efuse) + EFUSE_ENABLE/4);
+       reg |= (1<<0);
+       iowrite32(reg, (u32 *)(be_apb_efuse->efuse) + EFUSE_ENABLE/4);
+       udelay(2);
+       /* set power down mode */
+       reg = ioread32((u32 *)(be_apb_efuse->efuse) + EFUSE_ENABLE/4);
+       reg &= ~(1<<0);
+       iowrite32(reg, (u32 *)(be_apb_efuse->efuse) + EFUSE_ENABLE/4);
+
+       /* parse dump from efuse */
+       get_efuse(Raw);
+       kfree(Raw);
+
+       return 0;
+}
+
+static int baikal_efuse_open(struct inode *i, struct file *f)
+{
+       if (be_apb_efuse->is_device_open)
+               return -EBUSY;
+
+       be_apb_efuse->is_device_open++;
+#ifdef DEBUG_EFUSE
+       printk(KERN_DEBUG "baikal_efuse driver has been opened");
+#endif
+       return 0;
+}
+
+static int baikal_efuse_close(struct inode *i, struct file *f)
+{
+       be_apb_efuse->is_device_open--;
+#ifdef DEBUG_EFUSE
+       printk(KERN_DEBUG "baikal_efuse driver has been closed\n");
+#endif
+       return 0;
+}
+
+static ssize_t baikal_efuse_read(struct file *f, char __user *buf, size_t
+  len, loff_t *off)
+{
+#ifdef DEBUG_EFUSE
+       u64 res;
+       printk(KERN_DEBUG "baikal_efuse_driver read function has been used\n");
+       res = be_efuse_getMAC(Gb_ETHERNET_0);
+       printk(KERN_DEBUG "MAC0 for 1Gb ETH # 0=%llx", res);
+       res = be_efuse_getMAC(Gb_ETHERNET_1);
+       printk(KERN_DEBUG "MAC1 for 1Gb ETH # 1=%llx \n",res);
+       res = be_efuse_getMAC(xGb_ETHERNET);
+       printk(KERN_DEBUG "MAC2 for 10 Gb ETH=%llx \n",res);
+#endif
+       return 0;
+}
+
+static struct file_operations efuse_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = baikal_efuse_open,
+       .release = baikal_efuse_close,
+       .read = baikal_efuse_read
+};
+
+static int be_efuse_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+
+       be_apb_efuse = devm_kzalloc(&pdev->dev, sizeof(*be_apb_efuse), GFP_KERNEL);
+       if (!be_apb_efuse)
+               return -ENOMEM;
+
+       be_apb_efuse->is_device_open = 0; /* init dev_open */
+       be_apb_efuse->dev = &pdev->dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       be_apb_efuse->efuse = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(be_apb_efuse->efuse))
+               return PTR_ERR(be_apb_efuse->efuse);
+
+       dev_info(&pdev->dev, "Baikal Efuse Driver\n");
+       dev_info(&pdev->dev, "Version " BE_EFUSE_VERSION "\n");
+
+       /* register number of efuse device */ 
+       if (alloc_chrdev_region(&(be_apb_efuse->first), 0, 1, "baikal_efuse_driver") < 0) {
+               return -1;
+       }
+
+       /* create device class */
+       if ((be_apb_efuse->cl = class_create(THIS_MODULE, "efuse")) == NULL) {
+               unregister_chrdev_region(be_apb_efuse->first, 1);
+               return -1;
+       }
+
+       /* create device with name efuse_driver */
+       if (device_create(be_apb_efuse->cl, NULL, be_apb_efuse->first, NULL, "efuse_driver") == NULL) {
+               class_destroy(be_apb_efuse->cl);
+               unregister_chrdev_region(be_apb_efuse->first, 1);
+               return -1;
+       }
+
+       cdev_init(&(be_apb_efuse->c_dev), &efuse_driver_fops);
+       if (cdev_add(&(be_apb_efuse->c_dev), be_apb_efuse->first, 1) == -1) {
+               device_destroy(be_apb_efuse->cl, be_apb_efuse->first);
+               class_destroy(be_apb_efuse->cl);
+               unregister_chrdev_region(be_apb_efuse->first, 1);
+               return -1;
+       }
+
+#ifdef DEBUG_EFUSE
+       printk(KERN_DEBUG "<Major, Minor>: <%d, %d>\n", MAJOR(be_apb_efuse->first), MINOR(be_apb_efuse->first));
+#endif
+
+       be_apb_efuse->EFUSE_Format = (EFUSE_Structure *) kmalloc(sizeof(EFUSE_Structure),GFP_KERNEL);
+       if (be_apb_efuse->EFUSE_Format == NULL) {
+               printk(KERN_ERR "be-efuse: EFUSE structure allocation failure!");
+               return -ENOMEM;
+       }
+
+       /* read all efuse memory into the dump structure and parse it */
+       if (read_EFUSE() != 0) {
+                       printk(KERN_ERR "be-efuse: EFUSE read procedure failure!");
+                       return -1;
+       }
+#ifdef DEBUG_EFUSE
+       printk(KERN_INFO "baikal_efuse driver has been loaded\n");
+#endif
+       return 0;
+}
+
+static int be_efuse_remove(struct platform_device *pdev)
+{
+       cdev_del(&(be_apb_efuse->c_dev));
+       device_destroy(be_apb_efuse->cl, be_apb_efuse->first);
+       class_destroy(be_apb_efuse->cl);
+       unregister_chrdev_region(be_apb_efuse->first, 1);
+       iounmap(be_apb_efuse->efuse);
+       kfree(be_apb_efuse->EFUSE_Format);
+
+       printk(KERN_INFO "baikal_efuse driver has been unloaded\n");
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id be_apb_of_match[] = {
+       { .compatible = "baikal,efuse", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, be_apb_of_match);
+#endif
+
+static struct platform_driver be_efuse_driver = {
+       .probe          = be_efuse_probe,
+       .remove         = be_efuse_remove,
+       .driver         = {
+               .name   = "baikal_efuse",
+               .owner  = THIS_MODULE,
+#ifdef CONFIG_OF
+               .of_match_table = of_match_ptr(be_apb_of_match),
+#endif /* CONFIG_OF */
+       },
+};
+
+module_platform_driver(be_efuse_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Georgiy Vlasov <Georgy.Vlasov@baikalelectronics.ru>");
+MODULE_DESCRIPTION("baikal_efuse_driver");
diff --git a/arch/mips/baikal/baikal-init.c b/arch/mips/baikal/baikal-init.c
new file mode 100644 (file)
index 0000000..ad56cad
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/pm.h>          /* pm_power_off */
+
+#include <asm/fw/fw.h>
+#include <asm/setup.h>
+#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
+#include <asm/sections.h>
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#include <asm/smp-ops.h>
+#endif
+#include <asm/idle.h>          /* cpu_wait */
+#include <asm/reboot.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+#include <asm/msa.h>
+#include <asm/cdmm.h>
+#include <asm/idle.h>
+
+#include <asm/mach-baikal/hardware.h>
+#include <asm/mips-boards/baikal.h> /* Base GIC and GCR addresses */
+
+#include "common.h"
+#include <linux/swiotlb.h>
+#include <linux/memblock.h>
+
+#ifdef CONFIG_KEXEC
+#include <asm/kexec.h>
+extern int baikal_kexec_prepare(struct kimage *);
+extern void baikal_kexec_shutdown(void);
+#endif
+
+#ifndef CONFIG_MIPS_CPC
+void __iomem *mips_cpc_base;
+#endif
+
+extern void baikal_be_init(void);
+extern int baikal_be_handler(struct pt_regs *regs, int is_fixup);
+
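+/*
+ * Copy the NMI exception vector code to its dedicated slot at
+ * CAC_BASE + 0xa80 and flush the icache so the handler is visible to
+ * instruction fetches.
+ */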
+static void __init mips_nmi_setup(void)
+{
+       void *base;
+       extern char except_vec_nmi;
+
+       base = (void *)(CAC_BASE + 0xa80);
+       memcpy(base, &except_vec_nmi, 0x80);
+       flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
+static void __init mips_ejtag_setup(void)
+{
+       void *base;
+       extern char except_vec_ejtag_debug;
+
+       base = (void *)(CAC_BASE + 0x480);
+       memcpy(base, &except_vec_ejtag_debug, 0x80);
+       flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+       return CPC_BASE_ADDR;
+}
+
+phys_addr_t __weak mips_cdmm_phys_base(void)
+{
+       unsigned long reg = read_c0_config3();
+
+       if (!(reg & MIPS_CONF3_CDMM))
+               return 0;
+       reg = read_c0_cdmm() | MIPS_CDMMBASE_EN;
+       write_c0_cdmmbase(reg);
+
+       return (reg >> MIPS_CDMMBASE_ADDR_SHIFT) << MIPS_CDMMBASE_ADDR_START;
+}
+
+/*
+ * Initial kernel command line, usually setup by prom_init()
+ * extern char arcs_cmdline[COMMAND_LINE_SIZE];
+ *
+ * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware
+ * extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+ */
+void __init prom_init(void)
+{
+       unsigned long reg;
+#ifdef CONFIG_EARLY_PRINTK_8250
+       setup_8250_early_printk_port(KSEG1ADDR(BAIKAL_UART0_START), 2, 1000000);
+#endif
+
+       /* Setup exception handlers */
+       board_nmi_handler_setup = mips_nmi_setup;
+       board_ejtag_handler_setup = mips_ejtag_setup;
+
+       /* handlers */
+       board_be_init = baikal_be_init;
+       board_be_handler = baikal_be_handler;
+
+       /* Early detection of CMP support */
+       mips_cm_probe();
+       mips_cpc_probe();
+       /* Setup L2 prefetch */
+       reg = read_gcr_l2_pft_control();
+       /* Set page mask depending on actual page size */
+       reg &= ~(CM_GCR_L2_PFT_CONTROL_PAGEMASK);
+#if defined(CONFIG_PAGE_SIZE_4KB)
+       /* 4K pages */
+       reg |= 0xFFFFF000;
+#elif defined(CONFIG_PAGE_SIZE_8KB)
+       /* 8K pages */
+       reg |= 0xFFFFE000;
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+       /* 16K pages */
+       reg |= 0xFFFFC000;
+#else
+       /* Other cases */
+       reg |= 0xFFFFF000;
+#endif
+       pr_info("Enable data prefetch\n");
+       write_gcr_l2_pft_control(reg | CM_GCR_L2_PFT_CONTROL_PFTEN);
+       wmb();
+
+       pr_info("Enable instruction prefetch\n");
+       reg = read_gcr_l2_pft_control_b();
+       write_gcr_l2_pft_control_b(reg | CM_GCR_L2_PFT_CONTROL_PFTEN);
+       wmb();
+
+#ifdef CONFIG_KEXEC
+       _machine_kexec_shutdown = baikal_kexec_shutdown;
+       _machine_kexec_prepare = baikal_kexec_prepare;
+#endif
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_CPS
+       if (!register_cps_smp_ops())
+               return;
+       pr_warn("%s: register_cps_smp_ops failed\n", __func__);
+#endif /* CONFIG_MIPS_CPS */
+#endif /* CONFIG_SMP */
+}
+
+const char *get_system_type(void)
+{
+       return "Baikal-T Generic SoC";
+}
+
+#ifdef CONFIG_SWIOTLB
+
+#define IO_TLB_DEFAULT_SIZE    (64UL << 20)
+void __init plat_swiotlb_setup(void)
+{
+       unsigned long swiotlb_nslabs;
+       size_t swiotlb_size;
+       phys_addr_t top;
+       void *swiotlb;
+       int ret;
+
+       /*
+        * Skip SWIOTLB initialization when there isn't enough memory to
+        * cause invalid peripheral accesses.
+        */
+       top = memblock_end_of_DRAM();
+       if (top <= SIZE_MAX)
+               return;
+
+       swiotlb_size = swiotlb_size_or_default();
+       if (swiotlb_size >= IO_TLB_DEFAULT_SIZE)
+               swiotlb_size = CONFIG_BT1_SWIOTLB_SIZE << 20;
+
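+       /*
+        * Convert the bounce buffer size into IO TLB slabs, align the slab
+        * count to the SWIOTLB segment size and convert back, so the
+        * low-memory allocation below matches what the SWIOTLB core expects.
+        */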
+       swiotlb_nslabs = swiotlb_size >> IO_TLB_SHIFT;
+       swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
+       swiotlb_size = swiotlb_nslabs << IO_TLB_SHIFT;
+
+       swiotlb = memblock_alloc_low(swiotlb_size, PAGE_SIZE);
+       if (!swiotlb) {
+               panic("Failed to allocate %zu bytes (align=%lx) for SWIOTLB",
+                     swiotlb_size, PAGE_SIZE);
+       }
+
+       ret = swiotlb_init_with_tbl(swiotlb, swiotlb_nslabs, 1);
+       if (ret)
+               panic("Failed to init the SWIOTLB table");
+}
+
+#endif /* CONFIG_SWIOTLB */
diff --git a/arch/mips/baikal/baikal-int.c b/arch/mips/baikal/baikal-int.c
new file mode 100644 (file)
index 0000000..30e4009
--- /dev/null
@@ -0,0 +1,41 @@
+/* 
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/of.h>
+#include <linux/irqchip.h>
+
+/* Performance counter support */
+int get_c0_perfcount_int(void)
+{
+       return gic_get_c0_perfcount_int();
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+void __init arch_init_irq(void)
+{
+       /* Init complete with devicetree */
+       if (of_have_populated_dt())
+               irqchip_init();
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+       /* Nothing to do here */
+}
diff --git a/arch/mips/baikal/baikal-memory.c b/arch/mips/baikal/baikal-memory.c
new file mode 100644 (file)
index 0000000..09e3392
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/of_fdt.h>
+
+#include <asm/bootinfo.h>
+#include <asm/prom.h>
+
+#include <asm/mach-baikal/hardware.h>
+#include "common.h"
+
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <asm/current.h>
+
+#include <asm/maar.h>
+#include <linux/memblock.h>
+
+#define BAIKAL_MMIO_MEM_START          0x08000000
+#define BAIKAL_MMIO_MEM_END            0x1FFFFFFF
+
+void __init prom_free_prom_memory(void)
+{
+       /* Nothing to do here */
+}
+/*
+ * Platform memory detection hook called by setup_arch
+ * extern void plat_mem_setup(void);
+ */
+void __init plat_mem_setup(void)
+{
+       /* Setup dummy port segment */
+       set_io_port_base(CKSEG1);
+       if (IS_ENABLED(CONFIG_EVA))
+               /* EVA should be configured in mach-baikal/kernel-entry-init.h */
+               pr_info("Enhanced Virtual Addressing (EVA) activated\n");
+
+#ifdef CONFIG_OF
+       /* Try to parse device tree */
+       if (!device_tree_early_init())
+               return;
+#endif
+       /* Low memory region */
+       add_memory_region(BAIKAL_DRAM_START, BAIKAL_DRAM_SIZE, BOOT_MEM_RAM);
+#ifdef CONFIG_HIGHMEM
+       /* High memory region */
+       add_memory_region(BAIKAL_HIGHMEM_START, BAIKAL_HIGHMEM_SIZE, BOOT_MEM_RAM);
+#endif
+}
+
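+/*
+ * Program the Memory Accessibility Attribute Registers so that speculative
+ * accesses are permitted only within the boot memory regions. Region bounds
+ * are rounded inward to the 64K granularity the MAARs operate on.
+ */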
+unsigned platform_maar_init(unsigned num_pairs)
+{
+       struct maar_config cfg[3];
+       unsigned num_configured, num_cfg = 0;
+       struct memblock_region *region;
+
+       for_each_memblock(memory, region) {
+               /* Don't overflow the local MAAR config array */
+               if (num_cfg >= ARRAY_SIZE(cfg))
+                       break;
+
+               /* Round lower up */
+               cfg[num_cfg].lower = PFN_PHYS(memblock_region_memory_base_pfn(region));
+               cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
+
+               /* Round upper down */
+               cfg[num_cfg].upper = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
+               cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
+
+               cfg[num_cfg].attrs = MIPS_MAAR_S;
+               num_cfg++;
+       }
+
+       num_configured = maar_config(cfg, num_cfg, num_pairs);
+
+       if (num_configured < num_cfg)
+               pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+                       num_pairs, num_cfg);
+
+       return num_configured;
+}
+
+
+#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
+/* override of arch/mips/mm/cache.c: __uncached_access */
+int __uncached_access(struct file *file, unsigned long addr)
+{
+       if (file->f_flags & O_DSYNC)
+               return 1;
+
+       return addr >= __pa(high_memory) ||
+               ((addr >= BAIKAL_MMIO_MEM_START) &&
+                (addr < BAIKAL_MMIO_MEM_END));
+}
+
+static unsigned long uca_start, uca_end;
+
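+/*
+ * Grant the uncached-accelerated cache attribute only to mappings that fall
+ * entirely inside the detected VGA memory aperture; all other uncached
+ * requests get a plain noncached mapping.
+ */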
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                             unsigned long size, pgprot_t vma_prot)
+{
+       unsigned long offset = pfn << PAGE_SHIFT;
+       unsigned long end = offset + size;
+
+       if (__uncached_access(file, offset)) {
+               if (uca_start && (offset >= uca_start) &&
+                   (end <= uca_end))
+                       return __pgprot((pgprot_val(vma_prot) &
+                                        ~_CACHE_MASK) |
+                                       _CACHE_UNCACHED_ACCELERATED);
+               else
+                       return pgprot_noncached(vma_prot);
+       }
+       return vma_prot;
+}
+
+int baikal_find_vga_mem_init(void)
+{
+       struct pci_dev *dev = NULL;
+       struct resource *r;
+       int idx;
+
+       if (uca_start)
+               return 0;
+
+       for_each_pci_dev(dev) {
+               if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+                       for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
+                               r = &dev->resource[idx];
+                               if (!r->start && r->end)
+                                       continue;
+                               if (r->flags & IORESOURCE_IO)
+                                       continue;
+                               if (r->flags & IORESOURCE_MEM) {
+                                       uca_start = r->start;
+                                       uca_end = r->end;
+                                       return 0;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+}
+
+late_initcall(baikal_find_vga_mem_init);
+#endif /* CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */
diff --git a/arch/mips/baikal/baikal-of.c b/arch/mips/baikal/baikal-of.c
new file mode 100644 (file)
index 0000000..e3c6a3d
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/sys_soc.h>
+#include <linux/slab.h>
+
+#include <asm/fw/fw.h>
+#include <asm/prom.h>
+
+#include "common.h"
+
+static char mips_revision[16] = "Unknown";
+static char mips_soc_id[16]   = "Unknown";
+
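+/*
+ * Find a node by its compatible string, claim its first register resource
+ * and return an uncached mapping of it; any failure here is fatal.
+ */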
+__iomem void *plat_of_remap_node(const char *node)
+{
+       struct resource res;
+       struct device_node *np;
+
+       np = of_find_compatible_node(NULL, NULL, node);
+       if (!np)
+               panic("Failed to find %s node", node);
+
+       if (of_address_to_resource(np, 0, &res))
+               panic("Failed to get resource for %s", node);
+
+       if ((request_mem_region(res.start,
+                               resource_size(&res),
+                               res.name) < 0))
+               panic("Failed to request resources for %s", node);
+
+       return ioremap_nocache(res.start, resource_size(&res));
+}
+
+void __init device_tree_init(void)
+{
+       /* Set machine name */
+       mips_set_machine_name(of_flat_dt_get_machine_name());
+
+       /* Restore tree model and copy into kernel memory */
+       unflatten_and_copy_device_tree();
+}
+
+int __init device_tree_early_init(void)
+{
+       /* Assume that the device tree blob pointer is in fw_arg3 */
+       void *fdt = IS_ENABLED(CONFIG_BUILTIN_DTB) ?
+                   __dtb_start : phys_to_virt(fw_arg3);
+       /* UHI boot support */
+       if ((int)fw_arg0 == -2)
+               fdt = phys_to_virt(fw_arg1);
+
+       if ((unsigned long)fdt < PAGE_OFFSET) {
+               pr_err("Device tree blob address < PAGE_OFFSET\n");
+               goto no_dtb;
+       }
+
+       if (!early_init_dt_scan(fdt))
+               goto no_dtb;
+
+       /* Inform about initial device tree location */
+       pr_info("Machine device tree at: 0x%p\n", fdt);
+
+       /* Copy the device tree command line to the architecture command line */
+       strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+       return 0;
+
+no_dtb:
+       pr_warn("No valid device tree found, continuing without one\n");
+#ifndef CONFIG_CMDLINE_OVERRIDE
+       /* Init the command line from the bootloader */
+       fw_init_cmdline();
+#endif
+       return -1;
+}
+
+static int __init plat_of_setup(void)
+{
+       struct soc_device_attribute *soc_dev_attr;
+       struct soc_device *soc_dev;
+       struct device *parent = NULL;
+       unsigned int cpuid = current_cpu_data.processor_id;
+
+       if (unlikely(!of_have_populated_dt()))
+               return 0;
+
+       soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+       if (!soc_dev_attr)
+               goto populate;
+       /* SoC attributes */
+       soc_dev_attr->machine   = mips_get_machine_name();
+       soc_dev_attr->family    = get_system_type();
+       soc_dev_attr->revision  = mips_revision;
+       soc_dev_attr->soc_id    = mips_soc_id;
+       /* Populate SoC-specific attributes */
+       snprintf(mips_revision, 15, "%u.%u", (cpuid >> 5) & 0x07,
+               cpuid & 0x07);
+       snprintf(mips_soc_id, 15, "0x%08X",
+               readl(phys_to_virt(BAIKAL_BOOT_CTRL_DRID)));
+       /* Register SoC device */
+       soc_dev = soc_device_register(soc_dev_attr);
+       if (IS_ERR(soc_dev)) {
+               kfree(soc_dev_attr);
+               goto populate;
+       }
+       /* SoC platform device is parent for all */
+       parent = soc_device_to_device(soc_dev);
+populate:
+       if (of_platform_populate(NULL, of_default_bus_match_table, NULL, parent))
+               panic("Failed to populate device tree");
+
+       return 0;
+}
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/baikal/baikal-setup.c b/arch/mips/baikal/baikal-setup.c
new file mode 100644 (file)
index 0000000..35c4aec
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/init.h>
+
+#include <asm/mips-cps.h>      /* needed for Coherence manager */
+#include <asm/traps.h>
+#include <asm/mach-baikal/hardware.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/dma-coherence.h>
+
+#include "common.h"              /* Common Baikal definitions */
+
+static void __init plat_setup_iocoherency(void)
+{
+   if (mips_cps_numiocu(0) != 0) {
+      pr_info("CMP IOCU detected\n");
+
+      if (coherentio == IO_COHERENCE_ENABLED)
+         pr_info("Hardware DMA cache coherency enabled\n");
+      else
+         pr_info("Hardware DMA cache coherency disabled\n");
+   }
+}
+
+static int __init baikal_platform_setup(void)
+{
+   /* Setup IO Coherency */
+   plat_setup_iocoherency();
+   /* No critical actions - always return success */
+   return 0;
+}
+late_initcall(baikal_platform_setup);
+
+void baikal_be_init(void)
+{
+   /* Could change CM error mask register. */
+}
+
+static char *tr[8] = {
+   "mem",   "gcr",   "gic",   "mmio",
+   "0x04", "0x05", "0x06", "0x07"
+};
+
+static char *mcmd[32] = {
+   [0x00] = "0x00",
+   [0x01] = "Legacy Write",
+   [0x02] = "Legacy Read",
+   [0x03] = "0x03",
+   [0x04] = "0x04",
+   [0x05] = "0x05",
+   [0x06] = "0x06",
+   [0x07] = "0x07",
+   [0x08] = "Coherent Read Own",
+   [0x09] = "Coherent Read Share",
+   [0x0a] = "Coherent Read Discard",
+   [0x0b] = "Coherent Ready Share Always",
+   [0x0c] = "Coherent Upgrade",
+   [0x0d] = "Coherent Writeback",
+   [0x0e] = "0x0e",
+   [0x0f] = "0x0f",
+   [0x10] = "Coherent Copyback",
+   [0x11] = "Coherent Copyback Invalidate",
+   [0x12] = "Coherent Invalidate",
+   [0x13] = "Coherent Write Invalidate",
+   [0x14] = "Coherent Completion Sync",
+   [0x15] = "0x15",
+   [0x16] = "0x16",
+   [0x17] = "0x17",
+   [0x18] = "0x18",
+   [0x19] = "0x19",
+   [0x1a] = "0x1a",
+   [0x1b] = "0x1b",
+   [0x1c] = "0x1c",
+   [0x1d] = "0x1d",
+   [0x1e] = "0x1e",
+   [0x1f] = "0x1f"
+};
+
+static char *core[8] = {
+   "Invalid/OK",  "Invalid/Data",
+   "Shared/OK",   "Shared/Data",
+   "Modified/OK", "Modified/Data",
+   "Exclusive/OK", "Exclusive/Data"
+};
+
+static char *causes[32] = {
+   "None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+   "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+   "0x08", "0x09", "0x0a", "0x0b",
+   "0x0c", "0x0d", "0x0e", "0x0f",
+   "0x10", "0x11", "0x12", "0x13",
+   "0x14", "0x15", "0x16", "INTVN_WR_ERR",
+   "INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+   "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
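+/*
+ * Bus error handler: when a Coherence Manager is present, decode the
+ * GCR error cause/address/multiple-error state into a readable report
+ * and re-arm the cause register. The fixup/fatal verdict is unchanged.
+ */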
+int baikal_be_handler(struct pt_regs *regs, int is_fixup)
+{
+   /* This duplicates the handling in do_be which seems wrong */
+   int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+
+   if (mips_cm_present()) {
+      unsigned long cm_error = read_gcr_error_cause();
+      unsigned long cm_addr = read_gcr_error_addr();
+      unsigned long cm_other = read_gcr_error_mult();
+      unsigned long cause, ocause;
+      char buf[256];
+
+      cause = cm_error & CM_GCR_ERROR_CAUSE_ERRTYPE;
+      if (cause != 0) {
+         cause >>= __ffs(CM_GCR_ERROR_CAUSE_ERRTYPE);
+         if (cause < 16) {
+            unsigned long cca_bits = (cm_error >> 15) & 7;
+            unsigned long tr_bits = (cm_error >> 12) & 7;
+            unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
+            unsigned long stag_bits = (cm_error >> 3) & 15;
+            unsigned long sport_bits = (cm_error >> 0) & 7;
+
+            snprintf(buf, sizeof(buf),
+                "CCA=%lu TR=%s MCmd=%s STag=%lu "
+                "SPort=%lu\n",
+                cca_bits, tr[tr_bits], mcmd[cmd_bits],
+                stag_bits, sport_bits);
+         } else {
+            /* glob state & sresp together */
+            unsigned long c3_bits = (cm_error >> 18) & 7;
+            unsigned long c2_bits = (cm_error >> 15) & 7;
+            unsigned long c1_bits = (cm_error >> 12) & 7;
+            unsigned long c0_bits = (cm_error >> 9) & 7;
+            unsigned long sc_bit = (cm_error >> 8) & 1;
+            unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
+            unsigned long sport_bits = (cm_error >> 0) & 7;
+            snprintf(buf, sizeof(buf),
+                "C3=%s C2=%s C1=%s C0=%s SC=%s "
+                "MCmd=%s SPort=%lu\n",
+                core[c3_bits], core[c2_bits],
+                core[c1_bits], core[c0_bits],
+                sc_bit ? "True" : "False",
+                mcmd[cmd_bits], sport_bits);
+         }
+
+         ocause = (cm_other & CM_GCR_ERROR_MULT_ERR2ND) >>
+             __ffs(CM_GCR_ERROR_MULT_ERR2ND);
+
+         pr_err("CM_ERROR=%08lx %s <%s>\n", cm_error,
+                causes[cause], buf);
+         pr_err("CM_ADDR =%08lx\n", cm_addr);
+         pr_err("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
+
+         /* reprime cause register */
+         write_gcr_error_cause(0);
+      }
+   }
+
+   return retval;
+}
diff --git a/arch/mips/baikal/baikal-time.c b/arch/mips/baikal/baikal-time.c
new file mode 100644 (file)
index 0000000..6d64898
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+
+#include <linux/init.h>
+#include <linux/sched_clock.h>
+#include <linux/clk-provider.h>                /* of_clk_init */
+#include <linux/clocksource.h>                 /* clocksource_of_init */
+#include <linux/clk.h>                 /* of_clk_get */
+#include <linux/of.h>
+
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <asm/mach-baikal/hardware.h>
+#include "common.h"
+
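+/*
+ * Look up a device node by compatible string and return its enabled clock
+ * rate. If the clock cannot be enabled, fall back to the node's
+ * "clock-frequency" property; return 0 on any failure.
+ */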
+static unsigned long __init plat_get_dev_clk(const char *name)
+{
+       struct device_node *np;
+       struct clk *clk;
+       int ret;
+
+       /* Get node */
+       np = of_find_compatible_node(NULL, NULL, name);
+       if (!np) {
+               return 0;
+       }
+
+       /* Get node clock index 0 */
+       clk = of_clk_get(np, 0);
+       if (IS_ERR(clk)) {
+               return 0;
+       }
+
+       /* Prepare and enable clock */
+       ret = clk_prepare_enable(clk);
+       if (!ret) {
+               return clk_get_rate(clk);
+       }
+
+       if (of_property_read_u32(np, "clock-frequency", &ret)) {
+               return 0;
+       }
+
+       return ret;
+}
+
+/*
+ * Platform timers initialization
+ */
+void __init plat_time_init(void)
+{
+       /* Init system clocks */
+       of_clk_init(NULL);
+
+       if (!of_have_populated_dt())
+               pr_info("No populated device tree found\n");
+
+       /* Init clocksources */
+       timer_probe();
+
+       /* Set architectural timer frequency */
+       mips_hpt_frequency = plat_get_dev_clk("mti,p5600");
+
+       pr_info("CPU timer-dev_clk frequency: %u MHz\n",
+               (unsigned int)(mips_hpt_frequency / 1000000));
+       /* Check frequency */
+       if (!mips_hpt_frequency) {
+               pr_warn("No CPU clock frequency defined.\n");
+               mips_hpt_frequency = CPU_FREQ / CPU_CLK_DIV;
+       }
+
+       /* Report CPU clock frequency */
+       pr_info("CPU timer frequency: %u MHz\n",
+               (unsigned int)(mips_hpt_frequency / 1000000));
+
+       /*
+        * Use deterministic values for initial counter interrupt
+        * so that calibrate delay avoids encountering a counter wrap.
+        */
+       write_c0_count(0);
+       write_c0_compare(0xffff);
+}
diff --git a/arch/mips/baikal/common.h b/arch/mips/baikal/common.h
new file mode 100644 (file)
index 0000000..f04b856
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __BAIKAL_COMMON_H
+#define __BAIKAL_COMMON_H
+
+#define CPU_FREQ               1200000000
+#define CPU_CLK_DIV            1
+
+extern __iomem void *plat_of_remap_node(const char *node);
+extern int device_tree_early_init(void);
+
+#endif /* __BAIKAL_COMMON_H */
index aee8d7b8f09143fd8e4ce30a9552bf827ec357e4..0a9c2acca7f9ddcd7c2f0db18fad600af3779581 100644 (file)
 #define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset))
 #endif
 
+#ifdef CONFIG_MIPS_BAIKAL
+#include <asm/mach-baikal/hardware.h>
+#define UART0_BASE BAIKAL_UART0_START
+#define PORT(offset) (CKSEG1ADDR(UART0_BASE) + (4 * offset))
+#define IOTYPE unsigned int
+#endif
+
 #ifdef CONFIG_AR7
 #include <ar7.h>
 #define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
index 1e79cab8e2690c6e912579e4dac579a0ff550944..3b09cd161bc2f73ea36dad950199c1e080e17cfa 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+subdir-y       += baikal
 subdir-y       += brcm
 subdir-y       += cavium-octeon
 subdir-y       += img
diff --git a/arch/mips/boot/dts/baikal/Makefile b/arch/mips/boot/dts/baikal/Makefile
new file mode 100644 (file)
index 0000000..dd2c86b
--- /dev/null
@@ -0,0 +1,13 @@
+dtb-$(CONFIG_DTB_BAIKAL_BFK3)  += baikal_bfk3.dtb
+
+dtb-$(CONFIG_DT_NONE)  += baikal_bfk3.dtb
+
+obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+
+# Force kbuild to make empty built-in.o if necessary
+obj-                           += dummy.o
+
+always                         := $(dtb-y)
+clean-files                    := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/baikal/baikal_bfk3.dts b/arch/mips/boot/dts/baikal/baikal_bfk3.dts
new file mode 100644 (file)
index 0000000..c28e929
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Baikal-T1 BFK3 evaluation board device tree
+ *
+ * Copyright (C) 2014-2018  Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+#include <dt-bindings/gpio/gpio.h>
+
+#include  "baikal_t1_soc.dtsi"
+#include  "baikal_mdio.dtsi"
+
+/ {
+       model = "Baikal-T1 BFK3 evaluation board";
+       compatible = "baikal,mips", "baikal,baikal-t1-soc", "baikal,baikal-bfk3-eval-board";
+       #address-cells = <1>;
+       #size-cells = <2>;
+
+       chosen {
+               bootargs = "root=/dev/ram rw rootwait console=ttyS0,115200n8 earlyprintk=uart8250,mmio32,0x1F04A000,115200 maxcpus=2 nohtw";
+               linux,initrd-start = <0x00000000>;
+               linux,initrd-end   = <0x00000000>;
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x00000000 0 0x08000000>,
+                     <0x20000000 0 0xC0000000>;
+       };
+};
+
+&usb {
+       status = "okay";
+};
+
+&sata {
+       status = "okay";
+};
+
+&pcie {
+       status = "okay";
+};
+
+&gmac0 {
+       status = "okay";
+};
+
+&gmac1 {
+       status = "okay";
+};
+
+&xgmac {
+       status = "okay";
+};
+
+&gpio {
+       status = "okay";
+};
+
+&gpio3 {
+       status = "okay";
+};
+
+&i2c0 {
+};
+
+&i2c1 {
+       rtc@56 {
+               compatible = "abracon,abeoz9s3";
+               reg = <0x56>;
+       };
+};
+
+&uart1 {
+       status = "okay";
+};
+
+&bc {
+       status = "okay";
+};
+
+&spi0 {
+       num-cs = <2>;
+       cs-gpios = <&portb 0 GPIO_ACTIVE_LOW>,
+                          <&portb 1 GPIO_ACTIVE_LOW>;
+
+       status = "okay";
+
+       flash@1 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               status = "okay";
+               compatible = "micron,n25q256a", "jedec,spi-nor";
+               reg = <1>;
+               spi-max-frequency = <25000000>;
+               /* m25p,fast-read; */
+
+               mtd@00000000 {
+                       label = "flash0";
+                       reg = <0x00000000 0x02000000>;
+               };
+       };
+};
+
+&spi1 {
+       num-cs = <1>;
+       cs-gpios = <&portb 2 GPIO_ACTIVE_LOW>;
+
+       status = "okay";
+};
+
+&spi2 {
+       num-cs = <1>;
+       status = "okay";
+
+       flash@0 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               status = "okay";
+               compatible = "jedec,spi-nor";
+               reg = <0>;
+               spi-max-frequency = <100000>;
+
+               /* total size 16MB */
+               part@0 {
+                       label = "UBOOT";
+                       reg = <0x0 0xa0000>;
+               };
+               part@A0000 {
+                       label = "ENVSET";
+                       reg = <0xa0000 0x10000>;
+               };
+               part@B0000 {
+                       label = "FIRMWARE";
+                       reg = <0xb0000 0x10000>;
+               };
+               part@C0000 {
+                       label = "FDT";
+                       reg = <0xc0000 0x10000>;
+               };
+               part@D0000 {
+                       label = "MULTIIMAGE";
+                       reg = <0xd0000 0xf30000>;
+               };
+       };
+};
+
+&pvt {
+       status = "okay";
+};
+
+&efuse {
+       status = "okay";
+};
+
diff --git a/arch/mips/boot/dts/baikal/baikal_mdio.dtsi b/arch/mips/boot/dts/baikal/baikal_mdio.dtsi
new file mode 100644 (file)
index 0000000..ba3f809
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Baikal Electronics' XGBE MDIO mezzanine card device tree
+ *
+ * Copyright (C) 2014-2017  Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include  "baikal_t1_soc.dtsi"
+
+/ {
+       aliases {
+               mdio-gpio0 = &mdio0;
+       };
+
+       sfp_xgmac: sfp_xgmac {
+               compatible = "sff,sfp";
+               i2c-bus = <&i2c0>;
+       };
+
+       mdio0: be-mdio {
+               compatible = "be,mdio-gpio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               mdc-pin = <&porta 9 GPIO_ACTIVE_HIGH>;
+               mdo-pin = <&porta 10 GPIO_ACTIVE_HIGH>;
+               mdio-pin = <&porta 11 GPIO_ACTIVE_HIGH>;
+               rst-pin = <&porta 27 GPIO_ACTIVE_HIGH>;
+               clocks = <&gpio_clk 0>;
+               clock-names = "gpioclk";
+
+               mv_ch0:88X2222@0C {
+                       compatible = "marvell,88x2222", "ethernet-phy-ieee802.3-c45";
+                       reg = <0x0C>;
+                       phy-mode = "xgmii";
+                        mv,line-mode = "KR";
+                        mv,host-mode = "KR";
+               };
+
+               mv_ch1:88X2222@0D {
+                       compatible = "marvell,88x2222", "ethernet-phy-ieee802.3-c45";
+                       reg = <0x0D>;
+                       phy-mode = "xgmii";
+                        mv,line-mode = "KR";
+                        mv,host-mode = "KR";
+               };
+
+               mv_ch2:88X2222@0E {
+                       compatible = "marvell,88x2222", "ethernet-phy-ieee802.3-c45";
+                       reg = <0x0E>;
+                       phy-mode = "xgmii";
+                        mv,line-mode = "KR";
+                        mv,host-mode = "KR";
+
+               };
+
+               mv_ch3:88X2222@0F {
+                       compatible = "marvell,88x2222", "ethernet-phy-ieee802.3-c45";
+                       reg = <0x0F>;
+                       phy-mode = "xgmii";
+                        mv,line-mode = "KR";
+                        mv,host-mode = "KR";
+               };
+       };
+};
+
+&xgmac {
+       ext-phy-handle = <&mv_ch0>;
+};
diff --git a/arch/mips/boot/dts/baikal/baikal_t1_clocks.dtsi b/arch/mips/boot/dts/baikal/baikal_t1_clocks.dtsi
new file mode 100644 (file)
index 0000000..e067b0b
--- /dev/null
@@ -0,0 +1,387 @@
+/*
+ * Baikal-T1 SOC clock tree
+ *
+ * Copyright (C) 2014-2017  Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+/ {
+       clocks {
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               ranges;
+
+               /*** external oscillator ****/
+               osc25: oscillator@0 {
+                       compatible = "fixed-clock";
+                       #clock-cells = <1>;
+                       clock-frequency  = <25000000>;
+                       clock-output-names = "osc25";
+               };
+
+               /*** primary clock domains ***/
+
+               core_pll: core_pll@1F04D000 {
+                       compatible = "be,pmu-pll-clock";
+                       #clock-cells = <1>;
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       reg = <0x1F04D000 0x0008>;
+                       clock-output-names = "corepll";
+                       clock-frequency-range = <200000000 1300000000 25000000>;
+               };
+
+               sata_pll: sata_pll@1F04D008 {
+                       compatible = "be,pmu-pll-clock";
+                       #clock-cells = <1>;
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       reg = <0x1F04D008 0x0008>;
+                       clock-output-names = "satapll";
+               };
+
+               ddr_pll: ddr_pll@1F04D010 {
+                       compatible = "be,pmu-pll-clock";
+                       #clock-cells = <1>;
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       reg = <0x1F04D010 0x0008>;
+                       clock-output-names = "ddrpll";
+               };
+
+               pcie_pll: pcie_pll@1F04D018 {
+                       compatible = "be,pmu-pll-clock";
+                       #clock-cells = <1>;
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       reg = <0x1F04D018 0x0008>;
+                       clock-output-names = "pciepll";
+               };
+
+               eth_pll: eth_pll@1F04D020 {
+                       compatible = "be,pmu-pll-clock";
+                       #clock-cells = <1>;
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       reg = <0x1F04D020 0x0008>;
+                       clock-output-names = "ethpll";
+               };
+
+
+               /******** clocks ********/
+
+               /** core_pll domain **/
+               cpu_clk:cpu_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&core_pll 0>;
+                       clock-names = "corepll";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <1>;
+                       clock-output-names = "cpuclk";
+               };
+
+               gic_clk:gic_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&core_pll 0>;
+                       clock-names = "corepll";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <1>;
+                       clock-output-names = "gicclk";
+               };
+
+               /** pcie_pll domain **/
+               axi_clk:axi_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&pcie_pll 0>;
+                       clock-names = "pciepll";
+                       #clock-cells = <0>;
+                       clock-div = <2>;
+                       clock-mult = <1>;
+                       clock-output-names = "axiclk";
+               };
+
+               axi_xgmac:axi_xgmac0@1F04D044 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&eth_pll 0>;
+                       clock-names = "ethpll";
+                       reg = <0x1F04D044 0x0004>;
+                       clock-output-names = "axixgmac";
+                       divider-width = <4>;
+                       nobypass;
+               };
+
+               apb_clk:apb_clk@1F04D064 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&pcie_pll 0>;
+                       clock-names = "pciepll";
+                       reg = <0x1F04D064 0x0004>;
+                       clock-output-names = "apbclk";
+                       divider-width = <5>;
+               };
+
+               pci_phy_clk:pci_phy_clk@1F04D05C {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&pcie_pll 0>;
+                       clock-names = "pciepll";
+                       reg = <0x1F04D05C 0x0004>;
+                       clock-output-names = "pciphyclk";
+                       divider-width = <4>;
+                       nobypass;
+               };
+
+               /** sata_pll domain **/
+               sata_clk:sata_clk@1F04D060 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&sata_pll 0>;
+                       clock-names = "sataclk";
+                       reg = <0x1F04D060 0x0004>;
+                       clock-output-names = "sataclk";
+                       divider-width = <4>;
+                       nobypass;
+               };
+
+               /** eth_pll domain **/
+               div_125m:div_125m {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&eth_pll 0>;
+                       clock-names = "ethpll";
+                       #clock-cells = <1>;
+                       clock-div = <10>;
+                       clock-mult = <1>;
+                       clock-output-names = "div125m";
+               };
+
+               div_156m:div_156m {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&eth_pll 0>;
+                       clock-names = "ethpll";
+                       #clock-cells = <1>;
+                       clock-div = <8>;
+                       clock-mult = <1>;
+                       clock-output-names = "div156m";
+               };
+
+               div_250m:div_250m {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&eth_pll 0>;
+                       clock-names = "ethpll";
+                       #clock-cells = <1>;
+                       clock-div = <5>;
+                       clock-mult = <1>;
+                       clock-output-names = "div250m";
+               };
+
+               uart_clk:uart_clk@1F04D084 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&eth_pll 0>;
+                       clock-names = "ethpll";
+                       reg = <0x1F04D084 0x0004>;
+                       clock-output-names = "baudclk";
+                       divider-width = <17>;
+               };
+
+               /** osc25 derivatives **/
+               timer0_clk:timer0_clk@1F04D088 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       reg = <0x1F04D088 0x0004>;
+                       clock-output-names = "timer0clk";
+                       divider-width = <17>;
+               };
+
+               timer1_clk:timer1_clk@1F04D08C {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       reg = <0x1F04D08C 0x0004>;
+                       clock-output-names = "timer1clk";
+                       divider-width = <17>;
+               };
+
+               timer2_clk:timer2_clk@1F04D090 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       reg = <0x1F04D090 0x0004>;
+                       clock-output-names = "timer2clk";
+                       divider-width = <17>;
+               };
+
+               gpio_clk:gpio_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&osc25 0>;
+                       clock-names = "ref";
+                       #clock-cells = <0>;
+                       clock-div = <25>;
+                       clock-mult = <1>;
+                       clock-output-names = "gpioclk";
+                       clock-output = "gpio_clk";
+               };
+
+               pvt_clk:pvt_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&osc25 0>;
+                       clock-names = "osc25";
+                       #clock-cells = <0>;
+                       clock-div = <21>;
+                       clock-mult = <1>;
+                       clock-output-names = "pvtclk";
+               };
+
+               div_1m:div_1m {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&osc25 0>;
+                       clock-names = "osc25";
+                       #clock-cells = <0>;
+                       clock-div = <25>;
+                       clock-mult = <1>;
+                       clock-output-names = "div1m";
+               };
+
+               /*** secondary clock domains ***/
+
+               /** pcie_pll -> apb_clk domain **/
+               dma_clk:dma_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&apb_clk 0>;
+                       clock-names = "apbclk";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <1>;
+                       clock-output-names = "dmaclk";
+                       clock-output = "hclk";
+               };
+
+               spi0_clk:spi0_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&apb_clk 0>;
+                       clock-names = "apbclk";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <1>;
+                       clock-output-names = "spi0clk";
+               };
+
+               spi1_clk:spi1_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&apb_clk 0>;
+                       clock-names = "apbclk";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <1>;
+                       clock-output-names = "spi1clk";
+               };
+
+               boot_clk:boot_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&apb_clk 0>;
+                       clock-names = "apbclk";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <1>;
+                       clock-output-names = "bootclk";
+               };
+
+               /** eth_pll -> div_250m domain **/
+               gmac0_clk:gmac0_clk@1F04D068 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&div_250m 0>;
+                       clock-names = "div250m";
+                       reg = <0x1F04D068 0x0004>;
+                       clock-output-names = "gmac0clk";
+                       divider-width = <0>;
+               };
+
+               gmac1_clk:gmac1_clk@1F04D06C {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&div_250m 0>;
+                       clock-names = "div250m";
+                       reg = <0x1F04D06C 0x0004>;
+                       clock-output-names = "gmac1clk";
+                       divider-width = <0>;
+               };
+
+               /** eth_pll -> div_125m domain **/
+               usb_clk:usb_clk@1F04D074 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&div_125m 0>;
+                       clock-names = "div125m";
+                       reg = <0x1F04D074 0x0004>;
+                       clock-output-names = "usbclk";
+                       divider-width = <0>;
+               };
+
+               i2c0_clk:i2c0_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&div_125m 0>;
+                       clock-names = "div125m";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <1>;
+                       clock-output-names = "i2c0clk";
+               };
+
+               i2c1_clk:i2c1_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&div_125m 0>;
+                       clock-names = "div125m";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <1>;
+                       clock-output-names = "i2c1clk";
+               };
+
+               /** eth_pll -> div_156m domain **/
+               xgmac_dma:xgmac_dma@1F04D070 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&div_156m 0>;
+                       clock-names = "div156m";
+                       reg = <0x1F04D070 0x0004>;
+                       clock-output-names = "xgmac_dma";
+                       clock-output = "dma_clk";
+                       divider-width = <0>;
+               };
+
+               xgmac_ptp:xgmac_ptp {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&div_156m 0>;
+                       clock-names = "div156m";
+                       #clock-cells = <0>;
+                       clock-div = <4>;
+                       clock-mult = <1>;
+                       clock-output-names = "xgmacptp";
+                       clock-output = "ptp_clk";
+               };
+
+               /** osc25 -> div_1m domain **/
+               wdt_clk:wdt_clk@1F04D150 {
+                       compatible = "be,pmu-device-clock";
+                       #clock-cells = <1>;
+                       clocks = <&div_1m 0>;
+                       clock-names = "div1m";
+                       reg = <0x1F04D150 0x0004>;
+                       clock-output-names = "wdtclk";
+                       divider-width = <0>;
+               };
+       };
+};
diff --git a/arch/mips/boot/dts/baikal/baikal_t1_soc.dtsi b/arch/mips/boot/dts/baikal/baikal_t1_soc.dtsi
new file mode 100644 (file)
index 0000000..0bb12a4
--- /dev/null
@@ -0,0 +1,482 @@
+/*
+ * Baikal-T1 SOC generic device tree
+ *
+ * Copyright (C) 2014-2017  Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include "baikal_t1_clocks.dtsi"
+
+/ {
+
+       compatible = "baikal,mips", "baikal,baikal-t1-soc";
+       #address-cells = <1>;
+       #size-cells = <1>;
+       interrupt-parent = <&gic>;
+
+       aliases {
+               gic = &gic;
+               serial0 = &uart0;
+               serial1 = &uart1;
+               i2c0 = &i2c0;
+               i2c1 = &i2c1;
+               bc   = &bc;
+               ssi0 = &spi0;
+               ssi1 = &spi1;
+               ssi2 = &spi2;
+               ethernet0 = &gmac0;
+               ethernet1 = &gmac1;
+               ethernet2 = &xgmac;
+       };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               CPU0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "mti,p5600";
+                       reg = <0x0>;
+                       clocks = <&cpu_clk 0>;
+                       clock-names = "cpuclk";
+               };
+
+               CPU1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "mti,p5600";
+                       reg = <0x1>;
+                       clocks = <&cpu_clk 0>;
+                       clock-names = "cpuclk";
+               };
+       };
+
+       cpufreq: cpufreq@1F04D000 {
+               compatible = "be,cpufreq";
+               reg = <0x1F04D000 0x0 0x0008>;
+               clocks = <&cpu_clk 0>;
+               clock-names = "cpuclk";
+       };
+
+       gic_wdt: gic_wdt {
+               compatible = "be,gic-wdt";
+               interrupt-parent = <&gic>;
+               interrupts = <GIC_LOCAL 0 IRQ_TYPE_NONE>;
+               clock-frequency = <600000000>;
+               clocks = <&gic_clk 0>;
+               clock-names = "gicclk";
+
+       };
+
+       gic: gic@1BDC0000 {
+               compatible = "mti,gic";
+               reg = <0x1BDC0000 0x0 0x20000>;
+               interrupt-controller;
+               #interrupt-cells = <3>;
+
+               timer:timer {
+                       compatible = "mti,gic-timer";
+                       interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+                       clock-frequency = <600000000>;
+                       clocks = <&gic_clk 0>;
+                       clock-names = "gicclk";
+               };
+       };
+
+       sram: sram@1BF80000 {
+               compatible = "mmio-sram";
+               reg = <0x1BF80000 0x0 0x10000>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0 0x1BF80000 0x10000>;
+
+               smp-sram@0 {
+                       compatible = "be,smp-sram";
+                       reg = <0 0x10000>;
+                       label="Internal SRAM";
+                       export;
+               };
+       };
+
+       axi {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               interrupt-parent = <&gic>;
+
+               ranges;
+
+               dma: dma@1f041000 {
+                       compatible = "snps,dma-spear1340";
+                       reg = <0x1f041000 0x1000>;
+                       interrupts = <GIC_SHARED 56 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-channels = <2>;
+                       dma-requests = <16>;
+                       dma-masters = <2>;
+                       #dma-cells = <3>;
+                       chan_allocation_order = <1>;
+                       chan_priority = <1>;
+                       block_size = <0xfff>;
+                       data_width = <0 0 0 0>;
+                       clocks = <&dma_clk 0>;
+                       clock-names = "hclk";
+               };
+
+
+               memory-controller@1F042000 {
+                       compatible = "baikal,bt1-edac-mc";
+                       reg = <0x1F042000 0x1000>;
+                       interrupts = <GIC_SHARED 96 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 97 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 98 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ddr_pll 0>;
+                       clock-names = "ddrclk";
+               };
+
+               usb: usb@1F04D050 {
+                       compatible = "be,baikal-dwc3";
+                       reg = <0x1F04D050 0x0004>;
+                       interrupts = <GIC_SHARED 69 IRQ_TYPE_LEVEL_HIGH>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       clocks = <&usb_clk 0>;
+                       clock-names = "usb";
+                       ranges;
+                       status = "disabled";
+
+                       dwc3@1F100000 {
+                               compatible = "snps,dwc3", "synopsys,dwc3", "generic-xhci";
+                               reg = <0x1F100000 0x10000>;
+                               interrupts = <GIC_SHARED 68 IRQ_TYPE_LEVEL_HIGH>;
+                               dr_mode = "host";
+                               tx-fifo-resize;
+                               maximum-speed = "high-speed";
+                       };
+               };
+
+               axi_ehb@1F04D110 {
+                       compatible = "be,axi-ehb";
+                       reg = <0x1F04D110 0x8>;
+                       interrupts = <GIC_LOCAL 127 IRQ_TYPE_NONE>;
+               };
+
+               sata: sata@1F050000 {
+                       interrupt-parent = <&gic>;
+                       compatible = "snps,dwc-ahci", "generic-ahci";
+                       reg = <0x1F050000 0x2000>;
+                       interrupts = <GIC_SHARED 64 IRQ_TYPE_LEVEL_HIGH>;
+                       ports-implemented = <3>;
+                       clocks = <&sata_clk 0>;
+                       clock-names = "sataclk";
+                       status = "disabled";
+               };
+
+               pcie: pcie@1F052000 {
+                       compatible = "snps,dw-pcie";
+                       reg = <0x1f052000 0x1000>,      /* Controller regs */
+                             <0x1bdb0000 0x10000>;     /* PCI config space */
+                       reg-names = "dbi", "config";
+                       interrupts = <GIC_SHARED 88 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 89 IRQ_TYPE_LEVEL_HIGH>;
+                       #interrupt-cells = <1>;
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       device_type = "pci";
+                       ranges = <0x82000000 0 0x08000000 0x08000000 0 0x03da0000>, /* mem */
+                                <0x82000000 0 0x10000000 0x0bda0000 0 0x10000000>, /* mem */
+                                <0x81000000 0 0x0bda0000 0x1bda0000 0 0x00010000>; /* io */
+                       num-lanes = <4>;
+                       num-viewport = <4>;
+                       bus-range = <0x0 0xff>;
+                       status = "disabled";
+               };
+
+               edma: edma@1F053000 {
+                       compatible = "be,baikal-edma";
+                       reg = <0x1F053000 0x1000>;
+                       interrupt-parent = <&gic>;
+                       interrupts = <GIC_SHARED 80 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 81 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 82 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 83 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 84 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 85 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 86 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 87 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "eDMA-Tx-0", "eDMA-Tx-1", "eDMA-Tx-2", "eDMA-Tx-3",
+                                         "eDMA-Rx-0", "eDMA-Rx-1", "eDMA-Rx-2", "eDMA-Rx-3";
+                       upstream = <&pcie>;
+               };
+
+               xgmac: eth0@1F054000 {
+                       compatible = "amd,xgbe-seattle-v1a";
+                       reg = <0x1F054000 0x4000>,
+                             <0x1F05D000 0x1000>;
+                       interrupt-parent = <&gic>;
+                       interrupts = <GIC_SHARED 74 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 75 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 76 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 77 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 78 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SHARED 79 IRQ_TYPE_LEVEL_HIGH>;
+                       fsl,num-rx-queues=<3>;
+                       clocks = <&axi_xgmac 0>, <&xgmac_dma 0>, <&xgmac_ptp 0>, <&div_156m 0>;
+                       clock-names = "axi", "dma_clk", "ptp_clk", "xgbe_clk";
+                       be,external-clock;
+                       phy-mode = "xgmii";
+                       be,speed-set = <1>;
+                       be,mode-set = <0>;
+                       mac-address = [ 00 20 13 ba 1c a1 ];
+                       local-mac-address = [ 00 20 13 ba 1c a1 ];
+                       be,pcs-mode = "KR";
+                       status = "disabled";
+               };
+
+               stmmac_axi_setup: stmmac-axi-config {
+                       snps,wr_osr_lmt = <0x3>;
+                       snps,rd_osr_lmt = <0x3>;
+               };
+
+
+               gmac0: eth1@1F05E000 {
+                       compatible = "be,dwmac", "snps,dwmac-3.710", "snps,dwmac";
+                       reg = <0x1F05E000 0x2000>;
+                       interrupt-parent = <&gic>;
+                       interrupts = <GIC_SHARED 72 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "macirq";
+                       phy-mode = "rgmii";
+                       clocks = <&gmac0_clk 0>;
+                       clock-names = "stmmaceth";
+                       mac-address = [ 7a 72 6c 4a 7a 07 ];
+                       local-mac-address = [ 7a 72 6c 4a 7a 07 ];
+                       txd0-skew-ps = <0>;
+                       txd1-skew-ps = <0>;
+                       txd2-skew-ps = <0>;
+                       txd3-skew-ps = <0>;
+                       txc-skew-ps  = <0xff>;
+                       snps,pbl = <2>;
+                       snps,axi-config = <&stmmac_axi_setup>;
+                       tx-fifo-depth = <32768>;
+                       rx-fifo-depth = <32768>;
+                       status = "disabled";
+               };
+
+               gmac1: eth2@1F060000 {
+                       compatible = "be,dwmac", "snps,dwmac-3.710", "snps,dwmac";
+                       reg = <0x1F060000 0x2000>;
+                       interrupt-parent = <&gic>;
+                       interrupts = <GIC_SHARED 73 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "macirq";
+                       phy-mode = "rgmii";
+                       clocks = <&gmac1_clk 0>;
+                       clock-names = "stmmaceth";
+                       mac-address = [ 7a 72 6c 4a 7b 07 ];
+                       local-mac-address = [ 7a 72 6c 4a 7b 07 ];
+                       txd0-skew-ps = <0>;
+                       txd1-skew-ps = <0>;
+                       txd2-skew-ps = <0>;
+                       txd3-skew-ps = <0>;
+                       txc-skew-ps  = <0xff>;
+                       snps,pbl = <2>;
+                       snps,axi-config = <&stmmac_axi_setup>;
+                       tx-fifo-depth = <32768>;
+                       rx-fifo-depth = <32768>;
+                       status = "disabled";
+               };
+       }; /* axi */
+
+       apb {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               interrupt-parent = <&gic>;
+
+               ranges;
+
+               bc: bc@1F040000 {
+                       compatible = "be,boot-controller";
+                       reg = <0x1F040000 0x100>;
+                       status = "disabled";
+               };
+
+               gpio: gpio@1F044000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = <0x1F044000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+
+                       porta: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               #gpio-cells = <2>;
+                               gpio-controller;
+                               snps,nr-gpios = <32>;
+                               reg = <0>;
+                               #interrupt-cells = <2>;
+                               interrupt-controller;
+                               interrupts = <GIC_SHARED 19 IRQ_TYPE_LEVEL_HIGH>;
+                       };
+               };
+
+               gpio3: gpio@1F045000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = <0x1F045000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+
+                       portb: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               #gpio-cells = <2>;
+                               gpio-controller;
+                               snps,nr-gpios = <3>;
+                               reg = <0>;
+                       };
+               };
+
+               i2c0: i2c0@1F046000 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "snps,designware-i2c";
+                       reg = <0x1F046000 0x1000>;
+                       interrupts = <GIC_SHARED 33 IRQ_TYPE_LEVEL_HIGH>;
+                       clock-frequency = <400000>;
+                       clocks = <&i2c0_clk 0>;
+                       clock-names = "i2c0clk";
+               };
+
+               i2c1: i2c1@1F047000 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "snps,designware-i2c";
+                       reg = <0x1F047000 0x1000>;
+                       interrupts = <GIC_SHARED 34 IRQ_TYPE_LEVEL_HIGH>;
+                       clock-frequency = <400000>;
+                       clocks = <&i2c1_clk 0>;
+                       clock-names = "i2c1clk";
+               };
+
+               timer0: timer0@1F049000 {
+                       compatible = "snps,dw-apb-timer-osc";
+                       interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
+                       reg = <0x1F049000 0x14>;
+                       clocks = <&timer0_clk 0>;
+                       clock-frequency  = <25000000>;
+                       clock-names = "timer";
+               };
+
+               timer1: timer1@1F049014 {
+                       compatible = "snps,dw-apb-timer-sp";
+                       reg = <0x1F049014 0x14>;
+                       interrupts = <GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&timer1_clk 0>;
+                       clock-frequency  = <25000000>;
+                       clock-names = "timer";
+               };
+
+               timer2: timer2@1F049028 {
+                       compatible = "snps,dw-apb-timer-sp";
+                       reg = <0x1F049028 0x14>;
+                       interrupts = <GIC_SHARED 26 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&timer2_clk 0>;
+                       clock-frequency  = <25000000>;
+                       clock-names = "timer";
+               };
+
+               uart0: serial0@1F04A000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x1F04A000 0x1000>;
+                       interrupts = <GIC_SHARED 48 IRQ_TYPE_LEVEL_HIGH>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clocks = <&uart_clk 0>, <&apb_clk 0>;
+                       clock-names = "baudclk", "apb_pclk";
+                       dcd-override;
+                       dsr-override;
+                       cts-override;
+                       ri-override;
+               };
+
+               uart1: serial1@1F04B000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x1F04B000 0x1000>;
+                       interrupts = <GIC_SHARED 49 IRQ_TYPE_LEVEL_HIGH>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clocks = <&uart_clk 0>, <&apb_clk 0>;
+                       clock-names = "baudclk", "apb_pclk";
+                       status = "disabled";
+               };
+
+               wdt: watchdog@1F04C000 {
+                       compatible = "snps,dw-wdt";
+                       reg = <0x1F04C000 0x1000>;
+                       interrupts = <GIC_SHARED 17 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&wdt_clk 0>;
+                       clock-names = "wdtclk";
+                       snps,watchdog-tops = <0x000000ff 0x000001ff 0x000003ff 0x000007ff
+                                             0x0000ffff 0x0001ffff 0x0003ffff 0x0007ffff
+                                             0x000fffff 0x001fffff 0x003fffff 0x007fffff
+                                             0x00ffffff 0x01ffffff 0x03ffffff 0x07ffffff>;
+               };
+
+               spi0: spi@1F04E000 {
+                       compatible = "be,dw-spi";
+                       reg = <0x1F04E000 0x1000>;
+                       interrupts = <GIC_SHARED 40 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&spi0_clk 0>;
+                       clock-names = "spi0clk";
+                       dma-names = "tx", "rx";
+                       dmas = <&dma 8 0 1>, <&dma 9 1 0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+               };
+
+               spi1: spi@1F04F000 {
+                       compatible = "be,dw-spi";
+                       reg = <0x1F04F000 0x1000>;
+                       interrupts = <GIC_SHARED 41 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&spi1_clk 0>;
+                       clock-names = "spi1clk";
+                       dma-names = "tx", "rx";
+                       dmas = <&dma 10 0 1>, <&dma 11 1 0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+               };
+
+               spi2: spi@1F040100 {
+                       compatible = "be,dw-spi-boot";
+                       reg = <0x1F040100 0xF00>;
+                       clocks = <&boot_clk 0>;
+                       clock-names = "bootclk";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+               };
+
+               apb_ehb@1F059000 {
+                       compatible = "be,apb-ehb";
+                       reg = <0x1f059000 0x1000>;
+                       interrupts = <GIC_SHARED 16 IRQ_TYPE_LEVEL_HIGH>;
+               };
+
+               pvt: pvt@1F200000 {
+                       compatible = "baikal,pvt";
+                       reg = <0x1F200000 0x2C>;
+                       interrupt-parent = <&gic>;
+                       interrupts = <GIC_SHARED 31 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+               };
+
+               efuse: efuse@1F201000 {
+                       compatible = "baikal,efuse";
+                       reg = <0x1F201000 0x1000>;
+                       status = "disabled";
+               };
+       }; /* apb */
+};
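The nodes above only describe the hardware; drivers pull their resources out of them through the usual kernel helpers. A minimal, illustrative sketch (not part of this patch) of how a platform driver would consume the gmac0 node's register window, "macirq" interrupt and "stmmaceth" clock:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative consumer of a node such as gmac0 above (not in this patch). */
static int example_eth_probe(struct platform_device *pdev)
{
	void __iomem *regs;
	struct clk *clk;
	int irq;

	regs = devm_platform_ioremap_resource(pdev, 0);	/* reg = <0x1F05E000 0x2000> */
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	irq = platform_get_irq_byname(pdev, "macirq");		/* interrupt-names */
	if (irq < 0)
		return irq;

	clk = devm_clk_get(&pdev->dev, "stmmaceth");		/* clock-names */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}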
diff --git a/arch/mips/configs/baikal_bfk3_defconfig b/arch/mips/configs/baikal_bfk3_defconfig
new file mode 100644 (file)
index 0000000..d36777b
--- /dev/null
@@ -0,0 +1,326 @@
+CONFIG_MIPS_BAIKAL=y
+CONFIG_MACH_BAIKAL_BFK3=y
+CONFIG_DTB_BAIKAL_BFK3=y
+CONFIG_BAIKAL_ERRATA=y
+CONFIG_BAIKAL_ERRATA_XGMAC=n
+CONFIG_CPU_MIPS32_3_5_FEATURES=y
+# CONFIG_CPU_MIPS32_3_5_EVA is not set
+CONFIG_CPU_MIPS32_R5_FEATURES=y
+CONFIG_CPU_MIPS32_R5_XPA=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_CPS=y
+CONFIG_CPU_HAS_MSA=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+#CONFIG_KEXEC=y
+CONFIG_SERIAL_8250_DMA=n
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_LOCALVERSION="-bfk3"
+CONFIG_DEFAULT_HOSTNAME="baikal"
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_IOMMU_SUPPORT=n
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=n
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_PCI_REALLOC_ENABLE_AUTO=y
+CONFIG_PCI_STUB=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_PRI=y
+CONFIG_PCI_PASID=y
+CONFIG_PCIE_BAIKAL=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_ECRC=y
+CONFIG_PCIEAER_INJECT=y
+CONFIG_PCIEAER=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_EXTHDR=y
+CONFIG_NFT_META=y
+CONFIG_NFT_RBTREE=y
+CONFIG_NFT_HASH=y
+CONFIG_NFT_COUNTER=y
+CONFIG_NFT_LOG=y
+CONFIG_NFT_LIMIT=y
+CONFIG_NFT_MASQ=y
+CONFIG_NFT_REDIR=y
+CONFIG_NFT_NAT=y
+CONFIG_NFT_REJECT=y
+CONFIG_NETFILTER_XT_TARGET_AUDIT=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CGROUP=y
+CONFIG_NETFILTER_XT_MATCH_CPU=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_RECENT=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NFT_CHAIN_ROUTE_IPV4=y
+CONFIG_NFT_DUP_IPV4=y
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NFT_CHAIN_NAT_IPV4=y
+CONFIG_NFT_MASQ_IPV4=y
+CONFIG_NFT_REDIR_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NFT_CHAIN_ROUTE_IPV6=y
+CONFIG_NFT_DUP_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEBUG_DRIVER=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_MIPS_CDMM=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=n
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=4
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=4
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_RAID_ATTRS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_AMD_XGBE=y
+CONFIG_BAIKAL_XGBE=y
+CONFIG_COMPILE_TEST=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+CONFIG_IXGBEVF=y
+CONFIG_BAIKAL_MDIO=y
+CONFIG_88X2222_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_GPIO=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_GPIO_PCF857X=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
+CONFIG_DRM=y
+CONFIG_DRM_I2C_CH7006=m
+CONFIG_DRM_I2C_SIL164=m
+CONFIG_FB_SIMPLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_ULPI_BUS=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=m
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_HCD_TEST_MODE=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3_ULPI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ABEOZ9S3=y
+CONFIG_DMADEVICES=y
+CONFIG_DMADEVICES_DEBUG=y
+CONFIG_DMADEVICES_VDEBUG=y
+CONFIG_DW_DMAC=y
+CONFIG_BAIKAL_EDMA=y
+CONFIG_STAGING=y
+CONFIG_FB_SM750=y
+CONFIG_IOMMU_SUPPORT=n
+CONFIG_MEMORY=y
+CONFIG_EXT4_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_UTF8=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_PANIC_TIMEOUT=10
+CONFIG_DMA_API_DEBUG=n
+CONFIG_DEBUG_ZBOOT=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=m
+CONFIG_SENSORS_PVT=y
+CONFIG_HWMON=y
+CONFIG_JFFS2_FS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_NET_PKTGEN=y
+CONFIG_DEBUG_FS=y
+#CONFIG_NET_PKTGEN=y
+#CONFIG_DEBUG_FS=y
+CONFIG_STACKTRACE_SUPPORT=n
+CONFIG_STACKTRACE=n
+CONFIG_TRACING=n
+CONFIG_TRACING_SUPPORT=n
+CONFIG_DEBUG_KERNEL=n
+CONFIG_DEBUG_LIST=n
+CONFIG_LATENCYTOP=n
+CONFIG_EDAC_BAIKAL=y
+CONFIG_EDAC=y
index ed7ffe4e63a32edbe4c793936beed17626e7d305..1aad7e5a5cd8474f3983a61d0d7137648b09da65 100644 (file)
@@ -105,7 +105,7 @@ struct cpuinfo_mips {
        unsigned int            gtoffset_mask;
        unsigned int            guestid_mask;
        unsigned int            guestid_cache;
-} __attribute__((aligned(SMP_CACHE_BYTES)));
+} __aligned(SMP_CACHE_BYTES) __randomize_layout;
 
 extern struct cpuinfo_mips cpu_data[];
 #define current_cpu_data cpu_data[smp_processor_id()]
index be726b9435309773b4558d6555ed6fe91137d0ea..013101f3f3fd11d7108afd1ed2cc7d007d26456e 100644 (file)
@@ -88,7 +88,7 @@
 /* don't care; ISA bus master won't work, ISA slave DMA supports 32bit addr */
 #define MAX_DMA_ADDRESS                PAGE_OFFSET
 #else
-#define MAX_DMA_ADDRESS                (PAGE_OFFSET + 0x01000000)
+#define MAX_DMA_ADDRESS                (PAGE_OFFSET + 0x02000000)
 #endif
 #define MAX_DMA_PFN            PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
 
index 356c61074d1363152f07604565ec23b2c85ef99c..0303c88d18d23f2dd0107e249379f510b5a733bc 100644 (file)
@@ -818,7 +818,7 @@ struct kvm_mips_callbacks {
        int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-};
+} __no_randomize_layout;
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
 
index 6908b93c4ff9b05a3adee95f86847daaeb4dc01b..fd9a398c01811a5e9868e06732040a7c1fa090ff 100644 (file)
@@ -53,12 +53,22 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
        back_to_back_c0_hazard();
        write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | attrs);
        back_to_back_c0_hazard();
+#ifdef CONFIG_XPA
+       /* Write bits [39:36] of the upper address to the extended MAAR. */
+       writex_c0_maar((upper >> (MIPS_MAARX_ADDR_SHIFT)) | MIPS_MAARX_VH);
+       back_to_back_c0_hazard();
+#endif /* CONFIG_XPA */
 
        /* Write the lower address & attributes */
        write_c0_maari((idx << 1) | 0x1);
        back_to_back_c0_hazard();
        write_c0_maar((lower >> 4) | attrs);
        back_to_back_c0_hazard();
+#ifdef CONFIG_XPA
+       writex_c0_maar((lower >> (MIPS_MAARX_ADDR_SHIFT)) | MIPS_MAARX_VH);
+       back_to_back_c0_hazard();
+#endif /* CONFIG_XPA */
+
 }
 
 /**
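With CONFIG_XPA a region bound can exceed 32 bits, so each bound is split across two registers: bits [39:36] go into the extended MAARX view (as the comment above notes) and the rest into MAAR as before. A hedged sketch of the split, reusing the MIPS_MAARX_* masks this patch adds to mipsregs.h:

#include <linux/types.h>
#include <asm/hazards.h>
#include <asm/mipsregs.h>

/* Illustration only: how one 40-bit bound is written out; this mirrors
 * the write_maar_pair() logic above rather than replacing it.
 */
static inline void example_write_maar_bound(phys_addr_t bound, unsigned long attrs)
{
	write_c0_maar(((bound >> 4) & MIPS_MAAR_ADDR) | attrs);	/* low address bits, as before */
	back_to_back_c0_hazard();
	writex_c0_maar((bound >> MIPS_MAARX_ADDR_SHIFT) | MIPS_MAARX_VH); /* PA bits [39:36] */
	back_to_back_c0_hazard();
}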
diff --git a/arch/mips/include/asm/mach-baikal/cpu-feature-overrides.h b/arch/mips/include/asm/mach-baikal/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..2a56a18
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014  Baikal Electronics OJSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef _BAIKAL_CPU_FEATURE_OVERRIDES_H
+#define _BAIKAL_CPU_FEATURE_OVERRIDES_H
+
+#if 0
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            1
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#define cpu_has_maar           0
+#define cpu_has_tlbinv         1
+
+#endif
+
+#define cpu_has_64bits         0
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_veic           1
+#define cpu_has_mipsmt         0
+#define cpu_has_counter                1
+#ifdef CONFIG_CPU_HAS_MSA
+#define cpu_has_msa            1
+#else
+#define cpu_has_msa            0
+#endif
+
+#define cpu_has_maar           1
+#define cpu_has_htw            0
+
+#define cpu_has_nan_2008       1
+#define cpu_has_nan_legacy     1
+
+#endif /* _BAIKAL_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-baikal/efuse.h b/arch/mips/include/asm/mach-baikal/efuse.h
new file mode 100644 (file)
index 0000000..0aea279
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Baikal-T SOC platform support code. EFUSE driver.
+ *
+ * Baikal-T1 EFUSE register and field declarations,
+ * based on Baikal-T1 EFUSE_programming_v0.4.pdf.
+ *
+ * Copyright (C) 2014-2016 Baikal Electronics JSC
+ * 
+ * Author:
+ *   Georgiy Vlasov <Georgy.Vlasov@baikalelectronics.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __EFUSE_H_
+#define __EFUSE_H_
+
+#define Gb_ETHERNET_0 0
+#define Gb_ETHERNET_1 1
+#define xGb_ETHERNET  2
+
+/* EFUSE registers */
+/* MODE_REG. R/W. */
+#define EFUSE_MODES        0x0004
+#define EFUSE_MODES_RegisterSize 32
+#define EFUSE_MODES_RegisterResetValue 0x0
+#define EFUSE_MODES_RegisterResetMask 0xffffffff
+/* Register Field information for EFUSE_MODES */
+#define EFUSE_MODES_MODE_BitAddressOffset 0
+#define EFUSE_MODES_MODE_RegisterSize 2
+/* Bits 31:2 - reserved */
+
+/* ADDR_REG EFUSE. R/W. */
+#define EFUSE_ADDR         0x0008
+#define EFUSE_ADDR_RegisterSize 32
+#define EFUSE_ADDR_RegisterResetValue 0x0
+#define EFUSE_ADDR_RegisterResetMask 0xffffffff
+/* Register Field information for EFUSE_ADDR */
+#define EFUSE_ADDR_Addr_BitAddressOffset 0   /* [4:0] - EFUSE memory row number. */
+#define EFUSE_ADDR_Addr_RegisterSize 5
+/* Bits 31:5 - reserved */
+
+/* ENABLE EFUSE [0]. R/W. */
+#define EFUSE_ENABLE       0x000C
+#define EFUSE_ENABLE_RegisterSize 32
+#define EFUSE_ENABLE_RegisterResetValue 0x0
+#define EFUSE_ENABLE_RegisterResetMask 0xffffffff
+/* Register Field information for EFUSE_ENABLE */
+#define EFUSE_ENABLE_Enable_BitAddressOffset 0   /* [0] - EFUSE enable bit. */
+#define EFUSE_ENABLE_Enable_RegisterSize 1
+/* Bits 31:1 - reserved */
+
+/* Register for reading data. RO. */
+#define EFUSE_RDATA        0x0010
+#define EFUSE_RDATA_RegisterSize 32
+#define EFUSE_RDATA_RegisterResetValue 0x0
+#define EFUSE_RDATA_RegisterResetMask 0xffffffff
+
+/* API Functions */
+u32 be_efuse_getLocks(void);
+u8     be_efuse_getVersion(void);
+u8     be_efuse_getFab(void);
+u8     be_efuse_getProcess(void);
+u8     be_efuse_getLotID(void);
+u8     be_efuse_getRevision(void);
+u32 be_efuse_getSerialNum(void);
+u32 be_efuse_getCornerID(void);
+u32 be_efuse_getCPUFreq(void);
+u32 be_efuse_getPad(void);
+u64 be_efuse_getMAC(u8 id);
+
+#endif
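The accessors are implemented by the eFuse driver added elsewhere in this commit (arch/mips/baikal/baikal-efuse.c). As a purely hypothetical usage sketch, not part of the patch and with the byte ordering of the packed MAC assumed here, a caller could turn the fused value into an interface address like this:

#include <linux/etherdevice.h>
#include <linux/types.h>
#include <asm/mach-baikal/efuse.h>

/* Hypothetical caller of the API above; MSB-first unpacking is an assumption. */
static void example_mac_from_efuse(u8 addr[ETH_ALEN], u8 port)
{
	u64 mac = be_efuse_getMAC(port);	/* Gb_ETHERNET_0/1 or xGb_ETHERNET */
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = mac >> (8 * (ETH_ALEN - 1 - i));

	if (!is_valid_ether_addr(addr))
		eth_random_addr(addr);		/* fall back to a random address */
}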
diff --git a/arch/mips/include/asm/mach-baikal/hardware.h b/arch/mips/include/asm/mach-baikal/hardware.h
new file mode 100644 (file)
index 0000000..607d259
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014,2015  Baikal Electronics OJSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ASM_ARCH_HARDWARE_H
+#define __ASM_ARCH_HARDWARE_H
+/*
+ * 26.08.2013 Dmitry Dunaev
+ * In terms of address constants, physical addresses have the
+ * suffix _START. All virtual addresses have the suffix _BASE.
+ * The constant prefix specifies the target CPU (e.g. BAIKAL_).
+ *
+ * 27.01.2014 Dmitry Dunaev
+ * The memory map is adapted to the MIPS architecture.
+ */
+
+#include <linux/sizes.h>
+
+/* Global IO addresses */
+#define BAIKAL_IO_START                        (0x1F000000)
+#define BAIKAL_IO_SIZE                 SZ_16M
+/* PCI mapping region */
+#define BAIKAL_PCI_MAP_START   (0x08000000)
+#define BAIKAL_PCI_MAP_SIZE            SZ_256M
+
+/* Physical allocation of subsystems */
+#define BAIKAL_BOOT_START              (0x1FC00000)
+#define BAIKAL_BOOT_SIZE               SZ_4M
+#define BAIKAL_SRAM_START              (0x1BF80000)
+#define BAIKAL_SRAM_SIZE               SZ_64K
+#define BAIKAL_ROM_START               (0x1BFC0000)
+#define BAIKAL_ROM_SIZE                        SZ_64K
+#define BAIKAL_DRAM_START              (0x00000000)
+#define BAIKAL_DRAM_SIZE               SZ_128M
+#define BAIKAL_HIGHMEM_START   (0x20000000)
+#define BAIKAL_HIGHMEM_SIZE            SZ_1G
+
+/* Peripheral addresses, offset from BAIKAL_IO_START */
+#define BAIKAL_P5600                   (BAIKAL_IO_START + 0x00000000)
+#define BAIKAL_BOOT_CTRL_START (BAIKAL_IO_START + 0x00040000)
+#define BAIKAL_BOOT_CTRL_CSR   (BAIKAL_BOOT_CTRL_START + 0x00)
+#define BAIKAL_BOOT_CTRL_MAR   (BAIKAL_BOOT_CTRL_START + 0x04)
+#define BAIKAL_BOOT_CTRL_DRID  (BAIKAL_BOOT_CTRL_START + 0x08)
+#define BAIKAL_BOOT_CTRL_VID   (BAIKAL_BOOT_CTRL_START + 0x0C)
+#define BAIKAL_DMA_START               (BAIKAL_IO_START + 0x00041000)
+#define BAIKAL_DDR_START               (BAIKAL_IO_START + 0x00042000)
+#define BAIKAL_DDR_PHY                 (BAIKAL_IO_START + 0x00043000)
+#define BAIKAL_GPIO_START              (BAIKAL_IO_START + 0x00044000)
+#define BAIKAL_CTRL_GPIO_START (BAIKAL_IO_START + 0x00045000)
+#define BAIKAL_I2C_START               (BAIKAL_IO_START + 0x00046000)
+#define BAIKAL_SPI_START               (BAIKAL_IO_START + 0x00047000)
+#define BAIKAL_RTC_START               (BAIKAL_IO_START + 0x00048000)
+#define BAIKAL_TIMERS_START            (BAIKAL_IO_START + 0x00049000)
+#define BAIKAL_UART0_START             (BAIKAL_IO_START + 0x0004A000)
+#define BAIKAL_UART1_START             (BAIKAL_IO_START + 0x0004B000)
+#define BAIKAL_WDT_START               (BAIKAL_IO_START + 0x0004C000)
+#define BAIKAL_PMU_START               (BAIKAL_IO_START + 0x0004D000)
+#define BAIKAL_PMU_I2C_START   (BAIKAL_IO_START + 0x0004D800)
+#define BAIKAL_GMAC_START              (BAIKAL_IO_START + 0x0004E000)
+#define BAIKAL_GMAC_DMA                        (BAIKAL_IO_START + 0x0004F000)
+#define BAIKAL_SATA_START              (BAIKAL_IO_START + 0x00050000)
+#define BAIKAL_PCI_START               (BAIKAL_IO_START + 0x00051000)
+#define BAIKAL_PCI_DMA                 (BAIKAL_IO_START + 0x00052000)
+#define BAIKAL_USB_START               (BAIKAL_IO_START + 0x00053000)
+#define BAIKAL_USB_DMA                 (BAIKAL_IO_START + 0x00054000)
+#define BAIKAL_XGMAC_START             (BAIKAL_IO_START + 0x00055000)
+#define BAIKAL_XGMAC_DMA               (BAIKAL_IO_START + 0x00056000)
+#define BAIKAL_VIRTUAL_BLOCK   (BAIKAL_IO_START + 0x000FF000)
+#define BAIKAL_VBLOCK_EXIT             (BAIKAL_VIRTUAL_BLOCK + 0x00)
+#define BAIKAL_VBLOCK_REVISION (BAIKAL_VIRTUAL_BLOCK + 0x04)
+
+#endif /* __ASM_ARCH_HARDWARE_H */
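All of these are physical addresses inside the 16 MB I/O window, which lies below 512 MB and is therefore reachable through uncached kseg1 without any mapping. A minimal sketch (not from this patch) of reading the boot-controller version register defined above:

#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/mach-baikal/hardware.h>

/* Illustration only: peek BAIKAL_BOOT_CTRL_VID through uncached kseg1. */
static inline u32 example_read_boot_vid(void)
{
	return __raw_readl((void __iomem *)KSEG1ADDR(BAIKAL_BOOT_CTRL_VID));
}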
diff --git a/arch/mips/include/asm/mach-baikal/ioremap.h b/arch/mips/include/asm/mach-baikal/ioremap.h
new file mode 100644 (file)
index 0000000..3417ef2
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ *      Baikal-T SOC platform support code.
+ *
+ *      Copyright (C) 2018 Baikal Electronics.
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_MACH_BAIKAL_IOREMAP_H
+#define __ASM_MACH_BAIKAL_IOREMAP_H
+
+#include <linux/types.h>
+#include <asm/addrspace.h>
+
+#ifdef CONFIG_PCI
+#include <pci-t1.h>
+#endif
+
+/*
+ * Allow physical addresses to be fixed up to help peripherals located
+ * outside the low 32-bit range -- generic pass-through version.
+ */
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
+{
+       return phys_addr;
+}
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+       unsigned long flags)
+{
+#ifdef CONFIG_PCI
+       if ((offset >= PCI_BUS_PHYS_PCIMEM_BASE_ADDR) && (offset < PCI_BUS_PHYS_PCIMEM_LIMIT_ADDR)) {
+               return (void __iomem *)KSEG1ADDR(BAIKAL_MAP_PCI_BUS_TO_PADDR((unsigned long)offset));
+       }
+#endif 
+       return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+#ifdef CONFIG_PCI
+       return ((BAIKAL_MAP_PADDR_TO_PCI_BUS(CPHYSADDR((unsigned long)addr)) >= PCI_BUS_PHYS_PCIMEM_BASE_ADDR) &&
+               (BAIKAL_MAP_PADDR_TO_PCI_BUS(CPHYSADDR((unsigned long)addr)) < PCI_BUS_PHYS_PCIMEM_LIMIT_ADDR));
+#else
+       return 0;
+#endif
+}
+#endif /* __ASM_MACH_BAIKAL_IOREMAP_H */
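The effect is that ioremap() of an address inside the PCI bus memory window (constants from pci-t1.h, added below) bypasses the TLB: the bus address is shifted down by PCI_BUS_ALIGN_OFFSET and returned as a kseg1 pointer, and plat_iounmap() later tells the generic code there is nothing to tear down. Illustrative arithmetic, not part of the patch: bus 0x20000000 corresponds to physical 0x08000000, i.e. kseg1 0xa8000000.

#include <linux/io.h>
#include <asm/mach-baikal/pci-t1.h>

/* Illustration only: mapping the start of the PCI memory window. */
static void __iomem *example_map_pci_mem(void)
{
	/* plat_ioremap() short-circuits this to KSEG1ADDR(0x08000000) == 0xa8000000 */
	return ioremap(PCI_BUS_PHYS_PCIMEM_BASE_ADDR, 0x1000);
}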
diff --git a/arch/mips/include/asm/mach-baikal/irq.h b/arch/mips/include/asm/mach-baikal/irq.h
new file mode 100644 (file)
index 0000000..d6ac9ac
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014  Baikal Electronics OJSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __ASM_BAIKAL_IRQ_H
+#define __ASM_BAIKAL_IRQ_H
+
+#define NR_IRQS 255
+#define MIPS_CPU_IRQ_BASE      0
+
+#include_next <irq.h>
+
+#endif /* __ASM_BAIKAL_IRQ_H */
diff --git a/arch/mips/include/asm/mach-baikal/kernel-entry-init.h b/arch/mips/include/asm/mach-baikal/kernel-entry-init.h
new file mode 100644 (file)
index 0000000..591545f
--- /dev/null
@@ -0,0 +1,313 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Chris Dearman (chris@mips.com)
+ * Copyright (C) 2007 Mips Technologies, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
+#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
+
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+
+       /*
+        * Prepare segments for EVA boot:
+        *
+        * This is in case the processor boots in legacy configuration
+        * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
+        *
+        * ========================= Mappings =============================
+        * Virtual memory           Physical memory           Mapping
+        * 0x00000000 - 0x7fffffff  0x80000000 - 0xffffffff    MUSUK (kuseg)
+        *                          Flat 2GB physical memory
+        *
+        * 0x80000000 - 0x9fffffff  0x00000000 - 0x1fffffff    MUSUK (kseg0)
+        * 0xa0000000 - 0xbfffffff  0x00000000 - 0x1fffffff    MUSUK (kseg1)
+        * 0xc0000000 - 0xdfffffff             -                 MK  (kseg2)
+        * 0xe0000000 - 0xffffffff             -                 MK  (kseg3)
+        *
+        *
+        * Lowmem is expanded to 2GB
+        *
+        * The following code uses the t0, t1, t2 and ra registers without
+        * previously preserving them.
+        *
+        */
+       .macro  platform_eva_init
+
+       .set    push
+       .set    reorder
+#if 0
+       /*
+        * Get Config.K0 value and use it to program
+        * the segmentation registers
+        */
+       mfc0    t1, CP0_CONFIG
+       andi    t1, 0x7 /* CCA */
+#endif
+       /*
+        * Directly use cacheable, coherent, write-back,
+        * write-allocate, read misses request shared attribute
+        */
+       li      t1, 0x5
+       move    t2, t1
+       ins     t2, t1, 16, 3
+       /* SegCtl0 */
+       li      t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |         \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) |                          \
+               (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |            \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+       or      t0, t2
+       mtc0    t0, CP0_PAGEMASK, 2
+
+       /* SegCtl1 */
+       li      t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |      \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (2 << MIPS_SEGCFG_C_SHIFT) |                            \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) |                          \
+               (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |         \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+       ins     t0, t1, 16, 3
+       mtc0    t0, CP0_PAGEMASK, 3
+
+       /* SegCtl2 */
+       li      t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |      \
+               (6 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) |                          \
+               (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |         \
+               (4 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+       or      t0, t2
+       mtc0    t0, CP0_PAGEMASK, 4
+
+       jal     mips_ihb
+       mfc0    t0, CP0_CONFIG, 5
+       li      t2, MIPS_CONF5_K      /* K bit */
+       or      t0, t0, t2
+       mtc0    t0, CP0_CONFIG, 5
+
+       sync
+       jal     mips_ihb
+
+       mfc0    t0, CP0_CONFIG, 0
+       li      t2, MIPS32R5_CONF_MM      /* Write Merge */
+       or      t0, t0, t2
+       mtc0    t0, CP0_CONFIG, 0
+       sync
+       jal     mips_ihb
+       nop
+
+       .set    pop
+       .endm
+
+       /*
+        * Prepare segments for LEGACY boot:
+        *
+        * ========================= Mappings =============================
+        * Segment   Virtual    Size   Access Mode   Physical   Caching   EU
+        * -------   -------    ----   -----------   --------   -------   --
+        *    0      e0000000   512M      MK            UND         U       0
+        *    1      c0000000   512M      MSK           UND         U       0
+        *    2      a0000000   512M      UK            000         2       0
+        *    3      80000000   512M      UK            000         3       0
+        *    4      40000000    1G       MUSK          UND         U       1
+        *    5      00000000    1G       MUSK          UND         U       1
+        *
+        * The following code uses the t0, t1, t2 and ra registers without
+        * previously preserving them.
+        *
+        */
+       .macro  platform_legacy_init
+
+       .set    push
+       .set    reorder
+#if 0
+       /*
+        * Get Config.K0 value and use it to program
+        * the segmentation registers
+        */
+       mfc0    t1, CP0_CONFIG
+       andi    t1, 0x7 /* CCA */
+#endif
+       /*
+        * Directly use cacheable, coherent, write-back,
+        * write-allocate, read misses request shared attribute
+        */
+       li      t1, 0x5
+       move    t2, t1
+       ins     t2, t1, 16, 3
+       /* SegCtl0 */
+       li      t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |         \
+               (0 << MIPS_SEGCFG_PA_SHIFT)) |                          \
+               (((MIPS_SEGCFG_MSK << MIPS_SEGCFG_AM_SHIFT) |           \
+               (0 << MIPS_SEGCFG_PA_SHIFT)) << 16)
+       or      t0, t2
+       mtc0    t0, CP0_PAGEMASK, 2
+
+       /* SegCtl1 */
+       li      t0, ((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (2 << MIPS_SEGCFG_C_SHIFT)) |                           \
+               (((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) |            \
+               (0 << MIPS_SEGCFG_PA_SHIFT)) << 16)
+       ins     t0, t1, 16, 3
+       mtc0    t0, CP0_PAGEMASK, 3
+
+       /* SegCtl2 */
+       li      t0, ((MIPS_SEGCFG_MUSK << MIPS_SEGCFG_AM_SHIFT) |       \
+               (6 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) |                          \
+               (((MIPS_SEGCFG_MUSK << MIPS_SEGCFG_AM_SHIFT) |          \
+               (4 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+       or      t0, t2
+       mtc0    t0, CP0_PAGEMASK, 4
+
+       jal     mips_ihb
+       nop
+
+       mfc0    t0, CP0_CONFIG, 5
+       li      t2, MIPS_CONF5_K      /* K bit */
+       or      t0, t0, t2
+       mtc0    t0, CP0_CONFIG, 5
+       sync
+       jal     mips_ihb
+       nop
+
+       mfc0    t0, CP0_CONFIG, 0
+       li      t2, MIPS32R5_CONF_MM      /* Write Merge */
+       or      t0, t0, t2
+       mtc0    t0, CP0_CONFIG, 0
+       sync
+       jal     mips_ihb
+       nop
+
+       .set    pop
+       .endm
+
+#ifdef CONFIG_MIPS_BAIKAL_T
+       .macro  platform_errata_fix
+
+       .set    push
+       .set    reorder
+
+       jal     mips_ihb
+       nop
+
+       /*
+        * Disable load/store bonding.
+        */
+       mfc0    t0, CP0_CONFIG, 6
+       lui     t1, (MIPS_CONF6_DLSB >> 16)
+       or      t0, t0, t1
+       /*
+        * This disables all JR prediction other than JR $31.
+        */
+       ori     t0, t0, MIPS_CONF6_JRCD
+       mtc0    t0, CP0_CONFIG, 6
+       sync
+       jal     mips_ihb
+       nop
+
+       /*
+        * This disables all JR $31 prediction through return prediction stack.
+        */
+       mfc0    t0, CP0_CONFIG, 7
+       ori     t0, t0, MIPS_CONF7_RPS
+       mtc0    t0, CP0_CONFIG, 7
+       sync
+       jal     mips_ihb
+       nop
+
+       .set    pop
+       .endm
+#endif
+
+       .macro  platform_enable_msa
+
+       .set    push
+       .set    reorder
+
+#ifdef CONFIG_CPU_HAS_MSA
+       jal     mips_ihb
+       nop
+
+       mfc0    t0, CP0_CONFIG, 5
+       li      t1, MIPS_CONF5_MSAEN
+       or      t0, t0, t1
+       mtc0    t0, CP0_CONFIG, 5
+       sync
+       jal     mips_ihb
+       nop
+
+       mfc0    t0, CP0_STATUS, 0
+       li      t1, ST0_FR
+       or      t0, t0, t1
+       mtc0    t0, CP0_STATUS, 0
+       sync
+       jal     mips_ihb
+       nop
+#endif /* CONFIG_CPU_HAS_MSA */
+
+       .set    pop
+       .endm
+
+       .macro  kernel_entry_setup
+
+       sync
+       ehb
+
+#ifdef CONFIG_EVA
+       mfc0    t1, CP0_CONFIG
+       bgez    t1, 9f
+       mfc0    t0, CP0_CONFIG, 1
+       bgez    t0, 9f
+       mfc0    t0, CP0_CONFIG, 2
+       bgez    t0, 9f
+       mfc0    t0, CP0_CONFIG, 3
+       sll     t0, t0, 6   /* SC bit */
+       bgez    t0, 9f
+
+       platform_eva_init
+
+       b       0f
+9:     b       9b
+       nop
+#else
+       platform_legacy_init
+#endif /* CONFIG_EVA */
+0:
+#ifdef CONFIG_MIPS_BAIKAL_T
+       platform_errata_fix
+#endif
+       platform_enable_msa
+
+       .endm
+
+/*
+ * Do SMP slave processor setup necessary before we can safely execute C code.
+ */
+       .macro  smp_slave_setup
+       sync
+       ehb
+
+#ifdef CONFIG_EVA
+       platform_eva_init
+#else
+       platform_legacy_init
+#endif  /* CONFIG_EVA */
+
+#ifdef CONFIG_MIPS_BAIKAL_T
+       platform_errata_fix
+#endif
+       platform_enable_msa
+
+       .endm
+
+#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-baikal/pci-t1.h b/arch/mips/include/asm/mach-baikal/pci-t1.h
new file mode 100644 (file)
index 0000000..dd970e8
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ *  Baikal-T SOC platform support code.
+ *
+ *  Copyright (C) 2015-2018 Baikal Electronics.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ *  BAIKAL MIPS boards specific PCI support.
+ */
+
+#ifndef __ASM_MACH_BAIKAL_PCI_T1_H__
+#define __ASM_MACH_BAIKAL_PCI_T1_H__
+
+#define        PHYS_PCI_START_ADDR             (0x08000000)
+#define        PHYS_PCI_END_ADDR               (0x1BDC0000)
+
+#define PCI_BUS_ALIGN_OFFSET           (0x18000000LL)
+#define BAIKAL_MAP_PCI_BUS_TO_PADDR(x)  ((x) - PCI_BUS_ALIGN_OFFSET)
+#define BAIKAL_MAP_PADDR_TO_PCI_BUS(x)  ((x) + PCI_BUS_ALIGN_OFFSET)
+
+#define        PHYS_PCIMEM_BASE_ADDR           (PHYS_PCI_START_ADDR)
+#define PHYS_PCIMEM_SIZE               (0x10410000)
+#define        PHYS_PCIMEM_LIMIT_ADDR          (PHYS_PCIMEM_BASE_ADDR + PHYS_PCIMEM_SIZE - 1)
+#define IATU_MEM_INDEX                 2
+#define PCI_BUS_PHYS_PCIMEM_BASE_ADDR  (PHYS_PCIMEM_BASE_ADDR + PCI_BUS_ALIGN_OFFSET)
+#define PCI_BUS_PHYS_PCIMEM_LIMIT_ADDR (PHYS_PCIMEM_LIMIT_ADDR + PCI_BUS_ALIGN_OFFSET)
+
+#define        PHYS_PCI_RD0_BASE_ADDR          (PHYS_PCIMEM_LIMIT_ADDR + 1)
+#ifdef CONFIG_PCI_ECAM
+#define PHYS_PCI_RD0_SIZE              (0x00210000)
+#else
+#define PHYS_PCI_RD0_SIZE              (0x00010000)
+#endif /* CONFIG_PCI_ECAM */
+#define PHYS_PCI_RD0_LIMIT_ADDR                (PHYS_PCI_RD0_BASE_ADDR + PHYS_PCI_RD0_SIZE - 1)
+#define IATU_RD0_INDEX                 0
+
+#define PHYS_PCI_RD1_BASE_ADDR         (PHYS_PCI_RD0_LIMIT_ADDR + 1)
+#ifdef CONFIG_PCI_ECAM
+#define PHYS_PCI_RD1_SIZE              (0x02F00000)
+#else
+#define PHYS_PCI_RD1_SIZE              (0x00010000)
+#endif /* CONFIG_PCI_ECAM */
+#define PHYS_PCI_RD1_LIMIT_ADDR                (PHYS_PCI_RD1_BASE_ADDR + PHYS_PCI_RD1_SIZE - 1)
+#define IATU_RD1_INDEX                 1
+
+#define PHYS_PCI_MSI_SIZE              (0x00010000)
+#define        PHYS_PCI_MSI_BASE_ADDR          (PHYS_PCI_END_ADDR - PHYS_PCI_MSI_SIZE)
+
+#define PHYS_PCIIO_BASE_ADDR           (PHYS_PCI_RD1_LIMIT_ADDR + 1)
+#define PHYS_PCIIO_LIMIT_ADDR          (PHYS_PCI_MSI_BASE_ADDR - 1)
+#define PHYS_PCIIO_SIZE                        (PHYS_PCIIO_LIMIT_ADDR - PHYS_PCIIO_BASE_ADDR)
+#define IATU_IO_INDEX                  3
+
+#endif /* __ASM_MACH_BAIKAL_PCI_T1_H__ */
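For orientation, with CONFIG_PCI_ECAM disabled the constants above work out to the layout below (derived arithmetic, a sketch rather than anything normative):

/*
 * Derived layout with CONFIG_PCI_ECAM disabled (illustrative only):
 *
 *   PHYS_PCIMEM   0x08000000 - 0x1840ffff   outbound memory window (iATU 2)
 *   PHYS_PCI_RD0  0x18410000 - 0x1841ffff   config window 0        (iATU 0)
 *   PHYS_PCI_RD1  0x18420000 - 0x1842ffff   config window 1        (iATU 1)
 *   PHYS_PCIIO    0x18430000 - 0x1bdaffff   I/O window             (iATU 3)
 *   PHYS_PCI_MSI  0x1bdb0000 - 0x1bdbffff   MSI target window
 *
 * PCI bus addresses are these physical addresses plus PCI_BUS_ALIGN_OFFSET
 * (0x18000000), e.g. bus 0x20000000 corresponds to physical 0x08000000.
 */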
diff --git a/arch/mips/include/asm/mach-baikal/spaces.h b/arch/mips/include/asm/mach-baikal/spaces.h
new file mode 100644 (file)
index 0000000..a571343
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014  Baikal Electronics OJSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef _ASM_BAIKAL_SPACES_H
+#define _ASM_BAIKAL_SPACES_H
+
+#include <asm/mach-baikal/hardware.h>
+
+/*
+ * Virtual addresses offset
+ */
+#define PAGE_OFFSET            _AC(0x80000000, UL)
+/*
+ * Physical addresses offset
+ */
+#define PHYS_OFFSET            _AC(0x00000000, UL)
+
+/*
+ * Uncached addresses offset
+ */
+#define UNCAC_BASE     _AC(0xa0000000, UL)     /* 0xa0000000 + PHYS_OFFSET */
+
+/*
+ * High memory segment physical addresses
+ */
+#define HIGHMEM_START          _AC(0x20000000, UL)
+/*
+ * I/O memory space
+ */
+#define IO_BASE                UNCAC_BASE
+
+/*
+ * PCI_IOBASE must correspond to the I/O range in .dts
+ */
+#define PCI_IOBASE     _AC(0xbbd00000, UL)
+
+#include_next <spaces.h>
+
+#endif /* __ASM_BAIKAL_SPACES_H */
diff --git a/arch/mips/include/asm/mach-baikal/war.h b/arch/mips/include/asm/mach-baikal/war.h
new file mode 100644 (file)
index 0000000..282b67a
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014  Baikal Electronics OJSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __ASM_MIPS_MACH_MIPS_WAR_H
+#define __ASM_MIPS_MACH_MIPS_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR            0
+#define R4600_V1_HIT_CACHEOP_WAR               0
+#define R4600_V2_HIT_CACHEOP_WAR               0
+#define R5432_CP0_INTERRUPT_WAR                        0
+#define BCM1250_M3_WAR                                 0
+#define SIBYTE_1956_WAR                                        0
+#define MIPS4K_ICACHE_REFILL_WAR               0
+#define MIPS_CACHE_SYNC_WAR                            0
+#define TX49XX_ICACHE_INDEX_INV_WAR            0
+#define ICACHE_REFILLS_WORKAROUND_WAR  0
+#define R10000_LLSC_WAR                                        0
+#define MIPS34K_MISSED_ITLB_WAR                        0
+
+#include <asm/war.h>
+
+#endif /* __ASM_MIPS_MACH_MIPS_WAR_H */
diff --git a/arch/mips/include/asm/mips-boards/baikal.h b/arch/mips/include/asm/mips-boards/baikal.h
new file mode 100644 (file)
index 0000000..59d856e
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Baikal-T SOC platform support code.
+ *
+ * Copyright (C) 2014-2016  Baikal Electronics OJSC
+ * 
+ * Author:
+ *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ASM_MIPS_BOARDS_BAIKAL_H
+#define __ASM_MIPS_BOARDS_BAIKAL_H
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+
+/*
+ * GCMP Specific definitions
+ */
+#define GCMP_BASE_ADDR                 0x1fbf8000
+#define GCMP_ADDRSPACE_SZ              (256 * 1024)
+
+/*
+ * GIC Specific definitions
+ */
+#define GIC_BASE_ADDR                  0x1bdc0000
+#define GIC_ADDRSPACE_SZ               (128 * 1024)
+
+/*
+ * CPC Specific definitions
+ */
+#define CPC_BASE_ADDR                  0x1bde0000
+#define CPC_ADDRSPACE_SZ               (24 * 1024)
+
+
+#endif /* __ASM_MIPS_BOARDS_BAIKAL_H */
index c28b892937fe1681b44162de9e569b27c38cefd7..52d1bd3c9bf017c863cb8fed4309200ac6b5215c 100644 (file)
@@ -75,6 +75,7 @@
 #define CP0_EPC $14
 #define CP0_PRID $15
 #define CP0_EBASE $15, 1
+#define CP0_CDMMBASE $15, 2
 #define CP0_CMGCRBASE $15, 3
 #define CP0_CONFIG $16
 #define CP0_CONFIG3 $16, 3
 #define MIPS_CONF_AT           (_ULCAST_(3) << 13)
 #define MIPS_CONF_M            (_ULCAST_(1) << 31)
 
+/* Bits specific to the MIPS32R5. */
+#define MIPS32R5_CONF_K23      (_ULCAST_(7) << 28)
+#define MIPS32R5_CONF_KU       (_ULCAST_(7) << 25)
+#define MIPS32R5_CONF_ISP      (_ULCAST_(1) << 24)
+#define MIPS32R5_CONF_DSP      (_ULCAST_(1) << 23)
+#define MIPS32R5_CONF_UDI      (_ULCAST_(1) << 22)
+#define MIPS32R5_CONF_SB       (_ULCAST_(1) << 21)
+#define MIPS32R5_CONF_MM       (_ULCAST_(1) << 18)
+
 /*
  * Bits in the MIPS32/64 PRA coprocessor 0 config registers 1 and above.
  */
 #define MIPS_CONF5_CV          (_ULCAST_(1) << 29)
 #define MIPS_CONF5_K           (_ULCAST_(1) << 30)
 
+/* Jump register cache prediction disable */
+#define MIPS_CONF6_JRCD                (_ULCAST_(1) << 0)
+/* MIPSr6 enable */
+#define MIPS_CONF6_R6          (_ULCAST_(1) << 2)
 #define MIPS_CONF6_SYND                (_ULCAST_(1) << 13)
 /* proAptiv FTLB on/off bit */
 #define MIPS_CONF6_FTLBEN      (_ULCAST_(1) << 15)
 #define MIPS_CONF6_FTLBDIS     (_ULCAST_(1) << 22)
 /* FTLB probability bits */
 #define MIPS_CONF6_FTLBP_SHIFT (16)
+/* Disable load/store bonding */
+#define MIPS_CONF6_DLSB                (_ULCAST_(1) << 21)
 
 #define MIPS_CONF7_WII         (_ULCAST_(1) << 31)
 
 #define MIPS_MAAR_ADDR_SHIFT   12
 #define MIPS_MAAR_S            (_ULCAST_(1) << 1)
 #define MIPS_MAAR_VL           (_ULCAST_(1) << 0)
+#define MIPS_MAARX_VH          (_ULCAST_(1) << 31)
+#define MIPS_MAARX_ADDR_SHIFT  36
 
 /* MAARI bit definitions */
 #define MIPS_MAARI_INDEX       (_ULCAST_(0x3f) << 0)
@@ -1688,6 +1706,9 @@ do {                                                                      \
 
 #define read_c0_prid()         __read_const_32bit_c0_register($15, 0)
 
+#define read_c0_cdmm()         __read_ulong_c0_register($15, 2)
+#define write_c0_cdmm(val)     __write_ulong_c0_register($15, 2, val)
+
 #define read_c0_cmgcrbase()    __read_ulong_c0_register($15, 3)
 
 #define read_c0_config()       __read_32bit_c0_register($16, 0)
@@ -1711,6 +1732,8 @@ do {                                                                      \
 #define write_c0_lladdr(val)   __write_ulong_c0_register($17, 0, val)
 #define read_c0_maar()         __read_ulong_c0_register($17, 1)
 #define write_c0_maar(val)     __write_ulong_c0_register($17, 1, val)
+#define readx_c0_maar()                __readx_32bit_c0_register($17, 1)
+#define writex_c0_maar(val)    __writex_32bit_c0_register($17, 1, val)
 #define read_c0_maari()                __read_32bit_c0_register($17, 2)
 #define write_c0_maari(val)    __write_32bit_c0_register($17, 2, val)
 
@@ -2778,6 +2801,7 @@ set_##name(unsigned int set)                                      \
        res = read_##name();                                    \
        new = res | set;                                        \
        write_##name(new);                                      \
+       _ehb();                                                 \
                                                                \
        return res;                                             \
 }                                                              \
@@ -2790,6 +2814,7 @@ clear_##name(unsigned int clear)                          \
        res = read_##name();                                    \
        new = res & ~clear;                                     \
        write_##name(new);                                      \
+       _ehb();                                                 \
                                                                \
        return res;                                             \
 }                                                              \
@@ -2803,6 +2828,7 @@ change_##name(unsigned int change, unsigned int val)              \
        new = res & ~change;                                    \
        new |= (val & change);                                  \
        write_##name(new);                                      \
+       _ehb();                                                 \
                                                                \
        return res;                                             \
 }
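For context on the three hunks above: _ehb() is the MIPS execution hazard barrier, so the read-modify-write CP0 accessors now guarantee that the new register value is architecturally visible before the next instruction. An illustrative caller (not from this patch) therefore needs no barrier of its own:

    unsigned int old_status;

    /* The _ehb() inside set_c0_status() clears the execution hazard,
     * so interrupts are enabled before the next instruction executes. */
    old_status = set_c0_status(ST0_IE);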
index cddead91acd4856f6121b93ae275b8050e4d932e..d0ea0fb595cffee35d796991ba6ae9c316b39e47 100644 (file)
@@ -237,4 +237,63 @@ drop_mmu_context(struct mm_struct *mm)
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_MIPS_BAIKAL
+#define ARCH_WANTS_TLB_PREFETCH
+/* Workaround for core stuck on TLB load exception */
+static inline void tlb_prefetch(unsigned long addr)
+{
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp;
+       pte_t *ptep;
+       int idx, pid;
+
+       if (addr < MAP_BASE)
+               return;
+
+       addr &= (PAGE_MASK << 1);
+       if (cpu_has_mmid) {
+               write_c0_entryhi(addr);
+       } else {
+               pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
+               write_c0_entryhi(addr | pid);
+       }
+       pgdp = pgd_offset(&init_mm, addr);
+       mtc0_tlbw_hazard();
+       tlb_probe();
+       tlb_probe_hazard();
+       pudp = pud_offset(pgdp, addr);
+       pmdp = pmd_offset(pudp, addr);
+       idx = read_c0_index();
+
+       ptep = pte_offset_map(pmdp, addr);
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+       write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+       if (cpu_has_xpa)
+               writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+       ptep++;
+       write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+       if (cpu_has_xpa)
+               writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
+       write_c0_entrylo0(ptep->pte_high);
+       ptep++;
+       write_c0_entrylo1(ptep->pte_high);
+#endif
+#else
+       write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+       write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
+#endif
+       mtc0_tlbw_hazard();
+       if (idx < 0)
+               tlb_write_random();
+       else
+               tlb_write_indexed();
+
+       tlbw_use_hazard();
+}
+#endif
 #endif /* _ASM_MMU_CONTEXT_H */
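ARCH_WANTS_TLB_PREFETCH above is an opt-in hint for callers that are about to touch a freshly created kernel mapping on the affected cores. A hypothetical caller (illustrative only, not taken from this patch) would look like:

    void *p = vmalloc(PAGE_SIZE);

    #ifdef ARCH_WANTS_TLB_PREFETCH
    /* Pre-seed the TLB entry so the first access below cannot trip the
     * TLB-load-exception erratum this helper works around. */
    tlb_prefetch((unsigned long)p);
    #endif
    memset(p, 0, PAGE_SIZE);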
index 8c56b862fd9c2b003fe00fbad9cb88ae88ea6d16..4c02ae05339e0ccdb50342d9109a3d2a816b8077 100644 (file)
@@ -25,7 +25,7 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
 extern void *set_except_vector(int n, void *addr);
 extern unsigned long ebase;
 extern unsigned int hwrena;
-extern void per_cpu_trap_init(bool);
+extern void per_cpu_trap_init(bool) __init;
 extern void cpu_cache_init(void);
 
 #endif /* __SETUP_H */
index 65618ff1280c9687e0eeeb9aa14ac90311a88915..5959c85fd7083a3fa1506cb59cd4d59b2139bd96 100644 (file)
@@ -37,7 +37,7 @@ struct plat_smp_ops {
 #ifdef CONFIG_KEXEC
        void (*kexec_nonboot_cpu)(void);
 #endif
-};
+} __no_randomize_layout;
 
 extern void register_smp_ops(const struct plat_smp_ops *ops);
 
index 82e44b31aad596d2ad0803bd67d91393388f943a..824a9b252a99492312d533d3082ed268953e52ac 100644 (file)
@@ -336,19 +336,10 @@ static void __init bootmem_init(void)
        min_low_pfn = ARCH_PFN_OFFSET;
        max_pfn = PFN_DOWN(ramend);
        for_each_memblock(memory, mem) {
-               unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);
 
-               /*
-                * Skip highmem here so we get an accurate max_low_pfn if low
-                * memory stops short of high memory.
-                * If the region overlaps HIGHMEM_START, end is clipped so
-                * max_pfn excludes the highmem portion.
-                */
                if (memblock_is_nomap(mem))
                        continue;
-               if (start >= PFN_DOWN(HIGHMEM_START))
-                       continue;
                if (end > PFN_DOWN(HIGHMEM_START))
                        end = PFN_DOWN(HIGHMEM_START);
                if (end > max_low_pfn)
index f659adb681bc32868132dbc4c89183c46fc69e5f..6fa48c79d44f26e4d0add8477a3acd65bf338976 100644 (file)
@@ -247,7 +247,12 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
                        wmb();
                }
 
-               write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+               /*
+                * Use the PWRUP command instead of RESET so that EJTAG
+                * keeps working; otherwise the EJTAG chain is lost.
+                */
+               write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
 
                timeout = 100;
                while (true) {
index c563b03bdccc1cc7e8fbc61a0b2c7029e1990b9f..72ded27bdf015b16d1509aeb1c726771919fa343 100644 (file)
@@ -340,7 +340,7 @@ early_initcall(mips_smp_ipi_init);
  * First C code run on the secondary CPUs after being started up by
  * the master.
  */
-asmlinkage void start_secondary(void)
+asmlinkage void __init start_secondary(void)
 {
        unsigned int cpu;
 
index 5a491eca456fce8b482b119bc5019ddc4c68d0b1..279132430dd31fd75e6dc849d00a765864f65e7f 100644 (file)
@@ -1628,14 +1628,14 @@ asmlinkage void do_reserved(struct pt_regs *regs)
              (regs->cp0_cause & 0x7f) >> 2);
 }
 
-static int __initdata l1parity = 1;
+static int l1parity __initdata = 1;
 static int __init nol1parity(char *s)
 {
        l1parity = 0;
        return 1;
 }
 __setup("nol1par", nol1parity);
-static int __initdata l2parity = 1;
+static int l2parity __initdata = 1;
 static int __init nol2parity(char *s)
 {
        l2parity = 0;
@@ -1647,7 +1647,7 @@ __setup("nol2par", nol2parity);
  * Some MIPS CPUs can enable/disable for cache parity detection, but do
  * it different ways.
  */
-static inline void parity_protection_init(void)
+static inline void __init parity_protection_init(void)
 {
 #define ERRCTL_PE      0x80000000
 #define ERRCTL_L2P     0x00800000
@@ -2191,7 +2191,7 @@ static void configure_exception_vector(void)
        }
 }
 
-void per_cpu_trap_init(bool is_boot_cpu)
+void __init per_cpu_trap_init(bool is_boot_cpu)
 {
        unsigned int cpu = smp_processor_id();
 
@@ -2269,7 +2269,7 @@ void set_uncached_handler(unsigned long offset, void *addr,
        memcpy((void *)(uncached_ebase + offset), addr, size);
 }
 
-static int __initdata rdhwr_noopt;
+static int rdhwr_noopt __initdata;
 static int __init set_rdhwr_noopt(char *str)
 {
        rdhwr_noopt = 1;
index 3375bbe63284ef68490d7e22eb421d6c0d9ba26f..0d397d92fb80b7e9acbbb9584384440876c50a04 100644 (file)
@@ -1606,9 +1606,9 @@ static void loongson3_sc_init(void)
 
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
-extern int mips_sc_init(void);
+extern int mips_sc_init(void) __init;
 
-static void setup_scache(void)
+static void __init setup_scache(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
@@ -1819,7 +1819,7 @@ static void r4k_cache_error_setup(void)
        }
 }
 
-void r4k_cache_init(void)
+void __init r4k_cache_init(void)
 {
        extern void build_clear_page(void);
        extern void build_copy_page(void);
index 33b409391ddb6ba394da60a84c75f8a5d5091ad8..21bc7b9bd14dcd76af543563bccb8f772a3ef9fc 100644 (file)
@@ -197,7 +197,7 @@ static inline void setup_protection_map(void)
        }
 }
 
-void cpu_cache_init(void)
+void __init cpu_cache_init(void)
 {
        if (cpu_has_3k_cache) {
                extern void __weak r3k_cache_init(void);
index e67374268b42d0e0c25f0e916cc519f84b6c879a..f074643aa7a7288daa632041bb680f087454f177 100644 (file)
@@ -250,7 +250,7 @@ static inline int mips_sc_probe(void)
        return 1;
 }
 
-int mips_sc_init(void)
+int __init mips_sc_init(void)
 {
        int found = mips_sc_probe();
        if (found) {
diff --git a/arch/mips/pci/pci-baikal.h b/arch/mips/pci/pci-baikal.h
new file mode 100644 (file)
index 0000000..1602718
--- /dev/null
@@ -0,0 +1,673 @@
+/*
+ *  Baikal-T SOC platform support code.
+ *
+ *  Copyright (C) 2015-2018 Baikal Electronics.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ *  BAIKAL MIPS boards specific PCI support.
+ */
+
+#ifndef __PCI_BAIKAL_H__
+#define __PCI_BAIKAL_H__
+
+#include <linux/interrupt.h>
+#include <asm/mach-baikal/pci-t1.h>
+
+/* Define DW_CHECK_ECRC to enable ECRC (end-to-end CRC) checking. */
+//#define DW_CHECK_ECRC
+
+#define PCIE_CFG_BASE                                  0xBF052000
+#define PMU_BASE                                               0xBF04D000
+/* Start enumerating the buses from 1, since the IDT switch behaves oddly
+ * when it is directly connected to the RC and has bus number 0. */
+#define PCIE_ROOT_BUS_NUM      1
+
+#define PCI_RD0_BASE_ADDR              KSEG1ADDR(PHYS_PCI_RD0_BASE_ADDR)
+#define PCI_RD1_BASE_ADDR              KSEG1ADDR(PHYS_PCI_RD1_BASE_ADDR)
+
+#define PCIE_TYPE1_DEV_ID_VEND_ID_REG          (0x0)   /* Device ID and Vendor ID Register. */
+#define PCIE_TYPE1_STATUS_COMMAND_REG          (0x4)   /* Command and Status Register. */
+#define PCIE_TYPE1_CLASS_CODE_REV_ID_REG       (0x8)   /* Class Code and Revision ID Register. */
+#define PCIE_TYPE1_BIST_HDR_TYPE_LAT_CACHE_LINE_SIZE_REG       (0xc)   /* BIST, Header Type, Cache Line Size, and Master Latency Timer Register. */
+#define PCIE_SEC_LAT_TIMER_SUB_BUS_SEC_BUS_PRI_BUS_REG         (0x18)  /* Primary, Secondary, Subordinate Bus Numbers and Latency Timer Registers. */
+#define PCIE_SEC_STAT_IO_LIMIT_IO_BASE_REG     (0x1c)  /* Secondary Status and I/O Base and Limit Registers. */
+#define PCIE_MEM_LIMIT_MEM_BASE_REG            (0x20)  /* Memory Base and Memory Limit Register. */
+#define PCIE_PREF_MEM_LIMIT_PREF_MEM_BASE_REG  (0x24)  /* Prefetchable Memory Base and Limit Register. */
+#define PCIE_PREF_BASE_UPPER_REG               (0x28)  /* Prefetchable Base Upper 32 Bits Register. */
+#define PCIE_PREF_LIMIT_UPPER_REG              (0x2c)  /* Prefetchable Limit Upper 32 Bits Register. */
+#define PCIE_IO_LIMIT_UPPER_IO_BASE_UPPER_REG  (0x30)  /* I/O Base Upper 16 Bits and I/O Limit Upper 16 Bits Register. */
+#define PCIE_TYPE1_CAP_PTR_REG                 (0x34)  /* Capability Pointer Register. */
+#define PCIE_TYPE1_EXP_ROM_BASE_REG            (0x38)  /* Expansion ROM BAR and Mask Register. */
+#define PCIE_BRIDGE_CTRL_INT_PIN_INT_LINE_REG  (0x3c)  /* Interrupt Line and Pin and Bridge Control Registers. */
+#define PCIE_CAP_ID_NXT_PTR_REG                        (0x40)  /* Power Management Capabilities Register. */
+#define PCIE_CON_STATUS_REG                    (0x44)  /* Power Management Control and Status Register. */
+#define PCIE_PCI_MSI_CAP_ID_NEXT_CTRL_REG      (0x50)  /* MSI Capability ID, Next Pointer, Control Registers. */
+#define PCIE_MSI_CAP_OFF_04H_REG               (0x54)  /* MSI Capability ID, Next Pointer, Control Registers. */
+#define PCIE_MSI_CAP_OFF_08H_REG               (0x58)  /* MSI Capability ID, Next Pointer, Control Registers. */
+#define PCIE_MSI_CAP_OFF_0CH_REG               (0x5c)  /* MSI Capability ID, Next Pointer, Control Registers. */
+#define PCIE_MSI_CAP_OFF_10H_REG               (0x60)  /* MSI Capability ID, Next Pointer, Control Registers. */
+#define PCIE_MSI_CAP_OFF_14H_REG               (0x64)  /* MSI Capability ID, Next Pointer, Control Registers. */
+#define PCIE_PCIE_CAP_ID_PCIE_NEXT_CAP_PTR_PCIE_CAP_REG        (0x70)  /* PCI Express Capabilities, ID, Next Pointer Register. */
+#define PCIE_DEVICE_CAPABILITIES_REG           (0x74)  /* Device Capabilities Register. */
+#define PCIE_DEVICE_CONTROL_DEVICE_STATUS      (0x78)  /* Device Control and Status Register. */
+#define PCIE_LINK_CAPABILITIES_REG             (0x7c)  /* Link Capabilities Register. */
+#define PCIE_LINK_CONTROL_LINK_STATUS_REG      (0x80)  /* Link Control and Status Register. */
+#define PCIE_ROOT_CONTROL_ROOT_CAPABILITIES_REG        (0x8c)  /* Root Control and Capabilities Register. */
+#define PCIE_ROOT_STATUS_REG                   (0x90)  /* Root Status Register. */
+#define PCIE_DEVICE_CAPABILITIES2_REG          (0x94)  /* Device Capabilities 2 Register. */
+#define PCIE_DEVICE_CONTROL2_DEVICE_STATUS2_REG        (0x98)  /* Device Control 2 and Status 2 Register. */
+#define PCIE_LINK_CAPABILITIES2_REG            (0x9c)  /* Link Capabilities 2 Register. */
+#define PCIE_LINK_CONTROL2_LINK_STATUS2_REG    (0xa0)  /* Link Control 2 and Status 2 Register. */
+#define PCIE_PCI_MSIX_CAP_ID_NEXT_CTRL_REG     (0xb0)  /* MSI-X Capability ID, Next Pointer, Control Registers. */
+#define PCIE_MSIX_TABLE_OFFSET_REG             (0xb4)  /* MSI-X Table Offset and BIR Register. */
+#define PCIE_MSIX_PBA_OFFSET_REG               (0xb8)  /* MSI-X PBA Offset and BIR Register. */
+#define PCIE_SLOTNUM_BASE                      (0xc0)  /* Slot Numbering Capabilities Register. */
+#define PCIE_VPD_BASE                          (0xd0)  /* VPD Control and Capabilities Register. */
+#define PCIE_DATA_REG                          (0xd4)  /* VPD Data Register. */
+#define PCIE_AER_EXT_CAP_HDR_OFF               (0x100) /* Advanced Error Reporting Extended Capability Header. */
+#define PCIE_UNCORR_ERR_STATUS_OFF             (0x104) /* Uncorrectable Error Status Register. */
+#define PCIE_UNCORR_ERR_MASK_OFF               (0x108) /* Uncorrectable Error Mask Register. */
+#define PCIE_UNCORR_ERR_SEV_OFF                        (0x10c) /* Uncorrectable Error Severity Register. */
+#define PCIE_CORR_ERR_STATUS_OFF               (0x110) /* Correctable Error Status Register. */
+#define PCIE_CORR_ERR_MASK_OFF                 (0x114) /* Correctable Error Mask Register. */
+#define PCIE_ADV_ERR_CAP_CTRL_OFF              (0x118) /* Advanced Error Capabilities and Control Register. */
+#define PCIE_HDR_LOG_0_OFF                     (0x11c) /* Header Log Register 0. */
+#define PCIE_HDR_LOG_1_OFF                     (0x120) /* Header Log Register 1. */
+#define PCIE_HDR_LOG_2_OFF                     (0x124) /* Header Log Register 2. */
+#define PCIE_HDR_LOG_3_OFF                     (0x128) /* Header Log Register 3. */
+#define PCIE_ROOT_ERR_CMD_OFF                  (0x12c) /* Root Error Command Register. */
+#define PCIE_ROOT_ERR_STATUS_OFF               (0x130) /* Root Error Status Register. */
+#define PCIE_ERR_SRC_ID_OFF                    (0x134) /* Error Source Identification Register. */
+#define PCIE_TLP_PREFIX_LOG_OFF                        (0x138) /* TLP Prefix Log Register. */
+#define PCIE_VC_BASE                           (0x148) /* VC Extended Capability Header. */
+#define PCIE_VC_CAPABILITIES_REG_1             (0x14c) /* Port VC Capability Register 1. */
+#define PCIE_VC_CAPABILITIES_REG_2             (0x150) /* Port VC Capability Register 2. */
+#define PCIE_VC_STATUS_CONTROL_REG             (0x154) /* Port VC Control and Status Register. */
+#define PCIE_RESOURCE_CAP_REG_VC0              (0x158) /* VC Resource Capability Register (0). */
+#define PCIE_RESOURCE_CON_REG_VC0              (0x15c) /* VC Resource Control Register (0). */
+#define PCIE_RESOURCE_STATUS_REG_VC0           (0x160) /* VC Resource Status Register (0). */
+#define PCIE_RESOURCE_CAP_REG_VC1              (0x164) /* VC Resource Capability Register (1). */
+#define PCIE_RESOURCE_CON_REG_VC1              (0x168) /* VC Resource Control Register (1). */
+#define PCIE_RESOURCE_STATUS_REG_VC1           (0x16c) /* VC Resource Status Register (1). */
+#define PCIE_RESOURCE_CAP_REG_VC2              (0x170) /* VC Resource Capability Register (2). */
+#define PCIE_RESOURCE_CON_REG_VC2              (0x174) /* VC Resource Control Register (2). */
+#define PCIE_RESOURCE_STATUS_REG_VC2           (0x178) /* VC Resource Status Register (2). */
+#define PCIE_RESOURCE_CAP_REG_VC3              (0x17c) /* VC Resource Capability Register (3). */
+#define PCIE_RESOURCE_CON_REG_VC3              (0x180) /* VC Resource Control Register (3). */
+#define PCIE_RESOURCE_STATUS_REG_VC3           (0x184) /* VC Resource Status Register (3). */
+#define PCIE_RESOURCE_CAP_REG_VC4              (0x188) /* VC Resource Capability Register (4). */
+#define PCIE_RESOURCE_CON_REG_VC4              (0x18c) /* VC Resource Control Register (4). */
+#define PCIE_RESOURCE_STATUS_REG_VC4           (0x190) /* VC Resource Status Register (4). */
+#define PCIE_RESOURCE_CAP_REG_VC5              (0x194) /* VC Resource Capability Register (5). */
+#define PCIE_RESOURCE_CON_REG_VC5              (0x198) /* VC Resource Control Register (5). */
+#define PCIE_RESOURCE_STATUS_REG_VC5           (0x19c) /* VC Resource Status Register (5). */
+#define PCIE_RESOURCE_CAP_REG_VC6              (0x1a0) /* VC Resource Capability Register (6). */
+#define PCIE_RESOURCE_CON_REG_VC6              (0x1a4) /* VC Resource Control Register (6). */
+#define PCIE_RESOURCE_STATUS_REG_VC6           (0x1a8) /* VC Resource Status Register (6). */
+#define PCIE_RESOURCE_CAP_REG_VC7              (0x1ac) /* VC Resource Capability Register (7). */
+#define PCIE_RESOURCE_CON_REG_VC7              (0x1b0) /* VC Resource Control Register (7). */
+#define PCIE_RESOURCE_STATUS_REG_VC7           (0x1b4) /* VC Resource Status Register (7). */
+#define PCIE_SN_BASE                           (0x168) /* Device Serial Number Extended Capability Header. */
+#define PCIE_SER_NUM_REG_DW_1                  (0x16c) /* Serial Number 1 Register. */
+#define PCIE_SER_NUM_REG_DW_2                  (0x170) /* Serial Number 2 Register. */
+#define PCIE_PB_BASE                           (0x178) /* Power Budgeting Extended Capability Header. */
+#define PCIE_DATA_REG_PB                       (0x180) /* Data Register. */
+#define PCIE_CAP_REG_PB                                (0x184) /* Power Budget Capability Register. */
+#define PCIE_SPCIE_CAP_HEADER_REG              (0x198) /* SPCIE Capability Header. */
+#define PCIE_LINK_CONTROL3_REG                 (0x19c) /* Link Control 3 Register. */
+#define PCIE_LANE_ERR_STATUS_REG               (0x1a0) /* Lane Error Status Register. */
+#define PCIE_LANE_EQUALIZATION_CONTROL01_REG   (0x1a4) /* Equalization Control Register for Lanes 1-0. */
+#define PCIE_LANE_EQUALIZATION_CONTROL23_REG   (0x1a8) /* Equalization Control Register for Lanes 3-2. */
+#define PCIE_LANE_EQUALIZATION_CONTROL45_REG   (0x1ac) /* Equalization Control Register for Lanes 5-4. */
+#define PCIE_LANE_EQUALIZATION_CONTROL67_REG   (0x1b0) /* Equalization Control Register for Lanes 7-6. */
+#define PCIE_LANE_EQUALIZATION_CONTROL89_REG   (0x1b4) /* Equalization Control Register for Lanes 9-8. */
+#define PCIE_LANE_EQUALIZATION_CONTROL1011_REG (0x1b8) /* Equalization Control Register for Lanes 11-10. */
+#define PCIE_LANE_EQUALIZATION_CONTROL1213_REG (0x1bc) /* Equalization Control Register for Lanes 13-12. */
+#define PCIE_LANE_EQUALIZATION_CONTROL1415_REG (0x1c0) /* Equalization Control Register for Lanes 15-14. */
+#define PCIE_TPH_EXT_CAP_HDR_REG               (0x1f8) /* TPH Extended Capability Header. */
+#define PCIE_TPH_REQ_CAP_REG_REG               (0x1fc) /* TPH Requestor Capability Register. */
+#define PCIE_TPH_REQ_CONTROL_REG_REG           (0x200) /* TPH Requestor Control Register. */
+#define PCIE_TPH_ST_TABLE_REG_0                        (0x204) /* TPH ST Table Register 0. */
+#define PCIE_TPH_ST_TABLE_REG_1                        (0x208) /* TPH ST Table Register 1. */
+#define PCIE_TPH_ST_TABLE_REG_2                        (0x20c) /* TPH ST Table Register 2. */
+#define PCIE_TPH_ST_TABLE_REG_3                        (0x210) /* TPH ST Table Register 3. */
+#define PCIE_TPH_ST_TABLE_REG_4                        (0x214) /* TPH ST Table Register 4. */
+#define PCIE_TPH_ST_TABLE_REG_5                        (0x218) /* TPH ST Table Register 5. */
+#define PCIE_TPH_ST_TABLE_REG_6                        (0x21c) /* TPH ST Table Register 6. */
+#define PCIE_TPH_ST_TABLE_REG_7                        (0x220) /* TPH ST Table Register 7. */
+#define PCIE_L1SUB_CAP_HEADER_REG              (0x2e0) /* L1 Substates Extended Capability Header. */
+#define PCIE_L1SUB_CAPABILITY_REG              (0x2e4) /* L1 Substates Capability Register. */
+#define PCIE_L1SUB_CONTROL1_REG                        (0x2e8) /* L1 Substates Control 1 Register. */
+#define PCIE_L1SUB_CONTROL2_REG                        (0x2ec) /* L1 Substates Control 2 Register. */
+#define PCIE_ACK_LATENCY_TIMER_OFF             (0x700) /* Ack Latency Timer and Replay Timer Register. */
+#define PCIE_VENDOR_SPEC_DLLP_OFF              (0x704) /* Vendor Specific DLLP Register. */
+#define PCIE_PORT_FORCE_OFF                    (0x708) /* Port Force Link Register. */
+#define PCIE_ACK_F_ASPM_CTRL_OFF               (0x70c) /* Ack Frequency and L0-L1 ASPM Control Register. */
+#define PCIE_PORT_LINK_CTRL_OFF                        (0x710) /* Port Link Control Register. */
+#define PCIE_LANE_SKEW_OFF                     (0x714) /* Lane Skew Register. */
+#define PCIE_TIMER_CTRL_MAX_FUNC_NUM_OFF       (0x718) /* Timer Control and Max Function Number Register. */
+#define PCIE_SYMBOL_TIMER_FILTER_1_OFF         (0x71c) /* Symbol Timer Register and Filter Mask 1. */
+#define PCIE_FILTER_MASK_2_OFF                 (0x720) /* Filter Mask 2. */
+#define PCIE_AMBA_MUL_OB_DECOMP_NP_SUB_REQ_CTRL_OFF    (0x724) /* AMBA Multiple Outbound Decomposed NP SubRequests Control Register. */
+#define PCIE_PL_DEBUG0_OFF                     (0x728) /* Debug Register 0. */
+#define PCIE_PL_DEBUG1_OFF                     (0x72c) /* Debug Register 1. */
+#define PCIE_TX_P_FC_CREDIT_STATUS_OFF         (0x730) /* Transmit Posted FC Credit Status. */
+#define PCIE_TX_NP_FC_CREDIT_STATUS_OFF                (0x734) /* Transmit Non-Posted FC Credit Status. */
+#define PCIE_TX_CPL_FC_CREDIT_STATUS_OFF       (0x738) /* Transmit Completion FC Credit Status. */
+#define PCIE_QUEUE_STATUS_OFF                  (0x73c) /* Queue Status. */
+#define PCIE_VC_TX_ARBI_1_OFF                  (0x740) /* VC Transmit Arbitration Register 1. */
+#define PCIE_VC_TX_ARBI_2_OFF                  (0x744) /* VC Transmit Arbitration Register 2. */
+#define PCIE_VC0_P_RX_Q_CTRL_OFF               (0x748) /* Segmented-Buffer VC0 Posted Receive Queue Control. */
+#define PCIE_VC0_NP_RX_Q_CTRL_OFF              (0x74c) /* Segmented-Buffer VC0 Non-Posted Receive Queue Control. */
+#define PCIE_VC0_CPL_RX_Q_CTRL_OFF             (0x750) /* Segmented-Buffer VC0 Completion Receive Queue Control. */
+#define PCIE_VC1_P_RX_Q_CTRL_OFF               (0x754) /* Segmented-Buffer VC1 Posted Receive Queue Control. */
+#define PCIE_VC1_NP_RX_Q_CTRL_OFF              (0x758) /* Segmented-Buffer VC1 Non-Posted Receive Queue Control. */
+#define PCIE_VC1_CPL_RX_Q_CTRL_OFF             (0x75c) /* Segmented-Buffer VC1 Completion Receive Queue Control. */
+#define PCIE_VC2_P_RX_Q_CTRL_OFF               (0x760) /* Segmented-Buffer VC2 Posted Receive Queue Control. */
+#define PCIE_VC2_NP_RX_Q_CTRL_OFF              (0x764) /* Segmented-Buffer VC2 Non-Posted Receive Queue Control. */
+#define PCIE_VC2_CPL_RX_Q_CTRL_OFF             (0x768) /* Segmented-Buffer VC2 Completion Receive Queue Control. */
+#define PCIE_VC3_P_RX_Q_CTRL_OFF               (0x76c) /* Segmented-Buffer VC3 Posted Receive Queue Control. */
+#define PCIE_VC3_NP_RX_Q_CTRL_OFF              (0x770) /* Segmented-Buffer VC3 Non-Posted Receive Queue Control. */
+#define PCIE_VC3_CPL_RX_Q_CTRL_OFF             (0x774) /* Segmented-Buffer VC3 Completion Receive Queue Control. */
+#define PCIE_VC4_P_RX_Q_CTRL_OFF               (0x778) /* Segmented-Buffer VC4 Posted Receive Queue Control. */
+#define PCIE_VC4_NP_RX_Q_CTRL_OFF              (0x77c) /* Segmented-Buffer VC4 Non-Posted Receive Queue Control. */
+#define PCIE_VC4_CPL_RX_Q_CTRL_OFF             (0x780) /* Segmented-Buffer VC4 Completion Receive Queue Control. */
+#define PCIE_VC5_P_RX_Q_CTRL_OFF               (0x784) /* Segmented-Buffer VC5 Posted Receive Queue Control. */
+#define PCIE_VC5_NP_RX_Q_CTRL_OFF              (0x788) /* Segmented-Buffer VC5 Non-Posted Receive Queue Control. */
+#define PCIE_VC5_CPL_RX_Q_CTRL_OFF             (0x78c) /* Segmented-Buffer VC5 Completion Receive Queue Control. */
+#define PCIE_VC6_P_RX_Q_CTRL_OFF               (0x790) /* Segmented-Buffer VC6 Posted Receive Queue Control. */
+#define PCIE_VC6_NP_RX_Q_CTRL_OFF              (0x794) /* Segmented-Buffer VC6 Non-Posted Receive Queue Control. */
+#define PCIE_VC6_CPL_RX_Q_CTRL_OFF             (0x798) /* Segmented-Buffer VC6 Completion Receive Queue Control. */
+#define PCIE_VC7_P_RX_Q_CTRL_OFF               (0x79c) /* Segmented-Buffer VC7 Posted Receive Queue Control. */
+#define PCIE_VC7_NP_RX_Q_CTRL_OFF              (0x7a0) /* Segmented-Buffer VC7 Non-Posted Receive Queue Control. */
+#define PCIE_VC7_CPL_RX_Q_CTRL_OFF             (0x7a4) /* Segmented-Buffer VC7 Completion Receive Queue Control. */
+#define PCIE_GEN2_CTRL_OFF                     (0x80c) /* Link Width and Speed Change Control Register. */
+#define PCIE_PHY_STATUS_OFF                    (0x810) /* PHY Status Register. */
+#define PCIE_PHY_CONTROL_OFF                   (0x814) /* PHY Control Register. */
+#define PCIE_MSI_CTRL_ADDR_OFF                 (0x820) /* MSI Controller Address Register. */
+#define PCIE_MSI_CTRL_UPPER_ADDR_OFF           (0x824) /* MSI Controller Upper Address Register. */
+#define MSI_INTERRUPT_OFF                      (12)
+#define PCIE_MSI_CTRL_INT_0_EN_OFF             (0x828) /* MSI Controller Interrupt#0 Enable Register. */
+#define PCIE_MSI_CTRL_INT_0_MASK_OFF           (0x82c) /* MSI Controller Interrupt#0 Mask Register. */
+#define PCIE_MSI_CTRL_INT_0_STATUS_OFF         (0x830) /* MSI Controller Interrupt#0 Status Register. */
+#define PCIE_MSI_CTRL_INT_1_EN_OFF             (0x834) /* MSI Controller Interrupt#1 Enable Register. */
+#define PCIE_MSI_CTRL_INT_1_MASK_OFF           (0x838) /* MSI Controller Interrupt#1 Mask Register. */
+#define PCIE_MSI_CTRL_INT_1_STATUS_OFF         (0x83c) /* MSI Controller Interrupt#1 Status Register. */
+#define PCIE_MSI_CTRL_INT_2_EN_OFF             (0x840) /* MSI Controller Interrupt#2 Enable Register. */
+#define PCIE_MSI_CTRL_INT_2_MASK_OFF           (0x844) /* MSI Controller Interrupt#2 Mask Register. */
+#define PCIE_MSI_CTRL_INT_2_STATUS_OFF         (0x848) /* MSI Controller Interrupt#2 Status Register. */
+#define PCIE_MSI_CTRL_INT_3_EN_OFF             (0x84c) /* MSI Controller Interrupt#3 Enable Register. */
+#define PCIE_MSI_CTRL_INT_3_MASK_OFF           (0x850) /* MSI Controller Interrupt#3 Mask Register. */
+#define PCIE_MSI_CTRL_INT_3_STATUS_OFF         (0x854) /* MSI Controller Interrupt#3 Status Register. */
+#define PCIE_MSI_CTRL_INT_4_EN_OFF             (0x858) /* MSI Controller Interrupt#4 Enable Register. */
+#define PCIE_MSI_CTRL_INT_4_MASK_OFF           (0x85c) /* MSI Controller Interrupt#4 Mask Register. */
+#define PCIE_MSI_CTRL_INT_4_STATUS_OFF         (0x860) /* MSI Controller Interrupt#4 Status Register. */
+#define PCIE_MSI_CTRL_INT_5_EN_OFF             (0x864) /* MSI Controller Interrupt#5 Enable Register. */
+#define PCIE_MSI_CTRL_INT_5_MASK_OFF           (0x868) /* MSI Controller Interrupt#5 Mask Register. */
+#define PCIE_MSI_CTRL_INT_5_STATUS_OFF         (0x86c) /* MSI Controller Interrupt#5 Status Register. */
+#define PCIE_MSI_CTRL_INT_6_EN_OFF             (0x870) /* MSI Controller Interrupt#6 Enable Register. */
+#define PCIE_MSI_CTRL_INT_6_MASK_OFF           (0x874) /* MSI Controller Interrupt#6 Mask Register. */
+#define PCIE_MSI_CTRL_INT_6_STATUS_OFF         (0x878) /* MSI Controller Interrupt#6 Status Register. */
+#define PCIE_MSI_CTRL_INT_7_EN_OFF             (0x87c) /* MSI Controller Interrupt#7 Enable Register. */
+#define PCIE_MSI_CTRL_INT_7_MASK_OFF           (0x880) /* MSI Controller Interrupt#7 Mask Register. */
+#define PCIE_MSI_CTRL_INT_7_STATUS_OFF         (0x884) /* MSI Controller Interrupt#7 Status Register. */
+#define PCIE_MSI_GPIO_IO_OFF                   (0x888) /* MSI Controller General Purpose IO Register. */
+#define PCIE_GEN3_RELATED_OFF                  (0x890) /* Gen3 Control Register. */
+#define PCIE_GEN3_EQ_LOCAL_FS_LF_OFF           (0x894) /* Gen3 EQ FS and LF Register. */
+#define PCIE_GEN3_EQ_PSET_INDEX_OFF            (0x89c) /* Gen3 EQ Preset Index Register. */
+#define PCIE_GEN3_EQ_COEFF_LEGALITY_STATUS_OFF (0x8a4) /* Gen3 EQ Status Register. */
+#define PCIE_GEN3_EQ_CONTROL_OFF               (0x8a8) /* Gen3 EQ Control Register. */
+#define PCIE_GEN3_EQ_FB_MODE_DIR_CHANGE_OFF    (0x8ac) /* Gen3 EQ Direction Change Feedback Mode Control Register. */
+#define PCIE_PIPE_LOOPBACK_CONTROL_OFF         (0x8b8) /* PIPE Loopback Control Register. */
+#define PCIE_MISC_CONTROL_1_OFF                        (0x8bc) /* DBI Read-Only Write Enable Register. */
+#define PCIE_AMBA_ERROR_RESPONSE_DEFAULT_OFF   (0x8d0) /* AHB/AXI Bridge Slave Error Response Register. */
+#define PCIE_AMBA_LINK_TIMEOUT_OFF             (0x8d4) /* Link Down AXI Bridge Slave Timeout Register. */
+#define PCIE_AMBA_ORDERING_CTRL_OFF            (0x8d8) /* AMBA Ordering Control. */
+#define PCIE_AMBA_ORDRMGR_WDOG_OFF             (0x8dc) /* AHB/AXI Ordering Manager Watchdog Timer. */
+#define PCIE_COHERENCY_CONTROL_1_OFF           (0x8e0) /* ACE Cache Coherency Control Register 1. */
+#define PCIE_COHERENCY_CONTROL_2_OFF           (0x8e4) /* ACE Cache Coherency Control Register 2. */
+#define PCIE_COHERENCY_CONTROL_3_OFF           (0x8e8) /* ACE Cache Coherency Control Register 3. */
+#define PCIE_PL_LAST_OFF                       (0x8fc) /* This is an internally reserved register. */
+#define PCIE_IATU_VIEWPORT_OFF                 (0x900) /* iATU Index Register. */
+#define PCIE_IATU_REGION_CTRL_1_OFF_OUTBOUND_0 (0x904) /* iATU Region Control 1 Register. */
+#define PCIE_IATU_REGION_CTRL_2_OFF_OUTBOUND_0 (0x908) /* iATU Region Control 2 Register. */
+#define PCIE_IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 (0x90c) /* iATU Lower Base Address Register (start of the region to be translated). */
+#define PCIE_IATU_UPR_BASE_ADDR_OFF_OUTBOUND_0 (0x910) /* iATU Upper Base Address Register. */
+#define PCIE_IATU_LIMIT_ADDR_OFF_OUTBOUND_0    (0x914) /* iATU Limit Address Register. */
+#define PCIE_IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0       (0x918) /* iATU Lower Target Address Register. */
+#define PCIE_IATU_UPR_TARGET_ADDR_OFF_OUTBOUND_0       (0x91C) /* iATU Upper Target Address Register. */
+#define PCIE_DMA_CTRL_OFF                      (0x978) /* DMA Number of Channels Register. */
+#define PCIE_DMA_WRITE_ENGINE_EN_OFF           (0x97c) /* DMA Write Engine Enable Register. */
+#define PCIE_DMA_WRITE_DOORBELL_OFF            (0x980) /* DMA Write Doorbell Register. */
+#define PCIE_DMA_WRITE_CHANNEL_ARB_WEIGHT_LOW_OFF      (0x988) /* DMA Write Engine Channel Arbitration Weight Low Register. */
+#define PCIE_DMA_WRITE_CHANNEL_ARB_WEIGHT_HIGH_OFF     (0x98c) /* DMA Write Engine Channel Arbitration Weight High Register. */
+#define PCIE_DMA_WRITE_P_REQ_TIMER_OFF         (0x998) /* DMA Write Posted Request Deadlock Timer Register. */
+#define PCIE_DMA_READ_ENGINE_EN_OFF            (0x99c) /* DMA Read Engine Enable Register. */
+#define PCIE_DMA_READ_DOORBELL_OFF             (0x9a0) /* DMA Read Doorbell Register. */
+#define PCIE_DMA_READ_CHANNEL_ARB_WEIGHT_LOW_OFF       (0x9a8) /* DMA Read Engine Channel Arbitration Weight Low Register. */
+#define PCIE_DMA_READ_CHANNEL_ARB_WEIGHT_HIGH_OFF      (0x9ac) /* DMA Read Engine Channel Arbitration Weight High Register. */
+#define PCIE_DMA_WRITE_INT_STATUS_OFF          (0x9bc) /* DMA Write Interrupt Status Register. */
+#define PCIE_DMA_WRITE_INT_MASK_OFF            (0x9c4) /* DMA Write Interrupt Mask Register. */
+#define PCIE_DMA_WRITE_INT_CLEAR_OFF           (0x9c8) /* DMA Write Interrupt Clear Register. */
+#define PCIE_DMA_WRITE_ERR_STATUS_OFF          (0x9cc) /* DMA Write Error Status Register. */
+#define PCIE_DMA_WRITE_DONE_IMWR_LOW_OFF       (0x9d0) /* DMA Write Done IMWr Address Low Register. */
+#define PCIE_DMA_WRITE_DONE_IMWR_HIGH_OFF      (0x9d4) /* DMA Write Done IMWr Interrupt Address High Register. */
+#define PCIE_DMA_WRITE_ABORT_IMWR_LOW_OFF      (0x9d8) /* DMA Write Abort IMWr Address Low Register. */
+#define PCIE_DMA_WRITE_ABORT_IMWR_HIGH_OFF     (0x9dc) /* DMA Write Abort IMWr Address High Register. */
+#define PCIE_DMA_WRITE_CH01_IMWR_DATA_OFF      (0x9e0) /* DMA Write Channel 1 and 0 IMWr Data Register. */
+#define PCIE_DMA_WRITE_CH23_IMWR_DATA_OFF      (0x9e4) /* DMA Write Channel 3 and 2 IMWr Data Register. */
+#define PCIE_DMA_WRITE_CH45_IMWR_DATA_OFF      (0x9e8) /* DMA Write Channel 5 and 4 IMWr Data Register. */
+#define PCIE_DMA_WRITE_CH67_IMWR_DATA_OFF      (0x9ec) /* DMA Write Channel 7 and 6 IMWr Data Register. */
+#define PCIE_DMA_WRITE_LINKED_LIST_ERR_EN_OFF  (0xa00) /* DMA Write Linked List Error Enable Register. */
+#define PCIE_DMA_READ_INT_STATUS_OFF           (0xa10) /* DMA Read Interrupt Status Register. */
+#define PCIE_DMA_READ_INT_MASK_OFF             (0xa18) /* DMA Read Interrupt Mask Register. */
+#define PCIE_DMA_READ_INT_CLEAR_OFF            (0xa1c) /* DMA Read Interrupt Clear Register. */
+#define PCIE_DMA_READ_ERR_STATUS_LOW_OFF       (0xa24) /* DMA Read Error Status Low Register. */
+#define PCIE_DMA_READ_ERR_STATUS_HIGH_OFF      (0xa28) /* DMA Read Error Status High Register. */
+#define PCIE_DMA_READ_LINKED_LIST_ERR_EN_OFF   (0xa34) /* DMA Read Linked List Error Enable Register. */
+#define PCIE_DMA_READ_DONE_IMWR_LOW_OFF                (0xa3c) /* DMA Read Done IMWr Address Low Register. */
+#define PCIE_DMA_READ_DONE_IMWR_HIGH_OFF       (0xa40) /* DMA Read Done IMWr Address High Register. */
+#define PCIE_DMA_READ_ABORT_IMWR_LOW_OFF       (0xa44) /* DMA Read Abort IMWr Address Low Register. */
+#define PCIE_DMA_READ_ABORT_IMWR_HIGH_OFF      (0xa48) /* DMA Read Abort IMWr Address High Register. */
+#define PCIE_DMA_READ_CH01_IMWR_DATA_OFF       (0xa4c) /* DMA Read Channel 1 and 0 IMWr Data Register. */
+#define PCIE_DMA_READ_CH23_IMWR_DATA_OFF       (0xa50) /* DMA Read Channel 3 and 2 IMWr Data Register. */
+#define PCIE_DMA_READ_CH45_IMWR_DATA_OFF       (0xa54) /* DMA Read Channel 5 and 4 IMWr Data Register. */
+#define PCIE_DMA_READ_CH67_IMWR_DATA_OFF       (0xa58) /* DMA Read Channel 7 and 6 IMWr Data Register. */
+#define PCIE_DMA_VIEWPORT_SEL_OFF              (0xa6c) /* DMA Channel Context Index Register. */
+#define PCIE_PL_LTR_LATENCY_OFF                        (0xb30) /* LTR Latency Register. */
+#define PCIE_AUX_CLK_FREQ_OFF                  (0xb40) /* Auxiliary Clock Frequency Control Register. */
+#define PCIE_L1_SUBSTATES_OFF                  (0xb44) /* L1 Substates Timing Register. */
+/* Baikal-specific registers. */
+#define PCIE_BK_MGMT_SEL_LANE                  (0xd04) /* Select lane. */
+#define PCIE_BK_MGMT_CTRL                      (0xd08) /* Control management register. */
+#define PCIE_BK_MGMT_WRITE_DATA                        (0xd0c) /* Data write register. */
+#define PCIE_BK_MGMT_READ_DATA                 (0xd10) /* Data read register. */
+
+/* PCIE_BK_MGMT_CTRL */
+#define BK_MGMT_CTRL_ADDR_MASK                 (0x1FFFFF) /* address, bits [20:0] */
+#define BK_MGMT_CTRL_READ                      (0 << 29)
+#define BK_MGMT_CTRL_WRITE                     (1 << 29)
+#define BK_MGMT_CTRL_DONE                      (1 << 30)
+#define BK_MGMT_CTRL_BUSY                      (1 << 31)
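The four PCIE_BK_MGMT_* registers above form a small mailbox into the PCIe PHY register file (the PCIE_PHY_DWC_* map further below). A read sketch, assuming readl()/writel() on an ioremap()ed view of the controller space at PCIE_CFG_BASE, and assuming BK_MGMT_CTRL_DONE is set once a transaction completes (the helper and accessor names are illustrative):

    static u32 bk_phy_read(void __iomem *pcie, u32 lane_sel, u32 phy_reg)
    {
            /* Select the target lane(s), then issue the read request. */
            writel(lane_sel, pcie + PCIE_BK_MGMT_SEL_LANE);
            writel((phy_reg & BK_MGMT_CTRL_ADDR_MASK) | BK_MGMT_CTRL_READ,
                   pcie + PCIE_BK_MGMT_CTRL);

            /* Busy-wait until the management interface reports completion. */
            while (!(readl(pcie + PCIE_BK_MGMT_CTRL) & BK_MGMT_CTRL_DONE))
                    cpu_relax();

            return readl(pcie + PCIE_BK_MGMT_READ_DATA);
    }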
+
+/* PCIE_MISC_CONTROL_1_OFF */
+#define DBI_RO_WR_EN                           (1 << 0)        /* Write to RO Registers Using DBI. */
+
+/* PCIE_PORT_LINK_CTRL_OFF */
+#define FAST_LINK_MODE                         (1 << 7)        /* Fast Link Mode. */
+#define LINK_CAPABLE_SHIFT                     (16)            /* Link Mode Enable. */
+#define LINK_CAPABLE_MASK                      0x3F0000
+
+/* GEN2_CTRL_OFF */
+#define NUM_OF_LANES_SHIFT                     (8)             /* Predetermined Number of Lanes. */
+#define NUM_OF_LANES_MASK                      0x1F00
+#define DIRECT_SPEED_CHANGE                    (1 << 17)
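As a small example of how these bits combine (same assumed accessors; the x4 lane count is only illustrative), advertising the link width and requesting an immediate speed change would be:

    u32 g2 = readl(pcie + PCIE_GEN2_CTRL_OFF);

    g2 &= ~NUM_OF_LANES_MASK;
    g2 |= (4 << NUM_OF_LANES_SHIFT) | DIRECT_SPEED_CHANGE;
    writel(g2, pcie + PCIE_GEN2_CTRL_OFF);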
+
+/* GEN3_EQ_CONTROL_OFF */
+#define GEN3_EQ_EVAL_2MS_DISABLE               (1 << 5)        /* Phase2_3 2 ms Timeout Disable. */
+#define GEN3_EQ_FB_MODE_SHIFT                  (0)             /* Feedback Mode */
+#define GEN3_EQ_FB_MODE_MASK                   0xF
+#define GEN3_EQ_PSET_REQ_VEC_SHIFT             (8)             /* Preset Request Vector. */
+#define GEN3_EQ_PSET_REQ_VEC_MASK              0xFFFF00
+
+/* LINK_CONTROL_LINK_STATUS_REG */
+#define PCIE_CAP_LINK_SPEED_SHIFT              16
+#define PCIE_CAP_LINK_SPEED_MASK               0xF0000
+#define PCIE_CAP_LINK_SPEED_GEN1               0x1
+#define PCIE_CAP_LINK_SPEED_GEN2               0x2
+#define PCIE_CAP_LINK_SPEED_GEN3               0x3
+#define PCIE_STA_LINK_TRAINING                 0x8000000
+#define PCIE_STA_LINK_WIDTH_MASK               0x3f00000
+#define PCIE_STA_LINK_WIDTH_SHIFT              (20)
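Decoding the negotiated link parameters with the masks above is then straightforward (sketch, same assumed accessors):

    u32 st = readl(pcie + PCIE_LINK_CONTROL_LINK_STATUS_REG);

    if (!(st & PCIE_STA_LINK_TRAINING))
            pr_info("PCIe link up: Gen%u x%u\n",
                    (st & PCIE_CAP_LINK_SPEED_MASK) >> PCIE_CAP_LINK_SPEED_SHIFT,
                    (st & PCIE_STA_LINK_WIDTH_MASK) >> PCIE_STA_LINK_WIDTH_SHIFT);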
+
+/* IATU_VIEWPORT_OFF */
+#define REGION_DIR_SHIFT                       (31)            /* Region Direction. */
+#define REGION_INDEX_SHIFT                     (0)             /* Region Index. */
+#define REGION_DIR_OUTBOUND                    (0)
+#define REGION_DIR_INBOUND                     (1)
+
+/* TYPE1_STATUS_COMMAND_REG */
+#define TYPE1_STATUS_COMMAND_REG_BME           (1 << 2)
+#define TYPE1_STATUS_COMMAND_REG_MSE           (1 << 1)
+#define TYPE1_STATUS_COMMAND_REG_IOSE          (1 << 0)
+
+/* IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
+#define LWR_BASE_RW_SHIFT                      (16)
+
+/* IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define LIMIT_ADDR_RW_SHIFT                    (16)
+
+/* IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define LWR_TARGET_RW_SHIFT                    (16)
+
+/* IATU_REGION_CTRL_1_OFF_OUTBOUND_0 */
+#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_SHIFT   (0)
+#define TLP_TYPE_MEM                           (0)
+#define TLP_TYPE_IO                            (2)
+#define TLP_TYPE_CFGRD0                                (4)
+#define TLP_TYPE_CFGRD1                                (5)
+
+/* IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
+#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN    (1 << 31)
+#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE       (1 << 28)
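Putting the viewport and region registers together, programming outbound region 0 as a memory window follows the usual DesignWare viewport sequence; a sketch under the same accessor assumptions (cpu_base, pci_addr and size are illustrative local variables):

    writel((REGION_DIR_OUTBOUND << REGION_DIR_SHIFT) | (0 << REGION_INDEX_SHIFT),
           pcie + PCIE_IATU_VIEWPORT_OFF);
    writel(lower_32_bits(cpu_base), pcie + PCIE_IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0);
    writel(upper_32_bits(cpu_base), pcie + PCIE_IATU_UPR_BASE_ADDR_OFF_OUTBOUND_0);
    writel(lower_32_bits(cpu_base + size - 1), pcie + PCIE_IATU_LIMIT_ADDR_OFF_OUTBOUND_0);
    writel(lower_32_bits(pci_addr), pcie + PCIE_IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0);
    writel(upper_32_bits(pci_addr), pcie + PCIE_IATU_UPR_TARGET_ADDR_OFF_OUTBOUND_0);
    writel(TLP_TYPE_MEM << IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_SHIFT,
           pcie + PCIE_IATU_REGION_CTRL_1_OFF_OUTBOUND_0);
    writel(IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN,
           pcie + PCIE_IATU_REGION_CTRL_2_OFF_OUTBOUND_0);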
+
+/* PCIE_LINK_CONTROL2_LINK_STATUS2 */
+#define PCIE_LINK_CONTROL2_GEN_MASK            (0xF)
+#define PCIE_LINK_CONTROL2_GEN1                        (1)
+#define PCIE_LINK_CONTROL2_GEN2                        (2)
+#define PCIE_LINK_CONTROL2_GEN3                        (3)
+
+/* PHY control registers. */
+#define PCIE_PHY_DWC_GLBL_PLL_CFG_0            (0x1c000)       /* PLL Global Configuration Register #0 */
+#define PCIE_PHY_DWC_GLBL_PLL_CFG_1            (0x1c001)       /* PLL Global Configuration Register #1 */
+#define PCIE_PHY_DWC_GLBL_PLL_CFG_2            (0x1c002)       /* PLL Global Configuration Register #2 */
+#define PCIE_PHY_DWC_GLBL_PLL_CFG_3            (0x1c003)       /* PLL Global Configuration Register #3 */
+#define PCIE_PHY_DWC_GLBL_PLL_CFG_4            (0x1c004)       /* PLL Global Configuration Register #4 */
+#define PCIE_PHY_DWC_GLBL_MISC_CONFIG_0                (0x1c005)       /* Global Miscellaneous Configuration #0 */
+#define PCIE_PHY_DWC_GLBL_MISC_CONFIG_1                (0x1c006)       /* Global Miscellaneous Configuration #1 */
+#define PCIE_PHY_DWC_SLICE_CFG                 (0x1c00c)       /* Slice Configuration */
+#define PCIE_PHY_DWC_GLBL_REGU_CFG             (0x1c00d)       /* Global Regulator Configuration */
+#define PCIE_PHY_DWC_GLBL_TERM_CFG             (0x1c00e)       /* Global Termination Calibration Configuration */
+#define PCIE_PHY_DWC_GLBL_CAL_CFG              (0x1c00f)       /* Global PLL Calibration Configuration */
+#define PCIE_PHY_DWC_GLBL_RD_SYNC_STATUS       (0x1c010)       /* Global Read Synchronization Status */
+#define PCIE_PHY_DWC_RX_PWR_CTRL_P0            (0x1c014)       /* RX Power Controls in Power State P0 */
+#define PCIE_PHY_DWC_RX_PWR_CTRL_P0S           (0x1c015)       /* RX Power Controls in Power State P0S */
+#define PCIE_PHY_DWC_RX_PWR_CTRL_P1            (0x1c016)       /* RX Power Controls in Power State P1 */
+#define PCIE_PHY_DWC_RX_PWR_CTRL_P2            (0x1c017)       /* RX Power Controls in Power State P2 */
+#define PCIE_PHY_DWC_TX_PWR_CTRL_P0_P0S                (0x1c018)       /* TX Power Controls in Power States P0 and POS */
+#define PCIE_PHY_DWC_TX_PWR_CTRL_P1_P2         (0x1c019)       /* TX Power Controls in Power States P1 and P2 */
+#define PCIE_PHY_DWC_GLBL_PWR_CTRL             (0x1c01a)       /* Global Power State Machine Control Override */
+#define PCIE_PHY_DWC_RX_TXDIR_CTRL_0           (0x1c01d)       /* Far-end TX Direction Control Register #0 */
+#define PCIE_PHY_DWC_RX_TXDIR_CTRL_1           (0x1c01e)       /* Far-end TX Direction Control Register #1 */
+#define PCIE_PHY_DWC_RX_TXDIR_CTRL_2           (0x1c01f)       /* Far-end TX Direction Control Register #2 */
+#define PCIE_PHY_DWC_GLBL_PLL_MONITOR          (0x1c020)       /* Monitor for SerDes Global to Raw PCS Global Interface */
+#define PCIE_PHY_DWC_GLBL_TERM_MON_1           (0x1c022)       /* Monitor for SerDes Global to Raw PCS Global Interface */
+#define PCIE_PHY_DWC_GLBL_SDS_PIN_MON_0                (0x1c023)       /* Monitor for Raw PCS Global to SerDes Global to Raw PCS Interface */
+#define PCIE_PHY_DWC_GLBL_SDS_PIN_MON_1                (0x1c024)       /* Monitor for Raw PCS Global to SerDes Global to Raw PCS Interface */
+#define PCIE_PHY_DWC_GLBL_PWR_MON_0            (0x1c025)       /* Monitor of Global Power State Machine Values */
+#define PCIE_PHY_DWC_GLBL_PWR_MON_1            (0x1c026)       /* Monitor of Global Power State Machine Values */
+#define PCIE_PHY_DWC_GLBL_PWR_MON_2            (0x1c027)       /* Monitor of Global Power State Machine Values */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_FRAC_BASE    (0x1c060)       /* Global PLL SSC Fractional Base */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_CYCLES       (0x1c061)       /* Global PLL SSC Cycles Configuration */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_FMFREQ       (0x1c062)       /* Global PLL SSC Modulation Frequency */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_FREF         (0x1c063)       /* Global PLL SSC Reference Frequency */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_PPM          (0x1c064)       /* Global PLL SSC PPM */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_CFG          (0x1c065)       /* Global PLL SSC Configuration */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_ALU_CMD      (0x1c067)       /* Global PLL SSC ALU Command */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_MON          (0x1c069)       /* Global PLL SSC Monitor */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_ALU_OUT_0    (0x1c06b)       /* Global PLL SSC ALU Output Register #0 */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_ALU_OUT_1    (0x1c06c)       /* Global PLL SSC ALU Output Register #1 */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_DIV          (0x1c06d)       /* Global PLL SSC Divider */
+#define PCIE_PHY_DWC_GLBL_PLL_SSC_FRAC         (0x1c06e)       /* Global PLL SSC Fraction */
+#define PCIE_PHY_DWC_GLBL_TAD                  (0x1c080)       /* Global Test Analog and Digital Monitor */
+#define PCIE_PHY_DWC_GLBL_TM_ADMON             (0x1c081)       /* Global Test Mode Analog/Digital Monitor Enable */
+#define PCIE_PHY_DWC_EQ_WAIT_TIME              (0x3c000)       /* TX and RX Equalization Wait Times */
+#define PCIE_PHY_DWC_RDET_TIME                 (0x3c001)       /* Receiver Detect Wait Times */
+#define PCIE_PHY_DWC_PCS_LANE_LINK_CFG         (0x3c002)       /* Link Configuration Override */
+#define PCIE_PHY_DWC_PCS_PLL_CTLIFC_0          (0x3c003)       /* PLL Control Interface Override Register #0 */
+#define PCIE_PHY_DWC_PCS_PLL_CTLIFC_1          (0x3c004)       /* PLL Control Interface Override Register #1 */
+#define PCIE_PHY_DWC_PCS_REG_RD_TIMEOUT                (0x3c005)       /* Register Read Timeout */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE1_MODE_0      (0x3c006)       /* PLL Configuration Register #0 for PCIe1 */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE1_MODE_1      (0x3c007)       /* PLL Configuration Register #1 for PCIe1 */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE1_MODE_0     (0x3c008)       /* Lane Configuration Register #0 for PCIe1 */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE1_MODE_1     (0x3c009)       /* Lane Configuration Register #1 for PCIe1 */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE2_MODE_0      (0x3c00a)       /* PLL Configuration Register #0 for PCIe2 */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE2_MODE_1      (0x3c00b)       /* PLL Configuration Register #1 for PCIe2 */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE2_MODE_0     (0x3c00c)       /* Lane Configuration Register #0 for PCIe2 */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE2_MODE_1     (0x3c00d)       /* Lane Configuration Register #1 for PCIe2 */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE3_MODE_0      (0x3c00e)       /* PLL Configuration Register #0 for PCIe3 */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE3_MODE_1      (0x3c00f)       /* PLL Configuration Register #1 for PCIe3 */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE3_MODE_0     (0x3c010)       /* Lane Configuration Register #0 for PCIe3 */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE3_MODE_1     (0x3c011)       /* Lane Configuration Register #1 for PCIe3 */
+#define PCIE_PHY_DWC_PCS_PLL_KX_MODE_1         (0x3c013)       /* PLL Configuration Register #1 for KX */
+#define PCIE_PHY_DWC_PCS_LANE_KX_MODE_0                (0x3c014)       /* Lane Configuration Register #0 for KX */
+#define PCIE_PHY_DWC_PCS_LANE_KX_MODE_1                (0x3c015)       /* Lane Configuration Register #1 for KX */
+#define PCIE_PHY_DWC_PCS_PLL_KX4_MODE_0                (0x3c016)       /* PLL Configuration Register #0 for KX4 */
+#define PCIE_PHY_DWC_PCS_PLL_KX4_MODE_1                (0x3c017)       /* PLL Configuration Register #1 for KX4 */
+#define PCIE_PHY_DWC_PCS_LANE_KX4_MODE_0       (0x3c018)       /* Lane Configuration Register #0 for KX4 */
+#define PCIE_PHY_DWC_PCS_LANE_KX4_MODE_1       (0x3c019)       /* Lane Configuration Register #1 for KX4 */
+#define PCIE_PHY_DWC_PCS_PLL_KR_MODE_0         (0x3c01a)       /* PLL Configuration Register #0 for KR */
+#define PCIE_PHY_DWC_PCS_PLL_KR_MODE_1         (0x3c01b)       /* PLL Configuration Register #1 for KR */
+#define PCIE_PHY_DWC_PCS_LANE_KR_MODE_0                (0x3c01c)       /* Lane Configuration Register #0 for KR */
+#define PCIE_PHY_DWC_PCS_LANE_KR_MODE_1                (0x3c01d)       /* Lane Configuration Register #1 for KR */
+#define PCIE_PHY_DWC_PCS_PLL_SGMII_MODE_0      (0x3c01e)       /* PLL Configuration Register #0 for SGMII */
+#define PCIE_PHY_DWC_PCS_PLL_SGMII_MODE_1      (0x3c01f)       /* PLL Configuration Register #1 for SGMII */
+#define PCIE_PHY_DWC_PCS_LANE_SGMII_MODE_0     (0x3c020)       /* Lane Configuration Register #0 for SGMII */
+#define PCIE_PHY_DWC_PCS_LANE_SGMII_MODE_1     (0x3c021)       /* Lane Configuration Register #1 for SGMII */
+#define PCIE_PHY_DWC_PCS_PLL_QSGMII_MODE_0     (0x3c022)       /* PLL Configuration Register #0 for QSGMII */
+#define PCIE_PHY_DWC_PCS_PLL_QSGMII_MODE_1     (0x3c023)       /* PLL Configuration Register #1 for QSGMII */
+#define PCIE_PHY_DWC_PCS_LANE_QSGMII_MODE_0    (0x3c024)       /* Lane Configuration Register #0 for QSGMII */
+#define PCIE_PHY_DWC_PCS_LANE_QSGMII_MODE_1    (0x3c025)       /* Lane Configuration Register #1 for QSGMII */
+#define PCIE_PHY_DWC_PCS_PLL_CEI_MODE_0                (0x3c026)       /* PLL Configuration Register #0 for CEI */
+#define PCIE_PHY_DWC_PCS_PLL_CEI_MODE_1                (0x3c027)       /* PLL Configuration Register #1 for CEI */
+#define PCIE_PHY_DWC_PCS_LANE_CEI_MODE_0               (0x3c028)       /* Lane Configuration Register #0 for CEI */
+#define PCIE_PHY_DWC_PCS_LANE_CEI_MODE_1               (0x3c029)       /* Lane Configuration Register #1 for CEI */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE1_125M_MODE_0         (0x3c02a)       /* PLL Configuration Register #0 for PCIe1 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE1_125M_MODE_1         (0x3c02b)       /* PLL Configuration Register #1 for PCIe1 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE1_125M_MODE_0                (0x3c02c)       /* Lane Configuration Register #0 for PCIe1 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE1_125M_MODE_1                (0x3c02d)       /* Lane Configuration Register #1 for PCIe1 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE2_125M_MODE_0         (0x3c02e)       /* PLL Configuration Register #0 for PCIe2 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE2_125M_MODE_1         (0x3c02f)       /* PLL Configuration Register #1 for PCIe2 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE2_125M_MODE_0                (0x3c030)       /* Lane Configuration Register #0 for PCIe2 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE2_125M_MODE_1                (0x3c031)       /* Lane Configuration Register #1 for PCIe2 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE3_125M_MODE_0         (0x3c032)       /* PLL Configuration Register #0 for PCIe3 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_PLL_PCIE3_125M_MODE_1         (0x3c033)       /* PLL Configuration Register #1 for PCIe3 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE3_125M_MODE_0                (0x3c034)       /* Lane Configuration Register #0 for PCIe3 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_LANE_PCIE3_125M_MODE_1                (0x3c035)       /* Lane Configuration Register #1 for PCIe3 with 125MHz refclk */
+#define PCIE_PHY_DWC_PCS_LANE_VMA_COARSE_CTRL_0                (0x3c036)       /* Lane VMA Coarse Control Register #0 */
+#define PCIE_PHY_DWC_PCS_LANE_VMA_COARSE_CTRL_1                (0x3c037)       /* Lane VMA Coarse Control Register #1 */
+#define PCIE_PHY_DWC_PCS_LANE_VMA_COARSE_CTRL_2                (0x3c038)       /* Lane VMA Coarse Control Register #2 */
+#define PCIE_PHY_DWC_PCS_LANE_VMA_FINE_CTRL_0          (0x3c039)       /* Lane VMA Fine Control Register #0 */
+#define PCIE_PHY_DWC_PCS_LANE_VMA_FINE_CTRL_1          (0x3c03a)       /* Lane VMA Fine Control Register #1 */
+#define PCIE_PHY_DWC_PCS_LANE_VMA_FINE_CTRL_2          (0x3c03b)       /* Lane VMA Fine Control Register #2 */
+#define PCIE_PHY_DWC_PCS_LANE_MODE_OVRD                        (0x3c03c)       /* Lane Mode Override in Raw PCS Global and Slice */
+#define PCIE_PHY_DWC_PCS_LANE_LINK_MON                 (0x3c040)       /* Monitor of MAC to Raw PCS Link Configuration Interface */
+#define PCIE_PHY_DWC_PCS_MAC_PLLIFC_MON_2              (0x3c043)       /* Monitor of MAC to Raw PCS PLL_PCS Divider Value */
+#define PCIE_PHY_DWC_PCS_MAC_PLLIFC_MON_3              (0x3c044)       /* Monitor of MAC to Raw PCS PLL OP_Range and Divider Values */
+#define PCIE_PHY_DWC_SLICE_TRIM                        (0x1c040)       /* Slice TX and RX Bias Trim Settings */
+#define PCIE_PHY_DWC_RX_LDLL_CTRL              (0x1c043)       /* RX Lane DLL Test Controls */
+#define PCIE_PHY_DWC_RX_SDLL_CTRL              (0x1c044)       /* RX Slice DLL test controls */
+#define PCIE_PHY_DWC_SLICE_PCIE1_MODE          (0x1c045)       /* Slice Configuration Settings for PCIE1 @ 100MHz */
+#define PCIE_PHY_DWC_SLICE_PCIE2_MODE          (0x1c046)       /* Slice Configuration Settings for PCIE2 @ 100Mhz */
+#define PCIE_PHY_DWC_SLICE_PCIE3_MODE          (0x1c047)       /* Slice Configuration Settings for PCIE3 @ 100Mhz */
+#define PCIE_PHY_DWC_SLICE_KX_MODE             (0x1c048)       /* Slice Configuration Settings for KX */
+#define PCIE_PHY_DWC_SLICE_KX4_MODE            (0x1c049)       /* Slice Configuration Settings for KX4 */
+#define PCIE_PHY_DWC_SLICE_KR_MODE             (0x1c04a)       /* Slice Configuration Settings for KR */
+#define PCIE_PHY_DWC_SLICE_SGMII_MODE          (0x1c04b)       /* Slice Configuration Settings for SGMII */
+#define PCIE_PHY_DWC_SLICE_QSGMII_MODE         (0x1c04c)       /* Slice Configuration Settings for QSGMII */
+#define PCIE_PHY_DWC_SLICE_CEI_MODE            (0x1c04d)       /* Slice Configuration Settings for CEI */
+#define PCIE_PHY_DWC_SLICE_PCIE1_125M_MODE     (0x1c04e)       /* Slice Configuration Settings for PCIE1 @ 125MHz */
+#define PCIE_PHY_DWC_SLICE_PCIE2_125M_MODE     (0x1c04f)       /* Slice Configuration Settings for PCIE2 @ 125MHz */
+#define PCIE_PHY_DWC_SLICE_PCIE3_125M_MODE     (0x1c050)       /* Slice Configuration Settings for PCIE3 @ 125MHz */
+#define PCIE_PHY_DWC_SLICE_OVRD_MODE           (0x1c051)       /* Slice Configuration Settings Override */
+#define PCIE_PHY_DWC_RX_CFG_0                  (0x18000)       /* Lane RX Configuration Register #0 */
+#define PCIE_PHY_DWC_RX_CFG_1                  (0x18001)       /* Lane RX Configuration Register #1 */
+#define PCIE_PHY_DWC_RX_CFG_2                  (0x18002)       /* Lane RX Configuration Register #2 */
+#define PCIE_PHY_DWC_RX_CFG_3                  (0x18003)       /* Lane RX Configuration Register #3 */
+#define PCIE_PHY_DWC_RX_CFG_4                  (0x18004)       /* Lane RX Configuration Register #4 */
+#define PCIE_PHY_DWC_RX_CFG_5                  (0x18005)       /* Lane RX Configuration Register #5 */
+#define PCIE_PHY_DWC_RX_CDR_CTRL_0             (0x18006)       /* Lane RX CDR Control Register #0 */
+#define PCIE_PHY_DWC_RX_CDR_CTRL_1             (0x18007)       /* Lane RX CDR Control Register #1 */
+#define PCIE_PHY_DWC_RX_CDR_CTRL_2             (0x18008)       /* Lane RX CDR Control Register #2 */
+#define PCIE_PHY_DWC_RX_LOOP_CTRL              (0x18009)       /* Lane RX Loop Control */
+#define PCIE_PHY_DWC_RX_MISC_CTRL              (0x1800a)       /* Lane RX Miscellaneous Control */
+#define PCIE_PHY_DWC_RX_CTLE_CTRL              (0x1800b)       /* Lane RX CTLE Control */
+#define PCIE_PHY_DWC_RX_PRECORR_CTRL           (0x1800c)       /* Lane RX Pre-Correlation Control */
+#define PCIE_PHY_DWC_RX_PHS_ACCM_CTRL          (0x1800d)       /* Lane RX Phase Accumulator Control */
+#define PCIE_PHY_DWC_RX_PHS_ACCM_FR_VAL                (0x1800e)       /* Lane RX Phase Accumulator Frequency Portion Control */
+#define PCIE_PHY_DWC_RX_PRECORR_VAL            (0x1800f)       /* Lane RX Pre-Correlation Count */
+#define PCIE_PHY_DWC_RX_DELTA_PM_0             (0x18010)       /* Lane RX VMA Performance Metric Register #0 */
+#define PCIE_PHY_DWC_RX_DELTA_PM_1             (0x18011)       /* Lane RX VMA Performance Metric Register #1 */
+#define PCIE_PHY_DWC_TX_CAPT_CTRL              (0x18012)       /* Lane TX Latch Control */
+#define PCIE_PHY_DWC_TX_CFG_0                  (0x18015)       /* Lane TX Configuration Register #0 */
+#define PCIE_PHY_DWC_TX_CFG_1                  (0x18016)       /* Lane TX Configuration Register #1 */
+#define PCIE_PHY_DWC_TX_CFG_2                  (0x18017)       /* Lane TX Configuration Register #2 */
+#define PCIE_PHY_DWC_TX_CFG_3                  (0x18018)       /* Lane TX Configuration Register #3 */
+#define PCIE_PHY_DWC_TX_PREEMPH_0              (0x18019)       /* Lane TX Pre-Emphasis */
+#define PCIE_PHY_DWC_PMA_LOOPBACK_CTRL         (0x1801a)       /* Lane PMA Loopback Control */
+#define PCIE_PHY_DWC_LANE_PWR_CTRL             (0x1801b)       /* Lane Power Control */
+#define PCIE_PHY_DWC_TERM_CTRL                 (0x1801c)       /* Lane Termination Control */
+#define PCIE_PHY_DWC_RX_MISC_STATUS            (0x18025)       /* RX Miscellaneous Status */
+#define PCIE_PHY_DWC_SDS_PIN_MON_0             (0x18026)       /* SerDes Pin Monitor 0 */
+#define PCIE_PHY_DWC_SDS_PIN_MON_1             (0x18027)       /* SerDes Pin Monitor 1 */
+#define PCIE_PHY_DWC_SDS_PIN_MON_2             (0x18028)       /* SerDes Pin Monitor 2 */
+#define PCIE_PHY_DWC_RX_PWR_MON_0              (0x18029)       /* RX Power State Machine Monitor 0 */
+#define PCIE_PHY_DWC_RX_PWR_MON_1              (0x1802a)       /* RX Power State Machine Monitor 1 */
+#define PCIE_PHY_DWC_RX_PWR_MON_2              (0x1802b)       /* RX Power State Machine Monitor 2 */
+#define PCIE_PHY_DWC_TX_PWR_MON_0              (0x1802c)       /* TX Power State Machine Monitor 0 */
+#define PCIE_PHY_DWC_TX_PWR_MON_1              (0x1802d)       /* TX Power State Machine Monitor 1 */
+#define PCIE_PHY_DWC_TX_PWR_MON_2              (0x1802e)       /* TX Power State Machine Monitor 2 */
+#define PCIE_PHY_DWC_RX_VMA_CTRL               (0x18040)       /* Lane RX VMA Control */
+#define PCIE_PHY_DWC_RX_CDR_MISC_CTRL_0                (0x18041)       /* Lane RX CDR Miscellaneous Control Register #0 */
+#define PCIE_PHY_DWC_RX_CDR_MISC_CTRL_1                (0x18042)       /* Lane RX CDR Miscellaneous Control Register #1 */
+#define PCIE_PHY_DWC_RX_PWR_CTRL               (0x18043)       /* Lane RX Power Control */
+#define PCIE_PHY_DWC_RX_OS_MVALBBD_0           (0x18045)       /* Lane RX Offset Calibration Manual Control Register #0 */
+#define PCIE_PHY_DWC_RX_OS_MVALBBD_1           (0x18046)       /* Lane RX Offset Calibration Manual Control Register #1 */
+#define PCIE_PHY_DWC_RX_OS_MVALBBD_2           (0x18047)       /* Lane RX Offset Calibration Manual Control Register #2 */
+#define PCIE_PHY_DWC_RX_AEQ_VALBBD_0           (0x18048)       /* Lane RX Adaptive Equalizer Control Register #0 */
+#define PCIE_PHY_DWC_RX_AEQ_VALBBD_1           (0x18049)       /* Lane RX Adaptive Equalizer Control Register #1 */
+#define PCIE_PHY_DWC_RX_AEQ_VALBBD_2           (0x1804a)       /* Lane RX Adaptive Equalizer Control Register #2 */
+#define PCIE_PHY_DWC_RX_MISC_OVRRD             (0x1804b)       /* Lane RX Miscellaneous Override Controls */
+#define PCIE_PHY_DWC_RX_OVRRD_PHASE_ACCUM_ADJ  (0x1804c)       /* Lane RX Phase Accumulator Adjust Override */
+#define PCIE_PHY_DWC_RX_AEQ_OUT_0              (0x18050)       /* Lane RX Adaptive Equalizer Status Register #0 */
+#define PCIE_PHY_DWC_RX_AEQ_OUT_1              (0x18051)       /* Lane RX Adaptive Equalizer Status Register #1 */
+#define PCIE_PHY_DWC_RX_AEQ_OUT_2              (0x18052)       /* Lane RX Adaptive Equalizer Status Register #2 */
+#define PCIE_PHY_DWC_RX_OS_OUT_0               (0x18053)       /* Lane RX Offset Calibration Status Register #0 */
+#define PCIE_PHY_DWC_RX_OS_OUT_1               (0x18054)       /* Lane RX Offset Calibration Status Register #1 */
+#define PCIE_PHY_DWC_RX_OS_OUT_2               (0x18055)       /* Lane RX Offset Calibration Status Register #2 */
+#define PCIE_PHY_DWC_RX_OS_OUT_3               (0x18056)       /* Lane RX Offset Calibration Status Register #3 */
+#define PCIE_PHY_DWC_RX_VMA_STATUS_0           (0x18057)       /* Lane RX CDR Status Register #0 */
+#define PCIE_PHY_DWC_RX_VMA_STATUS_1           (0x18058)       /* Lane RX CDR Status Register #1 */
+#define PCIE_PHY_DWC_RX_CDR_STATUS_0           (0x18059)       /* Lane RX CDR Status Register #0 */
+#define PCIE_PHY_DWC_RX_CDR_STATUS_1           (0x1805a)       /* Lane RX CDR Status Register #1 */
+#define PCIE_PHY_DWC_RX_CDR_STATUS_2           (0x1805b)       /* Lane RX CDR Status Register #2 */
+#define PCIE_PHY_DWC_PCS_MISC_CFG_0            (0x38000)       /* Lane Miscellaneous Configuration Register #0 */
+#define PCIE_PHY_DWC_PCS_MISC_CFG_1            (0x38001)       /* Lane Raw PCS Miscellaneous Configuration Register #1 */
+#define PCIE_PHY_DWC_PCS_LBERT_PAT_CFG         (0x38003)       /* LBERT Pattern Configuration */
+#define PCIE_PHY_DWC_PCS_LBERT_CFG             (0x38004)       /* LBERT Configuration */
+#define PCIE_PHY_DWC_PCS_LBERT_ECNT            (0x38005)       /* LBERT Error Counter */
+#define PCIE_PHY_DWC_PCS_RESET_0               (0x38006)       /* Lane Raw PCS Reset Register #0 */
+#define PCIE_PHY_DWC_PCS_RESET_1               (0x38007)       /* Lane Raw PCS Reset Register #1 */
+#define PCIE_PHY_DWC_PCS_RESET_2               (0x38008)       /* Lane Raw PCS Reset Register #2 */
+#define PCIE_PHY_DWC_PCS_RESET_3               (0x38009)       /* Lane Raw PCS Reset Register #3 */
+#define PCIE_PHY_DWC_PCS_CTLIFC_CTRL_0         (0x3800c)       /* Lane Raw PCS Control Interface Configuration Register #0 */
+#define PCIE_PHY_DWC_PCS_CTLIFC_CTRL_1         (0x3800d)       /* Lane Raw PCS Control Interface Configuration Register #1 */
+#define PCIE_PHY_DWC_PCS_CTLIFC_CTRL_2         (0x3800e)       /* Lane Raw PCS Control Interface Configuration Register #2 */
+#define PCIE_PHY_DWC_PCS_MACIFC_MON_0          (0x38021)       /* MAC to Raw PCS Interface Monitor Register #0 */
+#define PCIE_PHY_DWC_PCS_MACIFC_MON_2          (0x38023)       /* MAC to Raw PCS Interface Monitor Register #1 */
+
+/* DWC_GLBL_PLL_MONITOR */
+#define SDS_PCS_CLOCK_READY                    (1 << 6)        /* Clock status signal. */
+
+/* DWC_GLBL_PLL_CFG_0 */
+#define PCS_SDS_PLL_FTHRESH_SHIFT              6
+#define PCS_SDS_PLL_FTHRESH_MASK               0xC0            /* PLL frequency comparison threshold */
+
+/* DWC_GLBL_TERM_CFG */
+#define FAST_TERM_CAL                          (1 << 8)        /* Enable fast termination calibration. */
+
+/* DWC_RX_LOOP_CTRL */
+#define FAST_OFST_CNCL                         (1 << 10)       /* Enable fast offset cancellation. */
+#define FAST_DLL_LOCK                          (1 << 11)       /* Enable fast DLL lock. */
+
+/* Enable PCIe 3.0 PHY */
+#define EN_PCIE3                                (1 << 10)
+
+/* DWC_TX_CFG_0 */
+#define FAST_TRISTATE_MODE                     (1 << 1)        /* Enable fast Tristate power up. */
+#define FAST_RDET_MODE                         (1 << 2)        /* Enable fast RX Detection */
+#define FAST_CM_MODE                           (1 << 8)        /* Enable fast common-mode charge up. */
+
+/* Macros to read/write PCIe registers. */
+#define READ_PCIE_REG(r)       readl((const volatile void *)(PCIE_CFG_BASE + (r)))
+#define WRITE_PCIE_REG(r, v)   writel((v), (volatile void *)(PCIE_CFG_BASE + (r)))
+
+
+/* PMU registers */
+
+#define BK_PMU_LOCK_BIT                        (1 << 31)
+#define BK_PMU_EN_BIT                  (1 << 0)
+#define BK_PMU_RST_BIT                 (1 << 1)
+#define BK_PMU_INIT_BIT                        (1 << 2)
+
+/* BK_PMU_AXI_PCIE_M_CTL */
+#define PMU_AXI_PCIE_M_CTL_EN          (1 << 0)        /* Enable AXI PCIe Master clock. */
+#define PMU_AXI_PCIE_M_CTL_RST         (1 << 1)        /* Software AXI PCIe Master clock domain reset. */
+
+/* BK_PMU_AXI_PCIE_S_CTL */
+#define PMU_AXI_PCIE_S_CTL_EN          (1 << 0)        /* Enable AXI PCIe Slave clock. */
+#define PMU_AXI_PCIE_S_CTL_RST         (1 << 1)        /* Software AXI PCIe Slave clock domain reset. */
+
+/* BK_PMU_PCIE_RSTC */
+#define PMU_PCIE_RSTC_PHY_RESET                (1 << 0)        /* PCIe PHY phy_rst_n reset control bit. */
+#define PMU_PCIE_RSTC_PIPE_RESET       (1 << 4)        /* PCIe PHY PCS pipe_reset_n reset control bit. */
+#define PMU_PCIE_RSTC_CORE_RST         (1 << 8)        /* PCIe core core_rst_n reset control bit. */
+#define PMU_PCIE_RSTC_PWR_RST          (1 << 9)        /* PCIe core pwr_rst_n reset control bit. */
+#define PMU_PCIE_RSTC_STICKY_RST       (1 << 10)       /* PCIe core sticky_rst_n reset control bit. */
+#define PMU_PCIE_RSTC_NONSTICKY_RST    (1 << 11)       /* PCIe core nonsticky_rst_n reset control bit. */
+#define PMU_PCIE_RSTC_HOT_RESET                (1 << 12)       /* Hot Reset control bit. */
+#define PMU_PCIE_RSTC_REQ_RESET                (1 << 13)       /* PCIe core link_req_rst_not (ready for reset) signal status bit */
+#define PMU_PCIE_RSTC_SMLH_REQ_RST     (1 << 14)
+#define PMU_PCIE_RSTC_REQ_PHY_RST      (1 << 16)
+#define PMU_PCIE_RSTC_REQ_CORE_RST     (1 << 24)
+#define PMU_PCIE_RSTC_REQ_STICKY_RST   (1 << 26)
+#define PMU_PCIE_RSTC_REQ_NON_STICKY_RST (1 << 27)
+#define PMU_PCIE_RSTC_BRIDGE_FLUSH     (1 << 19)       /* PCIe AXI bridge bridge_flush_not signal status bit. */
+
+/* BK_PMU_PCIE_GENC */
+#define PMU_PCIE_GENC_LTSSM_ENABLE     (1 << 1)        /* LTSSM enable bit. */
+#define PMU_PCIE_GENC_DBI2_MODE                (1 << 2)        /* PCIe core registers access mode bit: DBI(=0) / DBI2(=1) */
+#define PMU_PCIE_GENC_MGMT_ENABLE      (1 << 3)        /* PCIe PHY management interface enable bit. */
+
+/* BK_PMU_PCIE_PMSC */
+#define PMU_PCIE_PMSC_LTSSM_STATE_SHIFT        (0)             /* LTSSM state (smlh_ltssm_state[5:0] signal) */
+#define PMU_PCIE_PMSC_LTSSM_STATE_MASK (0x3F)
+#define LTSSM_L0                       0x11
+#define PMU_PCIE_PMSC_SMLH_LINKUP      (1 << 6)        /* Physical level (PL) state bit (smlh_link_up signal) */
+#define PMU_PCIE_PMSC_RDLH_LINKUP      (1 << 7)        /* Channel level (DLL) state bit (rdlh_link_up signal) */
+
+/* Register map */
+
+#define BK_COREPLL_CTL_OFFSET          0x000
+#define BK_PCIEPLL_CTL_OFFSET          0x018
+#define BK_PCIEPLL_CTL1_OFFSET         0x01C
+#define BK_AXI_PCIE_M_CTL_OFFSET       0x048
+#define BK_AXI_PCIE_S_CTL_OFFSET       0x04C
+#define BK_PCIE_REF_CTL_OFFSET         0x05C
+#define BK_PCIE_CLKC_OFFSET            0x140
+#define BK_PCIE_RSTC_OFFSET            0x144
+#define BK_PCIE_PMSC_OFFSET            0x148
+#define BK_PCIE_GENC_OFFSET            0x14C
+
+#define BK_PMU_COREPLL_CTL     (PMU_BASE + BK_COREPLL_CTL_OFFSET)
+#define BK_PMU_PCIEPLL_CTL     (PMU_BASE + BK_PCIEPLL_CTL_OFFSET)
+#define BK_PMU_PCIEPLL_CTL1    (PMU_BASE + BK_PCIEPLL_CTL1_OFFSET)
+#define BK_PMU_AXI_PCIE_M_CTL  (PMU_BASE + BK_AXI_PCIE_M_CTL_OFFSET)
+#define BK_PMU_AXI_PCIE_S_CTL  (PMU_BASE + BK_AXI_PCIE_S_CTL_OFFSET)
+#define BK_PMU_PCIE_REF_CTL    (PMU_BASE + BK_PCIE_REF_CTL_OFFSET)
+#define BK_PMU_PCIE_CLKC       (PMU_BASE + BK_PCIE_CLKC_OFFSET)
+#define BK_PMU_PCIE_RSTC       (PMU_BASE + BK_PCIE_RSTC_OFFSET)
+#define BK_PMU_PCIE_PMSC       (PMU_BASE + BK_PCIE_PMSC_OFFSET)
+#define BK_PMU_PCIE_GENC       (PMU_BASE + BK_PCIE_GENC_OFFSET)
+
+/* Macros to read/write PMU registers. */
+#define READ_PMU_REG(r)                readl((const volatile void *)(r))
+#define WRITE_PMU_REG(r, v)    writel(v, (volatile void *)(r))
+
+void dw_set_iatu_region(int dir, int index, int base_addr, int limit_addr, int target_addr, int tlp_type);
+irqreturn_t dw_msi_interrupt(int id, void *dev_id);
+int dw_msi_init(void);
+
+#endif /* __PCI_BAIKAL_H__ */
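The PMSC status bits above are what host controller code polls to confirm link training: the LTSSM must reach L0 and both the SMLH (physical) and RDLH (data-link) layers must report link-up. A minimal sketch of such a poll using the READ_PMU_REG accessor defined above; the helper name, timeout and delay values are illustrative and not part of this patch:

/* Illustrative sketch only: poll BK_PMU_PCIE_PMSC until the link is up.
 * The helper name and the timeout/delay values are assumptions.
 * Requires <linux/delay.h> for udelay().
 */
static int baikal_pcie_wait_link_up(void)
{
        unsigned int reg, timeout = 1000;

        do {
                reg = READ_PMU_REG(BK_PMU_PCIE_PMSC);
                if ((reg & PMU_PCIE_PMSC_LTSSM_STATE_MASK) == LTSSM_L0 &&
                    (reg & PMU_PCIE_PMSC_SMLH_LINKUP) &&
                    (reg & PMU_PCIE_PMSC_RDLH_LINKUP))
                        return 0;
                udelay(100);
        } while (--timeout);

        return -ETIMEDOUT;
}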
index cc871ae3a17924f83019cad4c5a3d08897190279..a9c8ee7dbac88cc16822efed1a93493e7c56c5cf 100644 (file)
@@ -144,6 +144,14 @@ config COMMON_CLK_CDCE706
        ---help---
          This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
 
+config COMMON_CLK_BAIKAL
+       bool "Clock driver for Baikal SoCs"
+       default y
+       depends on MIPS_BAIKAL && OF
+       ---help---
+         Support for the Baikal Electronics SoC reference, PLL, and device clocks.
+
+
 config COMMON_CLK_CDCE925
        tristate "Clock driver for TI CDCE913/925/937/949 devices"
        depends on I2C
index 0138fb14e6f883bd641230e8d1c4315f5b7b1eff..72960f8cb97aabaa13a8b4bc5b1452adb5bd5359 100644 (file)
@@ -21,6 +21,7 @@ endif
 obj-$(CONFIG_MACH_ASM9260)             += clk-asm9260.o
 obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)    += clk-axi-clkgen.o
 obj-$(CONFIG_ARCH_AXXIA)               += clk-axm5516.o
+obj-$(CONFIG_COMMON_CLK_BAIKAL)                += clk-baikal.o
 obj-$(CONFIG_COMMON_CLK_BD718XX)       += clk-bd718x7.o
 obj-$(CONFIG_COMMON_CLK_CDCE706)       += clk-cdce706.o
 obj-$(CONFIG_COMMON_CLK_CDCE925)       += clk-cdce925.o
diff --git a/drivers/clk/clk-baikal.c b/drivers/clk/clk-baikal.c
new file mode 100644 (file)
index 0000000..dbe7f2e
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+ * clk-baikal.c - Baikal Electronics clock driver.
+ *
+ * Copyright (C) 2015,2016 Baikal Electronics JSC
+ * 
+ * Author:
+ *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <asm/setup.h>
+
+#define VERSION        "1.03"
+
+#define BAIKAL_CLK_FIXED_RATE
+
+#define BE_CLK_ENABLE_MASK             (1 << 0)
+#define BE_CLK_RESET_MASK              (1 << 1)
+#define BE_CLK_SET_MASK                (1 << 2)
+#define BE_CLK_BYPASS_MASK             (1 << 30)
+#define BE_CLK_LOCK_MASK               (1 << 31)
+
+#define BE_CLKR_SHFT                   2
+#define BE_DIV_SHFT                            4
+#define BE_CLKF_SHFT                   8
+#define BE_CLKOD_SHFT                  21
+
+#define BE_CLK_DIV_MAX_WIDTH   17
+#define BE_CLK_DIV_MASK                        (((1 << BE_CLK_DIV_MAX_WIDTH) - 1) \
+                                                                       << BE_DIV_SHFT)
+
+#define BE_RD_CLKR(SRC)                        (((SRC) & 0x000000FC) >> BE_CLKR_SHFT)
+#define BE_RD_CLKF(SRC)                        (((SRC) & 0x001FFF00) >> BE_CLKF_SHFT)
+#define BE_RD_CLKOD(SRC)               (((SRC) & 0x01E00000) >> BE_CLKOD_SHFT)
+
+#define BE_CLKR_VAL(NR)                        ((NR - 1) << BE_CLKR_SHFT)
+#define BE_CLKF_VAL(NF)                        ((NF - 1) << BE_CLKF_SHFT)
+#define BE_CLKOD_VAL(OD)               ((OD - 1) << BE_CLKOD_SHFT)
+
+#define BE_PLL_CLK_VAL(NR, NF, OD)     \
+       (BE_CLKR_VAL(NR) | BE_CLKF_VAL(NF) | BE_CLKOD_VAL(OD))
+
+#define BE_PLL_DIV_MASK                0x01FFFFFC
+#define BE_PLL_LATENCY                 100000000 /* ns */
+#define BE_PLL_FREQ_STEP               25000000
+
+static DEFINE_SPINLOCK(clk_lock);
+
+struct be_clk_pll {
+       struct clk_hw   hw;
+       void __iomem    *reg;
+       spinlock_t      *lock;
+       const char      *name;
+       unsigned int    latency; /* ns */
+       unsigned int    min, max, step;
+};
+#define to_be_clk_pll(_hw) container_of(_hw, struct be_clk_pll, hw)
+
+/*
+ * Common functions
+ */
+static inline unsigned int be_clk_read(void *csr)
+{
+       return readl(csr);
+}
+
+static inline void be_clk_write(unsigned int data, void *csr)
+{
+       writel(data, csr);
+}
+
+static int be_clk_pll_reset(struct clk_hw *hw)
+{
+       struct be_clk_pll *pllclk = to_be_clk_pll(hw);
+       unsigned int reg, count;
+
+       reg = be_clk_read(pllclk->reg);
+       reg |= BE_CLK_RESET_MASK;
+       be_clk_write(reg, pllclk->reg);
+       wmb();
+
+       count = 50;
+       do {
+               udelay(pllclk->latency / 1000);
+               reg = be_clk_read(pllclk->reg);
+       } while (!(reg & BE_CLK_LOCK_MASK) && --count);
+
+       if (!(reg & BE_CLK_LOCK_MASK))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int be_clk_pll_is_enabled(struct clk_hw *hw)
+{
+       struct be_clk_pll *pllclk = to_be_clk_pll(hw);
+       unsigned int reg;
+
+       reg = be_clk_read(pllclk->reg);
+
+       return !!(reg & BE_CLK_ENABLE_MASK);
+}
+
+static int be_clk_pll_enable(struct clk_hw *hw)
+{
+       struct be_clk_pll *pllclk = to_be_clk_pll(hw);
+       unsigned int reg;
+
+       reg = be_clk_read(pllclk->reg);
+       reg |= BE_CLK_ENABLE_MASK;
+       be_clk_write(reg, pllclk->reg);
+       wmb();
+
+       return 0;
+}
+
+static unsigned long be_clk_pll_recalc_rate(struct clk_hw *hw,
+                                unsigned long parent_rate)
+{
+       struct be_clk_pll *pllclk = to_be_clk_pll(hw);
+       unsigned long fref, fout;
+       unsigned int reg, nr, nf, od;
+
+       /* Read pll ctrl reg */
+       reg = be_clk_read(pllclk->reg);
+       /* Fetch pll parameters */
+       nr = BE_RD_CLKR(reg) + 1;
+       nf = BE_RD_CLKF(reg) + 1;
+       od = BE_RD_CLKOD(reg) + 1;
+       /* ref dividers */
+       fref = parent_rate / nr / od;
+       /* pll multiplier */
+       fout = fref * nf;
+
+       return fout;
+}
+
+long be_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long *parent_rate)
+{
+       struct be_clk_pll *pllclk = to_be_clk_pll(hw);
+
+       if (!pllclk->max) {
+               pllclk->max = be_clk_pll_recalc_rate(hw, *parent_rate);
+               pllclk->min = pllclk->max;
+       }
+
+       if (rate >= pllclk->max)
+               return pllclk->max;
+
+       if (rate <= pllclk->min)
+               return pllclk->min;
+
+       return rate;
+}
+
+int be_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long parent_rate)
+{
+       struct be_clk_pll *pllclk = to_be_clk_pll(hw);
+       unsigned int reg, nf, od, mul;
+
+       mul = (rate / parent_rate) & 0x7f;
+       od = ((68 / mul) << 1) & 0x3f; 
+       nf = (mul * od) & 0x1fff;
+
+       reg = be_clk_read(pllclk->reg);
+       reg &= ~BE_PLL_DIV_MASK;
+       reg |=  BE_PLL_CLK_VAL(1, nf, od);
+
+       be_clk_write(reg, pllclk->reg);
+       wmb();
+
+       return be_clk_pll_reset(hw);
+}
+
+const struct clk_ops be_clk_pll_ops = {
+               .enable      = be_clk_pll_enable,
+               .is_enabled  = be_clk_pll_is_enabled,
+               .recalc_rate = be_clk_pll_recalc_rate,
+               .round_rate  = be_clk_pll_round_rate,
+               .set_rate    = be_clk_pll_set_rate,
+};
+
+static __init int be_clk_pll_setup(struct device_node *np,
+       struct be_clk_pll *pmuclk)
+{
+
+       if (of_property_read_u32(np, "clock-latency",
+                               &pmuclk->latency))
+               pmuclk->latency = BE_PLL_LATENCY;
+
+       if (of_property_read_u32_index(np, "clock-frequency-range", 0,
+               &pmuclk->min))
+               pmuclk->min = 0;
+       if (of_property_read_u32_index(np, "clock-frequency-range", 1,
+               &pmuclk->max))
+               pmuclk->max = 0;
+       if (of_property_read_u32_index(np, "clock-frequency-range", 2,
+               &pmuclk->step) || !pmuclk->step)
+               pmuclk->step = BE_PLL_FREQ_STEP;
+
+       if (pmuclk->min > pmuclk->max)
+               return -EINVAL;
+
+       return 0;
+}
+
+static __init void be_pllclk_init(struct device_node *np)
+{
+       struct clk *clk;
+       struct clk_init_data init;
+       struct be_clk_pll *pmuclk;
+       const char *clk_name = np->name;
+       const char *parent_name;
+       void *res;
+
+       /* allocate the APM clock structure */
+       pmuclk = kzalloc(sizeof(*pmuclk), GFP_KERNEL);
+       if (!pmuclk) {
+               pr_err("PMU: Could not allocate clock %s\n", np->full_name);
+               return;
+       }
+
+       res = of_iomap(np, 0);
+       if (res == NULL) {
+               pr_err("PMU: Unable to map CSR register for %s\n", np->full_name);
+               goto __err;
+       }
+
+       if (be_clk_pll_setup(np, pmuclk)) {
+               pr_err("PMU: Unable setup clock %s\n", np->full_name);
+               goto __err;
+       }
+
+       /* Get clock name */
+       of_property_read_string(np, "clock-output-names", &clk_name);
+       if (!clk_name)
+               clk_name = np->full_name;
+
+       /* Set clock init parameters */
+       init.name = clk_name;
+       init.ops = &be_clk_pll_ops;
+       init.flags = CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED;
+       parent_name = of_clk_get_parent_name(np, 0);
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       /* Baikal pll parameters */
+       pmuclk->reg = res;
+       pmuclk->lock = &clk_lock;
+       pmuclk->hw.init = &init;
+       pmuclk->name = clk_name;
+
+       clk = clk_register(NULL, &pmuclk->hw);
+       if (IS_ERR(clk)) {
+               pr_err("PMU: could not register clk %s\n", clk_name);
+               goto __err;
+       }
+
+       of_clk_add_provider(np, of_clk_src_simple_get, clk);
+       clk_register_clkdev(clk, clk_name, NULL);
+
+       pr_debug("PMU: Add %s PLL clock \n", clk_name);
+
+       return;
+
+__err:
+       kfree(pmuclk);
+}
+CLK_OF_DECLARE(be_pll_clock, "be,pmu-pll-clock", be_pllclk_init);
+
+struct be_dev_params {
+       unsigned int    width;          /* Divider width */
+       unsigned int    nobypass;       /* Disable clock div=1 */
+};
+
+struct be_clk {
+       struct clk_hw   hw;
+       const char      *name;
+       spinlock_t      *lock;
+       void __iomem    *reg;
+       struct be_dev_params params;
+};
+
+#define to_be_clk(_hw) container_of(_hw, struct be_clk, hw)
+
+static int be_clk_enable(struct clk_hw *hw)
+{
+       struct be_clk *pclk = to_be_clk(hw);
+       unsigned long flags = 0;
+       unsigned int data;
+       /* Lock clock */
+       if (pclk->lock)
+               spin_lock_irqsave(pclk->lock, flags);
+       /* If clock valid */
+       if (pclk->reg != NULL) {
+               /* Debug info */
+               pr_debug("%s clock enabled\n", pclk->name);
+               /* Get CSR register */
+               data = be_clk_read(pclk->reg);
+               /* Enable the clock */
+               data |= BE_CLK_ENABLE_MASK;
+               /* Set CSR register */
+               be_clk_write(data, pclk->reg);
+               /* Debug info */
+               pr_debug("%s clock PADDR base 0x%08lX clk value 0x%08X\n",
+                       pclk->name, __pa(pclk->reg), data);
+       }
+       /* Unlock clock */
+       if (pclk->lock)
+               spin_unlock_irqrestore(pclk->lock, flags);
+       /* Return success */
+       return 0;
+}
+
+static void be_clk_disable(struct clk_hw *hw)
+{
+       struct be_clk *pclk = to_be_clk(hw);
+       unsigned long flags = 0;
+       unsigned int data;
+       /* Lock clock */
+       if (pclk->lock)
+               spin_lock_irqsave(pclk->lock, flags);
+       /* If clock valid */
+       if (pclk->reg != NULL) {
+               /* Debug info */
+               pr_debug("%s clock disabled\n", pclk->name);
+               /* Get CSR register */
+               data = be_clk_read(pclk->reg);
+               /* Disable the clock */
+               data &= ~BE_CLK_ENABLE_MASK;
+               /* Set CSR register */
+               be_clk_write(data, pclk->reg);
+               /* Debug info */
+               pr_debug("%s clock PADDR base 0x%08lX clk value 0x%08X\n",
+                       pclk->name, __pa(pclk->reg), data);
+       }
+       /* Unlock clock */
+       if (pclk->lock)
+               spin_unlock_irqrestore(pclk->lock, flags);
+}
+
+static int be_clk_is_enabled(struct clk_hw *hw)
+{
+       struct be_clk *pclk = to_be_clk(hw);
+       unsigned int data = 0;
+
+       /* If clock valid */
+       if (pclk->reg != NULL) {
+               /* Debug info */
+               pr_debug("%s clock checking\n", pclk->name);
+               /* Get CSR register */
+               data = be_clk_read(pclk->reg);
+               /* Debug info */
+               pr_debug("%s clock PADDR base 0x%08lX clk value 0x%08X\n",
+                       pclk->name, __pa(pclk->reg), data);
+               /* Debug info */
+               pr_debug("%s clock is %sabled\n", pclk->name,
+                       data & BE_CLK_ENABLE_MASK ? "en" : "dis");
+       } else {
+               /* Enabled and not controlled */
+               return 1;
+       }
+       return data & BE_CLK_ENABLE_MASK ? 1 : 0;
+}
+
+static unsigned long be_clk_recalc_rate(struct clk_hw *hw,
+                                unsigned long parent_rate)
+{
+       struct be_clk *pclk = to_be_clk(hw);
+       unsigned int data;
+
+       /* If clock valid */
+       if ((pclk->reg != NULL) &&
+           (pclk->params.width != 0)) {
+               /* Get CSR register */
+               data = be_clk_read(pclk->reg);
+               /* Apply global mask and shift data */
+               data = (data & BE_CLK_DIV_MASK) >> BE_DIV_SHFT;
+               /* Apply divider width mask */
+               data &= (1 << pclk->params.width) - 1;
+               /* Debug info */
+               pr_debug("%s clock recalc rate %ld parent %ld\n",
+                               pclk->name, parent_rate / data, parent_rate);
+               return parent_rate / data;
+       } else {
+               pr_debug("%s clock recalc rate %ld parent %ld\n",
+                       pclk->name, parent_rate, parent_rate);
+               return parent_rate;
+       }
+}
+
+static int be_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+                                                               unsigned long parent_rate)
+{
+       struct be_clk *pclk = to_be_clk(hw);
+       unsigned long flags = 0;
+       unsigned int data;
+       unsigned int divider;
+       /* Lock clock */
+       if (pclk->lock)
+               spin_lock_irqsave(pclk->lock, flags);
+       /* If clock valid */
+       if ((pclk->reg != NULL) &&
+           (pclk->params.width != 0)) {
+               /* Let's compute the divider */
+               if (rate > parent_rate)
+                       rate = parent_rate;
+               /* Calc divider rounded down */
+               divider = parent_rate / rate;
+               /* Apply divider width mask */
+               divider &= (1 << pclk->params.width) - 1;
+               /* Guard against a zero divider after masking */
+               if (!divider)
+                       divider = 1;
+               /* Check nobypass flag */
+               if ((divider == 1) && pclk->params.nobypass)
+                       divider = 2;
+               /* Get current state */
+               data = be_clk_read(pclk->reg);
+               /* Clear divide field */
+               data &= ~BE_CLK_DIV_MASK;
+               /* Set new divider */
+               data |= divider << BE_DIV_SHFT;
+               /* Set new value */
+               be_clk_write(data, pclk->reg);
+               /* Set restart pulse */
+               data |= BE_CLK_SET_MASK;
+               /* Restart divider */
+               be_clk_write(data, pclk->reg);
+               /* Debug info */
+               pr_debug("%s clock set rate %ld\n", pclk->name,
+                               parent_rate / divider);
+       } else {
+               /* bypass mode */
+               divider = 1;
+       }
+       /* Unlock clock */
+       if (pclk->lock)
+               spin_unlock_irqrestore(pclk->lock, flags);
+       /* Return new rate */
+       return parent_rate / divider;
+}
+
+static long be_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long *prate)
+{
+       struct be_clk *pclk = to_be_clk(hw);
+       unsigned long parent_rate = *prate;
+       unsigned int divider;
+       /* If clock valid */
+       if (pclk->reg) {
+               /* Let's compute the divider */
+               if (rate > parent_rate)
+                       rate = parent_rate;
+#ifdef BAIKAL_CLK_FIXED_RATE
+               rate = be_clk_recalc_rate(hw, parent_rate);
+#endif /* BAIKAL_CLK_FIXED_RATE */
+               /* Calc divider rounded down */
+               divider = parent_rate / rate;
+               pr_debug("%s clock round rate=%ld, parent_rate=%ld round_rate=%ld\n", pclk->name, rate, parent_rate,
+                                parent_rate / divider);
+       } else {
+               divider = 1;
+       }
+
+       /* Return actual freq */
+       return parent_rate / divider;
+}
+
+const struct clk_ops be_clk_ops = {
+        .enable = be_clk_enable,
+        .disable = be_clk_disable,
+        .is_enabled = be_clk_is_enabled,
+        .recalc_rate = be_clk_recalc_rate,
+        .set_rate = be_clk_set_rate,
+        .round_rate = be_clk_round_rate,
+};
+
+static struct clk *be_register_clk(struct device *dev,
+                               const char *name, const char *parent_name,
+                               struct be_dev_params *params, void __iomem *reg,
+                               spinlock_t *lock)
+{
+       struct be_clk *pmuclk;
+       struct clk *clk;
+       struct clk_init_data init;
+       int rc;
+
+       /* Allocate the APM clock structure */
+       pmuclk = kzalloc(sizeof(*pmuclk), GFP_KERNEL);
+       if (!pmuclk) {
+               /* Error */
+               pr_err("%s: could not allocate PMU clk\n", __func__);
+               return ERR_PTR(-ENOMEM);
+       }
+       /* Setup clock init structure */
+       init.name = name;
+       init.ops = &be_clk_ops;
+       init.flags = 0;
+       init.parent_names = parent_name ? &parent_name : NULL;
+       init.num_parents = parent_name ? 1 : 0;
+       /* Setup IP clock structure */
+       pmuclk->reg = reg;
+       pmuclk->name = name;
+       pmuclk->lock = lock;
+       pmuclk->hw.init = &init;
+       pmuclk->params = *params;
+
+       /* Register the clock */
+       clk = clk_register(dev, &pmuclk->hw);
+       if (IS_ERR(clk)) {
+               /* Error */
+               pr_err("%s: could not register clk %s\n", __func__, name);
+               /* Free memory */
+               kfree(pmuclk);
+               return clk;
+       }
+
+       /* Register the clock for lookup */
+       rc = clk_register_clkdev(clk, name, NULL);
+       if (rc != 0) {
+               /* Error */
+               pr_err("%s: could not register lookup clk %s\n",
+                       __func__, name);
+       }
+       return clk;
+}
+
+static void __init be_devclk_init(struct device_node *np)
+{
+       const char *clk_name = np->full_name;
+       struct clk *clk;
+       struct be_dev_params params;
+       void *reg;
+       int rc;
+
+       /* Check if the entry is disabled */
+       if (!of_device_is_available(np))
+               return;
+
+       /* Remap ctrl reg mem */
+       reg = of_iomap(np, 0);
+       if (reg == NULL) {
+               /* Error */
+               pr_err("Unable to map CSR register for %s\n", np->full_name);
+               return;
+       }
+       /* Check nobypass property */
+       params.nobypass = of_property_read_bool(np, "nobypass");
+       /* Get divider width */
+       if (of_property_read_u32(np, "divider-width", &params.width))
+                               params.width = BE_CLK_DIV_MAX_WIDTH;
+       /* Get clock name */
+       of_property_read_string(np, "clock-output-names", &clk_name);
+       /* Register clock */
+       clk = be_register_clk(NULL, clk_name, of_clk_get_parent_name(np, 0),
+                                               &params, reg, &clk_lock);
+       /* Check error */
+       if (IS_ERR(clk))
+               goto err;
+       /* Debug info */
+       pr_debug("Add %s clock\n", clk_name);
+       /* Add clock provider */
+       rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+       if (rc != 0)
+               pr_err("%s: could not register provider clk %s\n", __func__,
+                               np->full_name);
+       return;
+err:
+       if (reg)
+               iounmap(reg);
+}
+CLK_OF_DECLARE(be_dev_clock, "be,pmu-device-clock", be_devclk_init);
+
+MODULE_VERSION(VERSION);
+MODULE_AUTHOR("Dmitry Dunaev");
+MODULE_DESCRIPTION("Baikal Electronics clock driver");
+MODULE_LICENSE("GPL");
index 37671a5d4ed9fe1e59236a543b4c2b28be89f20e..4033a24d0e3c503f5afb6fca730219cde4e9db21 100644 (file)
 #include <linux/time.h>
 #include <asm/mips-cps.h>
 
+#define CALCULATE_RATING(gic_freq) (200 + (gic_freq) / 10000000)
+
 static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
 static int gic_timer_irq;
 static unsigned int gic_frequency;
+static int __init __gic_clocksource_init(void);
 
 static u64 notrace gic_read_count(void)
 {
@@ -109,13 +112,48 @@ static int gic_starting_cpu(unsigned int cpu)
        return 0;
 }
 
+static u64 gic_hpt_read(struct clocksource *cs)
+{
+       return gic_read_count();
+}
+
+static struct clocksource gic_clocksource = {
+       .name           = "GIC",
+       .read           = gic_hpt_read,
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+       .archdata       = { .vdso_clock_mode = VDSO_CLOCK_GIC },
+};
+
 static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
                            void *data)
 {
        struct clk_notifier_data *cnd = data;
+       unsigned int count_width;
+       int ret;
+
+       /* Update clocksource in case of new freq */
+       if (action == PRE_RATE_CHANGE) {
+               clocksource_unregister(&gic_clocksource);
+       }
 
-       if (action == POST_RATE_CHANGE)
+       if (action == POST_RATE_CHANGE) {
+               gic_frequency = cnd->new_rate;
+
+               /* Set clocksource mask. */
+               count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
+               count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
+               count_width *= 4;
+               count_width += 32;
+               gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
+
+               /* Calculate a somewhat reasonable rating value. */
+               gic_clocksource.rating = CALCULATE_RATING(gic_frequency);
+
+               ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
+               if (ret < 0)
+                       pr_warn("Unable to register clocksource\n");
                on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1);
+       }
 
        return NOTIFY_OK;
 }
@@ -149,18 +187,6 @@ static int gic_clockevent_init(void)
        return 0;
 }
 
-static u64 gic_hpt_read(struct clocksource *cs)
-{
-       return gic_read_count();
-}
-
-static struct clocksource gic_clocksource = {
-       .name           = "GIC",
-       .read           = gic_hpt_read,
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-       .archdata       = { .vdso_clock_mode = VDSO_CLOCK_GIC },
-};
-
 static int __init __gic_clocksource_init(void)
 {
        unsigned int count_width;
@@ -174,7 +200,7 @@ static int __init __gic_clocksource_init(void)
        gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
 
        /* Calculate a somewhat reasonable rating value. */
-       gic_clocksource.rating = 200 + gic_frequency / 10000000;
+       gic_clocksource.rating = CALCULATE_RATING(gic_frequency);
 
        ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
        if (ret < 0)
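The CALCULATE_RATING() macro introduced above re-applies the original heuristic, so the GIC clocksource keeps a rating proportional to its counter frequency after a rate change. A quick numeric sketch with GIC frequencies chosen only for illustration:

/* Illustrative arithmetic only:
 *   CALCULATE_RATING(600000000)  = 200 +  60 = 260
 *   CALCULATE_RATING(1200000000) = 200 + 120 = 320
 * A higher rating makes the GIC counter preferred over lower-rated
 * clocksources.
 */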
index bff5295016ae0fcbaa7858d4eb9095707cfdfd14..711e45db1bffd80b2e12ec8ad9bf47fac3885379 100644 (file)
@@ -36,7 +36,7 @@ config CPU_FREQ_STAT
 
 choice
        prompt "Default CPUFreq governor"
-       default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
+       default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ || MIPS_BAIKAL
        default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
        help
          This option sets which CPUFreq governor shall be loaded at
@@ -247,6 +247,19 @@ config IA64_ACPI_CPUFREQ
        If in doubt, say N.
 endif
 
+if MIPS_BAIKAL
+config BAIKAL_T1_CPUFREQ
+       tristate "Baikal-T1 CPUFreq Driver"
+       default y
+       help
+         This option adds a CPUFreq driver for the Baikal-T1 processor,
+         which supports software-configurable CPU frequency scaling.
+
+         For details, take a look at <file:Documentation/cpu-freq/>.
+
+         If in doubt, say N.
+endif
+
 if MIPS
 config BMIPS_CPUFREQ
        tristate "BMIPS CPUfreq Driver"
index 9a9f5ccd13d981ed5ad1a69d58b9509d00c0184a..c72404ae0e8106491831c80a57fa51fac4fc0284 100644 (file)
@@ -107,6 +107,7 @@ obj-$(CONFIG_BMIPS_CPUFREQ)         += bmips-cpufreq.o
 obj-$(CONFIG_IA64_ACPI_CPUFREQ)                += ia64-acpi-cpufreq.o
 obj-$(CONFIG_LOONGSON2_CPUFREQ)                += loongson2_cpufreq.o
 obj-$(CONFIG_LOONGSON1_CPUFREQ)                += loongson1-cpufreq.o
+obj-$(CONFIG_BAIKAL_T1_CPUFREQ)                += baikal-t1-cpufreq.o
 obj-$(CONFIG_SH_CPU_FREQ)              += sh-cpufreq.o
 obj-$(CONFIG_SPARC_US2E_CPUFREQ)       += sparc-us2e-cpufreq.o
 obj-$(CONFIG_SPARC_US3_CPUFREQ)                += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/baikal-t1-cpufreq.c b/drivers/cpufreq/baikal-t1-cpufreq.c
new file mode 100644 (file)
index 0000000..e024ed1
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * Baikal-T SoC platform support code.
+ * CPU Frequency Scaling driver.
+ *
+ * Copyright (C) 2018 Baikal Electronics JSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define VERSION        "2.0"
+
+#define PLL_FREQ_MAX   1300000 /* KHz */
+#define PLL_FREQ_MIN   200000  /* KHz */
+#define PLL_FREQ_STEP  25000   /* KHz */
+
+struct be_cpufreq {
+       struct device *dev;
+       void    __iomem *cpufreq_dev;
+       struct clk *clk;                        /* CPU clk */
+       struct clk *coreclk;                    /* Core PLL parent of CPU clk*/
+       unsigned int max_freq;                  /* KHz */
+       unsigned int min_freq;                  /* KHz */
+       unsigned int latency;                   /* uS  */
+};
+static struct be_cpufreq *cpufreq;
+
+static int be_cpufreq_notifier(struct notifier_block *nb,
+                                unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freqs = data;
+       /* Change freq in /proc/cpuinfo */
+       if (val == CPUFREQ_POSTCHANGE)
+               current_cpu_data.udelay_val = freqs->new * 5;
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block be_cpufreq_notifier_block = {
+       .notifier_call = be_cpufreq_notifier,
+};
+
+static int be_cpufreq_target(struct cpufreq_policy *policy,
+                              unsigned int index)
+{
+       unsigned int reg;
+       struct cpufreq_freqs freqs;
+
+       freqs.old = policy->cur;
+       freqs.new = policy->freq_table[index].frequency;
+
+       dev_info(cpufreq->dev,"%u KHz --> %u KHz\n", freqs.old, freqs.new);
+
+       reg = ioread32((u32 *)(cpufreq->cpufreq_dev)); /* pull register */
+       pr_debug( "Core PLL CTL reg BEFORE = %x",reg);
+
+       clk_set_rate(policy->clk, freqs.new * 1000);
+
+       reg = ioread32((u32 *)(cpufreq->cpufreq_dev)); /* pull register */
+       pr_debug( "Core PLL CTL reg AFTER = %x",reg);
+       
+       policy->cur = freqs.new;
+
+       /* Change freq in /proc/cpuinfo */
+       cpu_data[0].udelay_val= clk_get_rate(policy->clk) / 1000 * 5;
+       cpu_data[1].udelay_val=clk_get_rate(policy->clk) / 1000 * 5;
+
+       dev_info(cpufreq->dev, "Frequency changing procedure completed\n");
+
+       return 0;
+}
+
+static int be_cpufreq_init(struct cpufreq_policy *policy)
+{
+       struct cpufreq_frequency_table *freq_tbl;
+       unsigned int steps, freq;
+       int i;
+
+       steps = (cpufreq->max_freq - cpufreq->min_freq) / PLL_FREQ_STEP;
+
+       freq_tbl = kzalloc(sizeof(*freq_tbl) * (steps + 2),
+                                       GFP_KERNEL);
+
+       if (!freq_tbl) {
+               dev_err(cpufreq->dev,
+                       "Failed to alloc cpufreq frequency table\n");
+               return -ENOMEM;
+       }
+
+       freq = cpufreq->min_freq;
+       for (i = 0; i <= steps; ++i) {
+               if ((freq < cpufreq->min_freq) || (freq > cpufreq->max_freq))
+                       freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
+               else
+                       freq_tbl[i].frequency = freq;
+               pr_debug("CPUFreq index %d: frequency %d KHz\n", i,
+                       freq_tbl[i].frequency);
+               freq += PLL_FREQ_STEP;
+       }
+       freq_tbl[steps + 1].frequency = CPUFREQ_TABLE_END;
+
+       policy->driver_data = (void *) cpufreq;
+       policy->clk = cpufreq->coreclk;
+       policy->cur = clk_get_rate(policy->clk) / 1000;
+       cpufreq_generic_init(policy, freq_tbl, 1000);
+
+       return 0;
+}
+
+static int be_cpufreq_exit(struct cpufreq_policy *policy)
+{
+       kfree(policy->freq_table);
+       return 0;
+}
+
+static struct cpufreq_driver be_cpufreq_driver = {
+       .name           = "cpufreq-baikal",
+       .flags          = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+       .verify         = cpufreq_generic_frequency_table_verify,
+       .target_index   = be_cpufreq_target,
+       .get            = cpufreq_generic_get,
+       .init           = be_cpufreq_init,
+       .exit           = be_cpufreq_exit,
+       .attr           = cpufreq_generic_attr,
+};
+
+static int be_cpufreq_remove(struct platform_device *pdev)
+{
+       cpufreq_unregister_notifier(&be_cpufreq_notifier_block,
+                                   CPUFREQ_TRANSITION_NOTIFIER);
+       cpufreq_unregister_driver(&be_cpufreq_driver);
+
+       return 0;
+}
+
+static int be_cpufreq_probe(struct platform_device *pdev)
+{
+       struct device_node *np;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int ret;
+
+       cpufreq = devm_kzalloc(&pdev->dev, sizeof(*cpufreq), GFP_KERNEL);
+       if (!cpufreq)
+               return -ENOMEM;
+
+       cpufreq->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       cpufreq->cpufreq_dev = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(cpufreq->cpufreq_dev))
+               return PTR_ERR(cpufreq->cpufreq_dev);
+
+       cpufreq->clk = devm_clk_get(dev, "cpuclk");
+       if (IS_ERR(cpufreq->clk)) {
+               dev_err(dev, "Unable to get CPU clock\n");
+               return PTR_ERR(cpufreq->clk);
+       }
+
+       cpufreq->coreclk = clk_get_parent(cpufreq->clk);
+       if (IS_ERR(cpufreq->coreclk)) {
+               dev_err(dev, "Unable to get COREPLL which is a parent of CPU clock\n");
+               return PTR_ERR(cpufreq->coreclk);
+       }
+
+       np = of_find_node_by_name(NULL,"core_pll");
+
+       if (!np) {
+               dev_err(dev, "Failed to find DT node\n");
+               return -ENOENT;
+       }
+
+       if (of_property_read_u32_index(np, "clock-frequency-range", 0,
+               &cpufreq->min_freq))
+               cpufreq->min_freq = PLL_FREQ_MIN * 1000;
+       cpufreq->min_freq = cpufreq->min_freq / 1000;
+
+       if (of_property_read_u32_index(np, "clock-frequency-range", 1,
+               &cpufreq->max_freq))
+               cpufreq->max_freq = PLL_FREQ_MAX * 1000;
+       cpufreq->max_freq = cpufreq->max_freq / 1000;
+
+
+       be_cpufreq_driver.driver_data = (void *)cpufreq;
+
+       ret = cpufreq_register_driver(&be_cpufreq_driver);
+       if (ret) {
+               dev_err(dev, "Failed to register cpufreq driver\n");
+               return ret;
+       }
+
+       ret = cpufreq_register_notifier(&be_cpufreq_notifier_block,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+       if (ret) {
+               dev_err(dev, "Failed to register cpufreq notifier\n");
+               cpufreq_unregister_driver(&be_cpufreq_driver);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, cpufreq);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id be_cpufreq_of_match[] = {
+       { .compatible = "be,cpufreq", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, be_cpufreq_of_match);
+#endif
+
+static struct platform_driver be_cpufreq_platdrv = {
+       .probe          = be_cpufreq_probe,
+       .remove         = be_cpufreq_remove,
+       .driver         = {
+               .name   = "be-cpufreq",
+               .owner  = THIS_MODULE,
+#ifdef CONFIG_OF
+               .of_match_table = of_match_ptr(be_cpufreq_of_match),
+#endif /* CONFIG_OF */
+       },
+
+};
+
+module_platform_driver(be_cpufreq_platdrv);
+
+MODULE_VERSION(VERSION);
+MODULE_AUTHOR("Georgy Vlasov <Georgy.Vlasov@baikalelectronics.ru>");
+MODULE_AUTHOR("Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal CPUFreq driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:be_cpufreq");
index 417dad6355268934383074c1759154535d114a33..a9b52d067cea355d951628955201ecfbdf306af7 100644 (file)
@@ -524,4 +524,11 @@ config EDAC_BLUEFIELD
          Support for error detection and correction on the
          Mellanox BlueField SoCs.
 
+config EDAC_BAIKAL
+       tristate "Baikal-M SoC Memory Controller"
+       depends on MIPS_BAIKAL
+       help
+         Support for error detection and correction on the
+         Baikal Electronics SoCs.
+
 endif # EDAC
index d77200c9680bce9b7dd8fef8847854018fb8a5f5..1df45a459113bab9db0ac78e9052f1a04b81c2a0 100644 (file)
@@ -87,3 +87,4 @@ obj-$(CONFIG_EDAC_TI)                 += ti_edac.o
 obj-$(CONFIG_EDAC_QCOM)                        += qcom_edac.o
 obj-$(CONFIG_EDAC_ASPEED)              += aspeed_edac.o
 obj-$(CONFIG_EDAC_BLUEFIELD)           += bluefield_edac.o
+obj-$(CONFIG_EDAC_BAIKAL)               += baikal_mc_edac.o
diff --git a/drivers/edac/baikal_mc_edac.c b/drivers/edac/baikal_mc_edac.c
new file mode 100644 (file)
index 0000000..6727770
--- /dev/null
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Baikal-M memory controller edac kernel module.
+ *
+ * Copyright (C) 2021 Baikal Electronics, JSC.
+ *
+ */
+
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "edac_mc.h"
+
+#define BE_EDAC_MSG_SIZE       80
+
+#define DDRC_ECCCFG0           0x70    /* ECC Configuration Register */
+#define DDRC_ECCCFG1           0x74    /* ECC Configuration Register */
+#define DDRC_ECCSTAT           0x78    /* ECC Status Register */
+#define DDRC_ECCCLR            0x7c    /* ECC Clear Register */
+#define DDRC_ECCERRCNT         0x80    /* ECC Error Counter Register */
+#define DDRC_ECCCADDR0         0x84    /* ECC Corrected Error Address Register 0 */
+#define DDRC_ECCCADDR1         0x88    /* ECC Corrected Error Address Register 1 */
+#define DDRC_ECCCSYN0          0x8c    /* ECC Corrected Syndrome Register 0 */
+#define DDRC_ECCCSYN1          0x90    /* ECC Corrected Syndrome Register 1 */
+#define DDRC_ECCCSYN2          0x94    /* ECC Corrected Syndrome Register 2 */
+#define DDRC_ECCBITMASK0       0x98    /* ECC Corrected Data Bit Mask Register 0 */
+#define DDRC_ECCBITMASK1       0x9c    /* ECC Corrected Data Bit Mask Register 1 */
+#define DDRC_ECCBITMASK2       0xa0    /* ECC Corrected Data Bit Mask Register 2 */
+#define DDRC_ECCUADDR0         0xa4    /* ECC Uncorrected Error Address Register 0 */
+#define DDRC_ECCUADDR1         0xa8    /* ECC Uncorrected Error Address Register 1 */
+#define DDRC_ECCUSYN0          0xac    /* ECC Uncorrected Syndrome Register 0 */
+#define DDRC_ECCUSYN1          0xb0    /* ECC Uncorrected Syndrome Register 1 */
+#define DDRC_ECCUSYN2          0xb4    /* ECC Uncorrected Syndrome Register 2 */
+#define DDRC_ECCPOISONADDR0    0xb8    /* ECC Data Poisoning Address Register 0 */
+#define DDRC_ECCPOISONADDR1    0xbc    /* ECC Data Poisoning Address Register 1 */
+#define DDRC_CRCPARCTL0         0xc0    /* CRC Parity Control Register 0 */
+#define DDRC_CRCPARCTL1         0xc4    /* CRC Parity Control Register 1 */
+#define DDRC_CRCPARSTAT         0xcc    /* CRC Parity Status Register */
+
+#define ECCCTL_ENABLE_INTR     0x300
+#define ECCCTL_CLEAR_CERR      (1 << 0)
+#define ECCCTL_CLEAR_UERR      (1 << 1)
+
+#define ECCCNT_CERRS_MASK      (0xffff << 0)
+#define ECCCNT_UERRS_SHIFT     16
+
+#define ECCADDR_RANK_SHIFT     24
+#define ECCADDR_BG_SHIFT       24
+#define ECCADDR_BANK_SHIFT     16
+
+#define ECCADDR_RANK_MASK      (0x3 << ECCADDR_RANK_SHIFT)
+#define ECCADDR_ROW_MASK       (0x3ffff)
+#define ECCADDR_BG_MASK                (0x3 << ECCADDR_BG_SHIFT)
+#define ECCADDR_BANK_MASK      (0x7 << ECCADDR_BANK_SHIFT)
+#define ECCADDR_COL_MASK       (0xfff)
+
+struct baikal_edac_priv {
+       void __iomem *baseaddr;
+       int irq_cer;
+       int irq_uer;
+};
+
+struct baikal_edac_platform {
+       void (*plat_mc_init)(struct mem_ctl_info *mci, phys_addr_t phys_base);
+};
+
+static int ecc_mask_bitnum(unsigned mask)
+{
+       int bitnum = 0;
+       while (mask) {
+               mask >>= 1;
+               bitnum++;
+       }
+       return bitnum;
+}
+
+static irqreturn_t baikal_mc_err_handler(int irq, void *dev_id)
+{
+       u32 regaddr0, regaddr1;
+       char msg[BE_EDAC_MSG_SIZE];
+       struct mem_ctl_info *mci = dev_id;
+       const struct baikal_edac_priv *priv = mci->pvt_info;
+
+       if (irq == priv->irq_cer) {
+               regaddr0 = readl(priv->baseaddr + DDRC_ECCCADDR0);
+               regaddr1 = readl(priv->baseaddr + DDRC_ECCCADDR1);
+
+               snprintf(msg, BE_EDAC_MSG_SIZE, "catched at "
+                       "Rank: %d, BankGroup: %d, Bank: %d, Row: %d, Col: %d\n",
+                       (regaddr0 & ECCADDR_RANK_MASK) >> ECCADDR_RANK_SHIFT,
+                       (regaddr1 & ECCADDR_BG_MASK) >> ECCADDR_BG_SHIFT,
+                       (regaddr1 & ECCADDR_BANK_MASK) >> ECCADDR_BANK_SHIFT,
+                       ecc_mask_bitnum(regaddr0 & ECCADDR_ROW_MASK),
+                       ecc_mask_bitnum(regaddr1 & ECCADDR_COL_MASK));
+
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                       readl(priv->baseaddr + DDRC_ECCERRCNT) & ECCCNT_CERRS_MASK,
+                       0, 0, (u64)readl(priv->baseaddr + DDRC_ECCCSYN0) << 32 |
+                       readl(priv->baseaddr + DDRC_ECCCSYN1), 0, -1, -1, msg, "");
+
+               writel(ECCCTL_ENABLE_INTR | ECCCTL_CLEAR_CERR,
+                               priv->baseaddr + DDRC_ECCCLR);
+
+               return IRQ_HANDLED;
+       }
+       if (irq == priv->irq_uer) {
+               regaddr0 = readl(priv->baseaddr + DDRC_ECCUADDR0);
+               regaddr1 = readl(priv->baseaddr + DDRC_ECCUADDR1);
+
+               snprintf(msg, BE_EDAC_MSG_SIZE, "catched at "
+                       "Rank: %d, BankGroup: %d, Bank: %d, Row: %d, Col: %d\n",
+                       (regaddr0 & ECCADDR_RANK_MASK) >> ECCADDR_RANK_SHIFT,
+                       (regaddr1 & ECCADDR_BG_MASK) >> ECCADDR_BG_SHIFT,
+                       (regaddr1 & ECCADDR_BANK_MASK) >> ECCADDR_BANK_SHIFT,
+                       ecc_mask_bitnum(regaddr0 & ECCADDR_ROW_MASK),
+                       ecc_mask_bitnum(regaddr1 & ECCADDR_COL_MASK));
+
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                       readl(priv->baseaddr + DDRC_ECCERRCNT) >> ECCCNT_UERRS_SHIFT,
+                       0, 0, (u64)readl(priv->baseaddr + DDRC_ECCUSYN0) << 32 |
+                       readl(priv->baseaddr + DDRC_ECCUSYN1), 0, -1, -1, msg, "");
+
+               writel(ECCCTL_ENABLE_INTR | ECCCTL_CLEAR_UERR,
+                               priv->baseaddr + DDRC_ECCCLR);
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
+
+static bool baikal_get_ecc_state(void __iomem *base)
+{
+       return (readl(base + DDRC_ECCCFG0) & 0x7) == 0x4;
+}
+
+static void bt1_edac_mc_init(struct mem_ctl_info *mci, phys_addr_t phys_base)
+{
+       mci->mtype_cap = MEM_FLAG_DDR3;
+       mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+       mci->scrub_mode = SCRUB_NONE;
+}
+
+static const struct baikal_edac_platform bt1_mc_pdata = {
+       .plat_mc_init = bt1_edac_mc_init,
+};
+
+static const struct of_device_id baikal_edac_match[] = {
+       {
+               .compatible = "baikal,bt1-edac-mc",
+               .data = (void *)&bt1_mc_pdata
+       },
+       {
+               /* null entry */
+       }
+};
+MODULE_DEVICE_TABLE(of, baikal_edac_match);
+
+static int baikal_mc_probe(struct platform_device *pdev)
+{
+       struct edac_mc_layer layers;
+       struct baikal_edac_priv *priv;
+       const struct baikal_edac_platform *pdata;
+       struct mem_ctl_info *mci;
+       struct dimm_info *dimm;
+       void __iomem *baseaddr;
+       struct resource *res;
+       int ret;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       baseaddr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(baseaddr))
+               return PTR_ERR(baseaddr);
+
+       if (!baikal_get_ecc_state(baseaddr)) {
+               edac_printk(KERN_INFO, EDAC_MC, "%s: ECC not enabled\n",
+                               pdev->name);
+               return -ENXIO;
+       }
+
+       pdata = of_device_get_match_data(&pdev->dev);
+       if (!pdata) {
+               edac_printk(KERN_INFO, EDAC_MC,
+                               "%s: platform data is not available\n",
+                               pdev->name);
+               return -ENODEV;
+       }
+
+       /* oversimplified layout */
+       layers.type = EDAC_MC_LAYER_ALL_MEM;
+       layers.size = 1;
+       layers.is_virt_csrow = false;
+
+       mci = edac_mc_alloc(0, 1, &layers,
+                           sizeof(struct baikal_edac_priv));
+       if (!mci) {
+               edac_printk(KERN_ERR, EDAC_MC,
+                           "Failed memory allocation for mc instance\n");
+               return -ENOMEM;
+       }
+
+       priv = mci->pvt_info;
+       priv->baseaddr = baseaddr;
+
+       platform_set_drvdata(pdev, mci);
+
+       mci->mod_name = pdev->dev.driver->name;
+       mci->dev_name = dev_name(&pdev->dev);
+       mci->pdev = &pdev->dev;
+
+       dimm = *mci->dimms;
+       dimm->mci = mci;
+       dimm->grain = 1;
+
+       pdata->plat_mc_init(mci, res->start);
+
+       priv->irq_cer = platform_get_irq(pdev, 1);
+       if (priv->irq_cer < 0) {
+               edac_printk(KERN_ERR, EDAC_MC,
+                           "No IRQ in DT (%d)\n", priv->irq_cer);
+               ret = priv->irq_cer;
+               goto free_mc;
+       }
+       ret = devm_request_irq(&pdev->dev, priv->irq_cer, baikal_mc_err_handler,
+                               0, "baikal_mc_cerr", mci);
+
+       if (ret) {
+               edac_printk(KERN_ERR, EDAC_MC,
+                           "Unable to request irq %d\n", priv->irq_cer);
+               goto free_mc;
+       }
+
+       priv->irq_uer = platform_get_irq(pdev, 2);
+       if (priv->irq_uer < 0) {
+               edac_printk(KERN_ERR, EDAC_MC,
+                               "No IRQ in DT (%d)\n", priv->irq_uer);
+               ret = priv->irq_uer;
+               goto free_irq;
+       }
+       ret = devm_request_irq(&pdev->dev, priv->irq_uer, baikal_mc_err_handler,
+                               0, "baikal_mc_uerr", mci);
+
+       if (ret) {
+               edac_printk(KERN_ERR, EDAC_MC,
+                               "Unable to request irq %d\n", priv->irq_uer);
+               goto free_irq;
+       }
+
+       ret = edac_mc_add_mc_with_groups(mci, NULL);
+       if (ret) {
+               edac_printk(KERN_ERR, EDAC_MC,
+                               "Failed to register with EDAC core\n");
+               goto free_irq;
+       }
+
+       return 0;
+
+free_irq:
+       devm_free_irq(&pdev->dev, priv->irq_cer, (void *)mci);
+       devm_free_irq(&pdev->dev, priv->irq_uer, (void *)mci);
+
+free_mc:
+       edac_mc_del_mc(&pdev->dev);
+       edac_mc_free(mci);
+
+       return ret;
+}
+
+static int baikal_mc_remove(struct platform_device *pdev)
+{
+       struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+       struct baikal_edac_priv *priv = mci->pvt_info;
+
+       devm_free_irq(&pdev->dev, priv->irq_cer, (void *)mci);
+       devm_free_irq(&pdev->dev, priv->irq_uer, (void *)mci);
+
+       edac_mc_del_mc(&pdev->dev);
+       edac_mc_free(mci);
+
+       return 0;
+}
+
+static struct platform_driver baikal_mc_driver = {
+       .driver = {
+                  .name = "baikal-edac",
+                  .of_match_table = baikal_edac_match,
+                  },
+       .probe = baikal_mc_probe,
+       .remove = baikal_mc_remove,
+};
+module_platform_driver(baikal_mc_driver);
+
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Ivan Kapaev <Ivan.Kapaev@baikalelectronics.ru>");
+MODULE_DESCRIPTION("DDR ECC driver for Baikal SoCs");
+MODULE_LICENSE("GPL v2");
index 4ea742ada36dec6142befe3d4d38cf9e3cbb91af..d2ad9bc78b6ffe349f0eb2573ba4d21931e967ee 100644 (file)
@@ -1647,6 +1647,15 @@ config SENSORS_TC74
          This driver can also be built as a module. If so, the module
          will be called tc74.
 
+config SENSORS_PVT
+       tristate "Baikal PVT"
+       help
+         If you say yes here you get support for the Baikal-T PVT
+         (process, voltage and temperature) on-chip sensor.
+
+         This driver can also be built as a module.  If so, the module
+         will be called pvt.
+
 config SENSORS_THMC50
        tristate "Texas Instruments THMC50 / Analog Devices ADM1022"
        depends on I2C
index 40c036ea45e6b01c850e7247d1177ae91a86a2ed..7ba6f02087e3dbf0d5093064113f1c1c5f23a40a 100644 (file)
@@ -160,6 +160,7 @@ obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
 obj-$(CONFIG_SENSORS_STTS751)  += stts751.o
 obj-$(CONFIG_SENSORS_AMC6821)  += amc6821.o
 obj-$(CONFIG_SENSORS_TC74)     += tc74.o
+obj-$(CONFIG_SENSORS_PVT)      += pvt.o
 obj-$(CONFIG_SENSORS_THMC50)   += thmc50.o
 obj-$(CONFIG_SENSORS_TMP102)   += tmp102.o
 obj-$(CONFIG_SENSORS_TMP103)   += tmp103.o
diff --git a/drivers/hwmon/pvt.c b/drivers/hwmon/pvt.c
new file mode 100644 (file)
index 0000000..92606cf
--- /dev/null
@@ -0,0 +1,592 @@
+/*
+ * An hwmon driver for BAIKAL-T PVT Sensors based on
+ *
+ * Analog Bits. PVT Sensor Datasheet. Version: 2014.07.23
+ *
+ *  Copyright (C) 2017 Baikal Electronics JSC
+ *  Author:
+ *      Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+
+#define DRV_NAME "pvt"
+#define DRV_VERSION "1.0.0"
+
+/* PVT registers */
+#define BK_PVT_CTRL     0x00
+#define BK_PVT_DATA     0x04
+#define BK_PVT_TTHRES   0x08
+#define BK_PVT_VTHRES   0x0C
+#define BK_PVT_TTIMEOUT 0x1C
+#define BK_PVT_INTR_STAT 0x20
+#define BK_PVT_INTR_MASK 0x24
+#define BK_PVT_CLR_INTR 0x2C
+/* Maximum number of DATA reads while polling for the VALID bit */
+#define BK_PVT_VALID_TIMEOUT 10000
+
+/* PVT VALUES and MASKS */
+#define BK_PVT_CTRL_EN_BIT  0x1
+#define BK_PVT_CTRL_TMOD    0x0
+#define BK_PVT_CTRL_VMOD    0x2
+#define BK_PVT_CTRL_LVTMOD  0b0100
+#define BK_PVT_CTRL_HVTMOD  0b1000
+#define BK_PVT_CTRL_SVTMOD  0b1100
+
+#define BK_PVT_INTR_MASK_TONLY  0x7F9
+#define BK_PVT_INTR_MASK_TVONLY 0x7E1
+#define BK_PVT_INTR_MASK_ALL  0x7FF
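+/* BK_PVT_INTR_MASK_TONLY leaves only the temperature threshold interrupts
+ * unmasked, BK_PVT_INTR_MASK_TVONLY the temperature and voltage thresholds,
+ * and BK_PVT_INTR_MASK_ALL masks every interrupt source.
+ */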
+
+#define BK_PVT_DATA_MASK    0x3ff
+#define BK_PVT_DATA_VALID   (1 << 10)
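+/* The DATA register holds the 10-bit sample in bits [9:0]; bit 10 flags it
+ * as valid.
+ */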
+
+#define BK_PVT_THRES_HI    0xFFC00
+#define BK_PVT_THRES_LO    0x3FF
+
+#define BK_PVT_TTIMEOUT_SET 10000000
+
+#define BK_PVT_INTR_STAT_TTHRES_LO 0x02
+#define BK_PVT_INTR_STAT_TTHRES_HI 0x04
+#define BK_PVT_INTR_STAT_VTHRES_LO 0x08
+#define BK_PVT_INTR_STAT_VTHRES_HI 0x10
+
+/* TEMP limits */
+#define TEMP_PVT_MAX 125000
+#define TEMP_PVT_MIN -40000
+/* Voltage limits */
+#define VOLT_PVT_MAX 1000
+#define VOLT_PVT_MIN 800
+
+
+/* Coefficients for converting DATA to temperature T in 10^-3 C,
+ * scaled as noted below:
+ * DATA = BK_PVT_DATA[9:0]
+ * T = COEF4 * DATA^4 + COEF3 * DATA^3 + COEF2 * DATA^2 + COEF1 * DATA + COEF0 */
+#define COEF4   (-16743 )        /* (-1.6743E-11f)  * 10^15 */
+#define COEF3   (81542  )        /* (8.1542E-08f)   * 10^12 */
+#define COEF2   (-182010)        /* (-1.8201E-04f)  * 10^9  */
+#define COEF1   (310200 )        /* (3.1020E-01f)   * 10^6  */
+#define COEF0   (-48380 )        /* (-4.8380E+01f)  * 10^3  */
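+/* Illustrative check of the integer evaluation used in data2temp() below:
+ * for DATA = 400 the terms come out to roughly
+ * -428 + 5218 - 29121 + 124080 - 48380 = 51369, i.e. about 51.4 C. */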
+
+/* Coefficients for converting temperature T in 10^-3 C back to DATA:
+ * DATA = DCOEF3 * T^3 + DCOEF2 * T^2 + DCOEF1 * T + DCOEF0 */
+
+#define DCOEF3  (2617)
+#define DCOEF2  (8654)
+#define DCOEF1  (3923)
+#define DCOEF0  (172 )
+
+/* Coefficients for converting DATA to voltage in mV:
+ * DATA = 1865.8 * VOLTAGE - 1157.2  =>
+ * VOLTAGE = 620 + DATA * 10000 / 18658
+*/
+#define COEF0_V 620
+#define COEF1_V 18658
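+/* Example: DATA = 700 gives 620 + 7000000 / 18658 = 620 + 375 = 995 mV. */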
+
+
+struct pvt_hwmon {
+    void __iomem *base;
+    int irq;
+    const struct mfd_cell *cell;
+    struct device *hwmon;
+    struct completion read_completion;
+    struct mutex lock;
+    int temp;
+    int volt;
+    int svt;
+    int hvt;
+    int lvt;
+    bool mon_mod;
+};
+
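+/*
+ * Return the sensor to threshold-monitoring mode for the last selected
+ * quantity (hwmon->mon_mod: 0 = temperature, 1 = voltage); out-of-range
+ * values then raise the threshold interrupts.
+ */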
+static void switch_to_mon_mod(struct pvt_hwmon *hwmon)
+{
+    //Disable the PVT block
+    writel( 0, hwmon->base + BK_PVT_CTRL);
+    //Set the monitoring timeout
+    writel( BK_PVT_TTIMEOUT_SET, hwmon->base + BK_PVT_TTIMEOUT);
+    pr_debug("pvt switch_to_mon_mod and set BK_PVT_TTIMEOUT %d\n",readl(hwmon->base + BK_PVT_TTIMEOUT));
+    //Unmask only the temperature/voltage threshold interrupts
+    writel( BK_PVT_INTR_MASK_TVONLY, hwmon->base + BK_PVT_INTR_MASK);
+    //Select the last voltage or temperature monitoring mode
+    writel( ((hwmon->mon_mod)<<1), hwmon->base + BK_PVT_CTRL);
+    pr_debug("pvt switch_to_mon_mod and set BK_PVT_CTRL %d\n",readl(hwmon->base + BK_PVT_CTRL));
+    //Enable the PVT block
+    writel( (BK_PVT_CTRL_EN_BIT)| ((hwmon->mon_mod)<<1), hwmon->base + BK_PVT_CTRL);
+}
+
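+/*
+ * Poll the DATA register until the VALID bit is set and return the 10-bit
+ * sample, or -EINVAL after BK_PVT_VALID_TIMEOUT reads; monitoring mode is
+ * restored before a valid sample is returned.
+ */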
+static int read_valid_datareg(struct pvt_hwmon *hwmon)
+{
+    int data, i = 0;
+
+    data = readl(hwmon->base + BK_PVT_DATA);
+    data = 0;
+    while (!(data & BK_PVT_DATA_VALID)) {
+        data = readl(hwmon->base + BK_PVT_DATA);
+        if (++i == BK_PVT_VALID_TIMEOUT)
+            return -EINVAL;
+    }
+
+    data &= BK_PVT_DATA_MASK;
+    switch_to_mon_mod(hwmon);
+    return data;
+}
+
+
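+/*
+ * Switch the sensor into measurement mode 'mod' (TMOD, VMOD, LVTMOD, HVTMOD
+ * or SVTMOD) with the timeout cleared and all interrupts masked.
+ */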
+static void switch_pvt_mod( void *addr, long int mod)
+{
+    pr_debug("BK PVT now %x, but need %lx \n",readl(addr+BK_PVT_CTRL), (unsigned long)mod);
+    writel( 0, addr + BK_PVT_CTRL);
+    //Clear the measurement timeout
+    writel( 0, addr + BK_PVT_TTIMEOUT);
+    //Mask all interrupts
+    writel( BK_PVT_INTR_MASK_ALL, addr + BK_PVT_INTR_MASK);
+    writel( mod, addr + BK_PVT_CTRL);
+    writel( ((BK_PVT_CTRL_EN_BIT)|mod), addr + BK_PVT_CTRL);
+    pr_debug("BK PVT MOD %x\n",readl(addr+BK_PVT_CTRL));
+}
+
+static int data2temp(int data)
+{
+    int temp, temp4, temp3, temp2, temp1, temp0;
+    pr_debug("pvt %d and data %d \n",( BK_PVT_DATA_MASK), data);
+    /* Don't change the order of operations: it keeps the intermediate
+     * values within 32-bit integer range. */
+    temp4 = (COEF4) * data / 1000 * data / 1000 * data / 1000 * data / 1000;
+    temp3 = (COEF3) * data / 1000 * data / 1000 * data / 1000;
+    temp2 = (COEF2) * data / 1000 * data / 1000;
+    temp1 = (COEF1) * data / 1000;
+    temp0 = (COEF0) ;
+    temp = temp0 + temp1 + temp2 + temp3 + temp4;
+    pr_debug("BK PVT temp  %d = %d + %d + %d + %d + %d \n",temp, temp4, temp3, temp2 ,temp1, temp0);
+    return temp;
+}
+
+static irqreturn_t pvt_hwmon_irq(int irq, void *data)
+{
+    long int val;
+    struct pvt_hwmon *hwmon = data;
+    val = readl( hwmon->base + BK_PVT_INTR_STAT);
+    if (BK_PVT_INTR_STAT_TTHRES_LO & val)
+        printk(KERN_INFO "PVT WARNING Lo Temperature \n");
+    if (BK_PVT_INTR_STAT_TTHRES_HI & val)
+        printk(KERN_INFO "PVT WARNING Hi Temperature \n");
+    if (BK_PVT_INTR_STAT_VTHRES_LO & val)
+        printk(KERN_INFO "PVT WARNING Lo Voltage \n");
+    if (BK_PVT_INTR_STAT_VTHRES_HI & val)
+        printk(KERN_INFO "PVT WARNING Hi Voltage\n");
+    val = readl( hwmon->base + BK_PVT_CLR_INTR);
+    complete(&hwmon->read_completion);
+    return IRQ_HANDLED;
+}
+
+static ssize_t pvt_show_name(struct device *dev,
+    struct device_attribute *dev_attr, char *buf)
+{
+    return sprintf(buf, "pvt-baikal\n");
+}
+
+static ssize_t pvt_show_mon_mod(struct device *dev,
+    struct device_attribute *dev_attr, char *buf)
+{
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    return sprintf(buf, "%d\n",hwmon->mon_mod);
+}
+
+static ssize_t set_mon_mod(struct device *dev, struct device_attribute *devattr,
+             const char *buf, size_t count)
+{
+    int err;
+    long data;
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    err = kstrtol(buf, 10, &data);
+    if (err)
+        return err;
+    mutex_lock(&hwmon->lock);
+    hwmon->mon_mod = data;
+    switch_to_mon_mod(hwmon);
+    mutex_unlock(&hwmon->lock);
+    return count;
+}
+
+/* sysfs attributes for hwmon */
+static ssize_t pvt_show_temp(struct device *dev, struct device_attribute *da,
+             char *buf)
+{
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    int data, temp;
+    mutex_lock(&hwmon->lock);
+    switch_pvt_mod(hwmon->base,BK_PVT_CTRL_TMOD);
+    data = read_valid_datareg(hwmon);
+    temp = data2temp(data);
+    hwmon->temp = temp;
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", temp);
+}
+
+static int temp2data(int temp)
+{
+    int data3, data2, data1, data0, data;
+
+    if( temp > TEMP_PVT_MAX )
+        temp = TEMP_PVT_MAX;
+    if( temp < TEMP_PVT_MIN )
+        temp = TEMP_PVT_MIN;
+
+    /* Don't change the order of operations: it keeps the intermediate
+     * values within 32-bit integer range. */
+    data3 = DCOEF3 * temp / 1000000 * temp / 1000000 * temp / 100000;
+    data2 = DCOEF2 * temp / 1000000 * temp / 1000000;
+    data1 = DCOEF1 * temp / 1000000;
+    data0 = DCOEF0;
+    data = data0 + data1 + data2 + data3;
+
+    pr_debug("pvt %d and data %d \n", (BK_PVT_DATA_MASK), data);
+
+    return data;
+}
+
+static int data2volt(int data)
+{
+    /* DATA = 1865.8 * VOLTAGE - 1157.2 */
+    return (COEF0_V + ( data * 10000 ) / COEF1_V);
+}
+
+int volt2data(int volt)
+{
+    if( volt > VOLT_PVT_MAX )
+        volt = VOLT_PVT_MAX;
+    if( volt < VOLT_PVT_MIN )
+        volt = VOLT_PVT_MIN;
+    /* DATA = 1865.8 * VOLTAGE - 1157.2 */
+    return (18658 * volt / 10000 - 1157 );
+}
+
+static ssize_t set_temp_min(struct device *dev, struct device_attribute *devattr,
+                        const char *buf, size_t count)
+{
+    unsigned int val, data;
+    long int temp;
+    int err;
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    err = kstrtol(buf, 10, &temp);
+    if (err)
+        return err;
+    mutex_lock(&hwmon->lock);
+    data = readl(hwmon->base + BK_PVT_TTHRES);
+    val = temp2data(temp);
+    data = (data & BK_PVT_THRES_HI) + (BK_PVT_THRES_LO & val);
+    writel( data, hwmon->base + BK_PVT_TTHRES);
+    mutex_unlock(&hwmon->lock);
+    return count;
+}
+
+static ssize_t set_temp_max(struct device *dev, struct device_attribute *devattr,
+             const char *buf, size_t count)
+{
+    unsigned int val, data;
+    long int temp;
+    int err;
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    err = kstrtol(buf, 10, &temp);
+    if (err)
+        return err;
+    mutex_lock(&hwmon->lock);
+    data = readl(hwmon->base + BK_PVT_TTHRES);
+    val = temp2data(temp);
+    data = ( (val<<10) & BK_PVT_THRES_HI) + (BK_PVT_THRES_LO & data);
+    writel( data, hwmon->base + BK_PVT_TTHRES);
+    mutex_unlock(&hwmon->lock);
+    return count;
+}
+
+static ssize_t set_volt_min(struct device *dev, struct device_attribute *devattr,
+             const char *buf, size_t count)
+{
+    unsigned int val, data;
+    long int volt;
+    int err;
+
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    err = kstrtol(buf, 10, &volt);
+    if (err)
+        return err;
+    mutex_lock(&hwmon->lock);
+    data = readl(hwmon->base + BK_PVT_VTHRES);
+    val = volt2data(volt);
+    data = (data & BK_PVT_THRES_HI) + (BK_PVT_THRES_LO & val);
+    writel( data, hwmon->base + BK_PVT_VTHRES);
+    mutex_unlock(&hwmon->lock);
+    return count;
+}
+
+static ssize_t set_volt_max(struct device *dev, struct device_attribute *devattr,
+             const char *buf, size_t count)
+{
+    unsigned int val, data;
+    long int volt;
+    int err;
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    err = kstrtol(buf, 10, &volt);
+    if (err)
+        return err;
+    mutex_lock(&hwmon->lock);
+    data = readl(hwmon->base + BK_PVT_VTHRES);
+    val = volt2data(volt);
+    pr_debug("pvt set volt max %ld and val %x\n",volt,val);
+    data = ( (val<<10) & BK_PVT_THRES_HI) + (BK_PVT_THRES_LO & data);
+    writel( data, hwmon->base + BK_PVT_VTHRES);
+    mutex_unlock(&hwmon->lock);
+    return count;
+}
+
+static ssize_t pvt_show_temp_min(struct device *dev, struct device_attribute *devattr,
+             char *buf)
+{
+    unsigned int val, data;
+    int temp;
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    mutex_lock(&hwmon->lock);
+    val = readl(hwmon->base + BK_PVT_TTHRES);
+    data = BK_PVT_THRES_LO & val;
+    temp = data2temp(data);
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t pvt_show_temp_max(struct device *dev, struct device_attribute *devattr,
+             char *buf)
+{
+    unsigned int val, data;
+    int temp;
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    mutex_lock(&hwmon->lock);
+    val = readl(hwmon->base + BK_PVT_TTHRES);
+    data = (BK_PVT_THRES_HI & val) >> 10;
+    temp = data2temp(data);
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t pvt_show_volt_min(struct device *dev, struct device_attribute *devattr,
+             char *buf)
+{
+    unsigned int val, data;
+    int volt;
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    mutex_lock(&hwmon->lock);
+    val = readl(hwmon->base + BK_PVT_VTHRES);
+    data = BK_PVT_THRES_LO & val;
+    volt = data2volt(data);
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", volt);
+}
+
+
+static ssize_t pvt_show_volt_max(struct device *dev, struct device_attribute *devattr,
+             char *buf)
+{
+    unsigned int val, data;
+    int volt;
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    mutex_lock(&hwmon->lock);
+    val = readl(hwmon->base + BK_PVT_VTHRES);
+    data = (BK_PVT_THRES_HI & val) >> 10;
+    volt = data2volt(data);
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", volt);
+}
+
+static ssize_t pvt_show_voltage(struct device *dev, struct device_attribute *da,
+             char *buf)
+{
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    int data, volt;
+    mutex_lock(&hwmon->lock);
+    switch_pvt_mod(hwmon->base,BK_PVT_CTRL_VMOD);
+    data = read_valid_datareg(hwmon);
+    /* Don't change the order of multiplication!!! */
+    volt = data2volt(data);
+    hwmon->volt = volt;
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", volt);
+}
+
+static ssize_t lvt_show(struct device *dev, struct device_attribute *da,
+             char *buf)
+{
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    int data;
+    mutex_lock(&hwmon->lock);
+    switch_pvt_mod(hwmon->base,BK_PVT_CTRL_LVTMOD);
+    data = read_valid_datareg(hwmon);
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t hvt_show(struct device *dev, struct device_attribute *da,
+             char *buf)
+{
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    int data;
+    mutex_lock(&hwmon->lock);
+    switch_pvt_mod(hwmon->base,BK_PVT_CTRL_HVTMOD);
+    data = read_valid_datareg(hwmon);
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t svt_show(struct device *dev, struct device_attribute *da,
+             char *buf)
+{
+    struct pvt_hwmon *hwmon = dev_get_drvdata(dev);
+    int data;
+    mutex_lock(&hwmon->lock);
+    switch_pvt_mod(hwmon->base,BK_PVT_CTRL_SVTMOD);
+    data = read_valid_datareg(hwmon);
+    mutex_unlock(&hwmon->lock);
+    return sprintf(buf, "%d\n", data);
+}
+
+
+static DEVICE_ATTR(name, S_IRUGO, pvt_show_name, NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, pvt_show_temp, NULL);
+static DEVICE_ATTR(in1_input, S_IRUGO, pvt_show_voltage, NULL);
+static DEVICE_ATTR(lvt_input, S_IRUGO, lvt_show, NULL);
+static DEVICE_ATTR(hvt_input, S_IRUGO, hvt_show, NULL);
+static DEVICE_ATTR(svt_input, S_IRUGO, svt_show, NULL);
+static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, pvt_show_temp_min, set_temp_min);
+static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, pvt_show_temp_max, set_temp_max);
+static DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, pvt_show_volt_min, set_volt_min);
+static DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, pvt_show_volt_max, set_volt_max);
+static DEVICE_ATTR(mon_mod, S_IWUSR | S_IRUGO, pvt_show_mon_mod, set_mon_mod);
+
+static struct attribute *pvt_attrs[] = {
+    &dev_attr_name.attr,
+    &dev_attr_temp1_input.attr,
+    &dev_attr_temp1_min.attr,
+    &dev_attr_temp1_max.attr,
+    &dev_attr_in1_input.attr,
+    &dev_attr_in1_min.attr,
+    &dev_attr_in1_max.attr,
+    &dev_attr_lvt_input.attr,
+    &dev_attr_hvt_input.attr,
+    &dev_attr_svt_input.attr,
+    &dev_attr_mon_mod.attr,
+    NULL
+};
+
+static const struct attribute_group pvt_attr_group = {
+    .attrs = pvt_attrs,
+};
+
+
+static int pvt_probe(struct platform_device *pdev)
+{
+    int ret;
+    struct pvt_hwmon *hwmon;
+    struct resource *mem;
+
+    pr_debug("driver pvt_probe\n");
+    hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+    if (!hwmon)
+        return -ENOMEM;
+
+    hwmon->cell = mfd_get_cell(pdev);
+    hwmon->irq = platform_get_irq(pdev, 0);
+    if (hwmon->irq < 0) {
+        dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
+            hwmon->irq);
+        return hwmon->irq;
+    }
+
+    pr_debug("pvt_probe platform_get_irq");
+    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    hwmon->base = devm_ioremap_resource(&pdev->dev, mem);
+    if (IS_ERR(hwmon->base))
+        return PTR_ERR(hwmon->base);
+    init_completion(&hwmon->read_completion);
+    mutex_init(&hwmon->lock);
+
+    //Unmask only the temperature/voltage threshold interrupts
+    writel( BK_PVT_INTR_MASK_TVONLY, hwmon->base + BK_PVT_INTR_MASK);
+    pr_debug("pvt_probe BK_PVT_INTR_MASK %x\n",readl(hwmon->base + BK_PVT_INTR_MASK));
+
+    //Set the PVT measurement timeout
+    writel( BK_PVT_TTIMEOUT_SET, hwmon->base + BK_PVT_TTIMEOUT);
+    pr_debug("pvt_probe BK_PVT_TTIMEOUT %d\n",readl(hwmon->base + BK_PVT_TTIMEOUT));
+
+
+    platform_set_drvdata(pdev, hwmon);
+    ret = devm_request_irq(&pdev->dev, hwmon->irq, pvt_hwmon_irq, 0,
+                   pdev->name, hwmon);
+    if (ret) {
+        dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
+        return ret;
+    }
+
+    ret = sysfs_create_group(&pdev->dev.kobj, &pvt_attr_group);
+    if (ret) {
+        dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret);
+        return ret;
+    }
+
+    hwmon->hwmon = hwmon_device_register_with_info(&pdev->dev, pdev->name, hwmon, NULL, NULL);
+    if (IS_ERR(hwmon->hwmon)) {
+        ret = PTR_ERR(hwmon->hwmon);
+        goto err_remove_file;
+    }
+
+    //Set the monitoring mode to temperature
+    hwmon->mon_mod = 0;
+    switch_to_mon_mod(hwmon);
+    pr_debug("pvt_probe hwmon_device_register %d\n",ret);
+    return 0;
+
+err_remove_file:
+    sysfs_remove_group(&pdev->dev.kobj, &pvt_attr_group);
+    return ret;
+}
+
+static int pvt_remove(struct platform_device *pdev)
+{
+    struct pvt_hwmon *hwmon = platform_get_drvdata(pdev);
+    hwmon_device_unregister(hwmon->hwmon);
+    sysfs_remove_group(&pdev->dev.kobj, &pvt_attr_group);
+    return 0;
+}
+
+static const struct of_device_id pvt_dt_match[] = {
+    { .compatible = "baikal,pvt" },
+    { },
+};
+
+static struct platform_driver pvt_hwmon_driver = {
+    .probe       = pvt_probe,
+    .remove      = pvt_remove,
+    .driver = {
+        .name = "pvt-hwmon",
+        .of_match_table = of_match_ptr(pvt_dt_match),
+    },
+};
+
+module_platform_driver(pvt_hwmon_driver);
+
+MODULE_DESCRIPTION("Baikal-T PVT hwmon driver");
+MODULE_AUTHOR("Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pvt-hwmon");
index 9f965cdfff5c99db9ff3f8d1d94a1f753cbbbd3a..727c983309703384f82a135f1a1ede5e8e952963 100644 (file)
@@ -179,6 +179,7 @@ config AMD_XGBE
        select BITREVERSE
        select CRC32
        select PHYLIB
+       select PHYLINK
        select AMD_XGBE_HAVE_ECC if X86
        imply PTP_1588_CLOCK
        ---help---
@@ -202,4 +203,12 @@ config AMD_XGBE_HAVE_ECC
        bool
        default n
 
+
+config BAIKAL_XGBE
+       bool "Baikal XGBE support"
+       default n
+       depends on AMD_XGBE
+       ---help---
+         Say Y here to enable support for the 10G Ethernet (XGBE)
+         controllers found in Baikal SoCs.
+
 endif # NET_VENDOR_AMD
index 620785ffbd51946d82addf34f8769d7965d214a7..24e21b66dd14f92d1c763af0f018aa5485084b80 100644 (file)
@@ -7,6 +7,7 @@ amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
                 xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
                 xgbe-platform.o
 
+amd-xgbe-$(CONFIG_BAIKAL_XGBE) += baikal-mdio.o
 amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o
 amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
 amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/baikal-mdio.c b/drivers/net/ethernet/amd/xgbe/baikal-mdio.c
new file mode 100644 (file)
index 0000000..bab029b
--- /dev/null
@@ -0,0 +1,622 @@
+/*
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/clk.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define DELAY_COUNT     50
+
+static int be_xgbe_an_restart_kr_training(struct xgbe_prv_data *pdata)
+{
+       int reg = 0;
+
+       DBGPR("%s\n", __FUNCTION__);
+
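+       /*
+        * Registers 1.0x96 and 1.0x97 are assumed to be the 10GBASE-KR PMD
+        * control/status registers (IEEE 802.3 clause 72): control bit 0
+        * restarts training, bit 1 enables it, and status bit 3 reports a
+        * training failure.
+        */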
+       /* Restart training */
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, 0x0096, 3);
+       msleep(500);
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, 0x0096, 1);
+       
+       /* In the worst case, training can take up to 500 ms */
+       msleep(500);
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, 0x0097);
+       /* Check training failure */
+       if (reg & (1 << 3))
+               return -1;
+
+       /* Success */
+       return 0;
+}
+
+static int be_xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
+{
+       DBGPR("%s\n", __FUNCTION__);
+       
+       /* Enable training */
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, 0x0096, 2);
+       
+       return 0;
+}
+
+static int be_xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata)
+{
+       int ret;
+       DBGPR("%s\n", __FUNCTION__);
+
+       ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
+       ret |= MDIO_CTRL1_LPOWER;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+       usleep_range(75, 100);
+
+       ret &= ~MDIO_CTRL1_LPOWER;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+       return 0;
+}
+
+static int be_xgbe_phy_xgmii_mode_kx4(struct xgbe_prv_data *pdata)
+{
+       int  ret, count;
+
+       DBGPR_MDIO("%s\n", __FUNCTION__);
+
+       /* Write 2'b01 to Bits[1:0] of SR PCS Control2 to set the xpcx_kr_0
+        * output to 0.
+        */
+       ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+
+       ret &= ~MDIO_PCS_CTRL2_TYPE;
+       ret |= MDIO_PCS_CTRL2_10GBX;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+       /* Set Bit 13 SR PMA MMD Control1 Register (for back plane) to 1. */
+       ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+
+       ret |= 0x2000;
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_CTRL1, ret);
+
+       /* Set LANE_MODE TO KX4 (4). */
+       ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL);
+
+       ret &= ~VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_MASK;
+       ret |= VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_KX4;
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL, ret);
+
+       /* Set LANE_WIDTH (2) 4 lanes per link. */
+       ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL);
+
+       ret &= ~VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_MASK;
+       ret |= VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_4;
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_ENT_GEN5_GEN_CTL, ret);
+
+       /* Initiate Software Reset. */
+       ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1);
+
+       ret |= VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1, ret);
+
+       /* Wait until reset done. */
+       count = DELAY_COUNT;
+       do {
+               msleep(20);
+               ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1);
+       } while (!!(ret & VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST) && --count);
+
+       if (ret & VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int be_xgbe_phy_xgmii_mode_kr(struct xgbe_prv_data *pdata)
+{
+       int ret;
+       DBGPR("%s\n", __FUNCTION__);
+       
+       /* Enable KR training */
+       ret = be_xgbe_an_enable_kr_training(pdata);
+       if (ret < 0)
+               return ret;
+
+       /* Set PCS to KR/10G speed */
+       ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+
+       ret &= ~MDIO_PCS_CTRL2_TYPE;
+       ret |= MDIO_PCS_CTRL2_10GBR;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+       ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
+       ret &= ~MDIO_CTRL1_SPEEDSEL;
+       ret |= MDIO_CTRL1_SPEED10G;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+       ret = be_xgbe_phy_pcs_power_cycle(pdata);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int be_xgbe_phy_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+       struct device *dev = pdata->dev;
+       const char *pm = "KR";
+
+       if (!of_property_read_string(dev->of_node, "be,pcs-mode", &pm)) {
+               if (strcasecmp(pm, "KX4") == 0) {
+                       DBGPR("xgbe: mode %s, function: %s\n", pm, __FUNCTION__);
+                       return be_xgbe_phy_xgmii_mode_kx4(pdata);
+               }
+       }
+
+       DBGPR("xgbe: mode KR, function: %s\n", __FUNCTION__);
+
+       return be_xgbe_phy_xgmii_mode_kr(pdata);
+}
+
+static int __maybe_unused be_xgbe_phy_soft_reset(struct xgbe_prv_data *pdata)
+{
+       int count, ret;
+       DBGPR("%s\n", __FUNCTION__);
+
+       ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
+       ret |= MDIO_CTRL1_RESET;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+       count = DELAY_COUNT;
+       do {
+               msleep(20);
+               ret = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+               if (ret < 0)
+                       return ret;
+       } while ((ret & MDIO_CTRL1_RESET) && --count);
+
+       if (ret & MDIO_CTRL1_RESET)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int be_xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+       int reg;
+
+       DBGPR("%s\n", __FUNCTION__);
+
+       pdata->link_check = jiffies;
+       reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+       
+       /* Disable auto negotiation in any case! */
+       reg &= ~MDIO_AN_CTRL1_ENABLE;
+       pdata->phy.autoneg = AUTONEG_DISABLE;
+
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+
+       return 0;
+}
+
+static int ext_phy_probe(struct device *pdev, struct phy_device **phy_dev)
+{
+       struct device_node *xmit_node;
+       struct phy_device *phydev;
+       struct device *dev = pdev;
+       int ret;
+
+       /* Retrieve the xmit-handle */
+       xmit_node = of_parse_phandle(dev->of_node, "ext-phy-handle", 0);
+       if (!xmit_node)
+               return -ENODEV;
+
+       phydev = of_phy_find_device(xmit_node);
+       if (!phydev)
+               return -ENODEV;
+
+       ret = phy_init_hw(phydev);
+       if (ret < 0)
+               return ret;
+
+       if ((phydev->speed != SPEED_10000) && (phydev->duplex != DUPLEX_FULL))
+               return -ENODEV;
+
+       *phy_dev = phydev;
+
+       return 0;
+}
+
+int be_xgbe_phy_config_init(struct xgbe_prv_data *pdata)
+{
+       int ret = 0;
+       int count = DELAY_COUNT;
+       DBGPR("%s\n", __FUNCTION__);
+
+        if(!ext_phy_probe(&pdata->platdev->dev, &pdata->phydev))
+        {
+               netdev_info(pdata->netdev, "use external PHY\n");
+
+               /* Initialize supported features */
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+                               pdata->phydev->supported, 1);
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+                               pdata->phydev->supported, 1);
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+                               pdata->phydev->supported, 1);
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+                               pdata->phydev->supported, 1);
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+                               pdata->phydev->supported, 1);
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                               pdata->phydev->supported, 1);
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+                               pdata->phydev->supported, 1);
+               linkmode_copy(pdata->phydev->advertising, pdata->phydev->supported);
+       } else {
+                netdev_info(pdata->netdev, "no external PHY found\n");
+       }
+
+       pdata->phy.pause_autoneg = 0;
+       pdata->phy.tx_pause = 0;
+       pdata->phy.rx_pause = 0;
+       
+        /* Switch XGMAC PHY PLL to use external ref clock from pad */
+       ret = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_Gen5_MPLL_CTRL);
+       ret &= ~(VR_XS_PMA_MII_Gen5_MPLL_CTRL_REF_CLK_SEL_bit);
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, VR_XS_PMA_MII_Gen5_MPLL_CTRL, ret);
+       wmb();
+
+       /* Make vendor specific soft reset */
+       ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1);
+       ret |= VR_XS_PCS_DIG_CTRL1_VR_RST_Bit;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1, ret);
+       wmb();
+
+       /* Wait reset finish */
+       count = DELAY_COUNT;
+       do {
+               usleep_range(500, 600);
+               ret = XMDIO_READ(pdata, MDIO_MMD_PCS, VR_XS_PCS_DIG_CTRL1);
+       } while(((ret & VR_XS_PCS_DIG_CTRL1_VR_RST_Bit) != 0) && count--);
+
+
+       DBGPR("%s %x\n", __FUNCTION__, ret);
+       /*
+        * Wait until the RST bit (bit 15) of the "SR XS or PCS MMD Control1"
+        * register reads 0. The bit self-clears once bits [4:2] of the VR XS
+        * or PCS MMD Digital Status register equal 3'b100, i.e. the Tx/Rx
+        * clocks are stable and in the Power_Good state.
+        */
+       count = DELAY_COUNT;
+       do {
+               usleep_range(500, 600);
+               ret = XMDIO_READ(pdata, MDIO_MMD_PCS, SR_XC_or_PCS_MMD_Control1);
+       } while(((ret & SR_XC_or_PCS_MMD_Control1_RST_Bit) != 0) && count--);
+
+       /*
+        * Wait until the SerDes/PCS clocks are reported ready in the
+        * DWC_GLBL_PLL_MONITOR register (SDS_PCS_CLOCK_READY).
+        */
+       count = DELAY_COUNT;
+       do {
+               usleep_range(500, 600);
+               ret = XMDIO_READ(pdata, MDIO_MMD_PCS, DWC_GLBL_PLL_MONITOR);
+       } while(((ret & SDS_PCS_CLOCK_READY_mask) != SDS_PCS_CLOCK_READY_bit) && count-- );
+
+       /* Turn off and clear interrupts */
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+       wmb();
+
+       be_xgbe_phy_config_aneg(pdata);
+
+       ret = be_xgbe_phy_xgmii_mode(pdata);
+    
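+       /* Poll the PCS status register until the link status bit (0x0004) is set */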
+       count = DELAY_COUNT;
+       do
+       {
+               msleep(10);
+               ret = XMDIO_READ(pdata, MDIO_MMD_PCS, 0x0001);
+       } while(((ret & 0x0004) != 0x0004) && count--);
+
+       return 0;
+}
+
+static int be_xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+{
+       int reg;
+       DBGPR("%s\n", __FUNCTION__);
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1);
+
+       return (reg & MDIO_AN_STAT1_COMPLETE) ? 1 : 0;
+}
+
+static int be_xgbe_phy_update_link(struct xgbe_prv_data *pdata)
+{
+       int new_state = 0;
+       int ret = 0;
+       struct phy_device *phydev;
+
+       if(!pdata || !pdata->phydev)
+           return 1;
+
+       phydev = pdata->phydev;
+       ret = phy_read_mmd(phydev, MDIO_MMD_PHYXS, 0x1001);
+
+       if (pdata->phy.link) {
+               /* Flow control support */
+               pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+               if (pdata->tx_pause != pdata->phy.tx_pause) {
+                       new_state = 1;
+                       pdata->hw_if.config_tx_flow_control(pdata);
+                       pdata->tx_pause = pdata->phy.tx_pause;
+               }
+
+               if (pdata->rx_pause != pdata->phy.rx_pause) {
+                       new_state = 1;
+                       pdata->hw_if.config_rx_flow_control(pdata);
+                       pdata->rx_pause = pdata->phy.rx_pause;
+               }
+
+               /* Speed support */
+               if (pdata->phy_speed != pdata->phy.speed) {
+                       new_state = 1;
+                       pdata->phy_speed = pdata->phy.speed;
+               }
+
+               if (pdata->phy_link != pdata->phy.link) {
+                       new_state = 1;
+                       pdata->phy_link = pdata->phy.link;
+               }
+       } else if (pdata->phy_link) {
+               new_state = 1;
+               pdata->phy_link = 0;
+               pdata->phy_speed = SPEED_UNKNOWN;
+       }
+
+       return 0;
+}
+
+static void be_xgbe_phy_read_status(struct xgbe_prv_data *pdata)
+{
+       int reg, link_aneg;
+       int old_link = pdata->phy.link;
+
+       pdata->phy.link = 1;
+
+       if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+               netif_carrier_off(pdata->netdev);
+
+               pdata->phy.link = 0;
+               goto update_link;
+       }
+
+       link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+       /* If there is no external PHY just skip it */
+       if (pdata->phydev) {
+               pdata->phydev->drv->read_status(pdata->phydev);
+               /* Pop out old values */
+               pdata->phydev->drv->read_status(pdata->phydev);
+               if (!pdata->phydev->link)
+                       pdata->phy.link = 0;
+       } 
+
+       /* Check Baikal PHY link */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+       pdata->phy.link &= (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+       
+       reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_STAT1);
+       pdata->phy.link &= (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+       /* If nothing has happened with a link */
+       if ((old_link && pdata->phy.link) || (!old_link && !pdata->phy.link))
+               goto update_link;
+
+       if (pdata->phy.link) {
+               if (link_aneg && !be_xgbe_phy_aneg_done(pdata)) {
+                       return;
+               }
+
+               if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+                       clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
+               netif_carrier_on(pdata->netdev);
+               netdev_info(pdata->netdev, "NIC Link is Up\n");
+       } else {
+               netif_carrier_off(pdata->netdev);
+               netdev_info(pdata->netdev, "NIC Link is Down\n");
+
+               /* If KX4 mode is enabled training doesn't affect behavior */
+               be_xgbe_an_restart_kr_training(pdata);
+               /* Pop out old values */
+               XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+               XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_STAT1);
+       }
+
+update_link:
+       be_xgbe_phy_update_link(pdata);
+}
+
+static void be_xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+       netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
+       /* Disable auto-negotiation interrupts */
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+       pdata->phy.link = 0;
+       netif_carrier_off(pdata->netdev);
+
+       be_xgbe_phy_update_link(pdata);
+}
+
+/**
+ * be_xgbe_phy_start() - minimal start; the link is brought up later by
+ * be_xgbe_phy_read_status()
+ */
+int be_xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+       pdata->phy.link = 0;
+       netif_carrier_off(pdata->netdev);
+        return 0;
+}
+
+/**
+ * be_xgbe_phy_exit() - dummy
+ */
+void be_xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+       return;
+}
+
+/**
+ * be_an_isr() - dummy
+ */
+irqreturn_t be_an_isr(struct xgbe_prv_data *pdata)
+{
+       return IRQ_HANDLED;
+}
+
+void xgbe_init_function_ptrs_phy_baikal(struct xgbe_phy_if *phy_if)
+{
+       phy_if->phy_init        = be_xgbe_phy_config_init;
+       phy_if->phy_exit        = be_xgbe_phy_exit;
+       phy_if->phy_reset       = be_xgbe_phy_soft_reset;
+       phy_if->phy_stop        = be_xgbe_phy_stop;
+       phy_if->phy_status      = be_xgbe_phy_read_status;
+       phy_if->phy_config_aneg = be_xgbe_phy_config_aneg;
+       phy_if->phy_start       = be_xgbe_phy_start;
+       phy_if->an_isr          = be_an_isr;
+}
index 230726d7b74f6343e7d5262e1a02ba455fc0b713..ebd1985c361455e61497dea4c8dbb1427c4f0eb1 100644 (file)
@@ -366,8 +366,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
        }
 
        if (!ring->rx_buf_pa.pages) {
+#ifdef CONFIG_BAIKAL_XGBE
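+               /* On Baikal, use order-0 pages for Rx buffers to avoid
+                * high-order page allocations.
+                */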
+               ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
+                                      0, ring->node);
+#else
                ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
                                       PAGE_ALLOC_COSTLY_ORDER, ring->node);
+#endif
                if (ret)
                        return ret;
        }
index decc1c09a031b4b61e554e08e5480c66734034ae..01f4d2d7e58e65ca66d26ab527e489c41b00070b 100644 (file)
@@ -696,7 +696,9 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
                         *          per channel interrupts in edge triggered
                         *          mode)
                         */
+#ifndef CONFIG_BAIKAL_XGBE
                        if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+#endif
                                XGMAC_SET_BITS(channel->curr_ier,
                                               DMA_CH_IER, TIE, 1);
                }
@@ -708,7 +710,9 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
                         *          mode)
                         */
                        XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
+#ifndef CONFIG_BAIKAL_XGBE
                        if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+#endif
                                XGMAC_SET_BITS(channel->curr_ier,
                                               DMA_CH_IER, RIE, 1);
                }
index 7f705483c1c574f22526e6353ba3fc563d50f470..bda2d710847e1bb9a1178b0b59a12b073cc8a4db 100644 (file)
@@ -154,6 +154,57 @@ module_param(ecc_ded_period, uint, 0644);
 MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
 #endif
 
+#ifdef CONFIG_BAIKAL_XGBE
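+/*
+ * Placeholder phylink MAC callbacks for the Baikal build; the actual link
+ * management is handled by the PCS/PHY code in baikal-mdio.c.
+ */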
+static void xgbe_validate(struct phylink_config *config,
+                           unsigned long *supported,
+                           struct phylink_link_state *state)
+{
+       /* Dummy */
+}
+
+static int xgbe_mac_link_state(struct phylink_config *config,
+                                struct phylink_link_state *state)
+{
+       /* Dummy */
+       state->link = 0;
+       return 1;
+}
+
+
+static void xgbe_mac_an_restart(struct phylink_config *config)
+{
+       /* Dummy */
+}
+
+static void xgbe_mac_config(struct phylink_config *config, unsigned int mode,
+                             const struct phylink_link_state *state)
+{
+       /* Dummy */
+}
+
+static void xgbe_mac_link_down(struct phylink_config *config,
+                                unsigned int mode, phy_interface_t interface)
+{
+       /* Dummy */
+}
+
+static void xgbe_mac_link_up(struct phylink_config *config, unsigned int mode,
+                              phy_interface_t interface,
+                              struct phy_device *phy)
+{
+       /* Dummy */
+}
+
+const struct phylink_mac_ops xgbe_phylink_ops = {
+       .validate = xgbe_validate,
+       .mac_link_state = xgbe_mac_link_state,
+       .mac_an_restart = xgbe_mac_an_restart,
+       .mac_config = xgbe_mac_config,
+       .mac_link_down = xgbe_mac_link_down,
+       .mac_link_up = xgbe_mac_link_up,
+};
+#endif
+
 static int xgbe_one_poll(struct napi_struct *, int);
 static int xgbe_all_poll(struct napi_struct *, int);
 static void xgbe_stop(struct xgbe_prv_data *);
@@ -216,8 +267,15 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
                channel->node = node;
                cpumask_set_cpu(cpu, &channel->affinity_mask);
 
+#ifndef CONFIG_BAIKAL_XGBE
                if (pdata->per_channel_irq)
                        channel->dma_irq = pdata->channel_irq[i];
+#else
+               if (pdata->per_channel_irq) {
+                       channel->tx_dma_irq = pdata->channel_tx_irq[i];
+                       channel->rx_dma_irq = pdata->channel_rx_irq[i];
+               }
+#endif
 
                if (i < pdata->tx_ring_count) {
                        ring = xgbe_alloc_node(sizeof(*ring), node);
@@ -244,10 +302,22 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
                netif_dbg(pdata, drv, pdata->netdev,
                          "%s: cpu=%u, node=%d\n", channel->name, cpu, node);
 
+#ifndef CONFIG_BAIKAL_XGBE
                netif_dbg(pdata, drv, pdata->netdev,
                          "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
                          channel->name, channel->dma_regs, channel->dma_irq,
                          channel->tx_ring, channel->rx_ring);
+#else
+               netif_dbg(pdata, drv, pdata->netdev,
+                         "%s: dma_regs=%p, tx_dma_irq=%d, tx=%p, rx=%p\n",
+                         channel->name, channel->dma_regs, channel->tx_dma_irq,
+                         channel->tx_ring, channel->rx_ring);
+
+               netif_dbg(pdata, drv, pdata->netdev,
+                         "%s: dma_regs=%p, rx_dma_irq=%d, tx=%p, rx=%p\n",
+                         channel->name, channel->dma_regs, channel->rx_dma_irq,
+                         channel->tx_ring, channel->rx_ring);
+#endif
        }
 
        pdata->channel_count = count;
@@ -623,10 +693,16 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
         */
        if (napi_schedule_prep(&channel->napi)) {
                /* Disable Tx and Rx interrupts */
+#ifndef CONFIG_BAIKAL_XGBE
                if (pdata->channel_irq_mode)
                        xgbe_disable_rx_tx_int(pdata, channel);
                else
                        disable_irq_nosync(channel->dma_irq);
+#else
+               xgbe_disable_rx_tx_int(pdata, channel);
+               disable_irq_nosync(channel->tx_dma_irq);
+               disable_irq_nosync(channel->rx_dma_irq);
+#endif
 
                /* Turn on polling */
                __napi_schedule_irqoff(&channel->napi);
@@ -654,10 +730,17 @@ static void xgbe_tx_timer(struct timer_list *t)
        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq)
+#ifndef CONFIG_BAIKAL_XGBE
                        if (pdata->channel_irq_mode)
                                xgbe_disable_rx_tx_int(pdata, channel);
                        else
                                disable_irq_nosync(channel->dma_irq);
+#else
+               {
+                       disable_irq_nosync(channel->tx_dma_irq);
+                       disable_irq_nosync(channel->rx_dma_irq);
+               }
+#endif
                else
                        xgbe_disable_rx_tx_ints(pdata);
 
@@ -1092,6 +1175,7 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
        if (!pdata->per_channel_irq)
                return 0;
 
+#ifndef CONFIG_BAIKAL_XGBE
        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                snprintf(channel->dma_irq_name,
@@ -1112,6 +1196,61 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
                                      &channel->affinity_mask);
        }
 
+       return 0;
+
+err_dma_irq:
+       /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+       for (i--; i < pdata->channel_count; i--) {
+               channel = pdata->channel[i];
+
+               irq_set_affinity_hint(channel->dma_irq, NULL);
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+       }
+
+       if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
+               devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
+
+err_dev_irq:
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       return ret;
+#else
+       for (i = 0; i < pdata->channel_count; i++) {
+               channel = pdata->channel[i];
+               /* Tx */
+               snprintf(channel->tx_dma_irq_name,
+                        sizeof(channel->tx_dma_irq_name) - 1,
+                        "%s-Tx-%u", netdev_name(netdev),
+                        channel->queue_index);
+
+               ret = devm_request_irq(pdata->dev, channel->tx_dma_irq,
+                                      xgbe_dma_isr, 0,
+                                      channel->tx_dma_irq_name, channel);
+               if (ret) {
+                       netdev_alert(netdev, "error requesting irq %d\n",
+                                    channel->tx_dma_irq);
+                       goto err_dma_irq;
+               }
+
+               irq_set_affinity_hint(channel->tx_dma_irq,
+                                                     &channel->affinity_mask);
+
+               /* Rx */
+               snprintf(channel->rx_dma_irq_name,
+                        sizeof(channel->rx_dma_irq_name) - 1,
+                        "%s-Rx-%u", netdev_name(netdev),
+                        channel->queue_index);
+
+               ret = devm_request_irq(pdata->dev, channel->rx_dma_irq,
+                                      xgbe_dma_isr, 0,
+                                      channel->rx_dma_irq_name, channel);
+               if (ret) {
+                       netdev_alert(netdev, "error requesting irq %d\n",
+                                    channel->rx_dma_irq);
+                       goto err_dma_irq;
+               }
+
+               irq_set_affinity_hint(channel->rx_dma_irq,
+                                                     &channel->affinity_mask);
+       }
        return 0;
 
 err_dma_irq:
@@ -1119,8 +1258,10 @@ err_dma_irq:
        for (i--; i < pdata->channel_count; i--) {
                channel = pdata->channel[i];
 
-               irq_set_affinity_hint(channel->dma_irq, NULL);
-               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+               devm_free_irq(pdata->dev, channel->tx_dma_irq, channel);
+               devm_free_irq(pdata->dev, channel->rx_dma_irq, channel);
+               irq_set_affinity_hint(channel->tx_dma_irq, NULL);
+               irq_set_affinity_hint(channel->rx_dma_irq, NULL);
        }
 
        if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
@@ -1130,6 +1271,7 @@ err_dev_irq:
        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
 
        return ret;
+#endif
 }
 
 static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
@@ -1151,8 +1293,15 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
 
+#ifndef CONFIG_BAIKAL_XGBE
                irq_set_affinity_hint(channel->dma_irq, NULL);
                devm_free_irq(pdata->dev, channel->dma_irq, channel);
+#else
+               irq_set_affinity_hint(channel->tx_dma_irq, NULL);
+               irq_set_affinity_hint(channel->rx_dma_irq, NULL);
+               devm_free_irq(pdata->dev, channel->tx_dma_irq, channel);
+               devm_free_irq(pdata->dev, channel->rx_dma_irq, channel);
+#endif
        }
 }
 
@@ -1233,10 +1382,14 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 
 static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
 {
+#ifndef CONFIG_BAIKAL_XGBE
        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;
 
        return pdata->phy_if.phy_reset(pdata);
+#else
+       return 0;
+#endif
 }
 
 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
@@ -1408,6 +1561,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
        if (ret)
                goto err_irqs;
 
+#ifdef CONFIG_BAIKAL_XGBE
+       if (pdata->phylink)
+               phylink_start(pdata->phylink);
+#endif
+
        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);
 
@@ -1459,6 +1617,12 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
        hw_if->disable_rx(pdata);
 
        phy_if->phy_stop(pdata);
+#ifndef CONFIG_BAIKAL_XGBE
+       if (pdata->phylink) {
+               phylink_stop(pdata->phylink);
+               phylink_destroy(pdata->phylink);
+       }
+#endif
 
        xgbe_free_irqs(pdata);
 
@@ -2904,10 +3068,16 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
        /* If we processed everything, we are done */
        if ((processed < budget) && napi_complete_done(napi, processed)) {
                /* Enable Tx and Rx interrupts */
+#ifndef CONFIG_BAIKAL_XGBE
                if (pdata->channel_irq_mode)
                        xgbe_enable_rx_tx_int(pdata, channel);
                else
                        enable_irq(channel->dma_irq);
+#else
+               xgbe_enable_rx_tx_int(pdata, channel);
+               enable_irq(channel->tx_dma_irq);
+               enable_irq(channel->rx_dma_irq);
+#endif
        }
 
        DBGPR("<--xgbe_one_poll: received = %d\n", processed);
@@ -3014,3 +3184,4 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
+
index 7ce9c69e9c44f3d4288d04f710b96222ebf2fb77..5192e41247170e1350d261112e0a7ad4861cb803 100644 (file)
 #include <linux/etherdevice.h>
 #include <linux/io.h>
 #include <linux/notifier.h>
+#ifdef CONFIG_BAIKAL_XGBE
+#include <linux/clk.h>
+#endif
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -142,7 +145,11 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
        DBGPR("-->xgbe_default_config\n");
 
        pdata->blen = DMA_SBMR_BLEN_64;
-       pdata->pbl = DMA_PBL_128;
+#ifdef CONFIG_BAIKAL_XGBE
+       pdata->pbl = DMA_PBL_16;
+#else
+       pdata->pbl = DMA_PBL_256;
+#endif
        pdata->aal = 1;
        pdata->rd_osr_limit = 8;
        pdata->wr_osr_limit = 8;
@@ -330,6 +337,19 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
        XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
        XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
+#ifdef CONFIG_BAIKAL_XGBE
+       ret = clk_prepare_enable(pdata->sysclk);
+       if (ret) {
+               netdev_alert(netdev, "sysclk clk_prepare_enable failed\n");
+               return ret;
+       }
+
+       ret = clk_prepare_enable(pdata->ptpclk);
+       if (ret) {
+               netdev_alert(netdev, "ptpclk clk_prepare_enable failed\n");
+               return ret;
+       }
+#endif
        /* Call MDIO/PHY initialization routine */
        pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
        ret = pdata->phy_if.phy_init(pdata);
index 4ebd2410185a9544b515ce1192ca93836427f21f..3abb3b85fd3bf5a04a8f7dac6ea974a51b9296fd 100644 (file)
 #include <linux/property.h>
 #include <linux/acpi.h>
 #include <linux/mdio.h>
+#ifdef CONFIG_BAIKAL_XGBE
+#include <linux/phy.h>
+#endif
 
 #include "xgbe.h"
 #include "xgbe-common.h"
 
+#ifdef CONFIG_BAIKAL_XGBE
+extern const struct phylink_mac_ops xgbe_phylink_ops;
+#endif
+
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id xgbe_acpi_match[];
 
@@ -306,7 +313,9 @@ static int xgbe_platform_probe(struct platform_device *pdev)
        unsigned int dma_irqnum, dma_irqend;
        enum dev_dma_attr attr;
        int ret;
-
+#ifdef CONFIG_BAIKAL_XGBE
+       struct fwnode_reference_args ref;
+#endif
        pdata = xgbe_alloc_pdata(dev);
        if (IS_ERR(pdata)) {
                ret = PTR_ERR(pdata);
@@ -323,6 +332,27 @@ static int xgbe_platform_probe(struct platform_device *pdev)
        /* Get the version data */
        pdata->vdata = xgbe_get_vdata(pdata);
 
+#ifdef CONFIG_BAIKAL_XGBE
+       ret = fwnode_property_get_reference_args(pdev->dev.fwnode, "sfp", NULL,
+               0, 0, &ref);
+
+       if (ret == 0) {
+               pdata->phylink_phy_mode = PHY_INTERFACE_MODE_10GKR;
+
+               pdata->phylink_config.dev = &pdata->netdev->dev;
+               pdata->phylink_config.type = PHYLINK_NETDEV;
+
+               /* Create an instance, bind with SFP, etc. */
+               pdata->phylink = phylink_create(&pdata->phylink_config,
+                       pdev->dev.fwnode, pdata->phylink_phy_mode,
+                       &xgbe_phylink_ops);
+
+               if (IS_ERR(pdata->phylink))
+                       dev_info(pdata->dev, "can't create phylink instance (%ld)\n",
+                               PTR_ERR(pdata->phylink));
+       }
+#endif
+
        phy_pdev = xgbe_get_phy_pdev(pdata);
        if (!phy_pdev) {
                dev_err(dev, "unable to obtain phy device\n");
@@ -351,6 +381,20 @@ static int xgbe_platform_probe(struct platform_device *pdev)
                dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
        }
 
+#ifdef CONFIG_BAIKAL_XGBE
+       pdata->axiclk = devm_clk_get(dev, XGBE_AXI_CLOCK);
+       if (IS_ERR(pdata->axiclk)) {
+               dev_err(dev, "axi devm_clk_get failed\n");
+               goto err_io;
+       }
+
+       ret = clk_prepare_enable(pdata->axiclk);
+       if (ret) {
+               dev_err(dev, "axi clk_prepare_enable failed\n");
+               goto err_io;
+       }
+#endif
+
        /* Obtain the mmio areas for the device */
        pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(pdata->xgmac_regs)) {
@@ -370,6 +414,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
        if (netif_msg_probe(pdata))
                dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);
 
+#ifndef CONFIG_BAIKAL_XGBE
        pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev,
                                                          phy_memnum++);
        if (IS_ERR(pdata->rxtx_regs)) {
@@ -399,6 +444,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
        }
        if (netif_msg_probe(pdata))
                dev_dbg(dev, "sir1_regs  = %p\n", pdata->sir1_regs);
+#endif
 
        /* Retrieve the MAC address */
        ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
@@ -425,7 +471,11 @@ static int xgbe_platform_probe(struct platform_device *pdev)
        /* Check for per channel interrupt support */
        if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) {
                pdata->per_channel_irq = 1;
+#ifndef CONFIG_BAIKAL_XGBE
                pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE;
+#else
+               pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
+#endif
        }
 
        /* Obtain device settings unique to ACPI/OF */
@@ -467,8 +517,12 @@ static int xgbe_platform_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_io;
        pdata->dev_irq = ret;
+#ifdef CONFIG_BAIKAL_XGBE
+       pdata->an_irq = pdata->dev_irq;
+#endif
 
        /* Get the per channel DMA interrupts */
+#ifndef CONFIG_BAIKAL_XGBE
        if (pdata->per_channel_irq) {
                unsigned int i, max = ARRAY_SIZE(pdata->channel_irq);
 
@@ -484,12 +538,45 @@ static int xgbe_platform_probe(struct platform_device *pdev)
 
                pdata->irq_count += max;
        }
+#else
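+       /*
+        * Baikal T1: platform IRQs 1 and 2 are the per-channel Tx DMA interrupts
+        * (channels 0 and 1), IRQs 3 and 4 are the corresponding Rx DMA interrupts.
+        */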
+       if (pdata->per_channel_irq) {
+               unsigned int max = ARRAY_SIZE(pdata->channel_tx_irq);
+
+               ret = platform_get_irq(pdata->platdev, 1);
+               if (ret < 0)
+                       goto err_io;
+               pdata->channel_tx_irq[0] = ret;
+
+               ret = platform_get_irq(pdata->platdev, 3);
+               if (ret < 0)
+                       goto err_io;
+               pdata->channel_rx_irq[0] = ret;
+
+               ret = platform_get_irq(pdata->platdev, 2);
+               if (ret < 0)
+                       goto err_io;
+               pdata->channel_tx_irq[1] = ret;
+
+               ret = platform_get_irq(pdata->platdev, 4);
+               if (ret < 0)
+                       goto err_io;
+               pdata->channel_rx_irq[1] = ret;
+
+               dma_irqnum = 2;
+
+               pdata->channel_irq_count = max;
+
+               pdata->irq_count += max;
+       }
+#endif
 
+#ifndef CONFIG_BAIKAL_XGBE
        /* Get the auto-negotiation interrupt */
        ret = platform_get_irq(phy_pdev, phy_irqnum++);
        if (ret < 0)
                goto err_io;
        pdata->an_irq = ret;
+#endif
 
        /* Configure the netdev resource */
        ret = xgbe_config_netdev(pdata);
@@ -573,7 +660,11 @@ static int xgbe_platform_resume(struct device *dev)
 #endif /* CONFIG_PM_SLEEP */
 
 static const struct xgbe_version_data xgbe_v1 = {
+#ifdef CONFIG_BAIKAL_XGBE
+       .init_function_ptrs_phy_impl    = xgbe_init_function_ptrs_phy_baikal,
+#else
        .init_function_ptrs_phy_impl    = xgbe_init_function_ptrs_phy_v1,
+#endif
        .xpcs_access                    = XGBE_XPCS_ACCESS_V1,
        .tx_max_fifo_size               = 81920,
        .rx_max_fifo_size               = 81920,
index 729307a96c50df0fe70f5246b4d83a1250f4b71c..547aa7bae98b9d446e792c216b3f8d83d6cbc72c 100644 (file)
 #include <linux/dcache.h>
 #include <linux/ethtool.h>
 #include <linux/list.h>
+#ifdef CONFIG_BAIKAL_XGBE
+#include <linux/phylink.h>
+#endif
 
 #define XGBE_DRV_NAME          "amd-xgbe"
 #define XGBE_DRV_VERSION       "1.0.3"
 #define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
 
 /* Device-tree clock names */
+#ifdef CONFIG_BAIKAL_XGBE
+#define XGBE_AXI_CLOCK         "axi"
+#endif
 #define XGBE_DMA_CLOCK         "dma_clk"
 #define XGBE_PTP_CLOCK         "ptp_clk"
 
@@ -504,8 +510,15 @@ struct xgbe_channel {
        void __iomem *dma_regs;
 
        /* Per channel interrupt irq number */
+#ifndef CONFIG_BAIKAL_XGBE
        int dma_irq;
        char dma_irq_name[IFNAMSIZ + 32];
+#else
+       int rx_dma_irq;
+       int tx_dma_irq;
+       char rx_dma_irq_name[IFNAMSIZ + 32];
+       char tx_dma_irq_name[IFNAMSIZ + 32];
+#endif
 
        /* Netdev related settings */
        struct napi_struct napi;
@@ -1031,6 +1044,15 @@ struct xgbe_prv_data {
        struct platform_device *phy_platdev;
        struct device *phy_dev;
 
+#ifdef CONFIG_BAIKAL_XGBE
+       /* PHY device - transceiver */
+       struct phy_device *phydev;
+
+       struct phylink *phylink;
+       struct phylink_config phylink_config;
+       int phylink_phy_mode;
+#endif
+
        /* Version related data */
        struct xgbe_version_data *vdata;
 
@@ -1088,7 +1110,12 @@ struct xgbe_prv_data {
        int dev_irq;
        int ecc_irq;
        int i2c_irq;
+
        int channel_irq[XGBE_MAX_DMA_CHANNELS];
+#ifdef CONFIG_BAIKAL_XGBE
+       int channel_tx_irq[XGBE_MAX_DMA_CHANNELS];
+       int channel_rx_irq[XGBE_MAX_DMA_CHANNELS];
+#endif
 
        unsigned int per_channel_irq;
        unsigned int irq_count;
@@ -1194,6 +1221,7 @@ struct xgbe_prv_data {
 
        /* Device clocks */
        struct clk *sysclk;
+       struct clk *axiclk;
        unsigned long sysclk_rate;
        struct clk *ptpclk;
        unsigned long ptpclk_rate;
@@ -1316,6 +1344,28 @@ static inline int xgbe_pci_init(void) { return 0; }
 static inline void xgbe_pci_exit(void) { }
 #endif
 
+#ifdef CONFIG_BAIKAL_XGBE
+void xgbe_init_function_ptrs_phy_baikal(struct xgbe_phy_if *);
+
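+/* XPCS/Gen5 PMA register offsets and bit masks used by the Baikal PHY code */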
+#ifndef VR_XS_PMA_MII_Gen5_MPLL_CTRL
+#define VR_XS_PMA_MII_Gen5_MPLL_CTRL                   0x807A
+#endif
+#define VR_XS_PMA_MII_Gen5_MPLL_CTRL_REF_CLK_SEL_bit   (1 << 13)
+#define VR_XS_PCS_DIG_CTRL1                            0x8000
+#define VR_XS_PCS_DIG_CTRL1_VR_RST_Bit                 MDIO_CTRL1_RESET
+#define SR_XC_or_PCS_MMD_Control1                      MDIO_CTRL1
+#define SR_XC_or_PCS_MMD_Control1_RST_Bit              MDIO_CTRL1_RESET
+#define DWC_GLBL_PLL_MONITOR                           0x8010
+#define SDS_PCS_CLOCK_READY_mask                       0x1C
+#define SDS_PCS_CLOCK_READY_bit                        0x10
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL                  0x809C
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_KX4    (4 << 0)
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LANE_MODE_MASK   0x0007
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_4     (2 << 8)
+#define VR_XS_PMA_MII_ENT_GEN5_GEN_CTL_LINK_WIDTH_MASK  0x0700
+#define VR_XS_OR_PCS_MMD_DIGITAL_CTL1_VR_RST            (1 << 15)
+#endif
+
 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
 void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
 void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *);
@@ -1373,4 +1423,6 @@ static inline void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) {}
 #define DBGPR_MDIO(x...) do { } while (0)
 #endif
 
+void xgbe_dump_phy_registers2(struct xgbe_prv_data *pdata);
+
 #endif
index 338e25a6374e9f626e1980ed7f91d4c224295eea..8a6a5693dc3477560d4ccd9d26177e3c40344767 100644 (file)
@@ -56,6 +56,16 @@ config DWMAC_GENERIC
          platform specific code to function or is using platform
          data for setup.
 
+config DWMAC_BAIKAL
+       tristate "Baikal Electronics DWMAC support"
+       default MIPS_BAIKAL
+       depends on OF
+       help
+         Support for Baikal Electronics DWMAC Ethernet.
+
+         This selects the Baikal-T1 SoC glue layer support for the stmmac
+         device driver.
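+
+         To compile this driver as a module, choose M here: the module
+         will be called dwmac-baikal.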
+
 config DWMAC_ANARION
        tristate "Adaptrum Anarion GMAC support"
        default ARC
index c59926d96bccc85739bed923824cf5a7913cc0e3..57cb66dc4a47a1365b511ee7f9968dd7328a00f0 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o stmmac_xsk.o\
              chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
              dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o     \
              mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o  \
@@ -13,6 +13,7 @@ stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
 # Ordering matters. Generic driver must be last.
 obj-$(CONFIG_STMMAC_PLATFORM)  += stmmac-platform.o
 obj-$(CONFIG_DWMAC_ANARION)    += dwmac-anarion.o
+obj-$(CONFIG_DWMAC_BAIKAL)     += dwmac-baikal.o
 obj-$(CONFIG_DWMAC_IPQ806X)    += dwmac-ipq806x.o
 obj-$(CONFIG_DWMAC_LPC18XX)    += dwmac-lpc18xx.o
 obj-$(CONFIG_DWMAC_MEDIATEK)   += dwmac-mediatek.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-baikal.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-baikal.c
new file mode 100644 (file)
index 0000000..5942014
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Baikal Electronics SoCs DWMAC glue layer
+ *
+ * Copyright (C) 2015,2016 Baikal Electronics JSC
+ * Author:
+ *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+
+static int dwmac_baikal_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       int ret;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       if (pdev->dev.of_node) {
+               plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+               if (IS_ERR(plat_dat)) {
+                       dev_err(&pdev->dev, "dt configuration failed\n");
+                       return PTR_ERR(plat_dat);
+               }
+       } else {
+               plat_dat = dev_get_platdata(&pdev->dev);
+               if (!plat_dat) {
+                       dev_err(&pdev->dev, "no platform data provided\n");
+                       return  -EINVAL;
+               }
+
+               /* Set default value for multicast hash bins */
+               plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+               /* Set default value for unicast filter entries */
+               plat_dat->unicast_filter_entries = 1;
+       }
+
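+       /* Force GMAC mode with enhanced descriptors and Tx/Rx checksum offload */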
+       plat_dat->has_gmac = 1;
+       plat_dat->enh_desc = 1;
+       plat_dat->tx_coe = 1;
+       plat_dat->rx_coe = 1;
+
+       dev_info(&pdev->dev, "Baikal Electronics DWMAC glue driver\n");
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id dwmac_baikal_match[] = {
+       { .compatible = "be,dwmac-3.710"},
+       { .compatible = "be,dwmac"},
+       { }
+};
+MODULE_DEVICE_TABLE(of, dwmac_baikal_match);
+
+static struct platform_driver dwmac_baikal_driver = {
+       .probe  = dwmac_baikal_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "baikal-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = of_match_ptr(dwmac_baikal_match),
+       },
+};
+module_platform_driver(dwmac_baikal_driver);
+
+MODULE_DESCRIPTION("Baikal dwmac glue driver");
+MODULE_LICENSE("GPL v2");
index 3587b2e7843ef348766da039552fc4673477feb2..8e0c06fc9cd201b9b6ece311a3987e707d5476a8 100644 (file)
@@ -21,6 +21,9 @@
 #define DMA_CONTROL            0x00001018      /* Ctrl (Operational Mode) */
 #define DMA_INTR_ENA           0x0000101c      /* Interrupt Enable */
 #define DMA_MISSED_FRAME_CTR   0x00001020      /* Missed Frame Counter */
+#define GMAC_GPIO              0x000000e0      /* GPIO register */
+
+#define GMAC_GPIO_GPO0         (1 << 8)        /* 0-output port */
 
 /* SW Reset */
 #define DMA_BUS_MODE_SFT_RESET 0x00000001      /* Software Reset */
index 1bc25aa86dbd2bc1e9a92aab6adf04c30959fb00..f75d81e1c957652f7e56f63ea9b9c64c3474b5cd 100644 (file)
@@ -22,6 +22,13 @@ int dwmac_dma_reset(void __iomem *ioaddr)
        value |= DMA_BUS_MODE_SFT_RESET;
        writel(value, ioaddr + DMA_BUS_MODE);
 
+#ifdef CONFIG_MACH_BAIKAL_BFK3
+       /* Clear PHY reset */
+       value = readl(ioaddr + GMAC_GPIO);
+       value |= GMAC_GPIO_GPO0;
+       writel(value, ioaddr + GMAC_GPIO);
+#endif /* CONFIG_MACH_BAIKAL_BFK3 */
+
        err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
                                 !(value & DMA_BUS_MODE_SFT_RESET),
                                 10000, 100000);
index d993fc7e82c3a818957eeda00f5831615bc36c76..f5dbfc405a7583b6deb41951ae20b62c75d01127 100644 (file)
@@ -12,6 +12,8 @@
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
 #define DRV_MODULE_VERSION     "Jan_2016"
 
+#define STMMAC_TX_NEED_RESCHED (-1)
+
 #include <linux/clk.h>
 #include <linux/if_vlan.h>
 #include <linux/stmmac.h>
@@ -37,6 +39,9 @@ struct stmmac_tx_info {
        unsigned len;
        bool last_segment;
        bool is_jumbo;
+       u8 tx_source_type;
+       void *page_addr;
+       struct page *page;
 };
 
 /* Frequently used values are kept adjacent for cache effect */
@@ -56,6 +61,15 @@ struct stmmac_tx_queue {
        u32 mss;
 };
 
+/* For XSK socket */
+struct stmmac_xsk_desc_map {
+       /* For Rx descriptors only */
+       dma_addr_t dma_addr;
+       void *cpu_addr;
+       u64 handle;
+       u32 page_offset;
+};
+
 struct stmmac_rx_buffer {
        struct page *page;
        struct page *sec_page;
@@ -82,6 +96,15 @@ struct stmmac_rx_queue {
                unsigned int len;
                unsigned int error;
        } state;
+
+       /* AF_XDP support */
+       struct xdp_rxq_info xdp_rxq;
+       struct zero_copy_allocator zca;
+       /* Buffer info (extra data) is used for XSK */
+       struct stmmac_xsk_desc_map desc_map[DMA_RX_SIZE];
+       struct timer_list rx_init_timer;
+       struct timer_list rx_refill_timer;
+       bool rx_empty;
 };
 
 struct stmmac_channel {
@@ -220,6 +243,7 @@ struct stmmac_priv {
 
        unsigned long state;
        struct workqueue_struct *wq;
+       struct workqueue_struct *refill_wq;
        struct work_struct service_task;
 
        /* TC Handling */
@@ -234,6 +258,19 @@ struct stmmac_priv {
 
        /* Receive Side Scaling */
        struct stmmac_rss rss;
+
+       /* AF_XDP support */
+       struct bpf_prog *xdp_prog;
+       struct xdp_umem **xsk_umems;
+       struct zero_copy_allocator zca;
+       u16 num_xsk_umems_used;
+       u16 num_xsk_umems;
+
+       /* AF_XDP adds an asynchronous transmit path alongside start_xmit(), so a
+        * locking mechanism is added to avoid races. The Rx locking mechanism is
+        * built into the Rx cleaning function.
+        */
+       atomic_t tx_lock;
 };
 
 enum stmmac_state {
@@ -243,6 +280,15 @@ enum stmmac_state {
        STMMAC_SERVICE_SCHED,
 };
 
+int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue);
+int stmmac_hw_restrict_setup(struct net_device *dev, bool init_ptp);
+int init_dma_desc_rings(struct net_device *dev, gfp_t flags);
+void init_dma_rx_desc_rings_xsk(struct net_device *dev);
+void stmmac_stop_all_dma(struct stmmac_priv *priv);
+void stmmac_start_all_dma(struct stmmac_priv *priv);
+void free_dma_rx_desc_resources(struct stmmac_priv *priv);
+int alloc_dma_rx_desc_resources(struct stmmac_priv *priv);
+
 int stmmac_mdio_unregister(struct net_device *ndev);
 int stmmac_mdio_register(struct net_device *ndev);
 int stmmac_mdio_reset(struct mii_bus *mii);
@@ -258,6 +304,7 @@ int stmmac_dvr_probe(struct device *device,
                     struct stmmac_resources *res);
 void stmmac_disable_eee_mode(struct stmmac_priv *priv);
 bool stmmac_eee_init(struct stmmac_priv *priv);
+u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue);
 
 #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
 void stmmac_selftest_run(struct net_device *dev,
index 6a3b0f76d9729a9252fa98ca276749a24f42b648..37f26446ff6ee8c2424b6cea8b536bdeea3c310b 100644 (file)
@@ -44,6 +44,7 @@
 #include "dwmac1000.h"
 #include "dwxgmac2.h"
 #include "hwif.h"
+#include "stmmac_xsk.h"
 
 #define        STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
 #define        TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
@@ -262,7 +263,7 @@ static void print_pkt(unsigned char *buf, int len)
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
 }
 
-static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
+inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 {
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        u32 avail;
@@ -280,7 +281,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
  * @priv: driver private structure
  * @queue: RX queue index
  */
-static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
+u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        u32 dirty;
@@ -1263,6 +1264,47 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
        }
 }
 
+/**
+ * init_dma_rx_desc_rings_xsk() - initialize allocated XSK descriptor rings
+ * @dev: associated net device
+ */
+void init_dma_rx_desc_rings_xsk(struct net_device *dev)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_count = priv->plat->rx_queues_to_use;
+       int bfsize = 0;
+       int queue;
+
+       bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+       if (bfsize < 0)
+               bfsize = 0;
+
+       if (bfsize < BUF_SIZE_16KiB)
+               bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+
+       priv->dma_buf_sz = bfsize;
+
+       for (queue = 0; queue < rx_count; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               netif_dbg(priv, probe, priv->dev,
+                         "(%s) dma_rx_phy=0x%08x\n", __func__,
+                         (u32)rx_q->dma_rx_phy);
+
+               stmmac_clear_rx_descriptors(priv, queue);
+
+               /* Setup the chained descriptor addresses */
+               if (priv->mode == STMMAC_CHAIN_MODE) {
+                       if (priv->extend_desc)
+                               stmmac_mode_init(priv, rx_q->dma_erx,
+                                                rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
+                       else
+                               stmmac_mode_init(priv, rx_q->dma_rx,
+                                                rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
+               }
+       }
+}
+
 /**
  * init_dma_rx_desc_rings - init the RX descriptor rings
  * @dev: net device structure
@@ -1271,7 +1313,7 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_count = priv->plat->rx_queues_to_use;
@@ -1402,7 +1444,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;
@@ -1464,7 +1506,7 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
  * free_dma_rx_desc_resources - free RX dma desc resources
  * @priv: private structure
  */
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+void free_dma_rx_desc_resources(struct stmmac_priv *priv)
 {
        u32 rx_count = priv->plat->rx_queues_to_use;
        u32 queue;
@@ -1531,7 +1573,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
  */
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 {
        u32 rx_count = priv->plat->rx_queues_to_use;
        int ret = -ENOMEM;
@@ -1759,7 +1801,7 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
  * Description:
  * This starts all the RX and TX DMA channels
  */
-static void stmmac_start_all_dma(struct stmmac_priv *priv)
+void stmmac_start_all_dma(struct stmmac_priv *priv)
 {
        u32 rx_channels_count = priv->plat->rx_queues_to_use;
        u32 tx_channels_count = priv->plat->tx_queues_to_use;
@@ -1778,7 +1820,7 @@ static void stmmac_start_all_dma(struct stmmac_priv *priv)
  * Description:
  * This stops the RX and TX DMA channels
  */
-static void stmmac_stop_all_dma(struct stmmac_priv *priv)
+void stmmac_stop_all_dma(struct stmmac_priv *priv)
 {
        u32 rx_channels_count = priv->plat->rx_queues_to_use;
        u32 tx_channels_count = priv->plat->tx_queues_to_use;
@@ -1860,17 +1902,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
-static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 {
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        unsigned int bytes_compl = 0, pkts_compl = 0;
        unsigned int entry, count = 0;
+       struct stmmac_tx_info *tx_info;
+       unsigned int xsk_return = 0;
 
        __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
-
        priv->xstats.tx_clean++;
-
        entry = tx_q->dirty_tx;
+
        while ((entry != tx_q->cur_tx) && (count < budget)) {
                struct sk_buff *skb = tx_q->tx_skbuff[entry];
                struct dma_desc *p;
@@ -1881,14 +1924,54 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
                else
                        p = tx_q->dma_tx + entry;
 
-               status = stmmac_tx_status(priv, &priv->dev->stats,
-                               &priv->xstats, p, priv->ioaddr);
-               /* Check if the descriptor is owned by the DMA */
+               status = stmmac_tx_status(priv, &priv->dev->stats, &priv->xstats, p,
+                                                                 priv->ioaddr);
+
+               /* Check if descriptor is ready to use */
                if (unlikely(status & tx_dma_own))
                        break;
 
                count++;
 
+               /* Different descriptor types are cleaned in different ways */
+               tx_info = tx_q->tx_skbuff_dma + entry;
+               if (tx_info->tx_source_type == STMMAC_TX_SOURCE_UMEM) {
+                       /* UMEM */
+                       tx_info->tx_source_type = STMMAC_TX_SOURCE_RESET;
+
+                       stmmac_clean_desc3(priv, tx_q, p);
+                       stmmac_release_tx_desc(priv, p, priv->mode);
+                       tx_q->tx_skbuff_dma[entry].buf = 0;
+                       tx_q->tx_skbuff_dma[entry].len = 0;
+                       tx_q->tx_skbuff_dma[entry].map_as_page = false;
+                       tx_q->tx_skbuff_dma[entry].last_segment = false;
+                       tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+                       entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+                       xsk_return++;
+                       continue;
+               } else if (tx_info->tx_source_type == STMMAC_TX_SOURCE_FRAME) {
+                       /* UMEM frame */
+                       tx_info->tx_source_type = STMMAC_TX_SOURCE_RESET;
+
+                       dma_unmap_single(priv->device,
+                                        tx_q->tx_skbuff_dma[entry].buf,
+                                        tx_q->tx_skbuff_dma[entry].len,
+                                        DMA_TO_DEVICE);
+
+                       stmmac_clean_desc3(priv, tx_q, p);
+                       stmmac_release_tx_desc(priv, p, priv->mode);
+                       tx_q->tx_skbuff_dma[entry].buf = 0;
+                       tx_q->tx_skbuff_dma[entry].len = 0;
+                       tx_q->tx_skbuff_dma[entry].map_as_page = false;
+                       tx_q->tx_skbuff_dma[entry].last_segment = false;
+                       tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+                       entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+                       continue;
+               }
+               /* if STMMAC_TX_SOURCE_SKB */
+
                /* Make sure descriptor fields are read after reading
                 * the own bit.
                 */
@@ -1912,11 +1995,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
                                               tx_q->tx_skbuff_dma[entry].buf,
                                               tx_q->tx_skbuff_dma[entry].len,
                                               DMA_TO_DEVICE);
-                       else
+                       else {
                                dma_unmap_single(priv->device,
                                                 tx_q->tx_skbuff_dma[entry].buf,
                                                 tx_q->tx_skbuff_dma[entry].len,
                                                 DMA_TO_DEVICE);
+                       }
                        tx_q->tx_skbuff_dma[entry].buf = 0;
                        tx_q->tx_skbuff_dma[entry].len = 0;
                        tx_q->tx_skbuff_dma[entry].map_as_page = false;
@@ -1940,6 +2024,13 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
        }
        tx_q->dirty_tx = entry;
 
+       if (xsk_return != 0) {
+               xsk_umem_complete_tx(priv->xsk_umems[queue], xsk_return);
+
+               if (xsk_umem_uses_need_wakeup(priv->xsk_umems[queue]))
+                       xsk_set_tx_need_wakeup(priv->xsk_umems[queue]);
+       }
+
        netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
                                  pkts_compl, bytes_compl);
 
@@ -1947,8 +2038,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
                                                                queue))) &&
            stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
 
-               netif_dbg(priv, tx_done, priv->dev,
-                         "%s: restart transmit\n", __func__);
+               netif_dbg(priv, tx_done, priv->dev, "%s: restart transmit\n", __func__);
                netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
        }
 
@@ -2506,6 +2596,111 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
        }
 }
 
+/**
+ * stmmac_hw_restrict_setup() - set up GEMAC HW without configuring/starting DMA
+ * @dev: network device associated with HW
+ * @init_ptp: flag to do initialization of ptp
+ *
+ * This restricted version of stmmac_hw_setup() is used to support AF_XDP.
+ * The main reason is to avoid starting DMA transactions. It also does not
+ * reset the whole GEMAC controller in hardware, but only sets registers to
+ * their initial values, which makes a restart quicker than a full HW reset.
+ */
+int stmmac_hw_restrict_setup(struct net_device *dev, bool init_ptp)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
+       u32 chan;
+       int ret;
+
+       /* DMA initialization and SW reset */
+       ret = stmmac_init_dma_engine_xsk(priv);
+       if (ret < 0) {
+               netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+                          __func__);
+               return ret;
+       }
+
+       /* Copy the MAC addr into the HW  */
+       stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+
+       /* PS and related bits will be programmed according to the speed */
+       if (priv->hw->pcs) {
+               int speed = priv->plat->mac_port_sel_speed;
+
+               if ((speed == SPEED_10) || (speed == SPEED_100) ||
+                   (speed == SPEED_1000)) {
+                       priv->hw->ps = speed;
+               } else {
+                       dev_warn(priv->device, "invalid port speed\n");
+                       priv->hw->ps = 0;
+               }
+       }
+
+       /* Initialize MTL*/
+       stmmac_mtl_configuration(priv);
+
+       /* Initialize Safety Features */
+       stmmac_safety_feat_configuration(priv);
+
+       ret = stmmac_rx_ipc(priv, priv->hw);
+       if (!ret) {
+               netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+               priv->hw->rx_csum = 0;
+       }
+
+       /* Set the HW DMA mode and the COE */
+       stmmac_dma_operation_mode(priv);
+
+       stmmac_mmc_setup(priv);
+
+       if (init_ptp) {
+               ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+               if (ret < 0)
+                       netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
+
+               ret = stmmac_init_ptp(priv);
+               if (ret == -EOPNOTSUPP)
+                       netdev_warn(priv->dev, "PTP not supported by HW\n");
+               else if (ret)
+                       netdev_warn(priv->dev, "PTP init failed\n");
+       }
+
+       priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+
+       if (priv->use_riwt) {
+               ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
+               if (!ret)
+                       priv->rx_riwt = MIN_DMA_RIWT;
+       }
+
+       if (priv->hw->pcs)
+               stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+
+       /* set TX and RX rings length */
+       stmmac_set_rings_length(priv);
+
+       /* Enable TSO */
+       if (priv->tso) {
+               for (chan = 0; chan < tx_cnt; chan++)
+                       stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
+       }
+
+       /* Enable Split Header */
+       if (priv->sph && priv->hw->rx_csum) {
+               for (chan = 0; chan < rx_cnt; chan++)
+                       stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
+       }
+
+       /* VLAN Tag Insertion */
+       if (priv->dma_cap.vlins)
+               stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+
+       return 0;
+}
+
 /**
  * stmmac_hw_setup - setup mac in a usable state.
  *  @dev : pointer to the device structure.
@@ -2734,6 +2929,8 @@ static int stmmac_open(struct net_device *dev)
        stmmac_enable_all_queues(priv);
        netif_tx_start_all_queues(priv->dev);
 
+       atomic_set(&priv->tx_lock, 0);
+
        return 0;
 
 lpiirq_error:
@@ -3133,27 +3330,39 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_addr_t des;
        bool has_vlan;
        int entry;
+       u32 tx_avail = stmmac_tx_avail(priv, queue);
 
        tx_q = &priv->tx_queue[queue];
 
+       if (atomic_read(&priv->tx_lock))
+               return NETDEV_TX_BUSY;
+       else
+               atomic_set(&priv->tx_lock, 1);
+
        if (priv->tx_path_in_lpi_mode)
                stmmac_disable_eee_mode(priv);
 
        /* Manage oversized TCP frames for GMAC4 device */
        if (skb_is_gso(skb) && priv->tso) {
-               if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
-                       return stmmac_tso_xmit(skb, dev);
+               if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+                       netdev_tx_t result;
+
+                       result = stmmac_tso_xmit(skb, dev);
+                       atomic_set(&priv->tx_lock, 0);
+
+                       return result;
+               }
        }
 
-       if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+       if (unlikely(tx_avail < nfrags + 1)) {
                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
-                       netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
-                                                               queue));
+                       netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
                        /* This is a hard error, log it. */
-                       netdev_err(priv->dev,
-                                  "%s: Tx Ring full when queue awake\n",
-                                  __func__);
+                       netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n",
+                                          __func__);
                }
+               atomic_set(&priv->tx_lock, 0);
+
                return NETDEV_TX_BUSY;
        }
 
@@ -3330,12 +3539,17 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
        stmmac_tx_timer_arm(priv, queue);
 
+       atomic_set(&priv->tx_lock, 0);
+
        return NETDEV_TX_OK;
 
 dma_map_err:
        netdev_err(priv->dev, "Tx DMA map failed\n");
        dev_kfree_skb(skb);
        priv->dev->stats.tx_dropped++;
+
+       atomic_set(&priv->tx_lock, 0);
+
        return NETDEV_TX_OK;
 }
 
@@ -3667,9 +3881,14 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
 
        priv->xstats.napi_poll++;
 
-       work_done = stmmac_rx(priv, budget, chan);
+       if (priv->xsk_umems && priv->xsk_umems[chan])
+               work_done = stmmac_rx_xsk(priv, budget, chan);
+       else
+               work_done = stmmac_rx(priv, budget, chan);
+
        if (work_done < budget && napi_complete_done(napi, work_done))
                stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
        return work_done;
 }
 
@@ -3680,12 +3899,26 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
        struct stmmac_priv *priv = ch->priv_data;
        struct stmmac_tx_queue *tx_q;
        u32 chan = ch->index;
-       int work_done;
+       int work_done = 0;
 
        priv->xstats.napi_poll++;
 
-       work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
-       work_done = min(work_done, budget);
+       if (priv->xsk_umems && priv->xsk_umems[chan]) {
+               stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
+       } else {
+               work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
+               work_done = min(work_done, budget);
+       }
+
+       /* If UMEM is used then start transmit with zero-copy */
+       if (!atomic_read(&priv->tx_lock)) {
+               atomic_set(&priv->tx_lock, 1);
+
+               if (priv->xsk_umems && priv->xsk_umems[chan])
+                       work_done = stmmac_xdp_transmit_zc(priv, budget);
+
+               atomic_set(&priv->tx_lock, 0);
+       }
 
        if (work_done < budget)
                napi_complete_done(napi, work_done);
@@ -3712,7 +3945,6 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
 static void stmmac_tx_timeout(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
-
        stmmac_global_err(priv);
 }
 
@@ -4305,6 +4537,9 @@ static const struct net_device_ops stmmac_netdev_ops = {
        .ndo_set_mac_address = stmmac_set_mac_address,
        .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
+       .ndo_bpf = stmmac_bpf,
+       .ndo_xdp_xmit = stmmac_xdp_xmit,
+       .ndo_xsk_wakeup = stmmac_xsk_wakeup,
 };
 
 static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -4492,6 +4727,12 @@ int stmmac_dvr_probe(struct device *device,
                return -ENOMEM;
        }
 
+       priv->refill_wq = create_singlethread_workqueue("stmmac_refill_wq");
+       if (!priv->refill_wq) {
+               dev_err(priv->device, "failed to create refill workqueue\n");
+               return -ENOMEM;
+       }
+
        INIT_WORK(&priv->service_task, stmmac_service_task);
 
        /* Override with kernel parameters if supplied XXX CRS XXX
@@ -4693,6 +4934,7 @@ error_mdio_register:
        }
 error_hw_init:
        destroy_workqueue(priv->wq);
+       destroy_workqueue(priv->refill_wq);
 
        return ret;
 }
@@ -4729,6 +4971,7 @@ int stmmac_dvr_remove(struct device *dev)
            priv->hw->pcs != STMMAC_PCS_RTBI)
                stmmac_mdio_unregister(ndev);
        destroy_workqueue(priv->wq);
+       destroy_workqueue(priv->refill_wq);
        mutex_destroy(&priv->lock);
 
        return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xsk.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xsk.c
new file mode 100644 (file)
index 0000000..52e7a88
--- /dev/null
@@ -0,0 +1,1534 @@
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+#include "stmmac_xsk.h"
+
+/**
+ * stmmac_zca_free() - reuse UMEM memory for the current dirty Rx descriptor
+ * @alloc: copy allocator structure
+ * @handle: handle of the UMEM buffer
+ *
+ * Called when the zero-copy memory model is used and a buffer is converted
+ * to an XDP frame.
+ */
+static void stmmac_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
+{
+       int queue = 0;
+       struct stmmac_priv *priv = container_of(alloc, struct stmmac_priv, zca);
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       unsigned int entry = rx_q->dirty_rx;
+       struct stmmac_xsk_desc_map *buf = &rx_q->desc_map[entry];
+       struct xdp_umem *umem = priv->xsk_umems[queue];
+       struct dma_desc *p;
+       u64 hr, mask;
+       size_t len;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       if (priv->extend_desc)
+               p = (struct dma_desc *)(rx_q->dma_erx + entry);
+       else
+               p = rx_q->dma_rx + entry;
+
+       hr = umem->headroom + XDP_PACKET_HEADROOM;
+       mask = umem->chunk_mask;
+       handle &= mask;
+
+       buf->dma_addr = xdp_umem_get_dma(umem, handle);
+       buf->dma_addr += hr;
+       buf->cpu_addr = xdp_umem_get_data(umem, handle);
+       buf->cpu_addr += hr;
+       buf->handle = xsk_umem_adjust_offset(umem, (u64)handle, umem->headroom);
+
+       len = priv->xsk_umems[queue]->chunk_size_nohr - XDP_PACKET_HEADROOM;
+       dma_sync_single_range_for_device(priv->device, buf->dma_addr,
+                                        buf->page_offset, len,
+                                        DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(priv->device, buf->dma_addr)) {
+               netdev_err(priv->dev, "Rx DMA map failed\n");
+               return;
+       }
+
+       stmmac_set_desc_addr(priv, p, buf->dma_addr);
+       stmmac_refill_desc3(priv, rx_q, p);
+
+       dma_wmb();
+       stmmac_set_rx_owner(priv, p, priv->use_riwt);
+       dma_wmb();
+
+       rx_q->dirty_rx = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+
+       DBG("%s<--\n", __FUNCTION__);
+}
+
+/**
+ * stmmac_alloc_frames_for_xsk() - allocate memory for XSK frames
+ * @priv: gemac main structure
+ * @queue: queue number of the net device
+ *
+ * Any XSK packet from UMEM can be converted to a frame for use in the stack
+ * or for retransmission, so the memory is allocated in advance.
+ */
+static int stmmac_alloc_frames_for_xsk(struct stmmac_priv *priv, int queue)
+{
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+       size_t len = priv->xsk_umems[queue]->chunk_size_nohr - XDP_PACKET_HEADROOM;
+       int err_i;
+       int i;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       for (i = 0; i < DMA_TX_SIZE; i++) {
+               tx_q->tx_skbuff_dma[i].page = kcalloc(1, len, GFP_KERNEL);
+               if (!tx_q->tx_skbuff_dma[i].page)
+                       goto err;
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+
+err:
+       pr_err("AF_XDP: cannot allocate memory for XSK frames\n");
+
+       err_i = i;
+       for (i = 0; i < err_i; ++i)
+               kfree(tx_q->tx_skbuff_dma[i].page);
+
+       return -ENOMEM;
+}
+
+/**
+ * stmmac_free_frames_for_xsk() - free memory for XSK frames
+ * @priv: gemac main structure
+ * @queue: queue number of the net device
+ */
+static void stmmac_free_frames_for_xsk(struct stmmac_priv *priv, int queue)
+{
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+       int i;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       for (i = 0; i < DMA_TX_SIZE; i++)
+               kfree(tx_q->tx_skbuff_dma[i].page);
+
+       DBG("%s<--\n", __FUNCTION__);
+}
+
+/**
+ * stmmac_xmit_xdp_frame() - transmit one XSK frame
+ * @priv: gemac main structure
+ * @xdpf: pointer to the frame for transmitting
+ *
+ * Return: STMMAC_XDP_TX on success, STMMAC_XDP_CONSUMED on failure
+ */
+static int stmmac_xmit_xdp_frame(struct stmmac_priv *priv, struct xdp_frame *xdpf)
+{
+       int queue = 0;
+       struct dma_desc *desc;
+       struct stmmac_tx_queue *tx_q = priv->tx_queue + queue;
+       int entry = tx_q->cur_tx;
+       dma_addr_t dma;
+       u32 tx_avail = stmmac_tx_avail(priv, queue);
+
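+       /* Serialize with the regular transmit path; report the frame as consumed if busy */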
+       if (atomic_read(&priv->tx_lock))
+               return STMMAC_XDP_CONSUMED;
+       else
+               atomic_set(&priv->tx_lock, 1);
+
+       if (!tx_avail)
+               goto err;
+
+       if (priv->extend_desc)
+               desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+       else
+               desc = tx_q->dma_tx + entry;
+
+       memcpy(tx_q->tx_skbuff_dma[entry].page, xdpf->data, xdpf->len);
+       dma = dma_map_single(priv->device, tx_q->tx_skbuff_dma[entry].page,
+                            xdpf->len, DMA_TO_DEVICE);
+       if (dma_mapping_error(priv->device, dma))
+               goto err;
+
+       tx_q->cur_tx = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+       tx_q->tx_skbuff_dma[entry].tx_source_type = STMMAC_TX_SOURCE_FRAME;
+       tx_q->tx_skbuff_dma[entry].buf = dma;
+       tx_q->tx_skbuff_dma[entry].len = xdpf->len;
+       tx_q->tx_skbuff_dma[entry].page_addr = xdpf->data;
+
+       stmmac_set_desc_addr(priv, desc, dma);
+       dma_wmb();
+       stmmac_prepare_tx_desc(priv, desc, 1, xdpf->len, 0, priv->mode, 1, 1,
+                              xdpf->len);
+       dma_wmb();
+       stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+       __free_page(virt_to_page(xdpf->data));
+
+       atomic_set(&priv->tx_lock, 0);
+
+       return STMMAC_XDP_TX;
+
+err:
+       __free_page(virt_to_page(xdpf->data));
+
+       atomic_set(&priv->tx_lock, 0);
+
+       return STMMAC_XDP_CONSUMED;
+}
+
+/**
+ * stmmac_xdp_transmit_zc() - transmit UMEM packets
+ * @priv: gemac main structure
+ * @napi_budget: budget to retransmit
+ *
+ * Main transmit routine. Only packets taken from UMEM are transmitted.
+ *
+ * Return: number of packets that have been transmitted.
+ */
+int stmmac_xdp_transmit_zc(struct stmmac_priv *priv, int napi_budget)
+{
+       struct dma_desc *desc;
+       struct xdp_desc xdp_desc;
+       struct stmmac_tx_queue *tx_q = priv->tx_queue;
+       struct stmmac_tx_info *tx_info;
+       int entry = tx_q->cur_tx;
+       int csum_insertion = 0;
+       int queue = 0;
+       u32 desc_processed = 0;
+       u32 desc_available = stmmac_tx_avail(priv, queue);
+       dma_addr_t dma;
+
+       /* Keep some descriptors in reserve for the regular ndo_start_xmit() path */
+       if (desc_available < STMMAC_TX_XMIT_SAFE_AMOUNT)
+               return 0;
+
+       while ((napi_budget-- > 0) && (desc_available-- > 0)) {
+               /* Acquire data from UMEM */
+               if (!xsk_umem_consume_tx(priv->xsk_umems[queue], &xdp_desc))
+                       break;
+
+               /* Get descriptor by index */
+               if (likely(priv->extend_desc))
+                       desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+               else
+                       desc = tx_q->dma_tx + entry;
+
+               /* The source type is used when cleaning Tx descriptors */
+               tx_info = tx_q->tx_skbuff_dma + entry;
+               tx_info->tx_source_type = STMMAC_TX_SOURCE_UMEM;
+
+               /* Prepare for use with GEMAC */
+               dma = xdp_umem_get_dma(priv->xsk_umems[queue], xdp_desc.addr);
+               dma_sync_single_for_device(priv->device, dma, xdp_desc.len,
+                                          DMA_BIDIRECTIONAL);
+
+               /* Fill in descriptors with data */
+               stmmac_set_desc_addr(priv, desc, dma);
+               stmmac_prepare_tx_desc(priv, desc, 1, xdp_desc.len,
+                                      csum_insertion, priv->mode, 1, 1,
+                                      xdp_desc.len);
+
+               entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+               tx_q->cur_tx = entry;
+
+               ++desc_processed;
+       }
+
+       /* Notify socket in user space about written data */
+       if (desc_processed)
+               xsk_umem_consume_tx_done(priv->xsk_umems[queue]);
+
+       stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+       return desc_processed;
+}
+
+/**
+ * stmmac_rx_refill_xsk() - try to acquire descriptors for XSK from UMEM
+ * @priv: gemac main structure
+ * @queue: queue number of the net device
+ *
+ * Return: number of descriptors acquired from UMEM
+ */
+static int stmmac_rx_refill_xsk(struct stmmac_priv *priv, u32 queue)
+{
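+       /* Serializes refill attempts that can race, e.g. the Rx path and the refill timer */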
+       static atomic_t lock = ATOMIC_INIT(0);
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       struct stmmac_xsk_desc_map *buf;
+       struct xdp_umem *umem = NULL;
+       unsigned int entry = rx_q->dirty_rx;
+       unsigned long timeout = jiffies + msecs_to_jiffies(1);
+       int dirty = stmmac_rx_dirty(priv, queue);
+       int cleaned = 0;
+       u64 hr = 0;
+       u64 handle;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       if ((priv->xsk_umems == NULL) || (priv->xsk_umems[queue] == NULL))
+               return -EPERM;
+
+       umem = priv->xsk_umems[queue];
+       hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+       if (atomic_read(&lock))
+               return -EBUSY;
+
+       atomic_set(&lock, 1);
+
+       while (dirty-- > 0) {
+               struct dma_desc *p;
+               size_t buf_size;
+
+               /* Buffer info (extra data) is used for XSK */
+               buf = &rx_q->desc_map[entry];
+
+               if (priv->extend_desc)
+                       p = (struct dma_desc *)(rx_q->dma_erx + entry);
+               else
+                       p = rx_q->dma_rx + entry;
+
+               /* Acquire UMEM handle */
+               if (!xsk_umem_peek_addr(priv->xsk_umems[queue], &handle)) {
+                       if (rx_q->rx_empty) {
+                               /* Notify user space to clear RX queue and refill FQ queue */
+                               if (xsk_umem_uses_need_wakeup(priv->xsk_umems[queue]))
+                                       xsk_set_rx_need_wakeup(priv->xsk_umems[queue]);
+
+                               /* Try to acquire descriptors greedily */
+                               if (time_after(jiffies, timeout))
+                                       break;
+                               else
+                                       continue;
+                       }
+
+                       break;
+               }
+               dirty--;
+
+               buf->dma_addr = xdp_umem_get_dma(umem, handle);
+               buf->dma_addr += hr;
+               buf->cpu_addr = xdp_umem_get_data(umem, handle);
+               buf->cpu_addr += hr;
+               buf->handle = handle + umem->headroom;
+
+               /* Notify UMEM that we have taken one element */
+               xsk_umem_discard_addr(priv->xsk_umems[queue]);
+
+               rx_q->rx_empty = false;
+
+               buf_size = priv->xsk_umems[queue]->chunk_size_nohr - XDP_PACKET_HEADROOM;
+               dma_sync_single_range_for_device(priv->device, buf->dma_addr,
+                                                buf->page_offset, buf_size,
+                                                DMA_BIDIRECTIONAL);
+
+               if (dma_mapping_error(priv->device, buf->dma_addr)) {
+                       netdev_err(priv->dev, "Rx DMA map failed\n");
+                       break;
+               }
+
+               stmmac_set_desc_addr(priv, p, buf->dma_addr);
+               stmmac_refill_desc3(priv, rx_q, p);
+
+               if (rx_q->rx_zeroc_thresh > 0)
+                       rx_q->rx_zeroc_thresh--;
+
+               dma_wmb();
+               stmmac_set_rx_owner(priv, p, priv->use_riwt);
+               dma_wmb();
+               entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+
+               ++cleaned;
+       }
+       rx_q->dirty_rx = entry;
+
+       atomic_set(&lock, 0);
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return cleaned;
+}
+
+/**
+ * stmmac_initial_refill() - try to acquire descriptors for XSK from UMEM in
+ * the initialization process.
+ * @priv: gemac main structure
+ * @queue: queue number of the net device
+ *
+ * Return: number of descriptors acquired from UMEM
+ */
+static int stmmac_initial_refill(struct stmmac_priv *priv, u32 queue)
+{
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       struct dma_desc *p;
+       struct xdp_umem *umem;
+       int result = 0;
+       int count = 0;
+       int i;
+       u64 hr;
+       u64 handle;
+       size_t len;
+
+       /* Check if UMEM is initialized */
+       if (priv->num_xsk_umems_used == 0)
+               return result;
+
+       umem = priv->xsk_umems[queue];
+       hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+       for (i = 0; i < DMA_RX_SIZE; ++i) {
+               /* Get descriptor pointer */
+               if (priv->extend_desc)
+                       p = (struct dma_desc *)(rx_q->dma_erx + i);
+               else
+                       p = rx_q->dma_rx + i;
+
+               /* Peek UMEM element */
+               if (!xsk_umem_peek_addr(priv->xsk_umems[queue], &handle))
+                       break;
+
+               /* Place UMEM element to store */
+               rx_q->desc_map[i].dma_addr = xdp_umem_get_dma(umem, handle);
+               rx_q->desc_map[i].dma_addr += hr;
+               rx_q->desc_map[i].cpu_addr = xdp_umem_get_data(umem, handle);
+               rx_q->desc_map[i].cpu_addr += hr;
+               rx_q->desc_map[i].handle = handle + umem->headroom;
+
+               /* Notify UMEM that we have taken one element */
+               xsk_umem_discard_addr(priv->xsk_umems[queue]);
+
+               /* Sync DMA for use on device */
+               len = priv->xsk_umems[queue]->chunk_size_nohr - XDP_PACKET_HEADROOM;
+               dma_sync_single_range_for_device(priv->device,
+                                                rx_q->desc_map[i].dma_addr,
+                                                rx_q->desc_map[i].page_offset,
+                                                len,
+                                                DMA_BIDIRECTIONAL);
+
+               if (dma_mapping_error(priv->device, rx_q->desc_map[i].dma_addr)) {
+                       netdev_err(priv->dev, "Rx DMA map failed\n");
+                       break;
+               }
+
+               /* Setup DMA descriptor with new value */
+               stmmac_set_desc_addr(priv, p, rx_q->desc_map[i].dma_addr);
+               stmmac_refill_desc3(priv, rx_q, p);
+
+               dma_wmb();
+               stmmac_set_rx_owner(priv, p, priv->use_riwt);
+               dma_wmb();
+
+               ++count;
+       }
+
+       if (count)
+               rx_q->rx_empty = false;
+
+       /* Setup ring descriptor pointers */
+       rx_q->cur_rx = 0;
+       rx_q->dirty_rx = count % DMA_RX_SIZE;
+
+       DBG("Ring pointers [cur:dirty] = [%u:%u]\n", rx_q->cur_rx, rx_q->dirty_rx);
+
+       /* This is an unusual case; just notify about it */
+       if (count < DMA_RX_SIZE)
+               pr_info("AF_XDP: Rx DMA ring is not filled completely %u of %u\n",
+                       count, DMA_RX_SIZE);
+
+       return count;
+}
+
+/**
+ * stmmac_refill_timer() - timer routine for deferred refilling
+ * @t: associated timer
+ */
+static void stmmac_refill_timer(struct timer_list *t)
+{
+       struct stmmac_rx_queue *rx_q = from_timer(rx_q, t, rx_refill_timer);
+       struct stmmac_priv *priv = rx_q->priv_data;
+
+       stmmac_rx_refill_xsk(priv, rx_q->queue_index);
+
+       /* The timer can be adjusted to a different period in rx_poll_xsk() if necessary */
+       mod_timer(&rx_q->rx_refill_timer,
+                 jiffies + msecs_to_jiffies(STMMAC_REFILL_MS));
+}
+
+/**
+ * stmmac_rx_timer() - timer routine to service Rx initialization and refilling
+ * @t: associated timer
+ *
+ * It can happen that there are no UMEM descriptors left when initialization
+ * finishes, so run a timer that keeps trying to acquire UMEM descriptors and
+ * then completes the ring initialization.
+ */
+static void stmmac_rx_timer(struct timer_list *t)
+{
+       struct stmmac_rx_queue *rx_q = from_timer(rx_q, t, rx_init_timer);
+       struct stmmac_priv *priv = rx_q->priv_data;
+       int is_refilled = 0;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       /* Try the initial refill until it succeeds */
+       is_refilled = stmmac_initial_refill(priv, rx_q->queue_index);
+       if (!is_refilled) {
+               mod_timer(&rx_q->rx_init_timer,
+                         jiffies + msecs_to_jiffies(STMMAC_INITIAL_REFILL_MS));
+               return;
+       }
+
+       /* This helps to solve a problem with ownership of the first descriptor */
+       init_dma_rx_desc_rings_xsk(priv->dev);
+
+       pr_info("AF_XDP: started\n");
+       stmmac_mac_set(priv, priv->ioaddr, true);
+       stmmac_start_all_dma(priv);
+
+       timer_setup(&rx_q->rx_refill_timer, stmmac_refill_timer, 0);
+       mod_timer(&rx_q->rx_refill_timer,
+                 jiffies + msecs_to_jiffies(STMMAC_REFILL_MS));
+
+       DBG("%s<--\n", __FUNCTION__);
+}
+
+/**
+ * stmmac_init_rx_service_timer() - service timer initialization routine
+ * @priv: gemac main structure
+ */
+static void stmmac_init_rx_service_timer(struct stmmac_priv *priv)
+{
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[0];
+
+       timer_setup(&rx_q->rx_init_timer, stmmac_rx_timer, 0);
+       mod_timer(&rx_q->rx_init_timer, jiffies + msecs_to_jiffies(500));
+}
+
+/**
+ * stmmac_run_xdp_zc() - run the XDP program and act on its verdict
+ * @priv: gemac main structure
+ * @xdp: buffer to act on
+ *
+ * Return: code of the action taken on the xdp buffer
+ */
+static int stmmac_run_xdp_zc(struct stmmac_priv *priv, struct xdp_buff *xdp)
+{
+       struct bpf_prog *xdp_prog;
+       struct xdp_frame *xdpf;
+       int result = STMMAC_XDP_PASS;
+       int err;
+       u64 offset;
+       u32 act;
+
+       rcu_read_lock();
+
+       xdp_prog = READ_ONCE(priv->xdp_prog);
+       if (!xdp_prog) {
+               rcu_read_unlock();
+               return -1;
+       }
+
+       act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+       offset = xdp->data - xdp->data_hard_start;
+       xdp->handle = xsk_umem_adjust_offset(priv->xsk_umems[0], xdp->handle,
+                                            offset);
+
+       switch (act) {
+       case XDP_PASS:
+               break;
+       case XDP_TX:
+               xdpf = convert_to_xdp_frame(xdp);
+               if (unlikely(!xdpf)) {
+                       result = STMMAC_XDP_CONSUMED;
+                       break;
+               }
+               result = stmmac_xmit_xdp_frame(priv, xdpf);
+               break;
+       case XDP_REDIRECT:
+               err = xdp_do_redirect(priv->dev, xdp, xdp_prog);
+               result = !err ? STMMAC_XDP_REDIR : STMMAC_XDP_CONSUMED;
+               break;
+       default:
+               bpf_warn_invalid_xdp_action(act);
+               /* fall through */
+       case XDP_ABORTED:
+               trace_xdp_exception(priv->dev, xdp_prog, act);
+               /* fall through - drop the frame */
+       case XDP_DROP:
+               result = STMMAC_XDP_CONSUMED;
+               break;
+       }
+
+       rcu_read_unlock();
+
+       return result;
+}
+
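The program executed by stmmac_run_xdp_zc() is whatever was attached through XDP_SETUP_PROG; in a typical zero-copy deployment it simply redirects each frame to the AF_XDP socket registered in an XSKMAP, while an XDP_PASS verdict falls back to the SKB path handled below in stmmac_rx_xsk(). The following is a hypothetical sketch of such a program, not part of this patch; the map name xsks_map and the BTF-defined map syntax assume a reasonably recent clang/libbpf toolchain.

        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        /* One XSK socket per RX queue; user space inserts its socket fd here */
        struct {
                __uint(type, BPF_MAP_TYPE_XSKMAP);
                __uint(max_entries, 1);
                __type(key, __u32);
                __type(value, __u32);
        } xsks_map SEC(".maps");

        SEC("xdp")
        int xdp_sock_prog(struct xdp_md *ctx)
        {
                /* Redirect to the socket bound to this queue; XDP_PASS as the
                 * fallback keeps unmatched frames on the normal kernel path.
                 */
                return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
        }

        char _license[] SEC("license") = "GPL";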
+/**
+ * stmmac_rx_xsk() - main packet receive routine, registered as the Rx NAPI handler
+ * @priv: gemac main structure
+ * @limit: NAPI budget
+ * @queue: queue number of the net device
+ *
+ * This function can block (set a flag on) the receive queue, so no receive
+ * operation takes place until the queue has been refilled.
+ *
+ * Return: number of received packets
+ */
+int stmmac_rx_xsk(struct stmmac_priv *priv, int limit, u32 queue)
+{
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       struct stmmac_channel *ch = &priv->channel[queue];
+       struct sk_buff *skb = NULL;
+       struct xdp_buff xdp;
+       unsigned int next_entry = rx_q->cur_rx;
+       unsigned int count = 0;
+       unsigned int error = 0;
+       unsigned int len = 0;
+       unsigned int xdp_res;
+       int status = 0;
+       int coe = priv->hw->rx_csum;
+       bool do_flush = false;
+
+       if (netif_msg_rx_status(priv)) {
+               void *rx_head;
+
+               netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+               if (priv->extend_desc)
+                       rx_head = (void *)rx_q->dma_erx;
+               else
+                       rx_head = (void *)rx_q->dma_rx;
+
+               stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
+       }
+
+       while ((count < limit) && !rx_q->rx_empty) {
+               struct stmmac_xsk_desc_map *buf;
+               struct dma_desc *np, *p;
+               unsigned int sec_len;
+               unsigned int hlen = 0, prev_len = 0;
+               enum pkt_hash_types hash_type;
+               int entry;
+               u32 hash;
+
+               if (!count && rx_q->state_saved) {
+                       skb = rx_q->state.skb;
+                       error = rx_q->state.error;
+                       len = rx_q->state.len;
+               } else {
+                       rx_q->state_saved = false;
+                       skb = NULL;
+                       error = 0;
+                       len = 0;
+               }
+
+               if (count >= limit)
+                       break;
+
+read_again:
+               sec_len = 0;
+               entry = next_entry;
+               buf = rx_q->desc_map + entry;
+
+               if (priv->extend_desc)
+                       p = (struct dma_desc *)(rx_q->dma_erx + entry);
+               else
+                       p = rx_q->dma_rx + entry;
+
+               status = stmmac_rx_status(priv, &priv->dev->stats,
+                                         &priv->xstats, p);
+
+               /* Check if descriptor is ready to use */
+               if (unlikely(status & dma_own))
+                       break;
+
+               if (STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE) == rx_q->dirty_rx) {
+                       /* There are no more owned and refilled descriptors.
+                        * All descriptors are read and queue is empty. Notify upper level.
+                        */
+                       rx_q->rx_empty = true;
+               } else {
+                       rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+                       next_entry = rx_q->cur_rx;
+               }
+
+               if (priv->extend_desc)
+                       np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+               else
+                       np = rx_q->dma_rx + next_entry;
+
+               prefetch(np);
+               prefetch(buf->cpu_addr);
+
+               if (priv->extend_desc)
+                       stmmac_rx_extended_status(priv, &priv->dev->stats,
+                                                 &priv->xstats,
+                                                 rx_q->dma_erx + entry);
+
+               if (unlikely(status == discard_frame)) {
+                       error = 1;
+                       if (!priv->hwts_rx_en)
+                               priv->dev->stats.rx_errors++;
+               }
+
+               if (unlikely(error && (status & rx_not_ls)))
+                       goto read_again;
+
+               if (unlikely(error)) {
+                       dev_kfree_skb(skb);
+                       count++;
+                       continue;
+               }
+
+               /* Buffer is good. Go on. */
+
+               if (likely(status & rx_not_ls)) {
+                       len += priv->dma_buf_sz;
+               } else {
+                       prev_len = len;
+                       len = stmmac_get_rx_frame_len(priv, p, coe);
+
+                       /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+                        * Type frames (LLC/LLC-SNAP)
+                        *
+                        * llc_snap is never checked in GMAC >= 4, so this ACS
+                        * feature is always disabled and packets need to be
+                        * stripped manually.
+                        */
+                       if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+                           unlikely(status != llc_snap))
+                               len -= ETH_FCS_LEN;
+               }
+
+               /* Sanity check */
+               if (!len)
+                       continue;
+
+               /* It's time to run XDP program */
+               dma_sync_single_range_for_cpu(priv->device, buf->dma_addr,
+                                             buf->page_offset, len,
+                                             DMA_BIDIRECTIONAL);
+
+               xdp.rxq = &rx_q->xdp_rxq;
+               xdp.data = buf->cpu_addr;
+               xdp.data_meta = xdp.data;
+               xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+               xdp.data_end = xdp.data + len;
+               xdp.handle = buf->handle;
+
+               xdp_res = stmmac_run_xdp_zc(priv, &xdp);
+               if (xdp_res == STMMAC_XDP_REDIR) {
+                       count++;
+                       do_flush = true;
+                       continue;
+               } else if ((xdp_res == STMMAC_XDP_TX) || (xdp_res == STMMAC_XDP_CONSUMED)) {
+                       count++;
+                       continue;
+               }
+               /* Pass XDP packet forward to the network stack */
+
+               /* Allocate SKB if necessary */
+               if (!skb) {
+                       int ret = stmmac_get_rx_header_len(priv, p, &hlen);
+
+                       if (priv->sph && !ret && (hlen > 0)) {
+                               sec_len = len;
+                               if (!(status & rx_not_ls))
+                                       sec_len = sec_len - hlen;
+                               len = hlen;
+
+                               priv->xstats.rx_split_hdr_pkt_n++;
+                       }
+
+                       skb = napi_alloc_skb(&ch->rx_napi, len);
+                       if (!skb) {
+                               priv->dev->stats.rx_dropped++;
+                               count++;
+                               continue;
+                       }
+
+                       dma_sync_single_range_for_cpu(priv->device,
+                                                     buf->dma_addr,
+                                                     buf->page_offset, len,
+                                                     DMA_BIDIRECTIONAL);
+
+                       skb_copy_to_linear_data(skb, buf->cpu_addr, len);
+                       skb_put(skb, len);
+               } else {
+                       unsigned int buf_len = len - prev_len;
+
+                       if (likely(status & rx_not_ls))
+                               buf_len = priv->dma_buf_sz;
+
+                       dma_sync_single_range_for_cpu(priv->device,
+                                                     buf->dma_addr,
+                                                     buf->page_offset, len,
+                                                     DMA_BIDIRECTIONAL);
+
+                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                                       buf->cpu_addr, 0, buf_len,
+                                       priv->dma_buf_sz);
+               }
+
+               if (likely(status & rx_not_ls))
+                       goto read_again;
+
+               /* Got entire packet into SKB. Finish it. */
+               skb->protocol = eth_type_trans(skb, priv->dev);
+
+               if (unlikely(!coe))
+                       skb_checksum_none_assert(skb);
+               else
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+                       skb_set_hash(skb, hash, hash_type);
+
+               skb_record_rx_queue(skb, queue);
+               napi_gro_receive(&ch->rx_napi, skb);
+
+               priv->dev->stats.rx_packets++;
+               priv->dev->stats.rx_bytes += len;
+               count++;
+       }
+
+       if (status & rx_not_ls) {
+               rx_q->state_saved = true;
+               rx_q->state.skb = skb;
+               rx_q->state.error = error;
+               rx_q->state.len = len;
+       }
+
+       if (do_flush)
+               xdp_do_flush_map();
+
+       stmmac_rx_refill_xsk(priv, queue);
+       /* Decide whether and when to restart refilling */
+       if (stmmac_rx_dirty(priv, queue) > STMMAC_REFILL_GREEDILY_THRESHOLD) {
+               /* Notify the user application that we have run out of descriptors */
+               if (xsk_umem_uses_need_wakeup(priv->xsk_umems[queue]))
+                       xsk_set_rx_need_wakeup(priv->xsk_umems[queue]);
+
+               /* Start looking for descriptors the hard way */
+               mod_timer(&rx_q->rx_refill_timer,
+                         jiffies + msecs_to_jiffies(STMMAC_REFILL_GREEDILY_MS));
+       } else {
+               /* We don't want the user application to start looking for
+                * descriptors the hard way, so clear the flag
+                */
+               if (xsk_umem_uses_need_wakeup(priv->xsk_umems[queue]))
+                       xsk_clear_rx_need_wakeup(priv->xsk_umems[queue]);
+
+               /* Just check from time to time that refilling keeps going well */
+               mod_timer(&rx_q->rx_refill_timer,
+                         jiffies + msecs_to_jiffies(STMMAC_REFILL_MS));
+       }
+
+       priv->xstats.rx_pkt_n += count;
+
+       /* Sanity check. If it happens, notify the user and let NAPI carry on */
+       if (WARN_ONCE(count > limit, "NAPI return value higher than budget!\n"))
+               count = limit;
+
+       return count;
+}
+
+/**
+ * stmmac_remove_xsk_umem() - free UMEM resources
+ * @priv: gemac main structure
+ * @qid: queue number of the net device
+ */
+static void stmmac_remove_xsk_umem(struct stmmac_priv *priv, u16 qid)
+{
+       DBG("%s-->\n", __FUNCTION__);
+
+       priv->xsk_umems[qid] = NULL;
+       priv->num_xsk_umems_used--;
+
+       if (priv->num_xsk_umems == 0) {
+               kfree(priv->xsk_umems);
+               priv->xsk_umems = NULL;
+               priv->num_xsk_umems = 0;
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+}
+
+/**
+ * stmmac_alloc_xsk_umems() - alloc UMEM resources
+ * @priv: gemac main structure
+ *
+ * Return: 0 - ok      -ENOMEM - fail
+ */
+static int stmmac_alloc_xsk_umems(struct stmmac_priv *priv)
+{
+       if (priv->xsk_umems)
+               return 0;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       priv->num_xsk_umems_used = 0;
+       priv->num_xsk_umems = MTL_MAX_RX_QUEUES;
+       priv->xsk_umems = kcalloc(priv->num_xsk_umems, sizeof(*priv->xsk_umems),
+                                 GFP_KERNEL);
+       if (!priv->xsk_umems) {
+               priv->num_xsk_umems = 0;
+               return -ENOMEM;
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+}
+
+/**
+ * stmmac_add_xsk_umem() - add to UMEM auxiliary data
+ * @priv: gemac main structure
+ * @umem: allocated UMEM
+ * @qid: queue number of the net device
+ *
+ * Return: 0 - ok      -ENOMEM - fail
+ */
+static int stmmac_add_xsk_umem(struct stmmac_priv *priv, struct xdp_umem *umem,
+                              u16 qid)
+{
+       int err;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       err = stmmac_alloc_xsk_umems(priv);
+       if (err)
+               return err;
+
+       priv->xsk_umems[qid] = umem;
+       priv->num_xsk_umems_used++;
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+}
+
+/**
+ * stmmac_xsk_umem_dma_map() - map DMA memory for UMEM descriptors
+ * @priv: gemac main structure
+ * @umem: associated UMEM
+ *
+ * Return: 0 - ok      -ENOMEM - fail
+ */
+static int stmmac_xsk_umem_dma_map(struct stmmac_priv *priv,
+                                  struct xdp_umem *umem)
+{
+       struct device *dev = priv->device;
+       unsigned int i, j;
+       dma_addr_t dma;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       for (i = 0; i < umem->npgs; i++) {
+               dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
+                                        DMA_BIDIRECTIONAL, DMA_ATTR);
+               if (dma_mapping_error(dev, dma))
+                       goto out_unmap;
+
+               umem->pages[i].dma = dma;
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+
+out_unmap:
+       for (j = 0; j < i; j++) {
+               dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
+                                    DMA_BIDIRECTIONAL, DMA_ATTR);
+               umem->pages[j].dma = 0;
+       }
+
+       return -ENOMEM;
+}
+
+/**
+ * stmmac_xdp_setup() - setup new XDP filter
+ * @dev: gemac main structure
+ * @prog: filter program
+ *
+ * Return: 0 - ok
+ */
+static int stmmac_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+       struct bpf_prog *old_prog;
+       bool need_reset;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       old_prog = xchg(&priv->xdp_prog, prog);
+       need_reset = (!!prog != !!old_prog);
+
+       if (old_prog)
+               bpf_prog_put(old_prog);
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+}
+
+/**
+ * free_dma_rx_desc_resources_xsk() - free DMA descriptors for every ring
+ * @priv: gemac main structure
+ */
+void free_dma_rx_desc_resources_xsk(struct stmmac_priv *priv)
+{
+       u32 rx_count = priv->plat->rx_queues_to_use;
+       u32 queue;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       /* Free RX queue resources */
+       for (queue = 0; queue < rx_count; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               /* Free DMA regions of consistent memory previously allocated */
+               if (!priv->extend_desc)
+                       dma_free_coherent(priv->device,
+                                         DMA_RX_SIZE * sizeof(struct dma_desc),
+                                         rx_q->dma_rx, rx_q->dma_rx_phy);
+               else
+                       dma_free_coherent(priv->device,
+                                         DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+                                         rx_q->dma_erx, rx_q->dma_rx_phy);
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+}
+
+/**
+ * alloc_dma_rx_desc_resources_xsk() - allocate DMA descriptors for every ring
+ * @priv: gemac main structure
+ *
+ * Return: 0 - ok      -ENOMEM - fail
+ */
+int alloc_dma_rx_desc_resources_xsk(struct stmmac_priv *priv)
+{
+       u32 rx_count = priv->plat->rx_queues_to_use;
+       u32 queue;
+       u32 err_queue;
+       size_t len;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       /* RX queues buffers and DMA */
+       for (queue = 0; queue < rx_count; ++queue) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               rx_q->queue_index = queue;
+               rx_q->priv_data = priv;
+
+               if (priv->extend_desc) {
+                       len = DMA_RX_SIZE * sizeof(struct dma_extended_desc);
+                       rx_q->dma_erx = dma_alloc_coherent(priv->device,
+                                                          len,
+                                                          &rx_q->dma_rx_phy,
+                                                          GFP_KERNEL);
+                       if (!rx_q->dma_erx)
+                               goto err_dma;
+               } else {
+                       len = DMA_RX_SIZE * sizeof(struct dma_desc);
+                       rx_q->dma_rx = dma_alloc_coherent(priv->device,
+                                                         len,
+                                                         &rx_q->dma_rx_phy,
+                                                         GFP_KERNEL);
+                       if (!rx_q->dma_rx)
+                               goto err_dma;
+               }
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+
+err_dma:
+       pr_err("AF_XDP: Can not allocate DMA coherent memory!\n");
+
+       err_queue = queue;
+       for (queue = 0; queue < err_queue; ++queue) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               if (priv->extend_desc) {
+                       len = DMA_RX_SIZE * sizeof(struct dma_extended_desc);
+                       dma_free_coherent(priv->device,
+                                        len, rx_q->dma_erx, rx_q->dma_rx_phy);
+               } else {
+                       len = DMA_RX_SIZE * sizeof(struct dma_desc);
+                       dma_free_coherent(priv->device,
+                                         len,  rx_q->dma_rx, rx_q->dma_rx_phy);
+               }
+       }
+
+       return -ENOMEM;
+}
+
+/**
+ * stmmac_txrx_ring_disable() - stop and free resources for ring. Stop DMA engine
+ * @priv: gemac main structure
+ * @ring: number of associated ring
+ */
+static void stmmac_txrx_ring_disable(struct stmmac_priv *priv, int ring)
+{
+       struct stmmac_channel *ch = &priv->channel[ring];
+       u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+       u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       /* Sanity check */
+       if (ring >= maxq)
+               return;
+
+       /* Stop GEMAC engine */
+       stmmac_mac_set(priv, priv->ioaddr, false);
+       stmmac_stop_all_dma(priv);
+
+       /* Wait for the last transactions to finish */
+       msleep(100);
+
+       netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, ring));
+
+       /* Everything is ready to stop NAPIs */
+       if (ring < rx_queues_cnt)
+               napi_disable(&ch->rx_napi);
+       if (ring < tx_queues_cnt)
+               napi_disable(&ch->tx_napi);
+
+       if (priv->num_xsk_umems_used && priv->xsk_umems[ring]) {
+               /* Disable UMEM resources */
+               DBG("%s: UMEM memory model disable\n", __FUNCTION__);
+
+               xdp_do_flush_map();
+               xdp_rxq_info_unreg(&priv->rx_queue[ring].xdp_rxq);
+               stmmac_free_frames_for_xsk(priv, ring);
+               free_dma_rx_desc_resources_xsk(priv);
+       } else {
+               /* Disable resources in case of using pool of pages */
+               DBG("%s: page pool memory model disable\n", __FUNCTION__);
+
+               free_dma_rx_desc_resources(priv);
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+}
+
+/**
+ * stmmac_reset_watchdog_event() - defer the transmit start time to avoid a
+ * network scheduler timeout.
+ * @priv: gemac main structure
+ * @ring: ring number
+ *
+ * Reset the start time to allow acquiring UMEM descriptors. It would be better
+ * to disable the ring until it owns a UMEM, but for now it is done this way.
+ */
+static void stmmac_reset_watchdog_event(struct stmmac_priv *priv, int ring)
+{
+       struct netdev_queue *txq;
+
+       txq = netdev_get_tx_queue(priv->dev, ring);
+       txq->trans_start = jiffies + priv->dev->watchdog_timeo;
+}
+
+/**
+ * stmmac_txrx_ring_enable() - allocate resources and run ring. Start DMA engine
+ * @priv: gemac main structure
+ * @ring: number of associated ring
+ *
+ * Return: 0 - ok      -ENOMEM - failure
+ */
+static int stmmac_txrx_ring_enable(struct stmmac_priv *priv, int ring)
+{
+       struct stmmac_channel *ch = &priv->channel[ring];
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[ring];
+       u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+       u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
+       bool enable_gemac = false;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       /* Sanity check */
+       if (ring >= maxq)
+               return -EPERM;
+
+       if (priv->num_xsk_umems_used && priv->xsk_umems[ring]) {
+               /* Allocate UMEM resources */
+               DBG("%s: UMEM memory model enable\n", __FUNCTION__);
+
+               priv->zca.free = stmmac_zca_free;
+               WARN_ON(xdp_rxq_info_reg_mem_model(&priv->rx_queue[ring].xdp_rxq,
+                                                  MEM_TYPE_ZERO_COPY,
+                                                  &priv->zca));
+
+               if (alloc_dma_rx_desc_resources_xsk(priv))
+                       goto err;
+               stmmac_alloc_frames_for_xsk(priv, ring);
+
+               stmmac_init_rx_service_timer(priv);
+       } else {
+               /* Allocate resources in case of using pool of pages */
+               DBG("%s: page pool memory model enable\n", __FUNCTION__);
+
+               if (alloc_dma_rx_desc_resources(priv))
+                       goto err;
+               init_dma_desc_rings(priv->dev, GFP_KERNEL);
+
+               /* Do a restricted setup instead of a full one because the driver isn't ready to run yet */
+               stmmac_hw_restrict_setup(priv->dev, true);
+
+               enable_gemac = true;
+       }
+
+       stmmac_reset_watchdog_event(priv, ring);
+       netif_tx_start_queue(netdev_get_tx_queue(priv->dev, ring));
+
+       stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                           rx_q->dma_rx_phy, ring);
+
+       rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+                            (DMA_RX_SIZE * sizeof(struct dma_desc));
+       stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+                              rx_q->rx_tail_addr, ring);
+
+       /* In case of UMEM these variables will be overridden in the initial refill */
+       rx_q->cur_rx = 0;
+       rx_q->dirty_rx = 0;
+
+       /* Ready to start NAPIs */
+       if (ring < rx_queues_cnt)
+               napi_enable(&ch->rx_napi);
+       if (ring < tx_queues_cnt)
+               napi_enable(&ch->tx_napi);
+
+       /* Enable GEMAC engine here in case of using page pool */
+       if (enable_gemac) {
+               stmmac_mac_set(priv, priv->ioaddr, true);
+               stmmac_start_all_dma(priv);
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+
+err:
+       pr_err("AF_XDP: can not enable ring %d\n", ring);
+       return -ENOMEM;
+}
+
+/**
+ * stmmac_umem_enable() - allocate resources and enable UMEM
+ * @priv: gemac main structure
+ * @umem: pointer to socket UMEM representation
+ * @qid: number of the queue to associate with
+ *
+ * Return: 0 - ok      < 0 - fail
+ */
+static int stmmac_umem_enable(struct stmmac_priv *priv, struct xdp_umem *umem,
+                             u16 qid)
+{
+       struct xdp_umem_fq_reuse *reuseq;
+       int err = -1;
+       bool if_running;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       if (qid >= priv->plat->rx_queues_to_use)
+               return -EINVAL;
+
+       err = xdp_rxq_info_reg(&priv->rx_queue[0].xdp_rxq, priv->dev, 0);
+       if (err)
+               return err;
+
+       reuseq = xsk_reuseq_prepare(DMA_RX_SIZE);
+       if (!reuseq)
+               return -ENOMEM;
+
+       if_running = netif_running(priv->dev);
+       if (if_running)
+               stmmac_txrx_ring_disable(priv, qid);
+
+       /* Setup UMEM and XDP auxiliary data */
+       if (stmmac_add_xsk_umem(priv, umem, qid))
+               return err;
+
+       xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+       err = stmmac_xsk_umem_dma_map(priv, umem);
+       if (err)
+               return err;
+
+       priv->xsk_umems[qid] = umem;
+
+       /* Enable rings */
+       if (if_running)
+               stmmac_txrx_ring_enable(priv, qid);
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+}
+
+/**
+ * stmmac_xsk_umem_dma_unmap() - free UMEM DMA resources
+ * @priv: gemac main structure
+ * @umem: associated UMEM
+ */
+static void stmmac_xsk_umem_dma_unmap(struct stmmac_priv *priv,
+                                     struct xdp_umem *umem)
+{
+       struct device *dev = priv->device;
+       unsigned int i;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       for (i = 0; i < umem->npgs; i++) {
+               dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+                                    DMA_BIDIRECTIONAL, DMA_ATTR);
+               umem->pages[i].dma = 0;
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+}
+
+/**
+ * stmmac_umem_disable() - free resources and disable UMEM
+ * @priv: gemac main structure
+ * @qid: number of the queue to associate with
+ *
+ * Return: 0 - ok      < 0 - fail
+ */
+static int stmmac_umem_disable(struct stmmac_priv *priv, u16 qid)
+{
+       struct xdp_umem *umem;
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[qid];
+       bool if_running;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       umem = xdp_get_umem_from_qid(priv->dev, qid);
+       if (!umem)
+               return -EINVAL;
+
+       if_running = netif_running(priv->dev);
+
+       if (if_running)
+               stmmac_txrx_ring_disable(priv, qid);
+
+       stmmac_xsk_umem_dma_unmap(priv, umem);
+       stmmac_remove_xsk_umem(priv, qid);
+
+       del_timer_sync(&rx_q->rx_init_timer);
+       del_timer_sync(&rx_q->rx_refill_timer);
+
+       if (if_running)
+               stmmac_txrx_ring_enable(priv, qid);
+
+       priv->xsk_umems = NULL;
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+}
+
+/**
+ * stmmac_umem_setup() - wrapper for enable/disable UMEM
+ * @priv: gemac main structure
+ * @umem: pointer to socket UMEM representation
+ * @qid: number of the associated queue
+ *
+ * Return: 0 - ok      < 0 - fail
+ */
+static int stmmac_umem_setup(struct stmmac_priv *priv, struct xdp_umem *umem,
+                            u16 qid)
+{
+       return umem ? stmmac_umem_enable(priv, umem, qid) :
+                     stmmac_umem_disable(priv, qid);
+}
+
+/**
+ * stmmac_bpf() - network stack callback to set up a BPF program or enable/disable a UMEM
+ * @dev: associated network device
+ * @bpf: network stack representation of bpf
+ *
+ * Return: 0 - ok      < 0 - fail
+ */
+int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       switch (bpf->command) {
+       case XDP_SETUP_PROG:
+               if (!priv->xsk_umems) {
+                       pr_err("AF_XDP: Copy mode is not supported\n");
+                       return -EPERM;
+               }
+               return stmmac_xdp_setup(dev, bpf->prog);
+       case XDP_QUERY_PROG:
+               bpf->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
+               return 0;
+       case XDP_SETUP_XSK_UMEM:
+               return stmmac_umem_setup(priv, bpf->xsk.umem,
+                                        bpf->xsk.queue_id);
+       default:
+               return -EINVAL;
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return -EPERM;
+}
+
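For reference, these ndo_bpf commands are normally driven from user space by libbpf: creating an AF_XDP socket registers the UMEM (the driver then sees XDP_SETUP_XSK_UMEM at bind time) and, unless told otherwise, loads a default redirect program (XDP_SETUP_PROG). Since stmmac_bpf() above rejects a program while no UMEM exists, that ordering matters for this driver. A hypothetical user-space sketch, not part of this patch; the interface name, queue number and frame count are assumptions, and error handling is trimmed.

        #include <stdlib.h>
        #include <unistd.h>
        #include <bpf/xsk.h>

        #define NUM_FRAMES      4096
        #define FRAME_SIZE      XSK_UMEM__DEFAULT_FRAME_SIZE

        int main(void)
        {
                struct xsk_ring_prod fq, tx;
                struct xsk_ring_cons cq, rx;
                struct xsk_umem *umem;
                struct xsk_socket *xsk;
                void *bufs;

                /* UMEM area shared between the kernel and user space */
                if (posix_memalign(&bufs, getpagesize(), NUM_FRAMES * FRAME_SIZE))
                        return 1;

                /* Register the UMEM; the driver handles XDP_SETUP_XSK_UMEM on bind */
                if (xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE, &fq, &cq, NULL))
                        return 1;

                /* Bind to queue 0 of eth0; libbpf also attaches its default XDP program */
                if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
                        return 1;

                /* ... populate the FILL ring and consume the RX ring here ... */

                xsk_socket__delete(xsk);
                xsk_umem__delete(umem);
                free(bufs);
                return 0;
        }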
+/**
+ * stmmac_xdp_xmit() - transmit XDP frames redirected to this device
+ * @dev: network device to transmit
+ * @n: number of XDP frames
+ * @xdp: pointer to xdp frames array
+ * @flags: extra flags from network stack
+ *
+ * Return: number of redirected frames
+ */
+int stmmac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdp,
+                   u32 flags)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+       int drops = 0;
+       int result;
+       int i;
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       for (i = 0; i < n; ++i) {
+               result = stmmac_xmit_xdp_frame(priv, xdp[i]);
+               if (result != STMMAC_XDP_TX) {
+                       xdp_return_frame_rx_napi(xdp[i]);
+                       drops++;
+               }
+       }
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return n - drops;
+}
+
+/**
+ * stmmac_xsk_wakeup() - Wake up Rx or/and Tx queue
+ * @dev: associated network device
+ * @queue_id: number of the queue
+ * @flags: requested action (XDP_WAKEUP_RX and/or XDP_WAKEUP_TX)
+ *
+ * A user-space application or the network stack can wake up the driver when
+ * it has run out of resources.
+ *
+ * Return: 0 - ok
+ */
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       DBG("%s-->\n", __FUNCTION__);
+
+       /* Wake up request can be sent from poll of socket */
+
+       if (flags & XDP_WAKEUP_TX)
+               /* Run NAPI tx engine to kick transfer or clean descriptors */
+               if (likely(napi_schedule_prep(&priv->channel[queue_id].tx_napi))) {
+                       __napi_schedule(&priv->channel[queue_id].tx_napi);
+                       //xsk_clear_tx_need_wakeup(priv->xsk_umems[queue_id]);
+               }
+
+       if (flags & XDP_WAKEUP_RX)
+               /* Run NAPI rx engine to start receiving or clean descriptors */
+               if (likely(napi_schedule_prep(&priv->channel[queue_id].rx_napi))) {
+                       __napi_schedule(&priv->channel[queue_id].rx_napi);
+                       //xsk_clear_rx_need_wakeup(priv->xsk_umems[queue_id]);
+               }
+
+       DBG("%s<--\n", __FUNCTION__);
+
+       return 0;
+}
+
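On the other side of this interface, a user-space consumer normally checks the need_wakeup flags (set in stmmac_rx_xsk() and handled above) before issuing a syscall, so the driver is only kicked when it asked to be. A hypothetical fragment continuing the libbpf sketch shown after stmmac_bpf(), not part of this patch:

        #include <poll.h>
        #include <sys/socket.h>
        #include <bpf/xsk.h>

        static void kick_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *fq,
                                   struct xsk_ring_prod *tx)
        {
                struct pollfd pfd = { .fd = xsk_socket__fd(xsk), .events = POLLIN };

                /* FILL ring: poll() reaches stmmac_xsk_wakeup() with XDP_WAKEUP_RX */
                if (xsk_ring_prod__needs_wakeup(fq))
                        poll(&pfd, 1, 0);

                /* TX ring: a dummy sendto() reaches stmmac_xsk_wakeup() with XDP_WAKEUP_TX */
                if (xsk_ring_prod__needs_wakeup(tx))
                        sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
        }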
+/**
+ * stmmac_init_dma_engine_xsk() - initialize DMA engine in case of using XSK
+ * @priv: gemac main structure
+ */
+int stmmac_init_dma_engine_xsk(struct stmmac_priv *priv)
+{
+       u32 rx_channels_count = priv->plat->rx_queues_to_use;
+       u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+       struct stmmac_rx_queue *rx_q;
+       struct stmmac_tx_queue *tx_q;
+       u32 chan = 0;
+       int atds = 0;
+       int ret = 0;
+
+       if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
+               dev_err(priv->device, "Invalid DMA configuration\n");
+               return -EINVAL;
+       }
+
+       if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+               atds = 1;
+
+       /* DMA Configuration */
+       stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+       if (priv->plat->axi)
+               stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
+       /* DMA CSR Channel configuration */
+       for (chan = 0; chan < dma_csr_ch; chan++)
+               stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+       /* DMA RX Channel Configuration */
+       for (chan = 0; chan < rx_channels_count; chan++) {
+               rx_q = &priv->rx_queue[chan];
+
+               stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                                   rx_q->dma_rx_phy, chan);
+
+               rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+                                    (DMA_RX_SIZE * sizeof(struct dma_desc));
+               stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+                                      rx_q->rx_tail_addr, chan);
+       }
+
+       /* DMA TX Channel Configuration */
+       for (chan = 0; chan < tx_channels_count; chan++) {
+               tx_q = &priv->tx_queue[chan];
+
+               stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                                   tx_q->dma_tx_phy, chan);
+
+               tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+               stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+                                      tx_q->tx_tail_addr, chan);
+       }
+
+       return ret;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xsk.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xsk.h
new file mode 100644 (file)
index 0000000..534c8c4
--- /dev/null
@@ -0,0 +1,49 @@
+#ifndef _STMMAC_XSK_H_
+#define _STMMAC_XSK_H_
+
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+#include <linux/bpf_trace.h>
+#include "stmmac.h"
+
+//#define DEBUG_XSK
+#ifdef DEBUG_XSK
+#define DBG(...)       pr_info(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
+#define STMMAC_XDP_PASS                0
+#define STMMAC_XDP_CONSUMED    BIT(0)
+#define STMMAC_XDP_TX          BIT(1)
+#define STMMAC_XDP_REDIR       BIT(2)
+
+#define STMMAC_TX_SOURCE_RESET 0
+#define STMMAC_TX_SOURCE_SKB   0
+#define STMMAC_TX_SOURCE_UMEM  1
+#define STMMAC_TX_SOURCE_FRAME 2
+
+#define STMMAC_TX_XMIT_SAFE_AMOUNT             40
+
+/* Refill timer restart time */
+#define STMMAC_REFILL_GREEDILY_MS              1
+#define STMMAC_REFILL_MS                       500
+#define STMMAC_INITIAL_REFILL_MS               500
+
+#define STMMAC_REFILL_GREEDILY_THRESHOLD       10
+
+#define DMA_ATTR \
+       (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+/* NDO prototypes */
+int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int stmmac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdp, u32 flags);
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+
+/* Inner usage */
+inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue);
+int stmmac_rx_xsk(struct stmmac_priv *priv, int limit, u32 queue);
+int stmmac_xdp_transmit_zc(struct stmmac_priv *priv, int napi_budget);
+int stmmac_init_dma_engine_xsk(struct stmmac_priv *priv);
+
+#endif
index dcf2051ef2c04f1a6f193b0e72b52797d0f0510c..897b4c4e324a7d865a1f3aaabd9b92c0009d9c50 100644 (file)
@@ -10,6 +10,12 @@ menuconfig MDIO_DEVICE
 
 if MDIO_DEVICE
 
+config BAIKAL_MDIO
+        tristate "Driver for the Baikal Electronics GPIO MDIO bus"
+        depends on OF && GPIOLIB && MDIO_BITBANG
+        ---help---
+          Support for the Baikal Electronics GPIO MDIO bus.
+
 config MDIO_BUS
        tristate
        default m if PHYLIB=m
@@ -258,6 +264,13 @@ config SFP
        depends on HWMON || HWMON=n
        select MDIO_I2C
 
+config 88X2222_PHY
+        tristate "Driver for Marvell 88X2222 Transceiver"
+        ---help---
+          Support for the Marvell Integrated Dual-port
+          Multi-speed Ethernet Transceiver.
+          Currently supports the 88X2222.
+
 config ADIN_PHY
        tristate "Analog Devices Industrial Ethernet PHYs"
        help
index a03437e091f3b995e5efb810a804625b4f868ca5..1de7307e67dfdb0b72609a628b150b5b5ebf6dec 100644 (file)
@@ -94,3 +94,6 @@ obj-$(CONFIG_STE10XP)         += ste10Xp.o
 obj-$(CONFIG_TERANETICS_PHY)   += teranetics.o
 obj-$(CONFIG_VITESSE_PHY)      += vitesse.o
 obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
+obj-$(CONFIG_BAIKAL_MDIO)       += mdio-be-gpio.o
+obj-$(CONFIG_88X2222_PHY)       += mv88x2222.o
+
diff --git a/drivers/net/phy/mdio-be-gpio.c b/drivers/net/phy/mdio-be-gpio.c
new file mode 100644 (file)
index 0000000..01a2de7
--- /dev/null
@@ -0,0 +1,540 @@
+/*
+ * Baikal Electronics SFP+ mezzanine card MDIO bus driver
+ * Supports OpenFirmware.
+ *
+ * Based on Bitbanged MDIO support driver.
+ * drivers/net/phy/mdio-bitbang.c
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ * Copyright (c) 2007 Freescale Semiconductor
+ *
+ * Based on CPM2 MDIO code which is:
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ *  by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Partially based on the GPIO-based MDIO bitbang driver.
+ * drivers/net/phy/mdio-gpio.c
+ *
+ * Copyright (c) 2015 Baikal Electronics JSC.
+ *
+ * Author:
+ *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Baikal Electronics JSC nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
+
+#define MDIO_READ                      2
+#define MDIO_WRITE                     1
+
+#define MDIO_C45                       (1<<15)
+#define MDIO_C45_ADDR          (MDIO_C45 | 0)
+#define MDIO_C45_READ          (MDIO_C45 | 3)
+#define MDIO_C45_WRITE         (MDIO_C45 | 1)
+#define MDIO_C45_READ_INC      (MDIO_C45 | 2)
+
+/*
+ * Minimum MDC period is 400 ns, plus some margin for error.
+ * MDIO_DELAY is done twice per period.
+ * Baikal-T SoC GPIO pins trigger clock is 1 MHz.
+ */
+#define MDIO_DELAY 14
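+/* With udelay(MDIO_DELAY) on each half-period the resulting MDC clock is
+ * roughly 1000000 / (2 * 14) ~= 36 kHz, comfortably slower than the 2.5 MHz
+ * maximum implied by the 400 ns minimum period quoted above.
+ */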
+
+/*
+ * The PHY may take up to 300 ns to produce data, plus some margin
+ * for error.
+ * Baikal-T SoC GPIO pins trigger clock is 1 MHz.
+ */
+#define MDIO_READ_DELAY_US     10
+#define MDIO_RESET_DELAY_US    100
+
+/*
+ * Driver specific defines
+ */
+#define DRIVER_NAME    "Baikal Electronics mezzanine card MDIO bus driver"
+#define DRIVER_VERSION "1.04a"
+#define DRIVER_DEV     "be-mdio"
+
+/* Default GPIO trigger freq is 1 MHz */
+#define MDIO_TRIG_FREQ 1000000
+
+/*
+ * Basic driver function
+ */
+struct be_mdio_data {
+       struct phy_device *phydev;
+       struct mii_bus *mii;
+       struct clk *clk;
+       int mdc, mdio, mdo, rst;
+       int mdc_active_low, mdio_active_low;
+       int mdo_active_low, rst_active_low;
+       unsigned int delay, read_delay, reset_delay;
+       /* PHY addresses to be ignored when probing */
+       unsigned int phy_mask;
+       /* IRQ mask */
+       int irqs[PHY_MAX_ADDR];
+};
+
+/*
+ * Physical level of MDIO bus
+ */
+static inline void be_mdio_dir(struct be_mdio_data *data, int dir)
+{
+       if (data->mdo >= 0) {
+               /* Separate output pin. Always set its value to high
+                * when changing direction. If direction is input,
+                * assume the pin serves as pull-up. If direction is
+                * output, the default value is high.
+                */
+               gpio_set_value(data->mdo, 1 ^ data->mdo_active_low);
+               return;
+       }
+
+       if (dir)
+               gpio_direction_output(data->mdio, 1 ^ data->mdio_active_low);
+       else
+               gpio_direction_input(data->mdio);
+}
+
+static inline int be_mdio_get(struct be_mdio_data *data)
+{
+       return gpio_get_value(data->mdio) ^ data->mdio_active_low;
+}
+
+static inline void be_mdio_set(struct be_mdio_data *data, int what)
+{
+       if (data->mdo >= 0)
+               gpio_set_value(data->mdo, what ^ data->mdo_active_low);
+       else
+               gpio_set_value(data->mdio, what ^ data->mdio_active_low);
+}
+
+static inline void be_mdc_set(struct be_mdio_data *data, int what)
+{
+       gpio_set_value(data->mdc, what ^ data->mdc_active_low);
+}
+
+/*
+ * Logical level of MDIO bus
+ */
+
+/* MDIO must already be configured as output. */
+static void be_mdio_send_bit(struct be_mdio_data *data, int val) {
+       be_mdio_set(data, val);
+       //ndelay(MDIO_DELAY);   //udelay(1..3);ndelay(400);
+       udelay(MDIO_DELAY);
+       be_mdc_set(data, 1);
+       //ndelay(MDIO_DELAY);   //udelay(1..3);ndelay(400);
+       udelay(MDIO_DELAY);
+       be_mdc_set(data, 0);
+}
+
+/* MDIO must already be configured as output. */
+/*static void be_mdio_send_ta(struct be_mdio_data *data, int val) {
+       be_mdio_set(data, val);
+       mdelay(data->read_delay);
+       be_mdc_set(data, 1);
+       mdelay(data->read_delay);
+       be_mdc_set(data, 0);
+}*/
+
+/* MDIO must already be configured as input. */
+static int be_mdio_get_bit(struct be_mdio_data *data) {
+       udelay(MDIO_DELAY);
+       be_mdc_set(data, 1);
+       udelay(MDIO_DELAY);
+       be_mdc_set(data, 0);
+       return be_mdio_get(data);
+}
+
+/* MDIO must already be configured as output. */
+static void be_mdio_send_num(struct be_mdio_data *data, u16 val, int bits) {
+       int i;
+
+       be_mdio_dir(data, 1);
+
+       for (i = bits - 1; i >= 0; i--)
+               be_mdio_send_bit(data, (val >> i) & 1);
+}
+
+/* MDIO must already be configured as input. */
+static u16 be_mdio_get_num(struct be_mdio_data *data, int bits) {
+       int i;
+       u16 ret = 0;
+
+       be_mdio_dir(data, 0);
+
+       for (i = bits - 1; i >= 0; i--) {
+               ret <<= 1;
+               ret |= be_mdio_get_bit(data);
+       }
+       return ret;
+}
+
+/*
+ * Utility to send the preamble, address, and
+ * register (common to read and write).
+ */
+static void be_mdio_cmd(struct be_mdio_data *data, int op, u8 phy, u8 reg) {
+       int i;
+
+       be_mdio_dir(data, 1);
+       /*
+        * Send a 32 bit preamble ('1's) with an extra '1' bit for good
+        * measure.  The IEEE spec says this is a PHY optional
+        * requirement. This means that we are doing more preambles
+        * than we need, but it is safer and will be much more robust.
+        */
+       for (i = 0; i < 32; i++)
+               be_mdio_send_bit(data, 1);
+       /*
+        * Send the start bit (01) and the read opcode (10) or write (01).
+        * Clause 45 operation uses 00 for the start and 11, 10 for
+        * read/write.
+        */
+       be_mdio_send_bit(data, 0);
+       if (op & MDIO_C45)
+               be_mdio_send_bit(data, 0);
+       else
+               be_mdio_send_bit(data, 1);
+       be_mdio_send_bit(data, (op >> 1) & 1);
+       be_mdio_send_bit(data, (op >> 0) & 1);
+
+       be_mdio_send_num(data, phy, 5);
+       be_mdio_send_num(data, reg, 5);
+}
+
+/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the
+   lower 16 bits of the 21 bit address. This transfer is done identically to a
+   MDIO_WRITE except for a different code. To enable clause 45 mode, OR
+   MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices
+   can exist on the same bus. Normal devices should ignore the MDIO_ADDR
+   phase. */
+static int be_mdio_cmd_addr(struct be_mdio_data *data, int phy, u32 addr) {
+       unsigned int dev_addr = (addr >> 16) & 0x1F;
+       unsigned int reg = addr & 0xFFFF;
+
+       be_mdio_cmd(data, MDIO_C45_ADDR, phy, dev_addr);
+
+       /* send the turnaround (10) */
+       be_mdio_send_bit(data, 1);
+       be_mdio_send_bit(data, 0);
+
+       be_mdio_send_num(data, reg, 16);
+
+       be_mdio_dir(data, 0);
+       be_mdio_get_bit(data);
+
+       return dev_addr;
+}
+
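For reference, the 21-bit address unpacked above is the standard clause-45 encoding passed in by callers through the MDIO core. A hypothetical example of reading the PMA/PMD STAT1 register over a bus registered by this driver (bus and phy_addr are assumed to come from the caller), not part of this patch:

        #include <linux/mdio.h>
        #include <linux/phy.h>

        static int read_pma_stat1(struct mii_bus *bus, int phy_addr)
        {
                /* be_mdio_cmd_addr() takes the device address from bits 20:16
                 * (MDIO_MMD_PMAPMD == 1) and the register from bits 15:0.
                 */
                return mdiobus_read(bus, phy_addr,
                                    MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | MDIO_STAT1);
        }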
+static int be_mdio_read(struct mii_bus *bus, int phy, int reg) {
+       struct be_mdio_data *data = bus->priv;
+       int ret, i;
+
+       if (reg & MII_ADDR_C45) {
+               reg = be_mdio_cmd_addr(data, phy, reg);
+               be_mdio_cmd(data, MDIO_C45_READ, phy, reg);
+       } else
+               be_mdio_cmd(data, MDIO_READ, phy, reg);
+
+       be_mdio_dir(data, 0);
+
+       /* check the turnaround bit: the PHY should be driving it to zero */
+       if (be_mdio_get_bit(data) != 0) {
+               /* PHY didn't drive TA low -- flush any bits it may be trying to send. */
+               for (i = 0; i < 32; i++)
+                       be_mdio_get_bit(data);
+
+               return 0xffff;
+       }
+
+       ret = be_mdio_get_num(data, 16);
+       be_mdio_get_bit(data);
+       return ret;
+}
+
+static int be_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) {
+       struct be_mdio_data *data = bus->priv;
+
+       if (reg & MII_ADDR_C45) {
+               reg = be_mdio_cmd_addr(data, phy, reg);
+               be_mdio_cmd(data, MDIO_C45_WRITE, phy, reg);
+       } else
+               be_mdio_cmd(data, MDIO_WRITE, phy, reg);
+
+       /* send the turnaround (10) */
+       be_mdio_send_bit(data, 1);
+       be_mdio_send_bit(data, 0);
+
+       be_mdio_send_num(data, val, 16);
+
+       be_mdio_dir(data, 0);
+       be_mdio_get_bit(data);
+
+       return 0;
+}
+
+static int __maybe_unused be_mdio_reset(struct mii_bus *bus) {
+       struct be_mdio_data *data = bus->priv;
+
+       if (data->rst < 0)
+               return 0;
+
+       gpio_set_value(data->rst, 1 ^ data->rst_active_low);
+       mdelay(data->reset_delay);
+
+       gpio_set_value(data->rst, 0 ^ data->rst_active_low);
+       mdelay(data->reset_delay);
+
+       return 0;
+}
+
+/*
+ * MDIO bus open firmware data
+ */
+static void *be_mdio_of_get_data(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct be_mdio_data *pdata;
+       enum of_gpio_flags flags;
+       unsigned int freq = 0;
+       int ret;
+
+       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
+
+       ret =  of_get_named_gpio_flags(np, "mdc-pin", 0, &flags);
+       if (ret < 0)
+               return NULL;
+
+       pdata->mdc = ret;
+       pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+       ret =  of_get_named_gpio_flags(np, "mdio-pin", 0, &flags);
+       if (ret < 0)
+               return NULL;
+       pdata->mdio = ret;
+       pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+       pdata->mdo = -1;
+       ret = of_get_named_gpio_flags(np, "mdo-pin", 0, &flags);
+       if (ret >= 0) {
+               pdata->mdo = ret;
+               pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW;
+       }
+
+       pdata->rst = -1;
+       ret =  of_get_named_gpio_flags(np, "rst-pin", 0, &flags);
+       if (ret >= 0) {
+               pdata->rst = ret;
+               pdata->rst_active_low = flags & OF_GPIO_ACTIVE_LOW;
+       }
+
+       pdata->clk = of_clk_get(np, 0);
+
+       if (IS_ERR(pdata->clk))
+               of_property_read_u32(pdev->dev.of_node, "clock-frequency", &freq);
+       else
+               freq =  clk_get_rate(pdata->clk);
+
+       if (!freq)
+               freq = MDIO_TRIG_FREQ;
+
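+       /* GPIO trigger clock period in microseconds (1 MHz default -> 1 us) */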
+       ret = 1000000 / freq;
+
+       /* Timing */
+       pdata->read_delay = (ret > MDIO_READ_DELAY_US) ? ret : MDIO_READ_DELAY_US;
+       pdata->reset_delay = (ret > MDIO_RESET_DELAY_US) ?  ret : MDIO_RESET_DELAY_US;
+
+       return pdata;
+}
+
+/*
+ * MDIO bus init
+ */
+static struct mii_bus *be_mdio_bus_init(struct device *dev, struct be_mdio_data *pdata, int bus_id) {
+       struct mii_bus *bus;
+       int i;
+
+       bus = mdiobus_alloc();
+       if (!bus) {
+               dev_err(dev, "Unable to allocate MDIO bus\n");
+               goto error;
+       }
+
+       bus->read = be_mdio_read;
+       bus->write = be_mdio_write;
+       bus->priv = pdata;
+
+       bus->name = "Baikal GPIO MDIO bus";
+
+       bus->phy_mask = pdata->phy_mask;
+       memcpy(bus->irq, pdata->irqs, sizeof(pdata->irqs));
+       bus->parent = dev;
+
+       if (bus->phy_mask == ~0) {
+               dev_err(dev, "All PHYs are masked - nothing to attach\n");
+               goto error_free_bus;
+       }
+
+       for (i = 0; i < PHY_MAX_ADDR; i++)
+               if (!bus->irq[i])
+                       bus->irq[i] = PHY_POLL;
+
+       snprintf(bus->id, MII_BUS_ID_SIZE, "mdio-gpio%d", bus_id);
+
+       if (devm_gpio_request(dev, pdata->mdc, "mdc")) {
+               dev_err(dev, "MDC line (gpio%d) request failed\n", pdata->mdc);
+               goto error_free_bus;
+       }
+
+       if (devm_gpio_request(dev, pdata->mdio, "mdio")) {
+               dev_err(dev, "MDIO line (gpio%d) request failed\n", pdata->mdio);
+               goto error_free_bus;
+       }
+
+       if (pdata->mdo >= 0) {
+               if (devm_gpio_request(dev, pdata->mdo, "mdo"))
+                       goto error_free_bus;
+               gpio_direction_output(pdata->mdo, 1);
+               gpio_direction_input(pdata->mdio);
+       }
+
+       if (pdata->rst >= 0) {
+               if (devm_gpio_request(dev, pdata->rst, "rst"))
+                       pdata->rst = -1;
+               else
+                       gpio_direction_output(pdata->rst, 0);
+       }
+
+       gpio_direction_output(pdata->mdc, 0);
+
+       dev_set_drvdata(dev, bus);
+
+       return bus;
+
+error_free_bus:
+       mdiobus_free(bus);
+error:
+       return NULL;
+}
+
+static int be_mdio_probe(struct platform_device *pdev)
+{
+       struct be_mdio_data *pdata;
+       struct mii_bus *bus;
+       int ret, bus_id;
+
+       if (pdev->dev.of_node) {
+               pdata = be_mdio_of_get_data(pdev);
+               bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+               if (bus_id < 0) {
+                       dev_warn(&pdev->dev, "failed to get alias id\n");
+                       bus_id = 0;
+               }
+       } else {
+               pdata = dev_get_platdata(&pdev->dev);
+               bus_id = pdev->id;
+       }
+
+       if (!pdata) {
+               dev_err(&pdev->dev, "No MDIO bus platform data\n");
+               return -ENODEV;
+       }
+
+       bus = be_mdio_bus_init(&pdev->dev, pdata, bus_id);
+       if (!bus) {
+               dev_err(&pdev->dev, "MDIO bus init failed\n");
+               return -ENODEV;
+       }
+
+       if (pdev->dev.of_node)
+               ret = of_mdiobus_register(bus, pdev->dev.of_node);
+       else
+               ret = mdiobus_register(bus);
+
+       if (ret) {
+               dev_err(&pdev->dev, "MDIO bus register failed\n");
+               goto err_mdiobus_register;
+       }
+
+       // bus->reset = be_mdio_reset;
+
+       pdata->mii = bus;
+       dev_info(&pdev->dev, "MDIO ptr=%p\n", bus);
+
+       dev_info(&pdev->dev, DRIVER_NAME);
+       dev_info(&pdev->dev, "Version: " DRIVER_VERSION);
+
+       return 0;
+
+err_mdiobus_register:
+       mdiobus_free(bus);
+
+       return ret;
+}
+
+static int be_mdio_remove(struct platform_device *pdev)
+{
+
+       struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+       mdiobus_unregister(bus);
+       mdiobus_free(bus);
+
+       return 0;
+}
+
+static struct of_device_id be_mdio_of_match[] = {
+       { .compatible = "be,mdio-gpio", },
+       { /* sentinel */ }
+};
+
+static struct platform_driver be_mdio_driver = {
+       .probe = be_mdio_probe,
+       .remove = be_mdio_remove,
+       .driver         = {
+               .name   = "be-mdio",
+               .of_match_table = be_mdio_of_match,
+       },
+};
+
+module_platform_driver(be_mdio_driver);
+
+MODULE_ALIAS("platform:be-mdio");
+MODULE_AUTHOR("Dmitry Dunaev");
+MODULE_LICENSE("Proprinetary");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION(DRIVER_NAME);
diff --git a/drivers/net/phy/mv88x2222.c b/drivers/net/phy/mv88x2222.c
new file mode 100644 (file)
index 0000000..1d2cdd9
--- /dev/null
@@ -0,0 +1,473 @@
+/*
+ * drivers/net/phy/mv88x2222.c
+ *
+ * Driver for Marvell Integrated Dual-port
+ * Multi-speed Ethernet Transceiver 88x2222
+ *
+ * Copyright (c) 2015, 2016, 2020 Baikal Electronics JSC.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Baikal Electronics JSC nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ */
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mdio.h>
+#include <linux/marvell_phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+
+MODULE_DESCRIPTION("Marvell Ethernet Transceiver driver");
+MODULE_LICENSE("Proprietary");
+
+/* 31.F002 Line side mode (ch.3.1.2, pg.46) */
+#define MV_MODE_LINE_SHF                       8
+#define MV_MODE_LINE_10GBR                     (0x71UL << 8)
+#define MV_MODE_LINE_10GBW                     (0x74UL << 8)
+#define MV_MODE_LINE_2GBX_AN_OFF               (0x76UL << 8)
+#define MV_MODE_LINE_1GBR_AN_OFF               (0x72UL << 8)
+#define MV_MODE_LINE_1GBR_AN_ON                        (0x73UL << 8)
+#define MV_MODE_LINE_SGMII_SYS_AN_OFF          (0x7CUL << 8)
+#define MV_MODE_LINE_SGMII_SYS_AN_ON           (0x7DUL << 8)
+#define MV_MODE_LINE_SGMII_NET_AN_OFF          (0x7EUL << 8)
+#define MV_MODE_LINE_SGMII_NET_AN_ON           (0x7FUL << 8)
+#define MV_MODE_LINE_DEFAULT                   MV_MODE_LINE_10GBR
+#define MV_MODE_LINE_OF_NAME                   "mv,line-mode"
+
+/* 31.F002 Host side mode (ch.3.1.2, pg.46) */
+#define MV_MODE_HOST_SHF                       0
+#define MV_MODE_HOST_10GBR                     (0x71UL << 0)
+#define MV_MODE_HOST_10GBX2                    (0x72UL << 0)
+#define MV_MODE_HOST_10GBX4                    (0x73UL << 0)
+#define MV_MODE_HOST_2GBX_AN_OFF               (0x76UL << 0)
+#define MV_MODE_HOST_1GBR_AN_OFF               (0x7AUL << 0)
+#define MV_MODE_HOST_1GBR_AN_ON                        (0x7BUL << 0)
+#define MV_MODE_HOST_SGMII_SYS_AN_OFF          (0x7CUL << 0)
+#define MV_MODE_HOST_SGMII_SYS_AN_ON           (0x7DUL << 0)
+#define MV_MODE_HOST_SGMII_NET_AN_OFF          (0x7EUL << 0)
+#define MV_MODE_HOST_SGMII_NET_AN_ON           (0x7FUL << 0)
+#define MV_MODE_HOST_DEFAULT                   MV_MODE_HOST_10GBR
+#define MV_MODE_HOST_OF_NAME                   "mv,host-mode"
+
+/* 31.F402 Host side line muxing (ch.3.1.5, pg.48) */
+#define MV_ATT_10GBX2_SHF                      11
+#define MV_ATT_10GBX2_LANE_0145                        (0UL << 11)
+#define MV_ATT_10GBX2_LANE_0123                        (1UL << 11)
+#define MV_ATT_10GBR_SHF                       9
+#define MV_ATT_10GBR_LANE_0246                 (0UL << 9)
+#define MV_ATT_10GBR_LANE_0123                 (1UL << 9)
+#define MV_ATT_2GBR_SHF                                8
+#define MV_ATT_2GBR_LANE_0246                  (0UL << 8)
+#define MV_ATT_2GBR_LANE_0123                  (1UL << 8)
+#define MV_ATT_1GBR_SHF                                8
+#define MV_ATT_1GBR_LANE_0246                  (0UL << 8)
+#define MV_ATT_1GBR_LANE_0123                  (1UL << 8)
+#define MV_ATT_DEFAULT                         0
+#define MV_ATT_OF_NAME                         "mv,mux"
+
+/* 31.F003 Software reset (ch.3.2 pg.50) */
+#define MV_SW_RST_HOST_SHF                     7
+#define MV_SW_RST_HOST                         (1UL << 7)
+#define MV_SW_RST_LINE_SHF                     15
+#define MV_SW_RST_LINE                         (1UL << 15)
+#define MV_SW_RST_ALL                          (MV_SW_RST_HOST | MV_SW_RST_LINE)
+
+/* 31.F012 GPIO data */
+#define MV_GPIO_TXDISABLE_DATA_SHF             8
+
+/* 31.F013 Tristate Control */
+#define MV_GPIO_TXDISABLE_OUTP_EN_SHF          8
+
+/* 31.F016 Interrupt type 3 */
+#define MV_GPIO_TXDISABLE_FN_SHF               3
+#define MV_GPIO_TXDISABLE_FN_GPIO              0x1
+
+/* Devices in package and registers */
+#define MV_DEV_10GBW_IRQ_ENABLE                        0x8000
+#define MV_DEV_10GBW_IRQ_STATUS                        0x8001
+#define MV_DEV_10GBW_IRQ_REALTIME              0x8002
+
+#define MV_DEV_10GBR_ANEG               0x2000
+#define MV_DEV_10GBR_IRQ_ENABLE                        0x8000
+#define MV_DEV_10GBR_IRQ_STATUS                        0x8001
+#define MV_DEV_10GBR_IRQ_REALTIME              0x8002
+
+#define MV_DEV_GBX_IRQ_ENABLE                  0xA000
+#define MV_DEV_GBX_IRQ_STATUS                  0xA001
+#define MV_DEV_GBX_IRQ_REALTIME                        0xA002
+
+#define MV_DEV_MISC_IRQ_ENABLE                 0xF00A
+#define MV_DEV_MISC_IRQ_STATUS                 0xF00B
+
+#define MV_DEV_GPIO_DATA                       0xF012
+#define MV_DEV_GPIO_TRISTATE_CTL               0xF013
+#define MV_DEV_GPIO_INTERRUPT_TYPE_3           0xF016
+
+#define MV_DEV_CHIP_HOST_LINE                  0xF002
+#define MV_DEV_CHIP_RESET                      0xF003
+#define MV_DEV_CHIP_MUX                                0xF402
+#define MV_DEV_CHIP_IRQ_STATUS                 0xF420
+#define MV_DEV_CHIP_IRQ_CONTROL                        0xF421
+
+#define MV_RESET_DELAY_US                      500
+
+struct mode {
+       unsigned int mode_num;
+       char mode_name[20];     /* long enough for "SGMII_SYS_AN_OFF" plus NUL */
+};
+
+static const struct mode line_modes[] = {
+       {MV_MODE_LINE_10GBR, "KR"},
+       {MV_MODE_LINE_10GBW, "10GBW"},
+       {MV_MODE_LINE_2GBX_AN_OFF, "2GBX_AN_OFF"},
+       {MV_MODE_LINE_1GBR_AN_OFF, "1GBR_AN_OFF"},
+       {MV_MODE_LINE_1GBR_AN_ON, "1GBR_AN_ON"},
+       {MV_MODE_LINE_SGMII_SYS_AN_OFF, "SGMII_SYS_AN_OFF"},
+       {MV_MODE_LINE_SGMII_SYS_AN_ON, "SGMII_SYS_AN_ON"},
+       {MV_MODE_LINE_SGMII_NET_AN_OFF, "SGMII_NET_AN_OFF"},
+       {MV_MODE_LINE_SGMII_NET_AN_ON, "SGMII_NET_AN_ON"}
+};
+
+static const struct mode host_modes[] = {
+       {MV_MODE_HOST_10GBR, "KR"},
+       {MV_MODE_HOST_10GBX2, "10GBX2"},
+       {MV_MODE_HOST_10GBX4, "KX4"},
+       {MV_MODE_HOST_2GBX_AN_OFF, "2GBX_AN_OFF"},
+       {MV_MODE_HOST_1GBR_AN_OFF, "1GBR_AN_OFF"},
+       {MV_MODE_HOST_1GBR_AN_ON, "1GBR_AN_ON"},
+       {MV_MODE_HOST_SGMII_SYS_AN_OFF, "SGMII_SYS_AN_OFF"},
+       {MV_MODE_HOST_SGMII_SYS_AN_ON, "SGMII_SYS_AN_ON"},
+       {MV_MODE_HOST_SGMII_NET_AN_OFF, "SGMII_NET_AN_OFF"},
+       {MV_MODE_HOST_SGMII_NET_AN_ON, "SGMII_NET_AN_ON"}
+};
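+
+/*
+ * Example (assumed) device tree usage of the mode names above; the strings
+ * are matched case-insensitively against these tables by
+ * marvell_of_get_data():
+ *
+ *	mv,line-mode = "KR";
+ *	mv,host-mode = "KX4";
+ */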
+
+struct mv88x2222_data {
+       int irq;
+       int rst_active_low, irq_active_low;
+       int line_mode, host_mode, mux;
+};
+
+static void *marvell_of_get_data(struct phy_device *phydev)
+{
+       struct device_node *np = phydev->mdio.dev.of_node;
+       struct mv88x2222_data *pdata;
+       enum of_gpio_flags flags;
+       int ret;
+       char mode[32];
+       unsigned int i = 0;
+       const char *pm = mode;
+
+       pdata = devm_kzalloc(&phydev->mdio.dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
+
+       ret = of_get_named_gpio_flags(np, "irq-pin", 0, &flags);
+       if (ret >= 0) {
+               pdata->irq = ret;
+               pdata->irq_active_low = flags & OF_GPIO_ACTIVE_LOW;
+               dev_info(&phydev->mdio.dev, "irq gpio pin=%d\n", ret);
+       }
+
+       pdata->line_mode = MV_MODE_LINE_DEFAULT;
+       ret = of_property_read_string(np, MV_MODE_LINE_OF_NAME, &pm);
+       if (!ret) {
+               for (i = 0; i < ARRAY_SIZE(line_modes); ++i) {
+                       if (strcasecmp(line_modes[i].mode_name, pm) == 0) {
+                               pdata->line_mode = line_modes[i].mode_num;
+                               break;
+                       }
+               }
+       }
+
+       pdata->host_mode = MV_MODE_HOST_DEFAULT;
+       ret = of_property_read_string(np, MV_MODE_HOST_OF_NAME, &pm);
+       if (!ret) {
+               for (i = 0; i < ARRAY_SIZE(host_modes); ++i) {
+                       if (strcasecmp(host_modes[i].mode_name, pm) == 0) {
+                               pdata->host_mode = host_modes[i].mode_num;
+                               break;
+                       }
+               }
+       }
+
+       /* Default value for now */
+       pdata->mux = MV_ATT_DEFAULT;
+
+       return pdata;
+}
+
+static int marvell_soft_reset(struct phy_device *phydev)
+{
+       int ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_RESET,
+                               MV_SW_RST_ALL);
+       int count = 50;
+
+       if (ret) {
+               dev_warn(&phydev->mdio.dev, "software reset failed\n");
+               return ret;
+       }
+
+       do {
+               usleep_range(MV_RESET_DELAY_US, MV_RESET_DELAY_US + 100);
+               ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_RESET);
+       } while ((ret & MV_SW_RST_ALL) && count--);
+
+       return 0;
+}
+
+static int marvell_config_init(struct phy_device *phydev)
+{
+       struct mv88x2222_data *pdata = phydev->priv;
+       int ret;
+
+       ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE,
+                           pdata->line_mode | pdata->host_mode);
+       if (ret)
+               return ret;
+
+       phydev->speed = SPEED_10000;
+       phydev->duplex = DUPLEX_FULL;
+
+       /* This must be done after the mode has been set */
+       ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE);
+       ret |= 0x8000;
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE, ret);
+
+       marvell_soft_reset(phydev);
+
+       dev_info(&phydev->mdio.dev, "phy(%d, %x)=%x\n", MDIO_MMD_VEND2,
+                       MV_DEV_CHIP_HOST_LINE,
+                       phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE));
+
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+                       phydev->supported, 1);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+                       phydev->supported, 1);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+                       phydev->supported, 1);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+                       phydev->supported, 1);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+                       phydev->supported, 1);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                       phydev->supported, 1);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported, 1);
+
+       phydev->pause = 0;
+       phydev->asym_pause = 0;
+       phydev->interface = PHY_INTERFACE_MODE_XGMII;
+       phydev->duplex = DUPLEX_FULL;
+
+       switch (pdata->line_mode) {
+       case MV_MODE_LINE_10GBR:
+       case MV_MODE_LINE_10GBW:
+               phydev->speed = SPEED_10000;
+               break;
+       case MV_MODE_LINE_2GBX_AN_OFF:
+               phydev->speed = SPEED_2500;
+               break;
+       default:
+               phydev->speed = SPEED_1000;
+               break;
+       }
+
+       return 0;
+}
+
+static int marvell_adjust_tx(struct phy_device *phydev)
+{
+       int reg;
+       int line_link = 1;
+
+       /* Switch tristate to "write to pin/read from register" */
+       reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_TRISTATE_CTL);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_TRISTATE_CTL,
+                     reg | (1 << MV_GPIO_TXDISABLE_OUTP_EN_SHF));
+
+       /* Switch off TX_DISABLE */
+       reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA,
+                     reg & ~(1 << MV_GPIO_TXDISABLE_DATA_SHF));
+
+       /* Check if the optical cable is plugged in */
+       reg = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+       if ((reg < 0) || !(reg & MDIO_STAT1_LSTATUS))
+               line_link = 0;
+
+       if (line_link) {
+               /* Line side is up, keep the transmitter enabled */
+               return 0;
+       }
+
+       /* Switch on TX_DISABLE */
+       reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA,
+                     reg | (1 << MV_GPIO_TXDISABLE_DATA_SHF));
+
+       return 1;
+}
+
+static int marvell_update_link(struct phy_device *phydev)
+{
+       int reg;
+       int host_mode = 0;
+       int line_mode = 0;
+
+       /* Default link status */
+       phydev->link = 1;
+
+       reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_CHIP_HOST_LINE);
+       if (reg < 0) {
+               phydev->link = 0;
+               return 0;
+       }
+
+       host_mode = reg & 0x007F;
+       line_mode = reg & 0x7F00;
+
+       /* Read host link status */
+       if (host_mode == MV_MODE_HOST_10GBX4)
+               reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, 0x1001);
+       else
+               reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_STAT1);
+
+       if ((reg < 0) || !(reg & MDIO_STAT1_LSTATUS))
+               phydev->link = 0;
+
+       /* Read line link status */
+       if (line_mode == MV_MODE_LINE_10GBR)
+               reg = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+       else
+               reg = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x2001);
+
+       if ((reg < 0) || !(reg & MDIO_STAT1_LSTATUS))
+               phydev->link = 0;
+
+       /*
+        * The PMA/PMD link status is always broken on this device;
+        * the driver will need to be updated to handle it properly.
+        */
+       reg = marvell_adjust_tx(phydev);
+       if (reg < 0)
+               phydev->link = 0;
+
+       return 0;
+}
+
+static int marvell_read_status(struct phy_device *phydev)
+{
+       int reg;
+
+       /* Update the link, but return if there was an error */
+       reg = marvell_update_link(phydev);
+       if (reg < 0)
+               return reg;
+
+       /* Read line control reg */
+       reg = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+       if (reg < 0)
+               return reg;
+
+       return 0;
+}
+
+static int marvell_config_aneg(struct phy_device *phydev)
+{
+       linkmode_copy(phydev->advertising, phydev->supported);
+
+       return 0;
+}
+
+static int marvell_probe(struct phy_device *phydev)
+{
+       struct mv88x2222_data *pdata = NULL;
+       int reg = 0;
+
+       if (phydev->mdio.dev.of_node)
+               pdata = marvell_of_get_data(phydev);
+
+       if (!pdata) {
+               dev_err(&phydev->mdio.dev, "No PHY platform data\n");
+               return -ENODEV;
+       }
+
+       phydev->priv = pdata;
+       dev_info(&phydev->mdio.dev, "probed %s at 0x%02x\n",
+                phydev->drv->name, phydev->mdio.addr);
+       reg = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_DEVID1);
+       dev_dbg(&phydev->mdio.dev, "PCS device ID1: 0x%04x\n", reg);
+
+       return 0;
+}
+
+static int marvell_suspend(struct phy_device *phydev)
+{
+       int reg;
+
+       mutex_lock(&phydev->lock);
+
+       /* Switch tristate to "write to pin/read from register" */
+       reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_TRISTATE_CTL);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_TRISTATE_CTL,
+                     reg | (1 << MV_GPIO_TXDISABLE_OUTP_EN_SHF));
+
+       /* Switch on TX_DISABLE */
+       reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_DEV_GPIO_DATA,
+                     reg | (1 << MV_GPIO_TXDISABLE_DATA_SHF));
+
+       /* TBD: probably switch to low-power mode */
+
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+
+static int marvell_match_phy_device(struct phy_device *phydev)
+{
+       unsigned int phy_id = phydev->c45_ids.device_ids[MDIO_MMD_PCS] & MARVELL_PHY_ID_MASK;
+
+       return (phy_id == MARVELL_PHY_ID_88X2222) || (phy_id == MARVELL_PHY_ID_88X2222R);
+}
+
+static struct phy_driver marvell_drivers[] = {
+       {
+               .phy_id = MARVELL_PHY_ID_88X2222,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88X2222",
+               .features = 0,
+               .config_init = marvell_config_init,
+               .config_aneg = marvell_config_aneg,
+               .probe = marvell_probe,
+               .match_phy_device = marvell_match_phy_device,
+               .read_status = marvell_read_status,
+               .soft_reset = marvell_soft_reset,
+               .resume = genphy_resume,
+               .suspend = marvell_suspend,
+       },
+};
+module_phy_driver(marvell_drivers);
+
+static struct mdio_device_id __maybe_unused marvell_tbl[] = {
+       { MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88X2222R, MARVELL_PHY_ID_MASK },
+       { }
+};
+MODULE_DEVICE_TABLE(mdio, marvell_tbl);
index 2842ca205a0a97af675b38dcd926738684a6e21c..bf40c9d04640c06ed48ee9c4d59173e8fdbcc0e1 100644 (file)
@@ -981,7 +981,7 @@ struct ath_hw {
        bool tpc_enabled;
        u8 tx_power[Ar5416RateSize];
        u8 tx_power_stbc[Ar5416RateSize];
-       bool msi_enabled;
+       u32 msi_enabled;
        u32 msi_mask;
        u32 msi_reg;
 };
index 92b2dd396436adb8db52ff74dd9c966f7262c590..502c1b8f7d3a53b3b18671e4cdb952db2b49307d 100644 (file)
@@ -962,22 +962,35 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        sc->mem = pcim_iomap_table(pdev)[0];
        sc->driver_data = id->driver_data;
 
+       if (pdev->irq == 255 || pdev->irq == 0) { /* not configured by PCI BIOS */
+               ret = pci_enable_msi(pdev);
+               dev_info(&pdev->dev, "pci_enable_msi: ret %d, irq %d\n", ret, pdev->irq);
+       }
+
        if (ath9k_use_msi) {
-               if (pci_enable_msi(pdev) == 0) {
-                       msi_enabled = 1;
+               if ((ret = pci_enable_msi_range(pdev, 1, 4)) > 0) {
+                       msi_enabled = ret;
                        dev_err(&pdev->dev, "Using MSI\n");
                } else {
                        dev_err(&pdev->dev, "Using INTx\n");
                }
        }
 
-       if (!msi_enabled)
+       if (!msi_enabled) {
                ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
-       else
-               ret = request_irq(pdev->irq, ath_isr, 0, "ath9k", sc);
+       } else {
+               int i;
+               for (i = 0; i < msi_enabled; i++) {
+                       ret = request_irq(pdev->irq + i, ath_isr, 0, "ath9k", sc);
+                       if (ret) {
+                               for (--i; i >= 0; i--)
+                                       free_irq(pdev->irq + i, sc);
+                               break;
+                       }
+               }
+       }
 
        if (ret) {
-               dev_err(&pdev->dev, "request_irq failed\n");
+               dev_err(&pdev->dev, "request_irq (%d) failed (%d)\n", pdev->irq, ret);
                goto err_irq;
        }
 
@@ -1014,6 +1027,11 @@ static void ath_pci_remove(struct pci_dev *pdev)
                sc->sc_ah->ah_flags |= AH_UNPLUGGED;
        ath9k_deinit_device(sc);
        free_irq(sc->irq, sc);
+       if (sc->sc_ah->msi_enabled > 1) {
+               int i;
+               for (i = sc->sc_ah->msi_enabled - 1; i > 0; i--)
+                       free_irq(sc->irq + i, sc);
+       }
        ieee80211_free_hw(sc->hw);
 }
 
index 900788e4018cef2d9b2657359dd0fad3b234778a..d30d16ac15878f0f6c7ac1f45e8d1f61a942fd3b 100644 (file)
@@ -108,6 +108,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
                   IMR_HIGHDOK | IMR_BDOK | IMR_RDU | IMR_RXFOVW | 0);
 
        rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0);
+       rtlpci->msi_support = true;
 
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
index a304f5ea11b90b2cffe1b2eec9cc605dc14b1d34..10c1e775097713aa7aad4abaed38f912a6729ad0 100644 (file)
@@ -127,6 +127,9 @@ config PCI_LOCKLESS_CONFIG
 config PCI_BRIDGE_EMUL
        bool
 
+config PCI_ECAM
+       bool "PCI ECAM support"
+
 config PCI_IOV
        bool "PCI IOV support"
        select PCI_ATS
index 3cef835b375fd6a6adaacd1384987a5f905e9c46..27bf05c97986a4eb4f2ebd8804d0326aca148bbc 100644 (file)
@@ -135,11 +135,30 @@ static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
  */
 static void pci_clip_resource_to_region(struct pci_bus *bus,
                                        struct resource *res,
-                                       struct pci_bus_region *region)
+                                       struct pci_bus_region *region,
+                                       resource_size_t *align)
 {
        struct pci_bus_region r;
+       resource_size_t new_align, offset;
 
        pcibios_resource_to_bus(bus, &r, res);
+
+       offset = res->start - r.start;
+       if (offset & (*align - 1) && (r.start & (*align - 1)) == 0) {
+               /*
+                * a) CPU address (resource) differs from PCI bus address
+                * (pci_bus_region), i.e. address translation is in effect;
+                * b) PCI bus address is aligned as required;
+                * c) CPU address is not aligned.
+                * So, we can relax alignment requirement for CPU address.
+                */
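+               /*
+                * For example, a 16 MiB alignment requirement combined with a
+                * CPU-to-bus offset of 0x2100000 is relaxed to
+                * 1 << __ffs(0x2100000) = 0x100000 (1 MiB).
+                */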
+               new_align = 1 << __ffs(offset);
+               dev_info(&bus->dev,
+                        "pci_clip_resource_to_region: relaxing alignment from %pa to %pa\n",
+                        align, &new_align);
+               *align = new_align;
+       }
+
        if (r.start < region->start)
                r.start = region->start;
        if (r.end > region->end)
@@ -169,6 +188,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
 
        pci_bus_for_each_resource(bus, r, i) {
                resource_size_t min_used = min;
+               resource_size_t res_align = align;
 
                if (!r)
                        continue;
@@ -184,7 +204,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
                        continue;
 
                avail = *r;
-               pci_clip_resource_to_region(bus, &avail, region);
+               pci_clip_resource_to_region(bus, &avail, region, &res_align);
 
                /*
                 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
@@ -199,7 +219,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
 
                /* Ok, try it out.. */
                ret = allocate_resource(r, res, size, min_used, max,
-                                       align, alignf, alignf_data);
+                                       res_align, alignf, alignf_data);
                if (ret == 0)
                        return 0;
        }
index 0ba988b5b5bc6901fa71c261be827a953d707e4a..3d6885ff2557d506da568f077490df4071b17ca7 100644 (file)
@@ -268,4 +268,10 @@ config PCIE_AL
          required only for DT-based platforms. ACPI platforms with the
          Annapurna Labs PCIe controller don't need to enable this.
 
+config PCIE_BAIKAL
+       bool "Baikal-T PCIe controller"
+       depends on MIPS_BAIKAL || COMPILE_TEST
+       depends on PCI_MSI_IRQ_DOMAIN
+       select PCIE_DW_HOST
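+       help
+         Enables support for the DesignWare-based PCIe Root Complex
+         controller found in the Baikal-T SoC.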
+
 endmenu
index 69faff371f118f133a1973b52cc093cbb6ec1840..4495c8dc0868243968deafd4c0248029d45f326d 100644 (file)
@@ -18,6 +18,7 @@ obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
 obj-$(CONFIG_PCI_MESON) += pci-meson.o
 obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
 obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_BAIKAL) += pcie-baikal.o
 
 # The following drivers are for devices that use the generic ACPI
 # pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pcie-baikal.c b/drivers/pci/controller/dwc/pcie-baikal.c
new file mode 100644 (file)
index 0000000..1c2f3e2
--- /dev/null
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe RC driver for Synopsys DesignWare Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define PCIE_CFG_BASE                                          0xBF052000
+#define PMU_BASE                                                       0xBF04D000
+
+#define LINK_RETRAIN_TIMEOUT                           HZ
+#define LINK_UP_TIMEOUT                                        HZ
+
+/* Macros to read/write PMU registers. */
+#define READ_PMU_REG(r)                readl((const volatile void *)(r))
+#define WRITE_PMU_REG(r, v)    writel(v, (volatile void *)(r))
+
+#define PCIE_LINK_CONTROL2_LINK_STATUS2_REG    (0xa0)  /* Link Control 2 and Status 2 Register. */
+#define PCIE_LINK_CAPABILITIES_REG                     (0x7c)  /* Link Capabilities Register. */
+#define PCIE_LINK_CONTROL_LINK_STATUS_REG      (0x80)  /* Link Control and Status Register. */
+
+/* PCIE_LINK_CONTROL2_LINK_STATUS2 */
+#define PCIE_LINK_CONTROL2_GEN_MASK                    (0xF)
+#define PCIE_LINK_CONTROL2_GEN1                                (1)
+
+/* LINK_CONTROL_LINK_STATUS_REG */
+#define PCIE_CAP_LINK_SPEED_SHIFT                      16
+#define PCIE_CAP_LINK_SPEED_MASK                       0xF0000
+#define PCIE_STA_LINK_TRAINING                         0x8000000
+#define PCIE_STA_LINK_WIDTH_MASK                       0x3f00000
+#define PCIE_STA_LINK_WIDTH_SHIFT                      (20)
+
+/* BK_PMU_AXI_PCIE_M_CTL */
+#define PMU_AXI_PCIE_M_CTL_EN                          (1 << 0)        /* Enable AXI PCIe Master clock. */
+
+/* BK_PMU_AXI_PCIE_S_CTL */
+#define PMU_AXI_PCIE_S_CTL_EN                          (1 << 0)        /* Enable AXI PCIe Slave clock. */
+#define PMU_AXI_PCIE_S_CTL_RST                         (1 << 1)        /* Software AXI PCIe Slave clock domain reset. */
+
+/* BK_PMU_PCIE_RSTC */
+#define PMU_PCIE_RSTC_PHY_RESET                                (1 << 0)        /* PCIe PHY phy_rts_n reset control bit. */
+#define PMU_PCIE_RSTC_CORE_RST                         (1 << 8)        /* PCIe core core_rst_n reset control bit. */
+#define PMU_PCIE_RSTC_STICKY_RST                       (1 << 10)       /* PCIe core sticky_rst_n reset control bit. */
+#define PMU_PCIE_RSTC_NONSTICKY_RST                    (1 << 11)       /* PCIe core nonsticky_rst_n reset control bit. */
+#define PMU_PCIE_RSTC_REQ_PHY_RST                      (1 << 16)
+#define PMU_PCIE_RSTC_REQ_CORE_RST                     (1 << 24)
+#define PMU_PCIE_RSTC_REQ_STICKY_RST           (1 << 26)
+#define PMU_PCIE_RSTC_REQ_NON_STICKY_RST       (1 << 27)
+
+/* BK_PMU_PCIE_GENC */
+#define PMU_PCIE_GENC_LTSSM_ENABLE                     (1 << 1)        /* LTSSM enable bit. */
+#define PMU_PCIE_GENC_DBI2_MODE                                (1 << 2)        /* PCIe core registers access mode bit: DBI(=0) / DBI2(=1) */
+
+/* BK_PMU_PCIE_PMSC */
+#define PMU_PCIE_PMSC_LTSSM_STATE_MASK         (0x3F)
+#define LTSSM_L0                                                       0x11
+
+/* Register map */
+#define BK_AXI_PCIE_M_CTL_OFFSET                       0x048
+#define BK_AXI_PCIE_S_CTL_OFFSET                       0x04C
+#define BK_PCIE_RSTC_OFFSET                                    0x144
+#define BK_PCIE_PMSC_OFFSET                                    0x148
+#define BK_PCIE_GENC_OFFSET                                    0x14C
+
+#define BK_PMU_AXI_PCIE_M_CTL                          (PMU_BASE + BK_AXI_PCIE_M_CTL_OFFSET)
+#define BK_PMU_AXI_PCIE_S_CTL                          (PMU_BASE + BK_AXI_PCIE_S_CTL_OFFSET)
+#define BK_PMU_PCIE_RSTC                                       (PMU_BASE + BK_PCIE_RSTC_OFFSET)
+#define BK_PMU_PCIE_PMSC                                       (PMU_BASE + BK_PCIE_PMSC_OFFSET)
+#define BK_PMU_PCIE_GENC                                       (PMU_BASE + BK_PCIE_GENC_OFFSET)
+
+/*
+ * The access functions below are needed on Baikal (MIPS), because the
+ * PCIe controller supports aligned 32-bit accesses only.
+ */
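+/*
+ * For example, a 16-bit write to offset 0x82 is performed as a
+ * read-modify-write of the aligned 32-bit word at offset 0x80, replacing
+ * bits [31:16].
+ */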
+
+static int be_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+       uintptr_t a = (uintptr_t)addr;
+       int adj = (a & 3) * 8;
+       u32 t;
+
+       if (!IS_ALIGNED((uintptr_t)addr, size)) {
+               *val = 0;
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+       }
+
+       addr = (void __iomem *)(a & ~3);
+       t = readl(addr);
+
+       if (size == 4) {
+               *val = t;
+       } else if (size == 2) {
+               *val = (t >> adj) & 0xffff;
+       } else if (size == 1) {
+               *val = (t >> adj) & 0xff;
+       } else {
+               *val = 0;
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+       }
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int be_pcie_write(void __iomem *addr, int size, u32 val)
+{
+       uintptr_t a = (uintptr_t)addr;
+       int adj = (a & 3) * 8;
+       u32 t;
+
+       if (!IS_ALIGNED((uintptr_t)addr, size))
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+
+       if (size == 4) {
+               writel(val, addr);
+               return PCIBIOS_SUCCESSFUL;
+       } else if (size != 2 && size != 1) {
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+       }
+
+       addr = (void __iomem *)(a & ~3);
+       t = readl(addr);
+
+       if (size == 2)
+               t = (t & ~(0xffff << adj)) | (val & 0xffff) << adj;
+       else
+               t = (t & ~(0xff << adj)) | (val & 0xff) << adj;
+       writel(t, addr);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int baikal_t1_pcie_link_is_down(void)
+{
+       int reg = READ_PMU_REG(BK_PMU_PCIE_PMSC);
+       return (reg & PMU_PCIE_PMSC_LTSSM_STATE_MASK) != LTSSM_L0;
+}
+
+static inline int baikal_t1_pcie_link_is_training(void)
+{
+       u32 reg;
+
+       be_pcie_read((void __iomem *)(PCIE_CFG_BASE + PCIE_LINK_CONTROL_LINK_STATUS_REG), 4, &reg);
+       return reg & PCIE_STA_LINK_TRAINING;
+}
+
+static void baikal_t1_wait_pcie_link_training_done(void)
+{
+       unsigned long start_jiffies = jiffies;
+       while (baikal_t1_pcie_link_is_training()) {
+               if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
+                       pr_err("%s: link retrained for too long, timeout occured\n", __func__);
+                       break;
+               }
+               udelay(100);
+       }
+}
+
+static inline void baikal_t1_pcie_link_retrain(int target_speed)
+{
+       u32 reg;
+       unsigned long start_jiffies;
+
+       /* In case the link is already training, wait for the training to complete */
+       baikal_t1_wait_pcie_link_training_done();
+
+       wmb();
+
+       /* Set the desired speed */
+       be_pcie_read((void __iomem *)(PCIE_CFG_BASE + PCIE_LINK_CONTROL2_LINK_STATUS2_REG), 4, &reg);
+       reg &= ~PCIE_LINK_CONTROL2_GEN_MASK;
+       reg |= target_speed;
+       be_pcie_write((void __iomem *)(PCIE_CFG_BASE + PCIE_LINK_CONTROL2_LINK_STATUS2_REG), 4, reg);
+
+       wmb();
+
+       /* Set the Retrain Link bit */
+       be_pcie_read((void __iomem *)(PCIE_CFG_BASE + PCIE_LINK_CONTROL_LINK_STATUS_REG), 4, &reg);
+       reg |= PCI_EXP_LNKCTL_RL;
+       be_pcie_write((void __iomem *)(PCIE_CFG_BASE + PCIE_LINK_CONTROL_LINK_STATUS_REG), 4, reg);
+
+       wmb();
+
+       /* Wait for link training begin */
+       start_jiffies = jiffies;
+       while (!baikal_t1_pcie_link_is_training()) {
+               if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
+                       pr_err("%s: link retrained for too long, timeout occured\n", __func__);
+                       break;
+               }
+               udelay(100);
+       }
+
+       /* Wait for link training end */
+       baikal_t1_wait_pcie_link_training_done();
+
+       /* Wait for link is up */
+       start_jiffies = jiffies;
+       while (baikal_t1_pcie_link_is_down()) {
+               if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
+                       pr_err("%s: link is down for too long, timeout occured\n", __func__);
+                       break;
+               }
+               udelay(100);
+       }
+}
+
+static int baikal_t1_report_link_performance(struct pci_dev *pdev)
+{
+       u32 reg;
+       int speed, width;
+
+       be_pcie_read((void __iomem *)(PCIE_CFG_BASE + PCIE_LINK_CONTROL_LINK_STATUS_REG), 4, &reg);
+       speed = (reg & PCIE_CAP_LINK_SPEED_MASK) >> PCIE_CAP_LINK_SPEED_SHIFT;
+       width = (reg & PCIE_STA_LINK_WIDTH_MASK) >> PCIE_STA_LINK_WIDTH_SHIFT;
+       dev_info(&pdev->dev, "Link Status is     GEN%d, x%d\n", speed, width);
+       return speed;
+}
+
+static void baikal_t1_pcie_link_speed_fixup(struct pci_dev *pdev)
+{
+       u32 reg;
+       int speed, width, target_speed;
+
+       be_pcie_read((void __iomem *)(PCIE_CFG_BASE + PCIE_LINK_CAPABILITIES_REG), 4, &reg);
+       speed = reg & PCI_EXP_LNKCAP_SLS;
+       if (speed > PCI_EXP_LNKCAP_SLS_2_5GB) {
+               pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg);
+               speed = reg & PCI_EXP_LNKCAP_SLS;
+               width = (reg & PCI_EXP_LNKCAP_MLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+               dev_info(&pdev->dev, "Link Capability is GEN%d, x%d\n", speed, width);
+               if (speed > PCI_EXP_LNKCAP_SLS_2_5GB) {
+                       target_speed = speed;
+                       if (baikal_t1_report_link_performance(pdev) < target_speed) {
+                               dev_info(&pdev->dev, "Retrain link to    GEN%d\n", target_speed);
+                               baikal_t1_pcie_link_retrain(target_speed);
+                               baikal_t1_report_link_performance(pdev);
+                               return;
+                       }
+               }
+       }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, baikal_t1_pcie_link_speed_fixup);
+
+static int dw_pcie_init(void)
+{
+       u32 reg, rstc_mask = 0;
+
+       /* 1. Read value of BK_PMU_AXI_PCIE_M_CTL, set EN bit. */
+       reg = READ_PMU_REG(BK_PMU_AXI_PCIE_M_CTL);
+       reg |= PMU_AXI_PCIE_M_CTL_EN;
+       WRITE_PMU_REG(BK_PMU_AXI_PCIE_M_CTL, reg);
+
+       /* 2. Read value of BK_PMU_AXI_PCIE_S_CTL, set EN bit. */
+       reg = READ_PMU_REG(BK_PMU_AXI_PCIE_S_CTL);
+       reg |= PMU_AXI_PCIE_S_CTL_EN;
+       WRITE_PMU_REG(BK_PMU_AXI_PCIE_S_CTL, reg);
+
+       /*
+        * 3. Manage RESET* bits
+        * (PHY_RESET, PIPE_RESET, CORE_RST, PWR_RST, STICKY_RST, NONSTICKY_RST)
+        */
+       reg = READ_PMU_REG(BK_PMU_PCIE_RSTC);
+
+       /* We have Baikal-T1 chip, perform enhanced reset procedure */
+       if (reg & PMU_PCIE_RSTC_REQ_PHY_RST)
+               rstc_mask |= PMU_PCIE_RSTC_PHY_RESET;
+       if (reg & PMU_PCIE_RSTC_REQ_CORE_RST)
+               rstc_mask |= PMU_PCIE_RSTC_CORE_RST;
+       if (reg & PMU_PCIE_RSTC_REQ_STICKY_RST)
+               rstc_mask |= PMU_PCIE_RSTC_STICKY_RST;
+       if (reg & PMU_PCIE_RSTC_REQ_NON_STICKY_RST)
+               rstc_mask |= PMU_PCIE_RSTC_NONSTICKY_RST;
+       WRITE_PMU_REG(BK_PMU_PCIE_RSTC, reg | rstc_mask);
+
+       usleep_range(1, 10);
+
+       reg = READ_PMU_REG(BK_PMU_PCIE_RSTC);
+       reg &= ~rstc_mask;
+       WRITE_PMU_REG(BK_PMU_PCIE_RSTC, reg);
+
+       reg = READ_PMU_REG(BK_PMU_PCIE_RSTC);
+       if (reg & 0x3f11) {
+               reg &= ~0x3f11;
+               WRITE_PMU_REG(BK_PMU_PCIE_RSTC, reg);
+               usleep_range(1, 10);
+               reg = READ_PMU_REG(BK_PMU_PCIE_RSTC);
+       }
+
+       /*
+        * 4. Set GEN1 speed. In case EP supports GEN2
+        * a link will be retrained later.
+        * At that moment it's impossible to
+        * configure a link on GEN3 speed.
+        */
+       be_pcie_read((void __iomem *)PCIE_CFG_BASE, 4, &reg);
+       reg &= ~PCIE_LINK_CONTROL2_GEN_MASK;
+       reg |= PCIE_LINK_CONTROL2_GEN1;
+       be_pcie_write((void __iomem *)PCIE_CFG_BASE, 4, reg);
+
+       wmb();
+
+       /* 5. Set LTSSM enable, app_ltssm_enable = 0x1 */
+       reg = READ_PMU_REG(BK_PMU_PCIE_GENC);
+       reg |= PMU_PCIE_GENC_LTSSM_ENABLE;
+       WRITE_PMU_REG(BK_PMU_PCIE_GENC, reg);
+
+       return 0;
+}
+
+static u32 be_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size)
+{
+       int ret;
+       u32 val;
+
+       ret = be_pcie_read(base + reg, size, &val);
+       if (ret)
+               dev_err(pci->dev, "Read DBI address failed\n");
+
+       return val;
+}
+
+static void be_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size, u32 val)
+{
+       int ret;
+
+       ret = be_pcie_write(base + reg, size, val);
+       if (ret)
+               dev_err(pci->dev, "Write DBI address failed\n");
+}
+
+static int be_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+                                u32 *val)
+{
+       struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+       return be_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static int be_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+                                  u32 val)
+{
+       struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+       return be_pcie_write(pci->dbi_base + where, size, val);
+}
+
+static int dw_be_pcie_host_init(struct pcie_port *pp)
+{
+       struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+       dw_pcie_setup_rc(pp);
+       dw_pcie_wait_for_link(pci);
+
+       if (IS_ENABLED(CONFIG_PCI_MSI))
+               dw_pcie_msi_init(pp);
+
+       return 0;
+}
+
+static void dw_be_set_num_vectors(struct pcie_port *pp)
+{
+       pp->num_vectors = MAX_MSI_IRQS;
+}
+
+static const struct dw_pcie_host_ops dw_be_pcie_host_ops = {
+       .rd_own_conf = be_pcie_rd_own_conf,
+       .wr_own_conf = be_pcie_wr_own_conf,
+       .host_init = dw_be_pcie_host_init,
+       .set_num_vectors = dw_be_set_num_vectors,
+};
+
+static const struct dw_pcie_ops dw_be_pcie_ops = {
+       .read_dbi = be_pcie_read_dbi,
+       .write_dbi = be_pcie_write_dbi,
+};
+
+static int dw_be_add_pcie_port(struct dw_pcie *pci,
+                                struct platform_device *pdev)
+{
+       struct pcie_port *pp = &pci->pp;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       pp->irq = platform_get_irq(pdev, 1);
+       if (pp->irq < 0)
+               return pp->irq;
+
+       if (IS_ENABLED(CONFIG_PCI_MSI)) {
+               pp->msi_irq = platform_get_irq(pdev, 0);
+               if (pp->msi_irq < 0)
+                       return pp->msi_irq;
+       }
+
+       pp->ops = &dw_be_pcie_host_ops;
+
+       ret = dw_pcie_host_init(pp);
+       if (ret) {
+               dev_err(dev, "Failed to initialize host\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int dw_be_pcie_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct dw_pcie *pci;
+       struct resource *res;  /* Resource from DT */
+       int ret;
+
+       PCIBIOS_MIN_IO = 0x100; /* MIPS does not allow defining it per platform */
+
+       dw_pcie_init();
+
+       pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+       if (!pci)
+               return -ENOMEM;
+
+       pci->dev = dev;
+       pci->ops = &dw_be_pcie_ops;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+       if (!res)
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       pci->dbi_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(pci->dbi_base))
+               return PTR_ERR(pci->dbi_base);
+
+       ret = dw_be_add_pcie_port(pci, pdev);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static const struct of_device_id dw_be_pcie_of_match[] = {
+       {
+               .compatible = "snps,dw-pcie",
+       },
+       {},
+};
+
+static struct platform_driver dw_be_pcie_driver = {
+       .driver = {
+               .name   = "dw-pcie",
+               .of_match_table = dw_be_pcie_of_match,
+               .suppress_bind_attrs = true,
+       },
+       .probe = dw_be_pcie_probe,
+};
+builtin_platform_driver(dw_be_pcie_driver);
index 44f4866d95d8c4ba6c0811e1e403de5f765816ea..78c95b3f0bd604fb85a4b8e679687456681f973b 100644 (file)
@@ -946,12 +946,15 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
        resource_size_t min_align = 0;
        int order;
 
-       for (order = 0; order <= max_order; order++) {
+       for (order = 0; order < max_order; order++) {
                resource_size_t align1 = 1;
 
+               if (!aligns[order])
+                       continue;
+
                align1 <<= (order + 20);
 
-               if (!align)
+               if (!min_align)
                        min_align = align1;
                else if (ALIGN(align + min_align, min_align) < align1)
                        min_align = align1 >> 1;
@@ -987,7 +990,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                         struct list_head *realloc_head)
 {
        struct pci_dev *dev;
-       resource_size_t min_align, align, size, size0, size1;
+       resource_size_t min_align, align, size, size0, size1, max_align;
        resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
        int order, max_order;
        struct resource *b_res = find_bus_resource_of_type(bus,
@@ -1067,6 +1070,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 
        min_align = calculate_mem_align(aligns, max_order);
        min_align = max(min_align, window_alignment(bus, b_res->flags));
+       max_align = (resource_size_t)1 << (max_order + 20);
+       if (min_align >= max_align / 2)
+               max_align = min_align;
        size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
        add_align = max(min_align, add_align);
        size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
@@ -1079,8 +1085,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                b_res->flags = 0;
                return 0;
        }
-       b_res->start = min_align;
-       b_res->end = size0 + min_align - 1;
+       b_res->start = max_align;
+       b_res->end = size0 + max_align - 1;
        b_res->flags |= IORESOURCE_STARTALIGN;
        if (size1 > size0 && realloc_head) {
                add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
index 5bf7542087776410ac16a4f53e5c53cec1b2b37c..d687bb867c19f70394e1daf7a919d103253afe7a 100644 (file)
@@ -230,6 +230,9 @@ config SPI_DW_MMIO
        tristate "Memory-mapped io interface driver for DW SPI core"
        depends on SPI_DESIGNWARE
 
+config SPI_BAIKAL
+       tristate "SPI Driver for Baikal-T SoC"
+
 config SPI_DLN2
        tristate "Diolan DLN-2 USB SPI adapter"
        depends on MFD_DLN2
index bb49c9e6d0a0c00cddf2275a84cb74fdc08ddf74..6674c2a5d21052f7dc59c4ed0267bc5bed95f88f 100644 (file)
@@ -35,6 +35,7 @@ obj-$(CONFIG_SPI_DAVINCI)             += spi-davinci.o
 obj-$(CONFIG_SPI_DLN2)                 += spi-dln2.o
 obj-$(CONFIG_SPI_DESIGNWARE)           += spi-dw.o
 obj-$(CONFIG_SPI_DW_MMIO)              += spi-dw-mmio.o
+obj-$(CONFIG_SPI_BAIKAL)               += spi-baikal.o spi-baikal-dma.o spi-baikal-boot.o
 obj-$(CONFIG_SPI_DW_PCI)               += spi-dw-midpci.o
 spi-dw-midpci-objs                     := spi-dw-pci.o spi-dw-mid.o
 obj-$(CONFIG_SPI_EFM32)                        += spi-efm32.o
diff --git a/drivers/spi/spi-baikal-boot.c b/drivers/spi/spi-baikal-boot.c
new file mode 100644 (file)
index 0000000..11da637
--- /dev/null
@@ -0,0 +1,355 @@
+/*
+ * Baikal Electronics SPI flash driver.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/property.h>
+#include <linux/spinlock.h>
+#include "spi-dw.h"
+
+
+struct dw_boot_spi {
+       void __iomem *regs;
+       unsigned long paddr;
+
+       u32 fifo_len;       /* depth of the FIFO buffer */
+       u32 max_freq;       /* max bus freq supported */
+       u32 reg_io_width;   /* DR I/O width in bytes */
+       u32 bus_num;
+       u32 num_cs;         /* supported slave numbers */
+
+       char tx[512];
+
+       struct clk *clk;
+};
+
+static inline u32 dw_boot_readl(struct dw_boot_spi *dws, u32 offset)
+{
+       return __raw_readl(dws->regs + offset);
+}
+
+static inline void dw_boot_writel(struct dw_boot_spi *dws, u32 offset, u32 val)
+{
+       __raw_writel(val, dws->regs + offset);
+}
+
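+/*
+ * spi_set_mode() updates the TMOD field of CTRL0 through the bit-field
+ * overlay below; the overlay assumes the little-endian bit ordering used
+ * on Baikal-T and the standard DesignWare SSI CTRL0 field layout.
+ */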
+static void spi_set_mode(struct dw_boot_spi *dws, int mode)
+{
+       struct {
+               uint32_t dfs    :4; /* data frame size */
+               uint32_t frf    :2; /* frame format (0-spi, 1-ssp, 2-micro, 3-reserved) */
+               uint32_t scph   :1; /* clk phase */
+               uint32_t scpol  :1; /* clk polarity */
+               uint32_t tmod   :2; /* transfer mode (0-tx|rx, 1-tx, 2-rx, 3-eeprom) */
+               uint32_t slv_oe :1; /* (ignore) slave output enable */
+               uint32_t srl    :1; /* (ignore) shift register loop */
+               uint32_t cfs    :4; /* (ignore) control frame size */
+               uint32_t _      :16;
+       } ctr0;
+
+       *(u32*)&ctr0 = dw_boot_readl (dws, DW_SPI_CTRL0);
+       ctr0.tmod = mode;
+       dw_boot_writel(dws, DW_SPI_CTRL0, *(u32*)&ctr0);
+}
+
+
+static int boot_spi_write(struct spi_master *master, int chip_select,
+       const uint8_t *tx1, const uint8_t *tx2, int len1, int len2)
+{
+       struct dw_boot_spi *dws;
+       int i, n1, n2;
+       const uint8_t* end1 = tx1 + len1;
+       const uint8_t* end2 = tx2 + len2;
+       unsigned long flags;
+
+       /* must be static so that concurrent callers serialize on one lock */
+       static DEFINE_SPINLOCK(mLock);
+
+       spin_lock_irqsave(&mLock, flags);   /* Critical section - ON */
+
+       dws = spi_master_get_devdata(master);
+
+       dw_boot_writel(dws, DW_SPI_SER, 0);
+       dw_boot_writel(dws, DW_SPI_SSIENR, 0);
+       spi_set_mode(dws, SPI_TMOD_TO);
+
+       dw_boot_writel(dws, DW_SPI_SSIENR, 1);  /* enable the controller */
+
+       n1 = (len1 > dws->fifo_len) ? dws->fifo_len : len1;             /* pre-fill the FIFO */
+       n2 = (len2 > dws->fifo_len - n1) ? dws->fifo_len - n1 : len2;
+       for (i = 0; i < n1; i++)
+               dw_boot_writel(dws, DW_SPI_DR, *tx1++);
+       for (i = 0; i < n2; i++)
+               dw_boot_writel(dws, DW_SPI_DR, *tx2++);
+
+       dw_boot_writel(dws, DW_SPI_SER, chip_select);   /* start sending */
+
+       while (tx1 != end1) {   /* regular transfer 1 */
+               if (dw_boot_readl(dws, DW_SPI_SR) & SR_TF_NOT_FULL)
+                       dw_boot_writel(dws, DW_SPI_DR, *tx1++);
+       }
+
+       while (tx2 != end2) {   /* regular transfer 2 */
+               if (dw_boot_readl(dws, DW_SPI_SR) & SR_TF_NOT_FULL)
+                       dw_boot_writel(dws, DW_SPI_DR, *tx2++);
+       }
+
+       while (!(dw_boot_readl(dws, DW_SPI_SR) & SR_BUSY))      /* wait for the transfer to start */
+               ;
+
+       spin_unlock_irqrestore(&mLock, flags);  /* Critical section - OFF */
+
+       udelay(10);
+
+       return 0;
+}
+
+static int boot_spi_read(struct spi_master *master, int chip_select,
+       const uint8_t *tx, uint8_t *rx, int lentx, int lenrx)
+{
+       int i;
+       uint8_t* const rxend = rx + lenrx;
+       struct dw_boot_spi *dws;
+       unsigned long flags;
+
+       /* must be static so that concurrent callers serialize on one lock */
+       static DEFINE_SPINLOCK(mLock);
+
+       spin_lock_irqsave(&mLock, flags);                   /* Critical section - ON */
+
+       dws = spi_master_get_devdata(master);
+
+       dw_boot_writel(dws, DW_SPI_SER, 0);
+       dw_boot_writel(dws, DW_SPI_SSIENR, 0);
+       spi_set_mode(dws, SPI_TMOD_EPROMREAD);
+
+       dw_boot_writel(dws, DW_SPI_CTRL1, lenrx - 1);       /* number of RX frames - 1 */
+       dw_boot_writel(dws, DW_SPI_SSIENR, 1);              /* enable the controller */
+
+       for (i = 0; i < lentx; i++)                         /* queue the command bytes */
+               dw_boot_writel(dws, DW_SPI_DR, tx[i]);
+
+       dw_boot_writel(dws, DW_SPI_SER, chip_select);       /* start sending */
+
+       while (rx != rxend) {                               /* read incoming data */
+               if (dw_boot_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT)
+                       *rx++ = dw_boot_readl(dws, DW_SPI_DR);
+       }
+       spin_unlock_irqrestore(&mLock, flags);              /* Critical section - OFF */
+
+       return 0;
+}
+
+static int boot_spi_transfer_one_message(struct spi_master *master,
+                                        struct spi_message *msg)
+{
+       struct list_head *const head = &msg->transfers;
+       struct spi_transfer *pos;
+       int select = BIT(msg->spi->chip_select);
+       int err = 0;
+       int i;
+
+       char *rx = NULL;
+       int rx_len = 0;
+       int tx_len = 0;
+
+       struct dw_boot_spi *dws = spi_master_get_devdata(master);
+
+       /* decode */
+       list_for_each_entry(pos, head, transfer_list) {
+               if (pos->tx_buf) {
+                       /* check the bounds before copying into the bounce buffer */
+                       if (tx_len + pos->len > sizeof(dws->tx)) {
+                               err = -EMSGSIZE;
+                               goto exit;
+                       }
+                       for (i = 0; i < pos->len; ++i)
+                               dws->tx[tx_len + i] = ((const char *)pos->tx_buf)[i];
+
+                       tx_len += pos->len;
+               }
+
+               if (pos->rx_buf) {
+                       /* only a single RX segment is supported */
+                       if (rx) {
+                               err = -EINVAL;
+                               goto exit;
+                       }
+                       rx = pos->rx_buf;
+                       rx_len += pos->len;
+               }
+       }
+       msg->actual_length += tx_len + rx_len;
+
+       /* send */
+       if (rx)
+               boot_spi_read(master, select, dws->tx, rx, tx_len, rx_len);
+       else
+               boot_spi_write(master, select, dws->tx, NULL, tx_len, 0);
+
+exit:
+       msg->status = err;
+       spi_finalize_current_message(master);
+       if (err)
+               dev_err(&master->dev, "-- error %d\n", err);
+
+       return err;
+}
+
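+/*
+ * Detect the TX FIFO depth by writing increasing threshold values to
+ * TXFLTR until the controller stops accepting them, then restore the
+ * original threshold (the same approach the DesignWare SPI core uses).
+ */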
+static int fifo_len(struct dw_boot_spi *dws)
+{
+       u32 txfltr = dw_boot_readl(dws, DW_SPI_TXFLTR);
+       u32 fifo;
+
+       for (fifo = 1; fifo < 256; fifo++) {
+               dw_boot_writel(dws, DW_SPI_TXFLTR, fifo);
+               if (fifo != dw_boot_readl(dws, DW_SPI_TXFLTR))
+                       break;
+       }
+       dw_boot_writel(dws, DW_SPI_TXFLTR, txfltr);
+
+       return (fifo == 1) ? 0 : fifo;
+}
+
+static void init(struct dw_boot_spi *dws)
+{
+       int i;
+
+       /* clear all control registers up to the data register */
+       for (i = 0; i < DW_SPI_DR; i += sizeof(uint32_t))
+               dw_boot_writel(dws, i, 0);
+
+       /* baudr */
+       dw_boot_writel(dws, DW_SPI_BAUDR, 6);   /* todo: use dws->clk to init baudrate */
+}
+
+static int add_host(struct device *dev, struct dw_boot_spi *dws)
+{
+       struct spi_master *master;
+       int ret;
+
+       master = spi_alloc_master(dev, 0);
+       if (!master) {
+               dev_err(dev, "failed to allocate SPI controller\n");
+               return -ENOMEM;
+       }
+
+       master->bus_num                 = dws->bus_num;
+       master->num_chipselect          = dws->num_cs;
+       master->mode_bits               = SPI_CPOL | SPI_CPHA | SPI_LOOP;
+       master->bits_per_word_mask      = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+       master->max_speed_hz            = dws->max_freq;
+       master->dev.of_node             = dev->of_node;
+       master->transfer_one_message    = boot_spi_transfer_one_message;
+
+       spi_master_set_devdata(master, dws);
+       ret = devm_spi_register_master(dev, master);
+       if (ret) {
+               dev_err(&master->dev, "-- problem registering spi master\n");
+               spi_master_put(master);
+       }
+       return ret;
+}
+
+static int probe(struct platform_device *pdev)
+{
+       struct dw_boot_spi *dws;
+       struct resource *mem;
+       int ret;
+
+       /* alloc dws */
+       dws = devm_kzalloc(&pdev->dev, sizeof(struct dw_boot_spi), GFP_KERNEL);
+       if (!dws) {
+               dev_err(&pdev->dev, "-- alloc\n");
+               return -ENOMEM;
+       }
+
+       /* Get basic io resource and map it */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem) {
+               dev_err(&pdev->dev, "-- get resource?\n");
+               return -EINVAL;
+       }
+
+       dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(dws->regs)) {
+               dev_err(&pdev->dev, "-- ioremap\n");
+               return PTR_ERR(dws->regs);
+       }
+
+       dws->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dws->clk)) {
+               dev_err(&pdev->dev, "-- clk get\n");
+               return PTR_ERR(dws->clk);
+       }
+
+       ret = clk_prepare_enable(dws->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "-- clk prepare\n");
+               return ret;
+       }
+
+       /* get spi parameters */
+       dws->bus_num = of_alias_get_id(pdev->dev.of_node, "ssi");
+       dws->max_freq = clk_get_rate(dws->clk);
+       device_property_read_u32(&pdev->dev, "num-cs", &dws->num_cs);
+       dws->fifo_len = fifo_len(dws);
+
+       init(dws);
+
+       /* add host */
+       ret = add_host(&pdev->dev, dws);
+       if (ret) {
+               clk_disable_unprepare(dws->clk);
+               dev_err(&pdev->dev, "-- add_host\n");
+               return ret;
+       }
+       platform_set_drvdata(pdev, dws);
+
+       return 0;
+}
+
+static int remove(struct platform_device *pdev)
+{
+       struct dw_boot_spi *dws = platform_get_drvdata(pdev);
+
+       /* quiesce the controller before gating its clock */
+       dw_boot_writel(dws, DW_SPI_SSIENR, 0);
+       dw_boot_writel(dws, DW_SPI_BAUDR, 0);
+       clk_disable_unprepare(dws->clk);
+
+       return 0;
+}
+
+static const struct of_device_id   be_spi_boot_table[] = {
+       { .compatible = "be,dw-spi-boot", },
+       { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, be_spi_boot_table);
+
+static struct platform_driver be_spi_boot_driver = {
+       .probe      = probe,
+       .remove     = remove,
+       .driver     = {
+               .name   = "be,dw-spi-boot",
+               .of_match_table = be_spi_boot_table,
+       },
+};
+module_platform_driver(be_spi_boot_driver);
+
+MODULE_DESCRIPTION("Baikal Electronics Spi Flash Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/spi/spi-baikal-dma.c b/drivers/spi/spi-baikal-dma.c
new file mode 100644 (file)
index 0000000..8218874
--- /dev/null
@@ -0,0 +1,470 @@
+/*
+ * Baikal Electronics SPI flash driver.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/platform_data/dma-dw.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/property.h>
+#include "spi-dw.h"
+
+
+
+#define DESC_DATA_SIZE   (4*1024)
+#define RX_BUSY     0
+#define TX_BUSY     1
+
+/* local */
+static int  transfer     (struct dw_spi *dws, struct spi_transfer *xfer);
+static int  channel_get  (struct dw_spi *dws);
+static void channel_free (struct dw_spi *dws);
+static void stop         (struct dw_spi *dws);
+
+/* extern */
+int  dw_spi_baikal_add_host    (struct device *dev, struct dw_spi *dws);
+void dw_spi_baikal_remove_host (struct dw_spi *dws);
+
+static int spi_check_status (struct dw_spi *dws)
+{
+       return (dw_readl(dws, DW_SPI_SR) & (SR_BUSY | SR_RF_NOT_EMPT));
+}
+
+static void spi_wait_status(struct dw_spi *dws)
+{
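+       /* Crude poll budget: roughly 1 ms (1000 x 1 us) per byte of the transfer. */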
+       long int us = dws->len * 1000;
+       while (spi_check_status(dws) && us-- > 0)
+               udelay(1);
+
+       if (spi_check_status(dws))
+               dws->master->cur_msg->status = -EIO;
+}
+
+static void tx_done (void *arg)
+{
+       struct dw_spi *dws = arg;
+       spi_wait_status(dws);
+       clear_bit(TX_BUSY, &dws->dma_chan_busy);
+       if (test_bit(RX_BUSY, &dws->dma_chan_busy))
+               return;
+       channel_free(dws);
+       spi_finalize_current_transfer(dws->master);
+}
+
+static void rx_done (void *arg)
+{
+       struct dw_spi *dws = arg;
+       spi_wait_status(dws);
+
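+       /*
+        * RX is handled in DESC_DATA_SIZE chunks: account for the chunk that
+        * has just completed and keep issuing descriptors until the whole
+        * buffer has been received.
+        */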
+       size_t len = min_t(size_t, dws->len, DESC_DATA_SIZE);
+       dws->len -= len;
+       dws->rx  += len;
+
+       if (!dws->len) {
+               clear_bit(RX_BUSY, &dws->dma_chan_busy);
+               if (test_bit(TX_BUSY, &dws->dma_chan_busy))
+                       return;
+               channel_free(dws);
+               spi_finalize_current_transfer(dws->master);
+       } else {
+               transfer(dws, NULL);  /* queue the next chunk */
+       }
+}
+
+static struct dma_async_tx_descriptor *prepare_tx (
+       struct dw_spi       *dws,
+       struct spi_transfer *xfer)
+{
+       if (!dws->tx)
+               return NULL;
+
+
+       /* slave config */
+       struct dma_slave_config config;
+       memset(&config, 0, sizeof(config));
+       config.direction      = DMA_MEM_TO_DEV;
+       config.device_fc      = false;
+
+       config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       config.src_maxburst   = dws->fifo_len/2;
+       config.src_addr       = dws->tx;
+
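+       /*
+        * dma_addr is the CPU (KSEG-mapped) view of the controller's DR FIFO;
+        * CPHYSADDR() converts it to the physical address the DMA engine
+        * expects (MIPS-specific, relying on how paddr is set up in the MMIO
+        * probe at the bottom of this file).
+        */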
+       config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       config.dst_maxburst   = dws->fifo_len/2;
+       config.dst_addr       = CPHYSADDR(dws->dma_addr);
+
+       dmaengine_slave_config(dws->txchan, &config);
+
+       /* descriptor */
+       struct dma_async_tx_descriptor *desc;
+       desc = dmaengine_prep_slave_single(
+               dws->txchan,                            /* chan */
+               CPHYSADDR(dws->tx),                     /* dws->tx, buf_tx */
+               dws->len,                               /* len */
+               DMA_MEM_TO_DEV,                         /* dir */
+               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);     /* flags */
+       if (!desc)
+               return NULL;
+
+       /* callback */
+       desc->callback = tx_done;
+       desc->callback_param = dws;
+
+       return desc;
+}
+
+static struct dma_async_tx_descriptor *prepare_rx (
+       struct dw_spi       *dws,
+       struct spi_transfer *xfer)
+{
+       if (!dws->rx)
+               return NULL;
+
+       /* slave config */
+       struct dma_slave_config config;
+       memset(&config, 0, sizeof(config));
+       config.direction      = DMA_DEV_TO_MEM;
+       config.device_fc      = false;
+
+       config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       config.src_maxburst   = dws->fifo_len/2;
+       config.src_addr       = CPHYSADDR(dws->dma_addr);
+
+       config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       config.dst_maxburst   = dws->fifo_len/2;
+       config.dst_addr       = dws->rx;
+
+       dmaengine_slave_config(dws->rxchan, &config);
+
+       size_t len = min_t(size_t, dws->len, DESC_DATA_SIZE);
+       spi_enable_chip(dws, 0);
+       dw_writel(dws, DW_SPI_CTRL1, len-1);
+       spi_enable_chip(dws, 1);
+
+       /* descriptor */
+       struct dma_async_tx_descriptor *desc;
+       desc = dmaengine_prep_slave_single(
+               dws->rxchan,                            /* chan */
+               CPHYSADDR(dws->rx),                     /* dws->rx, buf_rx */
+               len,                                    /* len */
+               DMA_DEV_TO_MEM,                         /* dir */
+               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);     /* flags */
+       if (!desc)
+               return NULL;
+
+       /* callback */
+       desc->callback = rx_done;
+       desc->callback_param = dws;
+
+       return desc;
+}
+
+static int init (struct dw_spi *dws)
+{
+       /* clear */
+       dws->rxchan = 0;
+       dws->txchan = 0;
+       dws->master->dma_rx = 0;
+       dws->master->dma_tx = 0;
+       dws->transfer_handler = NULL;
+       clear_bit(TX_BUSY, &dws->dma_chan_busy);
+       clear_bit(RX_BUSY, &dws->dma_chan_busy);
+
+       /* init */
+       dws->dma_inited = 1;
+       return 0;
+}
+
+static void exit (struct dw_spi *dws)
+{
+       stop(dws);
+}
+
+static bool can (
+       struct spi_master *master,
+       struct spi_device *spi,
+       struct spi_transfer *xfer)
+{
+       struct dw_spi *dws = spi_master_get_devdata(master);
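+       /* Use DMA only for transfers at least one FIFO deep; shorter ones fall back to PIO. */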
+       return (dws->dma_inited) && (xfer->len >= dws->fifo_len);
+}
+
+static int setup (struct dw_spi *dws, struct spi_transfer *xfer)
+{
+       /* busy */
+       if (dws->rx)
+               set_bit(RX_BUSY, &dws->dma_chan_busy);
+       if (dws->tx)
+               set_bit(TX_BUSY, &dws->dma_chan_busy);
+
+       /* dma */
+       if (channel_get(dws)) {
+               dws->dma_inited = 0;
+               return -EBUSY;
+       }
+
+       /* spi */
+       /* clear */
+       dw_writel(dws, DW_SPI_DMACR, 0);
+
+       /* MODE */
+       uint32_t tmode;
+       if (dws->rx && dws->tx)
+               tmode = SPI_TMOD_TR;
+       else if (dws->rx)
+               tmode = SPI_TMOD_RO;
+       else
+               tmode = SPI_TMOD_TO;
+
+       /* CTRL0 */
+       uint32_t cr0;
+       cr0 = dw_readl(dws, DW_SPI_CTRL0);
+       cr0 &= ~SPI_TMOD_MASK;
+       cr0 |= (tmode << SPI_TMOD_OFFSET);
+       dw_writel(dws, DW_SPI_CTRL0, cr0);
+
+       /* DMATDLR */
+       dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len/2);
+       dw_writel(dws, DW_SPI_DMARDLR, dws->fifo_len/2 -1);
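+       /*
+        * Half-FIFO watermarks: TX DMA requests fire once the TX FIFO drains to
+        * the threshold, RX requests once it fills past it, matching the
+        * maxburst of fifo_len/2 programmed in the DMA slave configs.
+        */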
+
+       /* DMACR */
+       uint16_t dma_ctrl = 0;
+       if (dws->tx)
+               dma_ctrl |= SPI_DMA_TDMAE;
+       if (dws->rx)
+               dma_ctrl |= SPI_DMA_RDMAE;
+       dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
+
+       return 0;
+}
+
+static int transfer (struct dw_spi *dws, struct spi_transfer *xfer)
+{
+       struct dma_async_tx_descriptor *rxdesc;
+       struct dma_async_tx_descriptor *txdesc;
+
+       /* RX must be set up and issued before TX: once TX starts clocking,
+        * incoming data would overflow the RX FIFO if no RX DMA were ready. */
+       rxdesc = prepare_rx(dws, xfer);
+       if (rxdesc) {
+               dmaengine_submit(rxdesc);
+               dma_async_issue_pending(dws->rxchan);    /* start */
+       }
+
+       txdesc = prepare_tx(dws, xfer);
+       if (txdesc) {
+               dmaengine_submit(txdesc);
+               dma_async_issue_pending(dws->txchan);    /* start */
+       }
+
+       if (!dws->tx && dws->rx)
+               dw_writel(dws, DW_SPI_DR, 0);  /* write dummy data to start read-only mode */
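+       /* Note: SER bit 1 is hard-coded below, so the transfer always drives chip-select 1. */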
+       dw_writel(dws, DW_SPI_SER, 1<<1);  /* start spi */
+
+       return 0;
+}
+
+static void stop (struct dw_spi *dws)
+{
+       if (!dws->dma_inited)
+               return;
+       if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
+               dmaengine_terminate_all(dws->txchan);
+               clear_bit(TX_BUSY, &dws->dma_chan_busy);
+       }
+       if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
+               dmaengine_terminate_all(dws->rxchan);
+               clear_bit(RX_BUSY, &dws->dma_chan_busy);
+       }
+       channel_free(dws);
+       dws->dma_inited = 0;
+}
+
+static struct dw_spi_dma_ops  dma_ops = {
+       .dma_init     = init,
+       .dma_exit     = exit,
+       .can_dma      = can,
+       .dma_setup    = setup,
+       .dma_transfer = transfer,
+       .dma_stop     = stop,
+};
+
+static void channel_free (struct dw_spi *dws)
+{
+       if (dws->txchan)
+               dma_release_channel(dws->txchan);
+       if (dws->rxchan)
+               dma_release_channel(dws->rxchan);
+       dws->master->dma_tx = 0;
+       dws->master->dma_rx = 0;
+       dws->txchan = 0;
+       dws->rxchan = 0;
+}
+
+static int channel_get (struct dw_spi *dws)
+{
+       struct device *dev = &(dws->master->dev);
+
+       if (dws->tx) {
+               dws->txchan = dma_request_slave_channel(dev, "tx");
+               dws->master->dma_tx = dws->txchan;
+               if (!dws->txchan)
+                       goto err;
+       }
+       if (dws->rx) {
+               dws->rxchan = dma_request_slave_channel(dev, "rx");
+               dws->master->dma_rx = dws->rxchan;
+               if (!dws->rxchan)
+                       goto err;
+       }
+       return 0;
+
+err:
+       channel_free(dws);
+       return -EBUSY;
+}
+
+
+/*
+----------------------
+MMIO
+----------------------
+*/
+
+struct dw_spi_mmio_dma {
+       struct dw_spi  dws;
+       struct clk    *clk;
+};
+
+void spi_dma_init (struct dw_spi *dws)
+{
+       dws->dma_ops = &dma_ops;
+}
+
+static int probe(struct platform_device *pdev)
+{
+       struct dw_spi_mmio_dma *dwsmmio;
+       struct dw_spi *dws;
+       struct resource *mem;
+       int ret;
+
+       dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio_dma), GFP_KERNEL);
+       if (!dwsmmio) {
+               return -ENOMEM;
+       }
+
+       dws = &dwsmmio->dws;
+
+       /* Get basic io resource and map it */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem) {
+               dev_err(&pdev->dev, "no mem resource?\n");
+               return -EINVAL;
+       }
+
+       dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(dws->regs)) {
+               dev_err(&pdev->dev, "SPI region map failed\n");
+               return PTR_ERR(dws->regs);
+       }
+       dws->paddr = (unsigned long)dws->regs;
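+       /*
+        * Note: paddr is left as the ioremapped (KSEG-mapped) address rather
+        * than mem->start; dw_spi_baikal_add_host() derives dma_addr from it
+        * and the DMA code recovers the physical FIFO address with CPHYSADDR().
+        */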
+
+       dws->irq = platform_get_irq(pdev, 0);
+       if (dws->irq < 0) {
+               dev_err(&pdev->dev, "no irq resource?\n");
+               return dws->irq; /* -ENXIO */
+       }
+
+       dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dwsmmio->clk)) {
+               return PTR_ERR(dwsmmio->clk);
+       }
+       ret = clk_prepare_enable(dwsmmio->clk);
+       if (ret) {
+               return ret;
+       }
+
+       dws->bus_num = of_alias_get_id(pdev->dev.of_node, "ssi");
+       dws->max_freq = clk_get_rate(dwsmmio->clk);
+       device_property_read_u16(&pdev->dev, "num-cs", &dws->num_cs);
+
+       if (pdev->dev.of_node) {
+               int i;
+               for (i = 0; i < dws->num_cs; i++) {
+                       int cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", i);
+
+                       if (cs_gpio == -EPROBE_DEFER) {
+                               ret = cs_gpio;
+                               goto out;
+                       }
+
+                       if (gpio_is_valid(cs_gpio)) {
+                               ret = devm_gpio_request(&pdev->dev, cs_gpio, dev_name(&pdev->dev));
+                               if (ret)
+                                       goto out;
+                       }
+               }
+       }
+
+       spi_dma_init(dws);
+
+       ret = dw_spi_baikal_add_host(&pdev->dev, dws);
+       if (ret)
+               goto out;
+
+       platform_set_drvdata(pdev, dwsmmio);
+       return 0;
+
+out:
+       clk_disable_unprepare(dwsmmio->clk);
+       return ret;
+}
+
+static int remove(struct platform_device *pdev)
+{
+       struct dw_spi_mmio_dma *dwsmmio = platform_get_drvdata(pdev);
+
+       dw_spi_baikal_remove_host(&dwsmmio->dws);
+       clk_disable_unprepare(dwsmmio->clk);
+
+       return 0;
+}
+
+static const struct of_device_id be_spi_dma_table[] = {
+       { .compatible = "be,dw-spi-dma", },
+       { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, be_spi_dma_table);
+
+static struct platform_driver be_spi_dma_driver = {
+       .probe      = probe,
+       .remove     = remove,
+       .driver     = {
+               .name   = "be,dw-spi-dma",
+               .of_match_table = be_spi_dma_table,
+       },
+};
+module_platform_driver(be_spi_dma_driver);
+
+MODULE_DESCRIPTION("Baikal Electronics Spi Flash Driver with DMA support");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/spi/spi-baikal.c b/drivers/spi/spi-baikal.c
new file mode 100644 (file)
index 0000000..35dbf6c
--- /dev/null
@@ -0,0 +1,539 @@
+/*
+ * Baikal Electronics Spi Flash Driver.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/acpi.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include "spi-dw.h"
+
+
+/* Slave spi_dev related */
+struct chip_data {
+       u8 tmode;       /* TR/TO/RO/EEPROM */
+       u8 type;        /* SPI/SSP/MicroWire */
+
+       u8 poll_mode;   /* 1 means use poll mode */
+
+       u16 clk_div;    /* baud rate divider */
+       u32 speed_hz;   /* baud rate */
+       void (*cs_control)(u32 command);
+};
+
+static void dw_set_cs (struct spi_device *spi, bool enable)
+{
+       struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
+       struct chip_data *chip = spi_get_ctldata(spi);
+
+       if (chip && chip->cs_control)
+               chip->cs_control(enable);
+
+       if (enable)
+               dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
+       else if (dws->cs_override)
+               dw_writel(dws, DW_SPI_SER, 0);
+}
+
+/* Return the max entries we can fill into tx fifo */
+static inline u32 tx_max (struct dw_spi *dws)
+{
+       u32 tx_left, tx_room, rxtx_gap;
+
+       tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
+       tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
+
+       /*
+        * Another concern is about the tx/rx mismatch, we
+        * though to use (dws->fifo_len - rxflr - txflr) as
+        * one maximum value for tx, but it doesn't cover the
+        * data which is out of tx/rx fifo and inside the
+        * shift registers. So a control from sw point of
+        * view is taken.
+        */
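+       /*
+        * Worked example, assuming a 64-entry FIFO: with 100 frames still to
+        * send and 160 still to receive, 60 frames are already in flight, so at
+        * most 64 - 60 = 4 new frames may be queued without risking RX overflow.
+        */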
+       rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx)) / dws->n_bytes;
+
+       return min3(tx_left, tx_room, (u32)(dws->fifo_len - rxtx_gap));
+}
+
+/* Return the max entries we should read out of rx fifo */
+static inline u32 rx_max (struct dw_spi *dws)
+{
+       u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
+
+       return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
+}
+
+static void dw_writer (struct dw_spi *dws)
+{
+       u32 max = tx_max(dws);
+       u16 txw = 0;
+
+       while (max--) {
+               /* Set the tx word if the transfer's original "tx" is not null */
+               if (dws->tx_end - dws->len) {
+                       if (dws->n_bytes == 1)
+                               txw = *(u8 *)(dws->tx);
+                       else
+                               txw = *(u16 *)(dws->tx);
+               }
+               dw_write_io_reg(dws, DW_SPI_DR, txw);
+               dws->tx += dws->n_bytes;
+       }
+
+       /* restart the transmission if it has stopped */
+       dw_writel(dws, DW_SPI_SER, 1);  /* FIXME: use BIT(spi->chip_select) */
+}
+
+static void dw_reader (struct dw_spi *dws)
+{
+       u32 max = rx_max(dws);
+       u16 rxw;
+
+       while (max--) {
+               rxw = dw_read_io_reg(dws, DW_SPI_DR);
+               /* Care rx only if the transfer's original "rx" is not null */
+               if (dws->rx_end - dws->len) {
+                       if (dws->n_bytes == 1)
+                               *(u8 *)(dws->rx) = rxw;
+                       else
+                               *(u16 *)(dws->rx) = rxw;
+               }
+               dws->rx += dws->n_bytes;
+       }
+}
+
+static void int_error_stop (struct dw_spi *dws, const char *msg)
+{
+       spi_reset_chip(dws);
+
+       dev_err(&dws->master->dev, "%s\n", msg);
+       dws->master->cur_msg->status = -EIO;
+       spi_finalize_current_transfer(dws->master);
+}
+
+static irqreturn_t interrupt_transfer (struct dw_spi *dws)
+{
+       u16 irq_status = dw_readl(dws, DW_SPI_ISR);
+
+       /* Error handling */
+       if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
+               dw_readl(dws, DW_SPI_ICR);
+               int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
+               return IRQ_HANDLED;
+       }
+
+       dw_reader(dws);
+       if (dws->rx_end == dws->rx) {
+               spi_mask_intr(dws, SPI_INT_TXEI);
+               spi_finalize_current_transfer(dws->master);
+               return IRQ_HANDLED;
+       }
+       if (irq_status & SPI_INT_TXEI) {
+               spi_mask_intr(dws, SPI_INT_TXEI);
+               dw_writer(dws);
+               /* Enable TX irq always, it will be disabled when RX finished */
+               spi_umask_intr(dws, SPI_INT_TXEI);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t dw_spi_irq (int irq, void *dev_id)
+{
+       struct spi_controller *master = dev_id;
+       struct dw_spi *dws = spi_controller_get_devdata(master);
+       u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;
+
+       if (!irq_status)
+               return IRQ_NONE;
+
+       if (!master->cur_msg) {
+               spi_mask_intr(dws, SPI_INT_TXEI);
+               return IRQ_HANDLED;
+       }
+
+       return dws->transfer_handler(dws);
+}
+
+/* Must be called inside pump_transfers() */
+static int poll_transfer (struct dw_spi *dws)
+{
+       do {
+               dw_writer(dws);
+               dw_reader(dws);
+               cpu_relax();
+       } while (dws->rx_end > dws->rx);
+
+       return 0;
+}
+
+static int dw_spi_transfer_one (struct spi_controller *master,
+               struct spi_device *spi, struct spi_transfer *transfer)
+{
+       struct dw_spi *dws = spi_controller_get_devdata(master);
+       struct chip_data *chip = spi_get_ctldata(spi);
+       u8 imask = 0;
+       u16 txlevel = 0;
+       u32 cr0;
+       int ret;
+
+       dws->dma_mapped = 0;
+       dws->tx = (void *)transfer->tx_buf;
+       dws->tx_end = dws->tx + transfer->len;
+       dws->rx = transfer->rx_buf;
+       dws->rx_end = dws->rx + transfer->len;
+       dws->len = transfer->len;
+
+       spi_enable_chip(dws, 0);
+
+       /* Handle per transfer options for bpw and speed */
+       if (transfer->speed_hz != dws->current_freq) {
+               if (transfer->speed_hz != chip->speed_hz) {
+                       /* clk_div doesn't support odd number */
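+                       /*
+                        * e.g. max_freq = 50 MHz, requested 4 MHz: DIV_ROUND_UP
+                        * gives 13, (13 + 1) & 0xfffe = 14, so the resulting
+                        * SCLK is 50/14 ~= 3.57 MHz, never above the request.
+                        */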
+                       chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
+                       chip->speed_hz = transfer->speed_hz;
+               }
+               dws->current_freq = transfer->speed_hz;
+               spi_set_clk(dws, chip->clk_div);
+       }
+
+       dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
+       dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
+
+       /* Default SPI mode is SCPOL = 0, SCPH = 0 */
+       cr0 = (transfer->bits_per_word - 1)
+               | (chip->type << SPI_FRF_OFFSET)
+               | (((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET)
+               | (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET)
+               | (chip->tmode << SPI_TMOD_OFFSET);
+
+       /*
+        * Adjust transfer mode if necessary. Requires platform dependent
+        * chipselect mechanism.
+        */
+       if (chip->cs_control) {
+               if (dws->rx && dws->tx)
+                       chip->tmode = SPI_TMOD_TR;
+               else if (dws->rx)
+                       chip->tmode = SPI_TMOD_RO;
+               else
+                       chip->tmode = SPI_TMOD_TO;
+               cr0 &= ~SPI_TMOD_MASK;
+               cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
+       }
+       dw_writel(dws, DW_SPI_CTRL0, cr0);
+
+       /* Check if current transfer is a DMA transaction */
+       if (master->can_dma && master->can_dma(master, spi, transfer))
+               dws->dma_mapped = master->cur_msg_mapped;
+
+       /* For poll mode just disable all interrupts */
+       spi_mask_intr(dws, 0xff);
+
+       /*
+        * Interrupt mode:
+        * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
+        */
+       if (dws->dma_mapped) {
+               ret = dws->dma_ops->dma_setup(dws, transfer);
+               if (ret < 0) {
+                       spi_enable_chip(dws, 1);
+                       return ret;
+               }
+       } else if (!chip->poll_mode) {
+               txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
+               dw_writel(dws, DW_SPI_TXFLTR, txlevel);
+
+               /* Set the interrupt mask */
+               imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
+               spi_umask_intr(dws, imask);
+               dws->transfer_handler = interrupt_transfer;
+       }
+       spi_enable_chip(dws, 1);
+       if (dws->dma_mapped) {
+               ret = dws->dma_ops->dma_transfer(dws, transfer);
+               if (ret < 0)
+                       return ret;
+       }
+       if (chip->poll_mode)
+               return poll_transfer(dws);
+       return 1;
+}
+
+static void dw_spi_handle_err (struct spi_controller *master,
+               struct spi_message *msg)
+{
+       struct dw_spi *dws = spi_controller_get_devdata(master);
+       if (dws->dma_mapped)
+               dws->dma_ops->dma_stop(dws);
+       spi_reset_chip(dws);
+}
+
+/* This may be called twice for each spi dev */
+static int dw_spi_setup (struct spi_device *spi)
+{
+       struct dw_spi_chip *chip_info = NULL;
+       struct chip_data *chip;
+
+       /* Only alloc on first setup */
+       chip = spi_get_ctldata(spi);
+       if (!chip) {
+               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+               if (!chip)
+                       return -ENOMEM;
+               spi_set_ctldata(spi, chip);
+       }
+
+       /*
+        * Protocol drivers may change the chip settings, so...
+        * if chip_info exists, use it
+        */
+       chip_info = spi->controller_data;
+
+       /* chip_info doesn't always exist */
+       if (chip_info) {
+               if (chip_info->cs_control)
+                       chip->cs_control = chip_info->cs_control;
+               chip->poll_mode = chip_info->poll_mode;
+               chip->type = chip_info->type;
+       }
+       chip->tmode = SPI_TMOD_TR;
+       return 0;
+}
+
+static void dw_spi_cleanup (struct spi_device *spi)
+{
+       struct chip_data *chip = spi_get_ctldata(spi);
+       kfree(chip);
+       spi_set_ctldata(spi, NULL);
+}
+
+/* Restart the controller, disable all interrupts, clean rx fifo */
+static void spi_hw_init (struct device *dev, struct dw_spi *dws)
+{
+       spi_reset_chip(dws);
+
+       /*
+        * Try to detect the FIFO depth if not set by interface driver,
+        * the depth could be from 2 to 256 from HW spec
+        */
+       if (!dws->fifo_len) {
+               u32 fifo;
+               for (fifo = 1; fifo < 256; fifo++) {
+                       dw_writel(dws, DW_SPI_TXFLTR, fifo);
+                       if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
+                               break;
+               }
+               dw_writel(dws, DW_SPI_TXFLTR, 0);
+               dws->fifo_len = (fifo == 1) ? 0 : fifo;
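+               /*
+                * Vendor-specific quirk: the detected value is scaled down by a
+                * factor of four for this controller before it is used as the
+                * FIFO depth throughout the driver.
+                */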
+               dws->fifo_len /= 4;
+               dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
+       }
+
+       /* enable HW fixup for explicit CS deselect for Amazon's alpine chip */
+       if (dws->cs_override)
+               dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
+}
+
+int dw_spi_baikal_add_host (struct device *dev, struct dw_spi *dws)
+{
+       struct spi_controller *master;
+       int ret;
+
+       BUG_ON(dws == NULL);
+
+       master = spi_alloc_master(dev, 0);
+       if (!master)
+               return -ENOMEM;
+
+       dws->master = master;
+       dws->type = SSI_MOTO_SPI;
+       dws->dma_inited = 0;
+       dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+       spi_controller_set_devdata(master, dws);
+
+       ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
+                         master);
+       if (ret < 0) {
+               dev_err(dev, "can not get IRQ\n");
+               goto err_free_master;
+       }
+
+       master->use_gpio_descriptors = true;
+       master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
+       master->bits_per_word_mask =  SPI_BPW_RANGE_MASK(4, 16);
+       master->bus_num = dws->bus_num;
+       master->num_chipselect = dws->num_cs;
+       master->setup = dw_spi_setup;
+       master->cleanup = dw_spi_cleanup;
+       master->transfer_one = dw_spi_transfer_one;
+       master->handle_err = dw_spi_handle_err;
+       master->max_speed_hz = dws->max_freq;
+       master->dev.of_node = dev->of_node;
+       master->dev.fwnode = dev->fwnode;
+
+       if (dws->set_cs)
+               master->set_cs = dws->set_cs;
+
+       /* Basic HW init */
+       spi_hw_init(dev, dws);
+
+       if (dws->dma_ops && dws->dma_ops->dma_init) {
+               ret = dws->dma_ops->dma_init(dws);
+               if (ret) {
+                       dev_warn(dev, "DMA init failed\n");
+                       dws->dma_inited = 0;
+               } else
+                       master->can_dma = dws->dma_ops->can_dma;
+       }
+
+       ret = devm_spi_register_controller(dev, master);
+       if (ret) {
+               dev_err(&master->dev, "problem registering spi master\n");
+               goto err_dma_exit;
+       }
+       return 0;
+
+err_dma_exit:
+       if (dws->dma_ops && dws->dma_ops->dma_exit)
+               dws->dma_ops->dma_exit(dws);
+       spi_enable_chip(dws, 0);
+       free_irq(dws->irq, master);
+err_free_master:
+       spi_controller_put(master);
+       return ret;
+}
+
+void dw_spi_baikal_remove_host (struct dw_spi *dws)
+{
+       if (dws->dma_ops && dws->dma_ops->dma_exit)
+               dws->dma_ops->dma_exit(dws);
+       spi_shutdown_chip(dws);
+       free_irq(dws->irq, dws->master);
+}
+
+
+/*
+----------------------
+MMIO
+----------------------
+*/
+
+struct dw_spi_mmio {
+       struct dw_spi  dws;
+       struct clk     *clk;
+       void           *priv;
+};
+
+static int probe (struct platform_device *pdev)
+{
+       int (*init_func)(struct platform_device *pdev,
+                        struct dw_spi_mmio *dwsmmio);
+       struct dw_spi_mmio *dwsmmio;
+       struct dw_spi *dws;
+       struct resource *mem;
+       int ret;
+       int num_cs;
+
+       dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio), GFP_KERNEL);
+       if (!dwsmmio)
+               return -ENOMEM;
+
+       dws = &dwsmmio->dws;
+
+       /* Get basic io resource and map it */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(dws->regs)) {
+               dev_err(&pdev->dev, "SPI region map failed\n");
+               return PTR_ERR(dws->regs);
+       }
+
+       dws->irq = platform_get_irq(pdev, 0);
+       if (dws->irq < 0) {
+               dev_err(&pdev->dev, "no irq resource?\n");
+               return dws->irq; /* -ENXIO */
+       }
+
+       dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dwsmmio->clk))
+               return PTR_ERR(dwsmmio->clk);
+       ret = clk_prepare_enable(dwsmmio->clk);
+       if (ret)
+               return ret;
+
+       dws->bus_num = pdev->id;
+       dws->max_freq = clk_get_rate(dwsmmio->clk);
+       num_cs = 4;
+       device_property_read_u32(&pdev->dev, "num-cs", &num_cs);
+       dws->num_cs = num_cs;
+
+       init_func = device_get_match_data(&pdev->dev);
+       if (init_func) {
+               ret = init_func(pdev, dwsmmio);
+               if (ret)
+                       goto out;
+       }
+
+       ret = dw_spi_baikal_add_host(&pdev->dev, dws);
+       if (ret)
+               goto out;
+
+       pdev->dev.dma_mask = NULL;
+
+       platform_set_drvdata(pdev, dwsmmio);
+       return 0;
+out:
+       clk_disable_unprepare(dwsmmio->clk);
+       return ret;
+}
+
+static int remove (struct platform_device *pdev)
+{
+       struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
+       dw_spi_baikal_remove_host(&dwsmmio->dws);
+       clk_disable_unprepare(dwsmmio->clk);
+       return 0;
+}
+
+static const struct of_device_id  be_spi_table[] = {
+       { .compatible = "be,dw-spi", },
+       { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, be_spi_table);
+
+static struct platform_driver   be_spi_driver = {
+       .probe      = probe,
+       .remove     = remove,
+       .driver     = {
+               .name   = "be,dw-spi",
+               .of_match_table = be_spi_table,
+       },
+};
+module_platform_driver(be_spi_driver);
+
+MODULE_DESCRIPTION("Baikal Electronics Spi Flash Driver");
+MODULE_LICENSE("Dual BSD/GPL");
index 556a876c7896251811288f99968f8636ecf310b6..f156818773403b57a6c4f80b1d222f8e4cfce8da 100644 (file)
@@ -138,4 +138,13 @@ config USB_DWC3_QCOM
          for peripheral mode support.
          Say 'Y' or 'M' if you have one such device.
 
+config USB_DWC3_BAIKAL
+       tristate "Baikal Electronics Platforms"
+       depends on MIPS_BAIKAL && OF
+       default USB_DWC3
+       help
+         Baikal Electronics SoCs with one DesignWare Core USB3 IP
+         inside.
+         Say 'Y' or 'M' if you have one such device.
+
 endif
index ae86da0dc5bd1bedd44edbb5a8db3df1907eda70..bc8cd93ad7f03d9758d271cc48f1d72af5a16dd1 100644 (file)
@@ -50,4 +50,5 @@ obj-$(CONFIG_USB_DWC3_KEYSTONE)               += dwc3-keystone.o
 obj-$(CONFIG_USB_DWC3_MESON_G12A)      += dwc3-meson-g12a.o
 obj-$(CONFIG_USB_DWC3_OF_SIMPLE)       += dwc3-of-simple.o
 obj-$(CONFIG_USB_DWC3_ST)              += dwc3-st.o
+obj-$(CONFIG_USB_DWC3_BAIKAL)          += dwc3-baikal.o
 obj-$(CONFIG_USB_DWC3_QCOM)            += dwc3-qcom.o
diff --git a/drivers/usb/dwc3/dwc3-baikal.c b/drivers/usb/dwc3/dwc3-baikal.c
new file mode 100644 (file)
index 0000000..2a75b46
--- /dev/null
@@ -0,0 +1,124 @@
+/**
+ * dwc3-baikal.c - Baikal Electronics SoCs Specific Glue layer
+ *
+ * Copyright (C) 2015 Baikal Electronics JSC - http://www.baikalelectronics.ru
+ *
+ * Author: Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+
+struct dwc3_baikal {
+       struct device   *dev;
+       struct clk      *clk;
+};
+
+static int be_dwc3_probe(struct platform_device *pdev)
+{
+       struct device           *dev = &pdev->dev;
+       struct device_node      *node = pdev->dev.of_node;
+       struct dwc3_baikal      *dwc;
+       int                     ret;
+
+       dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+       if (!dwc)
+               return -ENOMEM;
+
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, dwc);
+       dwc->dev = dev;
+
+       dwc->clk = devm_clk_get(dwc->dev, "usb");
+       if (IS_ERR(dwc->clk)) {
+               dev_err(dev, "no interface clk specified\n");
+               return PTR_ERR(dwc->clk);
+       }
+
+       ret = clk_prepare_enable(dwc->clk);
+       if (ret < 0) {
+               dev_err(dwc->dev, "unable to enable usb clock\n");
+               return ret;
+       }
+
+       if (node) {
+               ret = of_platform_populate(node, NULL, NULL, dev);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to create dwc3 core\n");
+                       goto __error;
+               }
+       } else {
+               dev_err(dev, "no device node, failed to add dwc3 core\n");
+               ret = -ENODEV;
+               goto __error;
+       }
+
+       return 0;
+
+__error:
+       clk_disable_unprepare(dwc->clk);
+
+       return ret;
+}
+
+static int be_dwc3_remove_core(struct device *dev, void *c)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+
+       platform_device_unregister(pdev);
+
+       return 0;
+}
+
+static int be_dwc3_remove(struct platform_device *pdev)
+{
+       struct dwc3_baikal *dwc = platform_get_drvdata(pdev);
+
+       device_for_each_child(&pdev->dev, NULL, be_dwc3_remove_core);
+       clk_disable_unprepare(dwc->clk);
+
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static const struct of_device_id be_dwc3_of_match[] = {
+       { .compatible = "be,baikal-dwc3", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, be_dwc3_of_match);
+
+static struct platform_driver be_dwc3_driver = {
+       .probe          = be_dwc3_probe,
+       .remove         = be_dwc3_remove,
+       .driver         = {
+               .name   = "baikal-dwc3",
+               .of_match_table = be_dwc3_of_match,
+       },
+};
+
+module_platform_driver(be_dwc3_driver);
+
+MODULE_ALIAS("platform:baikal-dwc3");
+MODULE_AUTHOR("Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 Baikal SoCs Glue Layer");
index fef7c61f5555aea07b4485b9c76ffa2628dce29b..cd578843277e5c1e62b1150b498e8ac057f073c3 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright 2010-2011 Picochip Ltd., Jamie Iles
- * http://www.picochip.com
+ * https://www.picochip.com
  *
  * This file implements a driver for the Synopsys DesignWare watchdog device
  * in the many subsystems. The watchdog has 16 different timeout periods
 
 #include <linux/bitops.h>
 #include <linux/clk.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/limits.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/of.h>
-#include <linux/pm.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/reset.h>
 #include <linux/watchdog.h>
 
 #define WDOG_CURRENT_COUNT_REG_OFFSET      0x08
 #define WDOG_COUNTER_RESTART_REG_OFFSET     0x0c
 #define WDOG_COUNTER_RESTART_KICK_VALUE            0x76
-
-/* The maximum TOP (timeout period) value that can be set in the watchdog. */
-#define DW_WDT_MAX_TOP         15
+#define WDOG_INTERRUPT_STATUS_REG_OFFSET    0x10
+#define WDOG_INTERRUPT_CLEAR_REG_OFFSET     0x14
+#define WDOG_COMP_PARAMS_5_REG_OFFSET       0xe4
+#define WDOG_COMP_PARAMS_4_REG_OFFSET       0xe8
+#define WDOG_COMP_PARAMS_3_REG_OFFSET       0xec
+#define WDOG_COMP_PARAMS_2_REG_OFFSET       0xf0
+#define WDOG_COMP_PARAMS_1_REG_OFFSET       0xf4
+#define WDOG_COMP_PARAMS_1_USE_FIX_TOP      BIT(6)
+#define WDOG_COMP_VERSION_REG_OFFSET        0xf8
+#define WDOG_COMP_TYPE_REG_OFFSET           0xfc
+
+/* There are sixteen TOPs (timeout periods) that can be set in the watchdog. */
+#define DW_WDT_NUM_TOPS                16
+#define DW_WDT_FIX_TOP(_idx)   (1U << (16 + _idx))
 
 #define DW_WDT_DEFAULT_SECONDS 30
 
+static const u32 dw_wdt_fix_tops[DW_WDT_NUM_TOPS] = {
+       DW_WDT_FIX_TOP(0), DW_WDT_FIX_TOP(1), DW_WDT_FIX_TOP(2),
+       DW_WDT_FIX_TOP(3), DW_WDT_FIX_TOP(4), DW_WDT_FIX_TOP(5),
+       DW_WDT_FIX_TOP(6), DW_WDT_FIX_TOP(7), DW_WDT_FIX_TOP(8),
+       DW_WDT_FIX_TOP(9), DW_WDT_FIX_TOP(10), DW_WDT_FIX_TOP(11),
+       DW_WDT_FIX_TOP(12), DW_WDT_FIX_TOP(13), DW_WDT_FIX_TOP(14),
+       DW_WDT_FIX_TOP(15)
+};
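+/*
+ * With these fixed TOPs and, say, a 25 MHz watchdog clock the achievable
+ * timeouts span roughly 2.6 ms (2^16 cycles) up to about 86 s (2^31 cycles).
+ */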
+
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
                 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+enum dw_wdt_rmod {
+       DW_WDT_RMOD_RESET = 1,
+       DW_WDT_RMOD_IRQ = 2
+};
+
+struct dw_wdt_timeout {
+       u32 top_val;
+       unsigned int sec;
+       unsigned int msec;
+};
+
 struct dw_wdt {
        void __iomem            *regs;
        struct clk              *clk;
+       struct clk              *pclk;
        unsigned long           rate;
+       enum dw_wdt_rmod        rmod;
+       struct dw_wdt_timeout   timeouts[DW_WDT_NUM_TOPS];
        struct watchdog_device  wdd;
        struct reset_control    *rst;
        /* Save/restore */
        u32                     control;
        u32                     timeout;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry           *dbgfs_dir;
+#endif
 };
 
 #define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd)
@@ -64,20 +105,84 @@ static inline int dw_wdt_is_enabled(struct dw_wdt *dw_wdt)
                WDOG_CONTROL_REG_WDT_EN_MASK;
 }
 
-static inline int dw_wdt_top_in_seconds(struct dw_wdt *dw_wdt, unsigned top)
+static void dw_wdt_update_mode(struct dw_wdt *dw_wdt, enum dw_wdt_rmod rmod)
+{
+       u32 val;
+
+       val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+       if (rmod == DW_WDT_RMOD_IRQ)
+               val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
+       else
+               val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
+       writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+
+       dw_wdt->rmod = rmod;
+}
+
+static unsigned int dw_wdt_find_best_top(struct dw_wdt *dw_wdt,
+                                        unsigned int timeout, u32 *top_val)
+{
+       int idx;
+
+       /*
+        * Find a TOP with timeout greater or equal to the requested number.
+        * Note we'll select a TOP with maximum timeout if the requested
+        * timeout couldn't be reached.
+        */
+       for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+               if (dw_wdt->timeouts[idx].sec >= timeout)
+                       break;
+       }
+
+       if (idx == DW_WDT_NUM_TOPS)
+               --idx;
+
+       *top_val = dw_wdt->timeouts[idx].top_val;
+
+       return dw_wdt->timeouts[idx].sec;
+}
+
+static unsigned int dw_wdt_get_min_timeout(struct dw_wdt *dw_wdt)
 {
+       int idx;
+
        /*
-        * There are 16 possible timeout values in 0..15 where the number of
-        * cycles is 2 ^ (16 + i) and the watchdog counts down.
+        * We'll find a timeout greater or equal to one second anyway because
+        * the driver probe would have failed if there was none.
         */
-       return (1U << (16 + top)) / dw_wdt->rate;
+       for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+               if (dw_wdt->timeouts[idx].sec)
+                       break;
+       }
+
+       return dw_wdt->timeouts[idx].sec;
 }
 
-static int dw_wdt_get_top(struct dw_wdt *dw_wdt)
+static unsigned int dw_wdt_get_max_timeout_ms(struct dw_wdt *dw_wdt)
 {
-       int top = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+       struct dw_wdt_timeout *timeout = &dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1];
+       u64 msec;
+
+       msec = (u64)timeout->sec * MSEC_PER_SEC + timeout->msec;
 
-       return dw_wdt_top_in_seconds(dw_wdt, top);
+       return msec < UINT_MAX ? msec : UINT_MAX;
+}
+
+static unsigned int dw_wdt_get_timeout(struct dw_wdt *dw_wdt)
+{
+       int top_val = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+       int idx;
+
+       for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+               if (dw_wdt->timeouts[idx].top_val == top_val)
+                       break;
+       }
+
+       /*
+        * In IRQ mode due to the two stages counter, the actual timeout is
+        * twice greater than the TOP setting.
+        */
+       return dw_wdt->timeouts[idx].sec * dw_wdt->rmod;
 }
 
 static int dw_wdt_ping(struct watchdog_device *wdd)
@@ -93,17 +198,23 @@ static int dw_wdt_ping(struct watchdog_device *wdd)
 static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
 {
        struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
-       int i, top_val = DW_WDT_MAX_TOP;
+       unsigned int timeout;
+       u32 top_val;
 
        /*
-        * Iterate over the timeout values until we find the closest match. We
-        * always look for >=.
+        * Note IRQ mode being enabled means having a non-zero pre-timeout
+        * setup. In this case we try to find a TOP as close to the half of the
+        * requested timeout as possible since DW Watchdog IRQ mode is designed
+        * in two stages way - first timeout rises the pre-timeout interrupt,
+        * second timeout performs the system reset. So basically the effective
+        * watchdog-caused reset happens after two watchdog TOPs elapsed.
         */
-       for (i = 0; i <= DW_WDT_MAX_TOP; ++i)
-               if (dw_wdt_top_in_seconds(dw_wdt, i) >= top_s) {
-                       top_val = i;
-                       break;
-               }
+       timeout = dw_wdt_find_best_top(dw_wdt, DIV_ROUND_UP(top_s, dw_wdt->rmod),
+                                      &top_val);
+       if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
+               wdd->pretimeout = timeout;
+       else
+               wdd->pretimeout = 0;
 
        /*
         * Set the new value in the watchdog.  Some versions of dw_wdt
@@ -114,7 +225,34 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
        writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
               dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
 
-       wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val);
+       /* Kick new TOP value into the watchdog counter if activated. */
+       if (watchdog_active(wdd))
+               dw_wdt_ping(wdd);
+
+       /*
+        * In case users set bigger timeout value than HW can support,
+        * kernel(watchdog_dev.c) helps to feed watchdog before
+        * wdd->max_hw_heartbeat_ms
+        */
+       if (top_s * 1000 <= wdd->max_hw_heartbeat_ms)
+               wdd->timeout = timeout * dw_wdt->rmod;
+       else
+               wdd->timeout = top_s;
+
+       return 0;
+}
+
+static int dw_wdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
+{
+       struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+
+       /*
+        * We ignore actual value of the timeout passed from user-space
+        * using it as a flag whether the pretimeout functionality is intended
+        * to be activated.
+        */
+       dw_wdt_update_mode(dw_wdt, req ? DW_WDT_RMOD_IRQ : DW_WDT_RMOD_RESET);
+       dw_wdt_set_timeout(wdd, wdd->timeout);
 
        return 0;
 }
@@ -123,8 +261,11 @@ static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
 {
        u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
 
-       /* Disable interrupt mode; always perform system reset. */
-       val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
+       /* Disable/enable interrupt mode depending on the RMOD flag. */
+       if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
+               val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
+       else
+               val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
        /* Enable watchdog. */
        val |= WDOG_CONTROL_REG_WDT_EN_MASK;
        writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
@@ -135,6 +276,7 @@ static int dw_wdt_start(struct watchdog_device *wdd)
        struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
 
        dw_wdt_set_timeout(wdd, wdd->timeout);
+       dw_wdt_ping(&dw_wdt->wdd);
        dw_wdt_arm_system_reset(dw_wdt);
 
        return 0;
@@ -161,6 +303,7 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
        struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
 
        writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+       dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
        if (dw_wdt_is_enabled(dw_wdt))
                writel(WDOG_COUNTER_RESTART_KICK_VALUE,
                       dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
@@ -176,9 +319,19 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
 static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd)
 {
        struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+       unsigned int sec;
+       u32 val;
 
-       return readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
-               dw_wdt->rate;
+       val = readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET);
+       sec = val / dw_wdt->rate;
+
+       if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) {
+               val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
+               if (!val)
+                       sec += wdd->pretimeout;
+       }
+
+       return sec;
 }
 
 static const struct watchdog_info dw_wdt_ident = {
@@ -187,16 +340,41 @@ static const struct watchdog_info dw_wdt_ident = {
        .identity       = "Synopsys DesignWare Watchdog",
 };
 
+static const struct watchdog_info dw_wdt_pt_ident = {
+       .options        = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+                         WDIOF_PRETIMEOUT | WDIOF_MAGICCLOSE,
+       .identity       = "Synopsys DesignWare Watchdog",
+};
+
 static const struct watchdog_ops dw_wdt_ops = {
        .owner          = THIS_MODULE,
        .start          = dw_wdt_start,
        .stop           = dw_wdt_stop,
        .ping           = dw_wdt_ping,
        .set_timeout    = dw_wdt_set_timeout,
+       .set_pretimeout = dw_wdt_set_pretimeout,
        .get_timeleft   = dw_wdt_get_timeleft,
        .restart        = dw_wdt_restart,
 };
 
+static irqreturn_t dw_wdt_irq(int irq, void *devid)
+{
+       struct dw_wdt *dw_wdt = devid;
+       u32 val;
+
+       /*
+        * We don't clear the IRQ status. It's supposed to be done by the
+        * following ping operations.
+        */
+       val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
+       if (!val)
+               return IRQ_NONE;
+
+       watchdog_notify_pretimeout(&dw_wdt->wdd);
+
+       return IRQ_HANDLED;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int dw_wdt_suspend(struct device *dev)
 {
@@ -205,6 +383,7 @@ static int dw_wdt_suspend(struct device *dev)
        dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
        dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
 
+       clk_disable_unprepare(dw_wdt->pclk);
        clk_disable_unprepare(dw_wdt->clk);
 
        return 0;
@@ -218,6 +397,12 @@ static int dw_wdt_resume(struct device *dev)
        if (err)
                return err;
 
+       err = clk_prepare_enable(dw_wdt->pclk);
+       if (err) {
+               clk_disable_unprepare(dw_wdt->clk);
+               return err;
+       }
+
        writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
        writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
 
@@ -229,6 +414,139 @@ static int dw_wdt_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
 
+/*
+ * In case if DW WDT IP core is synthesized with fixed TOP feature disabled the
+ * TOPs array can be arbitrary ordered with nearly any sixteen uint numbers
+ * depending on the system engineer imagination. The next method handles the
+ * passed TOPs array to pre-calculate the effective timeouts and to sort the
+ * TOP items out in the ascending order with respect to the timeouts.
+ */
+
+static void dw_wdt_handle_tops(struct dw_wdt *dw_wdt, const u32 *tops)
+{
+       struct dw_wdt_timeout tout, *dst;
+       int val, tidx;
+       u64 msec;
+
+       /*
+        * We walk over the passed TOPs array and calculate corresponding
+        * timeouts in seconds and milliseconds. The milliseconds granularity
+        * is needed to distinguish the TOPs with very close timeouts and to
+        * set the watchdog max heartbeat setting further.
+        */
+       for (val = 0; val < DW_WDT_NUM_TOPS; ++val) {
+               tout.top_val = val;
+               tout.sec = tops[val] / dw_wdt->rate;
+               msec = (u64)tops[val] * MSEC_PER_SEC;
+               do_div(msec, dw_wdt->rate);
+               tout.msec = msec - ((u64)tout.sec * MSEC_PER_SEC);
+
+               /*
+                * Find a suitable place for the current TOP in the timeouts
+                * array so that the list is remained in the ascending order.
+                */
+               for (tidx = 0; tidx < val; ++tidx) {
+                       dst = &dw_wdt->timeouts[tidx];
+                       if (tout.sec > dst->sec || (tout.sec == dst->sec &&
+                           tout.msec >= dst->msec))
+                               continue;
+                       else
+                               swap(*dst, tout);
+               }
+
+               dw_wdt->timeouts[val] = tout;
+       }
+}
+
+static int dw_wdt_init_timeouts(struct dw_wdt *dw_wdt, struct device *dev)
+{
+       u32 data, of_tops[DW_WDT_NUM_TOPS];
+       const u32 *tops;
+       int ret;
+
+       /*
+        * Retrieve custom or fixed counter values depending on the
+        * WDT_USE_FIX_TOP flag found in the component specific parameters
+        * #1 register.
+        */
+       data = readl(dw_wdt->regs + WDOG_COMP_PARAMS_1_REG_OFFSET);
+       if (data & WDOG_COMP_PARAMS_1_USE_FIX_TOP) {
+               tops = dw_wdt_fix_tops;
+       } else {
+               ret = of_property_read_variable_u32_array(dev_of_node(dev),
+                       "snps,watchdog-tops", of_tops, DW_WDT_NUM_TOPS,
+                       DW_WDT_NUM_TOPS);
+               if (ret < 0) {
+                       dev_warn(dev, "No valid TOPs array specified\n");
+                       tops = dw_wdt_fix_tops;
+               } else {
+                       tops = of_tops;
+               }
+       }
+
+       /* Convert the specified TOPs into an array of watchdog timeouts. */
+       dw_wdt_handle_tops(dw_wdt, tops);
+       if (!dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1].sec) {
+               dev_err(dev, "No any valid TOP detected\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DW_WDT_DBGFS_REG(_name, _off) \
+{                                    \
+       .name = _name,                \
+       .offset = _off                \
+}
+
+static const struct debugfs_reg32 dw_wdt_dbgfs_regs[] = {
+       DW_WDT_DBGFS_REG("cr", WDOG_CONTROL_REG_OFFSET),
+       DW_WDT_DBGFS_REG("torr", WDOG_TIMEOUT_RANGE_REG_OFFSET),
+       DW_WDT_DBGFS_REG("ccvr", WDOG_CURRENT_COUNT_REG_OFFSET),
+       DW_WDT_DBGFS_REG("crr", WDOG_COUNTER_RESTART_REG_OFFSET),
+       DW_WDT_DBGFS_REG("stat", WDOG_INTERRUPT_STATUS_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param5", WDOG_COMP_PARAMS_5_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param4", WDOG_COMP_PARAMS_4_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param3", WDOG_COMP_PARAMS_3_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param2", WDOG_COMP_PARAMS_2_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param1", WDOG_COMP_PARAMS_1_REG_OFFSET),
+       DW_WDT_DBGFS_REG("version", WDOG_COMP_VERSION_REG_OFFSET),
+       DW_WDT_DBGFS_REG("type", WDOG_COMP_TYPE_REG_OFFSET)
+};
+
+static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt)
+{
+       struct device *dev = dw_wdt->wdd.parent;
+       struct debugfs_regset32 *regset;
+
+       regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+       if (!regset)
+               return;
+
+       regset->regs = dw_wdt_dbgfs_regs;
+       regset->nregs = ARRAY_SIZE(dw_wdt_dbgfs_regs);
+       regset->base = dw_wdt->regs;
+
+       dw_wdt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL);
+
+       debugfs_create_regset32("registers", 0444, dw_wdt->dbgfs_dir, regset);
+}
+
+static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt)
+{
+       debugfs_remove_recursive(dw_wdt->dbgfs_dir);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+
+static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt) {}
+static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt) {}
+
+#endif /* !CONFIG_DEBUG_FS */
+
 static int dw_wdt_drv_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -244,9 +562,18 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
        if (IS_ERR(dw_wdt->regs))
                return PTR_ERR(dw_wdt->regs);
 
-       dw_wdt->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(dw_wdt->clk))
-               return PTR_ERR(dw_wdt->clk);
+       /*
+        * Try to request the watchdog dedicated timer clock source. It must
+        * be supplied if asynchronous mode is enabled. Otherwise fallback
+        * to the common timer/bus clocks configuration, in which the very
+        * first found clock supply both timer and APB signals.
+        */
+       dw_wdt->clk = devm_clk_get(dev, "tclk");
+       if (IS_ERR(dw_wdt->clk)) {
+               dw_wdt->clk = devm_clk_get(dev, NULL);
+               if (IS_ERR(dw_wdt->clk))
+                       return PTR_ERR(dw_wdt->clk);
+       }
 
        ret = clk_prepare_enable(dw_wdt->clk);
        if (ret)
@@ -258,20 +585,64 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
                goto out_disable_clk;
        }
 
+       /*
+        * Request APB clock if device is configured with async clocks mode.
+        * In this case both tclk and pclk clocks are supposed to be specified.
+        * Alas we can't know for sure whether async mode was really activated,
+        * so the pclk phandle reference is left optional. If it couldn't be
+        * found we consider the device configured in synchronous clocks mode.
+        */
+       dw_wdt->pclk = devm_clk_get_optional(dev, "pclk");
+       if (IS_ERR(dw_wdt->pclk)) {
+               ret = PTR_ERR(dw_wdt->pclk);
+               goto out_disable_clk;
+       }
+
+       ret = clk_prepare_enable(dw_wdt->pclk);
+       if (ret)
+               goto out_disable_clk;
+
        dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
        if (IS_ERR(dw_wdt->rst)) {
                ret = PTR_ERR(dw_wdt->rst);
-               goto out_disable_clk;
+               goto out_disable_pclk;
+       }
+
+       /* Enable normal reset without pre-timeout by default. */
+       dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
+
+       /*
+        * Pre-timeout IRQ is optional, since some hardware may lack support
+        * of it. Note we must request rising-edge IRQ, since the lane is left
+        * pending either until the next watchdog kick event or up to the
+        * system reset.
+        */
+       ret = platform_get_irq_optional(pdev, 0);
+       if (ret > 0) {
+               ret = devm_request_irq(dev, ret, dw_wdt_irq,
+                                      IRQF_SHARED | IRQF_TRIGGER_RISING,
+                                      pdev->name, dw_wdt);
+               if (ret)
+                       goto out_disable_pclk;
+
+               dw_wdt->wdd.info = &dw_wdt_pt_ident;
+       } else {
+               if (ret == -EPROBE_DEFER)
+                       goto out_disable_pclk;
+
+               dw_wdt->wdd.info = &dw_wdt_ident;
        }
 
        reset_control_deassert(dw_wdt->rst);
 
+       ret = dw_wdt_init_timeouts(dw_wdt, dev);
+       if (ret)
+               goto out_disable_pclk;
+
        wdd = &dw_wdt->wdd;
-       wdd->info = &dw_wdt_ident;
        wdd->ops = &dw_wdt_ops;
-       wdd->min_timeout = 1;
-       wdd->max_hw_heartbeat_ms =
-               dw_wdt_top_in_seconds(dw_wdt, DW_WDT_MAX_TOP) * 1000;
+       wdd->min_timeout = dw_wdt_get_min_timeout(dw_wdt);
+       wdd->max_hw_heartbeat_ms = dw_wdt_get_max_timeout_ms(dw_wdt);
        wdd->parent = dev;
 
        watchdog_set_drvdata(wdd, dw_wdt);
@@ -284,7 +655,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
         * devicetree.
         */
        if (dw_wdt_is_enabled(dw_wdt)) {
-               wdd->timeout = dw_wdt_get_top(dw_wdt);
+               wdd->timeout = dw_wdt_get_timeout(dw_wdt);
                set_bit(WDOG_HW_RUNNING, &wdd->status);
        } else {
                wdd->timeout = DW_WDT_DEFAULT_SECONDS;
@@ -297,10 +668,15 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
 
        ret = watchdog_register_device(wdd);
        if (ret)
-               goto out_disable_clk;
+               goto out_disable_pclk;
+
+       dw_wdt_dbgfs_init(dw_wdt);
 
        return 0;
 
+out_disable_pclk:
+       clk_disable_unprepare(dw_wdt->pclk);
+
 out_disable_clk:
        clk_disable_unprepare(dw_wdt->clk);
        return ret;
@@ -310,8 +686,11 @@ static int dw_wdt_drv_remove(struct platform_device *pdev)
 {
        struct dw_wdt *dw_wdt = platform_get_drvdata(pdev);
 
+       dw_wdt_dbgfs_clear(dw_wdt);
+
        watchdog_unregister_device(&dw_wdt->wdd);
        reset_control_assert(dw_wdt->rst);
+       clk_disable_unprepare(dw_wdt->pclk);
        clk_disable_unprepare(dw_wdt->clk);
 
        return 0;
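
As a quick illustration of how the pre-timeout support added above is consumed, here is a minimal userspace sketch against the standard watchdog chardev API. It is a sketch only: the /dev/watchdog0 node name and the 10 s / 5 s values are assumptions, and WDIOC_SETPRETIMEOUT only has an effect if the pre-timeout IRQ was actually wired up for the device and the driver exposes a set_pretimeout op.

/* Sketch: device path and timeout values are illustrative assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
        int timeout = 10, pretimeout = 5;
        int fd = open("/dev/watchdog0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        ioctl(fd, WDIOC_SETTIMEOUT, &timeout);       /* full reset timeout */
        ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout); /* early-warning IRQ  */
        ioctl(fd, WDIOC_KEEPALIVE, 0);               /* kick the watchdog  */
        write(fd, "V", 1);                           /* magic close        */
        close(fd);
        return 0;
}

The same goes for the clock handling: with the split tclk/pclk lookup above, a board description would typically name the dedicated timer clock "tclk" (and, for async mode, the bus clock "pclk"), but both remain optional, so existing single-clock setups keep probing as before.
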
index 19eadac415c42dd381161d122eaf4299e712ca92..40ec1433f05de25d15cedaeaf2388b2cc8d0298c 100644 (file)
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_GENERIC_GPIO_H
 #define _ASM_GENERIC_GPIO_H
 
  */
 
 #ifndef ARCH_NR_GPIOS
-#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0
-#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
-#else
 #define ARCH_NR_GPIOS          512
 #endif
-#endif
 
 /*
  * "valid" GPIO numbers are nonnegative and may be passed to
index 1847a07842437965e1501fbad322fc10c623ea39..067af22fcd858013c9afa87e36f8181943d2b988 100644 (file)
 #define MARVELL_PHY_ID_88E1540         0x01410eb0
 #define MARVELL_PHY_ID_88E1545         0x01410ea0
 #define MARVELL_PHY_ID_88E3016         0x01410e60
+#define MARVELL_PHY_ID_88X2222         0x01410f10
+#define MARVELL_PHY_ID_88X2222R                0x014131b0
 #define MARVELL_PHY_ID_88X3310         0x002b09a0
 #define MARVELL_PHY_ID_88E2110         0x002b09b0
 
-/* These Ethernet switch families contain embedded PHYs, but they do
+/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
  * not have a model ID. So the switch driver traps reads to the ID2
  * register and returns the switch family ID
  */
-#define MARVELL_PHY_ID_88E6341_FAMILY  0x01410f41
-#define MARVELL_PHY_ID_88E6390_FAMILY  0x01410f90
+#define MARVELL_PHY_ID_88E6390         0x01410f90
 
 #define MARVELL_PHY_FAMILY_ID(id)      ((id) >> 4)
 
index be328c59389d56861f95aeb488860ed81ef19e0c..40cb44e32ce2cb14a0e039c2712ffe8331d2e7c4 100644 (file)
@@ -88,8 +88,8 @@ struct xdp_options {
 /* Pgoff for mmaping the rings */
 #define XDP_PGOFF_RX_RING                        0
 #define XDP_PGOFF_TX_RING               0x80000000
-#define XDP_UMEM_PGOFF_FILL_RING       0x100000000ULL
-#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
+#define XDP_UMEM_PGOFF_FILL_RING       0x10000000UL
+#define XDP_UMEM_PGOFF_COMPLETION_RING 0x18000000UL
 
 /* Masks for unaligned chunks mode */
 #define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
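
For context on how these pgoff values are consumed, here is a minimal userspace sketch that maps the UMEM fill ring of an already-created AF_XDP socket; error handling is omitted and the entry count is an arbitrary example. Whatever values the kernel defines here, userspace must be built against the same copy of if_xdp.h, since these constants are the mmap() page offsets the kernel dispatches on.

#include <linux/if_xdp.h>
#include <sys/mman.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283     /* not present in older libc headers */
#endif

/* xsk_fd is an AF_XDP socket; returns the mmap()ed fill ring area. */
void *map_fill_ring(int xsk_fd, int entries)
{
        struct xdp_mmap_offsets off;
        socklen_t optlen = sizeof(off);
        size_t len;

        setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
        getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);

        /* Fill ring entries are 64-bit UMEM addresses. */
        len = off.fr.desc + entries * sizeof(__u64);
        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                    xsk_fd, XDP_UMEM_PGOFF_FILL_RING);
}
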
index d1d7624cec4a6e18a9e2694d6c61183660c9df9f..c08660770371cc62285eae48fd50586b6b9aa83b 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3449,6 +3449,10 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
        if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
                return;
 
+#ifdef ARCH_WANTS_TLB_PREFETCH
+       tlb_prefetch((unsigned long)ac);
+#endif
+
        if (ac->avail < ac->limit) {
                STATS_INC_FREEHIT(cachep);
        } else {
index c8dacb4dda80c0d44548e81391bd4a65f276048b..6311788951a33e5d086763e07c77865f1e966bd6 100644 (file)
@@ -169,4 +169,11 @@ config SAMPLE_VFS
          as mount API and statx().  Note that this is restricted to the x86
          arch whilst it accesses system calls that aren't yet in all arches.
 
+config SAMPLE_BPF
+       bool "Build BPF sample code"
+       depends on HEADERS_INSTALL
+       help
+         Build samples of BPF filters using various methods of
+         BPF filter construction.
+
 endif # SAMPLES
index 7d6e4ca28d696ac539f1e690eee8d36c8635f917..3be686d2fa45c556447cea445f6547b5e5679484 100644 (file)
@@ -15,6 +15,7 @@ subdir-$(CONFIG_SAMPLE_PIDFD)         += pidfd
 obj-$(CONFIG_SAMPLE_QMI_CLIENT)                += qmi/
 obj-$(CONFIG_SAMPLE_RPMSG_CLIENT)      += rpmsg/
 subdir-$(CONFIG_SAMPLE_SECCOMP)                += seccomp
+subdir-$(CONFIG_SAMPLE_BPF)                    += bpf
 obj-$(CONFIG_SAMPLE_TRACE_EVENTS)      += trace_events/
 obj-$(CONFIG_SAMPLE_TRACE_PRINTK)      += trace_printk/
 obj-$(CONFIG_VIDEO_PCI_SKELETON)       += v4l/
index 6d1df7117e1170dddc347c1ffbfb024a4339cdc5..f055b4f7ee284c6f96cb0a4a90ec3773100b03a5 100644 (file)
@@ -112,64 +112,6 @@ hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS)
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
-always += sockex1_kern.o
-always += sockex2_kern.o
-always += sockex3_kern.o
-always += tracex1_kern.o
-always += tracex2_kern.o
-always += tracex3_kern.o
-always += tracex4_kern.o
-always += tracex5_kern.o
-always += tracex6_kern.o
-always += tracex7_kern.o
-always += sock_flags_kern.o
-always += test_probe_write_user_kern.o
-always += trace_output_kern.o
-always += tcbpf1_kern.o
-always += tc_l2_redirect_kern.o
-always += lathist_kern.o
-always += offwaketime_kern.o
-always += spintest_kern.o
-always += map_perf_test_kern.o
-always += test_overhead_tp_kern.o
-always += test_overhead_raw_tp_kern.o
-always += test_overhead_kprobe_kern.o
-always += parse_varlen.o parse_simple.o parse_ldabs.o
-always += test_cgrp2_tc_kern.o
-always += xdp1_kern.o
-always += xdp2_kern.o
-always += xdp_router_ipv4_kern.o
-always += test_current_task_under_cgroup_kern.o
-always += trace_event_kern.o
-always += sampleip_kern.o
-always += lwt_len_hist_kern.o
-always += xdp_tx_iptunnel_kern.o
-always += test_map_in_map_kern.o
-always += cookie_uid_helper_example.o
-always += tcp_synrto_kern.o
-always += tcp_rwnd_kern.o
-always += tcp_bufs_kern.o
-always += tcp_cong_kern.o
-always += tcp_iw_kern.o
-always += tcp_clamp_kern.o
-always += tcp_basertt_kern.o
-always += tcp_tos_reflect_kern.o
-always += tcp_dumpstats_kern.o
-always += xdp_redirect_kern.o
-always += xdp_redirect_map_kern.o
-always += xdp_redirect_cpu_kern.o
-always += xdp_monitor_kern.o
-always += xdp_rxq_info_kern.o
-always += xdp2skb_meta_kern.o
-always += syscall_tp_kern.o
-always += cpustat_kern.o
-always += xdp_adjust_tail_kern.o
-always += xdp_fwd_kern.o
-always += task_fd_query_kern.o
-always += xdp_sample_pkts_kern.o
-always += ibumad_kern.o
-always += hbm_out_kern.o
-always += hbm_edt_kern.o
 
 KBUILD_HOSTCFLAGS += -I$(objtree)/usr/include
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
index 8dadaa0fbb4397568ac28e0bbfac9862a86110e6..fa9a2ff4b409a789fdbc55f4f17f2a8a2d0ffaf3 100644 (file)
@@ -1,8 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0-only
 build := -f $(srctree)/tools/build/Makefile.build dir=. obj
 
+ifdef CROSS_COMPILE
+fixdep:
+else
 fixdep:
        $(Q)$(MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= $(OUTPUT)fixdep
+endif
 
 fixdep-clean:
        $(Q)$(MAKE) -C $(srctree)/tools/build clean
index be328c59389d56861f95aeb488860ed81ef19e0c..40cb44e32ce2cb14a0e039c2712ffe8331d2e7c4 100644 (file)
@@ -88,8 +88,8 @@ struct xdp_options {
 /* Pgoff for mmaping the rings */
 #define XDP_PGOFF_RX_RING                        0
 #define XDP_PGOFF_TX_RING               0x80000000
-#define XDP_UMEM_PGOFF_FILL_RING       0x100000000ULL
-#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
+#define XDP_UMEM_PGOFF_FILL_RING       0x10000000UL
+#define XDP_UMEM_PGOFF_COMPLETION_RING 0x18000000UL
 
 /* Masks for unaligned chunks mode */
 #define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48